mirror of
https://github.com/Azure/k8s-deploy.git
synced 2026-04-24 17:12:32 +08:00
Compare commits
262 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e17e3d873d | ||
|
|
2208360a18 | ||
|
|
37ab655aa8 | ||
|
|
4ce14be7f2 | ||
|
|
d1db10bbe0 | ||
|
|
9b1aca534a | ||
|
|
566b1027eb | ||
|
|
4bab0cb90d | ||
|
|
2c9e6e7762 | ||
|
|
104a7063f6 | ||
|
|
e7aa4f9d0c | ||
|
|
30dbc03366 | ||
|
|
f4bacf1216 | ||
|
|
49d0f2a6fd | ||
|
|
a4d35bd653 | ||
|
|
e202ed4d60 | ||
|
|
80628307ac | ||
|
|
332b91818e | ||
|
|
8bb8e3e420 | ||
|
|
f3086d990b | ||
|
|
01cfe404ef | ||
|
|
84e2095bf0 | ||
|
|
1ad0b3bc5b | ||
|
|
8f82d97be7 | ||
|
|
d9732d2f20 | ||
|
|
fc94f1c6e0 | ||
|
|
ac0cc3d225 | ||
|
|
cf2c9c0edd | ||
|
|
d206adcc7f | ||
|
|
68aff7a5a7 | ||
|
|
1748cb02b8 | ||
|
|
6aac2fd790 | ||
|
|
511707c4a0 | ||
|
|
b35cf6be4f | ||
|
|
07c26e70d3 | ||
|
|
0090ff3ba3 | ||
|
|
f67ee4df16 | ||
|
|
1cd48cb18d | ||
|
|
f754598e02 | ||
|
|
2842c5ae3c | ||
|
|
c384f1639a | ||
|
|
b497d9edf8 | ||
|
|
4f9a03ce42 | ||
|
|
0ca601cacf | ||
|
|
e237e65ac2 | ||
|
|
862964743d | ||
|
|
f647f23716 | ||
|
|
d703e5aec9 | ||
|
|
65b4b3bcfe | ||
|
|
790189df18 | ||
|
|
8e0af0340e | ||
|
|
16365ba04f | ||
|
|
233b2737fd | ||
|
|
a4659bf19e | ||
|
|
e73e618811 | ||
|
|
1bc669b02c | ||
|
|
a6356b08f6 | ||
|
|
c0773c9877 | ||
|
|
bf3422cff9 | ||
|
|
c93bf5dafe | ||
|
|
4755eabeba | ||
|
|
7a954ab84c | ||
|
|
7395c391d9 | ||
|
|
f17d8559ed | ||
|
|
b832d899e2 | ||
|
|
6b432c15b6 | ||
|
|
ac0b58c9a5 | ||
|
|
e207ec429b | ||
|
|
901a2fa489 | ||
|
|
e3266b84c0 | ||
|
|
cc1e193d23 | ||
|
|
b9529f8427 | ||
|
|
5b189c0bf7 | ||
|
|
fd0d4accb4 | ||
|
|
6fd713ca6a | ||
|
|
1feba4ce5c | ||
|
|
1baea844ac | ||
|
|
76a7e4f5b5 | ||
|
|
ba7d4d1daf | ||
|
|
e1c4475ce4 | ||
|
|
1a3dd6ebf8 | ||
|
|
bba7c16f36 | ||
|
|
fbde009ab5 | ||
|
|
f09b591a1a | ||
|
|
33d7498881 | ||
|
|
7004a1f114 | ||
|
|
648274edaf | ||
|
|
b2568065ec | ||
|
|
ac1831102a | ||
|
|
3b11c64ce0 | ||
|
|
99510dff95 | ||
|
|
5dfb05d024 | ||
|
|
68cb22352a | ||
|
|
67def0664b | ||
|
|
76046dd320 | ||
|
|
8b4e45d97b | ||
|
|
2c1455e4a0 | ||
|
|
c53a656438 | ||
|
|
312cb89665 | ||
|
|
c171eee779 | ||
|
|
adfb4aae0b | ||
|
|
0a30921563 | ||
|
|
3fc12aea57 | ||
|
|
8e9d5d375a | ||
|
|
824feb5b2b | ||
|
|
c51f8ea3d8 | ||
|
|
6c8a34f5c5 | ||
|
|
74f99ab42e | ||
|
|
09a8725f44 | ||
|
|
d9f52cdb50 | ||
|
|
bc5b13e4ce | ||
|
|
291044bf75 | ||
|
|
059e5441ef | ||
|
|
bb318e252f | ||
|
|
3d107b044d | ||
|
|
d1acc1a47b | ||
|
|
bf768b3109 | ||
|
|
d3b3950a9c | ||
|
|
4b49af4189 | ||
|
|
0c838316d4 | ||
|
|
e5725dfe9f | ||
|
|
b34f3e7f18 | ||
|
|
10d196d204 | ||
|
|
df58fb461e | ||
|
|
a999ffcd6c | ||
|
|
00795b0b56 | ||
|
|
d565a17533 | ||
|
|
1811836de2 | ||
|
|
10d9433b15 | ||
|
|
52dfbef986 | ||
|
|
074d812926 | ||
|
|
e10b599478 | ||
|
|
93550c22f0 | ||
|
|
1fea8281df | ||
|
|
1b1edcdfc7 | ||
|
|
8cbe18c310 | ||
|
|
8efbc8ba92 | ||
|
|
699a70732d | ||
|
|
a1d061da9d | ||
|
|
7c36b75ebe | ||
|
|
2f2901757b | ||
|
|
4aba7c26f3 | ||
|
|
d6508445a1 | ||
|
|
a462095a3c | ||
|
|
e52890db9e | ||
|
|
dd4bbd13a5 | ||
|
|
ecb488266d | ||
|
|
756cc0a511 | ||
|
|
dcaec012e2 | ||
|
|
7dae909398 | ||
|
|
e8a841df59 | ||
|
|
da1e907ad7 | ||
|
|
8ce7d1dcdd | ||
|
|
b9a9965750 | ||
|
|
47445fb82f | ||
|
|
c875a14bde | ||
|
|
58ba3f0665 | ||
|
|
e9693a7cdd | ||
|
|
a6cfc31f7a | ||
|
|
e917b5a666 | ||
|
|
57d0489e1f | ||
|
|
d64c205796 | ||
|
|
c8f050230d | ||
|
|
a0b037b13e | ||
|
|
7fd0e52a8b | ||
|
|
659bbb3802 | ||
|
|
3c0579b484 | ||
|
|
b11eda66ea | ||
|
|
c117b29f9e | ||
|
|
01a65512ea | ||
|
|
531cfdcc3d | ||
|
|
0b5795551a | ||
|
|
bb0278db72 | ||
|
|
71e93a71d4 | ||
|
|
19d66d6bdb | ||
|
|
72a09f4051 | ||
|
|
a17f35ba63 | ||
|
|
7b11ddb1d5 | ||
|
|
ecec5912ba | ||
|
|
dcd9bc6b1a | ||
|
|
976c5c4981 | ||
|
|
4ebf668e6f | ||
|
|
15920eb094 | ||
|
|
507f2d4fc7 | ||
|
|
06a06b13b9 | ||
|
|
fa093f2922 | ||
|
|
aabcfcba3e | ||
|
|
fd893fd074 | ||
|
|
659e414483 | ||
|
|
1e490c6238 | ||
|
|
75cb5d47f7 | ||
|
|
bcdb90f36f | ||
|
|
ee3c5aed75 | ||
|
|
961b316a51 | ||
|
|
4810ff9a3e | ||
|
|
ca8d2604ac | ||
|
|
5cbd4acaca | ||
|
|
18ff12030c | ||
|
|
8898d95f4f | ||
|
|
33608d18f7 | ||
|
|
acd12a4705 | ||
|
|
81557b8633 | ||
|
|
e1b9842236 | ||
|
|
26cb2cdb5f | ||
|
|
5ebbfbbefe | ||
|
|
ac49626466 | ||
|
|
d332939666 | ||
|
|
2577009bcb | ||
|
|
a58ad23e7f | ||
|
|
ee4b5d33e0 | ||
|
|
625898f6eb | ||
|
|
8d257fed50 | ||
|
|
4f6b70e29a | ||
|
|
202bacc71b | ||
|
|
2c09684db9 | ||
|
|
282a81e1fc | ||
|
|
ce7c8f066f | ||
|
|
25ded46b9d | ||
|
|
56e4abca5e | ||
|
|
4bd69f56a9 | ||
|
|
04921d7d06 | ||
|
|
51b95a5ca2 | ||
|
|
49257c6f33 | ||
|
|
895952654c | ||
|
|
0fd84a1b0d | ||
|
|
d35174fe93 | ||
|
|
f80ed6c460 | ||
|
|
72bc167726 | ||
|
|
21d3af2857 | ||
|
|
a80355209a | ||
|
|
92589546e8 | ||
|
|
b9146889f3 | ||
|
|
29552c24a9 | ||
|
|
d394c2bba2 | ||
|
|
e643530d85 | ||
|
|
a4ebc55d69 | ||
|
|
15e04b8f7e | ||
|
|
b4bc3003e8 | ||
|
|
c9b54fdae2 | ||
|
|
6773ba4167 | ||
|
|
f1898e0618 | ||
|
|
11a48a4e1d | ||
|
|
6da51b24cd | ||
|
|
99c4423993 | ||
|
|
eb22293c53 | ||
|
|
e4bc5e8873 | ||
|
|
a3bb31ec16 | ||
|
|
e7d77ef817 | ||
|
|
fdacb8e073 | ||
|
|
4c0e9cfbff | ||
|
|
305d883d74 | ||
|
|
4a2857109f | ||
|
|
0527303033 | ||
|
|
31a423057f | ||
|
|
ad0b77acbe | ||
|
|
2a3bf64395 | ||
|
|
26d4801e5e | ||
|
|
637684d130 | ||
|
|
b635499b62 | ||
|
|
e6399ba4e7 | ||
|
|
6687f5458e | ||
|
|
be01c3f321 |
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
@ -0,0 +1 @@
|
||||
* @Azure/cloud-native-github-action-owners
|
||||
36
.github/ISSUE_TEMPLATE/bugReportForm.yml
vendored
Normal file
36
.github/ISSUE_TEMPLATE/bugReportForm.yml
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
name: Bug Report
|
||||
description: File a bug report specifying all inputs you provided for the action, we will respond to this thread with any questions.
|
||||
title: 'Bug: '
|
||||
labels: ['bug', 'triage']
|
||||
assignees: '@Azure/aks-atlanta'
|
||||
body:
|
||||
- type: textarea
|
||||
id: What-happened
|
||||
attributes:
|
||||
label: What happened?
|
||||
description: Tell us what happened and how is it different from the expected?
|
||||
placeholder: Tell us what you see!
|
||||
validations:
|
||||
required: true
|
||||
- type: checkboxes
|
||||
id: Version
|
||||
attributes:
|
||||
label: Version
|
||||
options:
|
||||
- label: I am using the latest version
|
||||
required: true
|
||||
- type: input
|
||||
id: Runner
|
||||
attributes:
|
||||
label: Runner
|
||||
description: What runner are you using?
|
||||
placeholder: Mention the runner info (self-hosted, operating system)
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: Logs
|
||||
attributes:
|
||||
label: Relevant log output
|
||||
description: Run in debug mode for the most verbose logs. Please feel free to attach a screenshot of the logs
|
||||
validations:
|
||||
required: true
|
||||
6
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
6
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: GitHub Action "k8s-deploy" Support
|
||||
url: https://github.com/Azure/k8s-deploy
|
||||
security: https://github.com/Azure/k8s-deploy/blob/main/SECURITY.md
|
||||
about: Please ask and answer questions here.
|
||||
13
.github/ISSUE_TEMPLATE/featureRequestForm.yml
vendored
Normal file
13
.github/ISSUE_TEMPLATE/featureRequestForm.yml
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
name: Feature Request
|
||||
description: File a Feature Request form, we will respond to this thread with any questions.
|
||||
title: 'Feature Request: '
|
||||
labels: ['Feature']
|
||||
assignees: '@Azure/aks-atlanta'
|
||||
body:
|
||||
- type: textarea
|
||||
id: Feature_request
|
||||
attributes:
|
||||
label: Feature request
|
||||
description: Provide example functionality and links to relevant docs
|
||||
validations:
|
||||
required: true
|
||||
53
.github/actions/minikube-setup/action.yml
vendored
Normal file
53
.github/actions/minikube-setup/action.yml
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
name: 'Setup Minikube Test Environment'
|
||||
description: 'Common setup steps for minikube integration tests'
|
||||
inputs:
|
||||
install-smi:
|
||||
description: 'Install Linkerd SMI for service mesh tests'
|
||||
required: false
|
||||
default: 'false'
|
||||
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
rm -rf node_modules/
|
||||
npm install
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: npm run build
|
||||
|
||||
- name: Install conntrack
|
||||
shell: bash
|
||||
run: sudo apt-get install -y conntrack
|
||||
|
||||
- uses: Azure/setup-kubectl@776406bce94f63e41d621b960d78ee25c8b76ede # v4.0.1
|
||||
name: Install Kubectl
|
||||
|
||||
- id: setup-minikube
|
||||
name: Setup Minikube
|
||||
uses: medyagh/setup-minikube@e9e035a86bbc3caea26a450bd4dbf9d0c453682e # v0.0.21
|
||||
with:
|
||||
minikube-version: 1.37.0
|
||||
kubernetes-version: 1.31.0
|
||||
driver: 'docker'
|
||||
|
||||
- name: Install Linkerd and SMI
|
||||
if: inputs.install-smi == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSfL https://run.linkerd.io/install-edge | sh
|
||||
export PATH=$PATH:/home/runner/.linkerd2/bin
|
||||
curl -sL https://linkerd.github.io/linkerd-smi/install | sh
|
||||
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.0/standard-install.yaml
|
||||
|
||||
linkerd install --crds | kubectl apply -f -
|
||||
linkerd install --set proxyInit.runAsRoot=true | kubectl apply -f -
|
||||
linkerd smi install | kubectl apply -f -
|
||||
|
||||
- uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # 6.1.0
|
||||
name: Install Python
|
||||
with:
|
||||
python-version: '3.x'
|
||||
18
.github/dependabot.yml
vendored
Normal file
18
.github/dependabot.yml
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: npm
|
||||
directory: /
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
actions:
|
||||
patterns:
|
||||
- '*'
|
||||
- package-ecosystem: github-actions
|
||||
directory: .github/workflows
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
actions:
|
||||
patterns:
|
||||
- '*'
|
||||
49
.github/workflows/codeql.yml
vendored
Normal file
49
.github/workflows/codeql.yml
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
name: 'Code scanning - action'
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
schedule:
|
||||
- cron: '0 19 * * 0'
|
||||
|
||||
jobs:
|
||||
CodeQL-Build:
|
||||
# CodeQL runs on ubuntu-latest and windows-latest
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
security-events: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd #v6.0.2
|
||||
with:
|
||||
# We must fetch at least the immediate parents so that if this is
|
||||
# a pull request then we can checkout the head.
|
||||
fetch-depth: 2
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 #v3.29.5
|
||||
# Override language selection by uncommenting this and choosing your languages
|
||||
# with:
|
||||
# languages: go, javascript, csharp, python, cpp, java
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 #v3.29.5
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
|
||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||
# and modify them (or add more) to build your code if your project
|
||||
# uses a compiled language
|
||||
|
||||
#- run: |
|
||||
# make bootstrap
|
||||
# make release
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 #v3.29.5
|
||||
35
.github/workflows/defaultLabels.yml
vendored
Normal file
35
.github/workflows/defaultLabels.yml
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
name: setting-default-labels
|
||||
|
||||
# Controls when the action will run.
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0/3 * * *'
|
||||
|
||||
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
|
||||
jobs:
|
||||
build:
|
||||
# The type of runner that the job will run on
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
# Steps represent a sequence of tasks that will be executed as part of the job
|
||||
steps:
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
name: Setting issue as idle
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'This issue is idle because it has been open for 14 days with no activity.'
|
||||
stale-issue-label: 'idle'
|
||||
days-before-stale: 14
|
||||
days-before-close: -1
|
||||
operations-per-run: 100
|
||||
exempt-issue-labels: 'backlog'
|
||||
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
name: Setting PR as idle
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-pr-message: 'This PR is idle because it has been open for 14 days with no activity.'
|
||||
stale-pr-label: 'idle'
|
||||
days-before-stale: 14
|
||||
days-before-close: -1
|
||||
operations-per-run: 100
|
||||
18
.github/workflows/prettify-code.yml
vendored
Normal file
18
.github/workflows/prettify-code.yml
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
name: 'Run prettify'
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
prettier:
|
||||
name: Prettier Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: install deps
|
||||
run: npm install
|
||||
|
||||
- name: Enforce Prettier
|
||||
run: npm run format-check
|
||||
18
.github/workflows/release-pr.yml
vendored
Normal file
18
.github/workflows/release-pr.yml
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
name: Release Project
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- CHANGELOG.md
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
permissions:
|
||||
actions: read
|
||||
contents: write
|
||||
uses: Azure/action-release-workflows/.github/workflows/release_js_project.yaml@v1
|
||||
with:
|
||||
changelogPath: ./CHANGELOG.md
|
||||
52
.github/workflows/run-integration-tests-basic.yml
vendored
Normal file
52
.github/workflows/run-integration-tests-basic.yml
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
name: Minikube Integration Tests - basic
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Run Minikube Integration Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for pod
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
test/integration/manifests/manifest_test_dir/test.yml
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments and services were created
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_basic selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_basic selectorLabels=app:nginx
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment3 containerName=nginx:1.14.2 labels=app:nginx3,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_basic selectorLabels=app:nginx3
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service3 labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_basic selectorLabels=app:nginx3
|
||||
156
.github/workflows/run-integration-tests-bluegreen-ingress.yml
vendored
Normal file
156
.github/workflows/run-integration-tests-bluegreen-ingress.yml
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
name: Minikube Integration Tests - blue-green ingress
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Blue-Green Ingress Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service-green' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment-green' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'nginx-ingress' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for ingress
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments, services and ingresses were created with green labels and original tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service-green,unrouted-service
|
||||
|
||||
- name: Executing promote action for ingress
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments, services and ingresses were created with none labels after first promote
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service,unrouted-service
|
||||
|
||||
- name: Executing second deploy action for ingress with new tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments (with new tag), services and ingresses were created with green labels after deploy, and old deployment persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:latest labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service-green,unrouted-service
|
||||
|
||||
- name: Executing second promote action for ingress now using new image tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments, services and ingresses were created with none labels after promote for new tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service,unrouted-service
|
||||
|
||||
- name: Executing deploy action for ingress to be rejected using old tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: deploy
|
||||
|
||||
- name: Checking if new deployments (with old tag), services and ingresses were created with green labels after deploy, and old deployment (with latest tag) persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service-green,unrouted-service
|
||||
|
||||
- name: Executing reject action for ingress to reject new deployment with 1.14.2 tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress
|
||||
action: reject
|
||||
|
||||
# MAY BE USEFUL TO ADD AN ANTI-CHECK - CHECK TO MAKE SURE CERTAIN OBJECTS DON'T EXIST
|
||||
- name: Checking if deployments, services and ingresses were created with none labels and latest tag after reject
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_ingress selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Ingress name=nginx-ingress ingressServices=nginx-service,unrouted-service
|
||||
|
||||
- name: Cleaning up current set up
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment' ${{ env.NAMESPACE }}
|
||||
|
||||
- if: ${{ always() }}
|
||||
name: Delete created namespace
|
||||
run: kubectl delete ns ${{ env.NAMESPACE }}
|
||||
143
.github/workflows/run-integration-tests-bluegreen-service.yml
vendored
Normal file
143
.github/workflows/run-integration-tests-bluegreen-service.yml
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
name: Minikube Integration Tests - blue-green service
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Blue-Green Service Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for service
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments and services were created with green labels and original tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
- name: Executing promote action for service
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments and services were created with none labels after first promote
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
- name: Executing second deploy action for service with new tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments (with new tag) and services were created with green labels after deploy, and old deployment persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:latest labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
- name: Executing second promote action for service now using new image tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments and services were created with none labels after promote for new tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
- name: Executing deploy action for service to be rejected using old tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: deploy
|
||||
|
||||
- name: Checking if new deployments (with old tag) and services were created with green labels after deploy, and old deployment (with latest tag) persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:green,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
- name: Executing reject action for service to reject new deployment with 1.14.2 tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: service
|
||||
action: reject
|
||||
|
||||
# MAY BE USEFUL TO ADD AN ANTI-CHECK - CHECK TO MAKE SURE CERTAIN OBJECTS DON'T EXIST
|
||||
- name: Checking if deployments and services were created with none labels and latest tag after reject
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=k8s.deploy.color:None,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_service selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
- name: Cleaning up current set up
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment' ${{ env.NAMESPACE }}
|
||||
|
||||
- if: ${{ always() }}
|
||||
name: Delete created namespace
|
||||
run: kubectl delete ns ${{ env.NAMESPACE }}
|
||||
173
.github/workflows/run-integration-tests-bluegreen-smi.yml
vendored
Normal file
173
.github/workflows/run-integration-tests-bluegreen-smi.yml
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
name: Minikube Integration Tests - blue-green SMI
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Blue-Green SMI Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
with:
|
||||
install-smi: 'true'
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'TrafficSplit' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for smi
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments, services, and ts objects were created with green labels and original tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:green selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:0,nginx-service-green:100
|
||||
|
||||
- name: Executing promote action for smi
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: promote
|
||||
|
||||
# another good place for anti-test - ensure old deps are deleted after promote
|
||||
- name: Checking if deployments, services, and ts objects were created with none labels after first promote
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:100,nginx-service-green:0
|
||||
|
||||
- name: Executing second deploy action for smi with new tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments (with new tag) and services were created with green labels after deploy, and old deployment persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:latest labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:green selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:0,nginx-service-green:100
|
||||
|
||||
- name: Executing second promote action for smi now using new image tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments and services were created with none labels after promote for new tag, ts is stable
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:100,nginx-service-green:0
|
||||
|
||||
- name: Executing deploy action for smi to be rejected using old tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if new deployments (with old tag) and services were created with green labels after deploy, and old deployment (with latest tag) persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-green containerName=nginx:1.14.2 labels=k8s.deploy.color:green,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-green labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:green selectorLabels=app:nginx,k8s.deploy.color:green
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:0,nginx-service-green:100
|
||||
|
||||
- name: Executing reject action for smi to reject new deployment with 1.14.2 tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/blue-green/test-service.yml
|
||||
strategy: blue-green
|
||||
route-method: smi
|
||||
action: reject
|
||||
|
||||
# MAY BE USEFUL TO ADD AN ANTI-CHECK - CHECK TO MAKE SURE CERTAIN OBJECTS DON'T EXIST
|
||||
- name: Checking if deployments and services were created with none labels and latest tag after reject
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=k8s.deploy.color:None,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_blue-green_SMI,k8s.deploy.color:None selectorLabels=app:nginx,k8s.deploy.color:None
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-trafficsplit tsServices=nginx-service-stable:100,nginx-service-green:0
|
||||
|
||||
- name: Cleaning up current set up
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment' ${{ env.NAMESPACE }}
|
||||
|
||||
- if: ${{ always() }}
|
||||
name: Delete created namespace
|
||||
run: kubectl delete ns ${{ env.NAMESPACE }}
|
||||
152
.github/workflows/run-integration-tests-canary-pod.yml
vendored
Normal file
152
.github/workflows/run-integration-tests-canary-pod.yml
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
name: Minikube Integration Tests - canary pod
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Canary Pod Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for pod
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments and services were created with canary labels and original tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:1.14.2 labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx,workflow/version:canary
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Executing promote action for pod
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: promote
|
||||
|
||||
# another good place for anti-test - ensure old deps are deleted after promote
|
||||
- name: Checking if deployments and services were created with stable labels after first promote
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Executing second deploy action for pod with new tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments (with new tag) and services were created with canary labels after deploy, and old deployment persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:latest labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx,workflow/version:canary
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Executing second promote action for pod now using new image tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments and services were created with stable labels after promote for new tag, ts is stable
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Executing deploy action for pod to be rejected using old tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: deploy
|
||||
|
||||
- name: Checking if new deployments (with old tag) and services were created with canary and baseline labels after deploy, and stable deployment (with latest tag) persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:1.14.2 labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx,workflow/version:canary
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Executing reject action for pod to reject new deployment with 1.14.2 tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: pod
|
||||
action: reject
|
||||
|
||||
# MAY BE USEFUL TO ADD AN ANTI-CHECK - CHECK TO MAKE SURE CERTAIN OBJECTS DON'T EXIST
|
||||
- name: Checking if deployments and services were created with stable labels and latest tag after reject
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:latest labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_pod selectorLabels=app:nginx
|
||||
|
||||
- name: Cleaning up current set up
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'nginx-service' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'nginx-deployment' ${{ env.NAMESPACE }}
|
||||
|
||||
- if: ${{ always() }}
|
||||
name: Delete created namespace
|
||||
run: kubectl delete ns ${{ env.NAMESPACE }}
|
||||
185
.github/workflows/run-integration-tests-canary-smi.yml
vendored
Normal file
185
.github/workflows/run-integration-tests-canary-smi.yml
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
name: Minikube Integration Tests - canary SMI
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Canary SMI Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
with:
|
||||
install-smi: 'true'
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'TrafficSplit' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for smi
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments, services, and ts objects were created with canary labels and original tag
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:1.14.2 labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:canary
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-canary labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:canary selectorLabels=app:nginx,workflow/version:canary
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:0,nginx-service-canary:1000,nginx-service-baseline:0
|
||||
|
||||
- name: Executing promote action for smi
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: promote
|
||||
|
||||
# another good place for anti-test - ensure old deps are deleted after promote
|
||||
- name: Checking if deployments, services, and ts objects were created with stable labels after first promote
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-stable containerName=nginx:1.14.2 labels=workflow/version:stable,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:stable selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:1000,nginx-service-canary:0,nginx-service-baseline:0
|
||||
|
||||
- name: Executing second deploy action for smi with new tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments (with new tag) and services were created with canary labels after deploy, and old deployment persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-stable containerName=nginx:1.14.2 labels=workflow/version:stable,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:stable
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:latest labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:canary
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-baseline containerName=nginx:1.14.2 labels=workflow/version:baseline,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:baseline
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:stable selectorLabels=app:nginx,workflow/version:stable
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-canary labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:canary selectorLabels=app:nginx,workflow/version:canary
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-baseline labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:baseline selectorLabels=app:nginx,workflow/version:baseline
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:500,nginx-service-canary:250,nginx-service-baseline:250
|
||||
|
||||
- name: Executing second promote action for smi now using new image tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:latest
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: promote
|
||||
|
||||
- name: Checking if deployments and services were created with stable labels after promote for new tag, ts is stable
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-stable containerName=nginx:latest labels=workflow/version:stable,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:stable selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:1000,nginx-service-canary:0,nginx-service-baseline:0
|
||||
|
||||
- name: Executing deploy action for smi to be rejected using old tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: deploy
|
||||
|
||||
- name: Checking if new deployments (with old tag) and services were created with canary and baseline labels after deploy, and stable deployment (with latest tag) persists
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-canary containerName=nginx:1.14.2 labels=workflow/version:canary,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:canary
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-baseline containerName=nginx:latest labels=workflow/version:baseline,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:baseline
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-stable containerName=nginx:latest labels=workflow/version:stable,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:stable selectorLabels=app:nginx,workflow/version:stable
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-baseline labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:baseline selectorLabels=app:nginx,workflow/version:baseline
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-canary labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:canary selectorLabels=app:nginx,workflow/version:canary
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:500,nginx-service-canary:250,nginx-service-baseline:250
|
||||
|
||||
- name: Executing reject action for smi to reject new deployment with 1.14.2 tag
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
strategy: canary
|
||||
percentage: 50
|
||||
traffic-split-method: smi
|
||||
action: reject
|
||||
|
||||
# MAY BE USEFUL TO ADD AN ANTI-CHECK - CHECK TO MAKE SURE CERTAIN OBJECTS DON'T EXIST
|
||||
- name: Checking if deployments and services were created with stable labels and latest tag after reject
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment-stable containerName=nginx:latest labels=workflow/version:stable,app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service-stable labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_canary_SMI,workflow/version:stable selectorLabels=app:nginx,workflow/version:stable
|
||||
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=TrafficSplit name=nginx-service-workflow-rollout tsServices=nginx-service-stable:1000,nginx-service-canary:0,nginx-service-baseline:0
|
||||
|
||||
- name: Cleaning up current set up
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'TrafficSplit' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- if: ${{ always() }}
|
||||
name: Delete created namespace
|
||||
run: kubectl delete ns ${{ env.NAMESPACE }}
|
||||
113
.github/workflows/run-integration-tests-namespace-optional.yml
vendored
Normal file
113
.github/workflows/run-integration-tests-namespace-optional.yml
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
# This workflow is inspired by `run-integration-tests-bluegreen-ingress.yml` and introduces namespace-specific testing for manifests.
|
||||
# It ensures deployments respect manifest-defined namespaces and tests deployments to multiple namespaces.
|
||||
name: Minikube Integration Tests - Namespace Optional
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Namespace Optional Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE1: integration-test-namespace1-${{ github.run_id }}
|
||||
NAMESPACE2: integration-test-namespace2-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespaces for tests
|
||||
run: |
|
||||
kubectl create ns ${{ env.NAMESPACE1 }}
|
||||
kubectl create ns ${{ env.NAMESPACE2 }}
|
||||
kubectl create ns test-namespace
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE1 }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE2 }}
|
||||
|
||||
# This tests whether the deployment respects the namespace defined in the manifest instead of defaulting to "default" when namespace is not provided.
|
||||
- name: Test - Handles namespace correctly based on manifest
|
||||
uses: ./
|
||||
with:
|
||||
images: nginx
|
||||
manifests: |
|
||||
test/integration/manifests/test_with_ns.yaml
|
||||
test/integration/manifests/test_no_ns.yaml
|
||||
action: deploy # Deploys manifests to specified namespaces or uses the namespace defined in the manifest
|
||||
|
||||
- name: Verify Deployment - test_with_ns.yaml (test-namespace)
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py \
|
||||
namespace=test-namespace \
|
||||
kind=Deployment \
|
||||
name=test-deployment \
|
||||
containerName=nginx \
|
||||
labels=app:test-app,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_Namespace_Optional \
|
||||
selectorLabels=app:test-app
|
||||
|
||||
- name: Verify Deployment - test_no_ns.yaml (default namespace)
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py \
|
||||
namespace=default \
|
||||
kind=Deployment \
|
||||
name=test-deployment-no-ns \
|
||||
containerName=nginx \
|
||||
labels=app:test-app,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_Namespace_Optional \
|
||||
selectorLabels=app:test-app
|
||||
|
||||
# This tests whether the deployment works when a file is deployed to two different provided namespaces.
|
||||
- name: Test - Deploys the resource to namespace1
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE1 }}
|
||||
images: nginx
|
||||
manifests: |
|
||||
test/integration/manifests/test_no_ns.yaml
|
||||
action: deploy
|
||||
|
||||
- name: Test - Deploys the resource to namespace2
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE2 }}
|
||||
images: nginx
|
||||
manifests: |
|
||||
test/integration/manifests/test_no_ns.yaml
|
||||
action: deploy
|
||||
|
||||
- name: Verify Deployments in NAMESPACE1 & NAMESPACE2
|
||||
run: |
|
||||
for ns in ${{ env.NAMESPACE1 }} ${{ env.NAMESPACE2 }}; do
|
||||
python test/integration/k8s-deploy-test.py \
|
||||
namespace=$ns \
|
||||
kind=Deployment \
|
||||
name=test-deployment-no-ns \
|
||||
containerName=nginx \
|
||||
labels=app:test-app,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_Namespace_Optional \
|
||||
selectorLabels=app:test-app
|
||||
done
|
||||
|
||||
- name: Cleanup
|
||||
run: |
|
||||
echo "Cleaning up resources..."
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'test-deployment' test-namespace
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'test-deployment-no-ns' ${{ env.NAMESPACE1 }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'test-deployment-no-ns' ${{ env.NAMESPACE2 }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'test-deployment-no-ns' default
|
||||
|
||||
kubectl delete ns ${{ env.NAMESPACE1 }}
|
||||
kubectl delete ns ${{ env.NAMESPACE2 }}
|
||||
kubectl delete ns test-namespace
|
||||
rm -rf test_with_ns.yaml test_no_ns.yaml
|
||||
85
.github/workflows/run-integration-tests-private.yml
vendored
Normal file
85
.github/workflows/run-integration-tests-private.yml
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
name: Cluster Integration Tests - private cluster
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Run Minikube Integration Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
rm -rf node_modules/
|
||||
npm install
|
||||
|
||||
- name: Build
|
||||
run: npm run build
|
||||
|
||||
- name: Azure login
|
||||
uses: azure/login@v3.0.0
|
||||
with:
|
||||
client-id: ${{ secrets.AZURE_CLIENT_ID }}
|
||||
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
|
||||
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
|
||||
|
||||
- uses: Azure/setup-kubectl@829323503d1be3d00ca8346e5391ca0b07a9ab0d # v5.1.0
|
||||
name: Install Kubectl
|
||||
|
||||
- name: Create private AKS cluster and set context
|
||||
run: |
|
||||
set +x
|
||||
# create cluster
|
||||
az group create --location eastus2 --name ${{ env.NAMESPACE }}
|
||||
az aks create --name ${{ env.NAMESPACE }} --resource-group ${{ env.NAMESPACE }} --enable-private-cluster --generate-ssh-keys
|
||||
az aks get-credentials --resource-group ${{ env.NAMESPACE }} --name ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: |
|
||||
az aks command invoke --resource-group ${{ env.NAMESPACE }} --name ${{ env.NAMESPACE }} --command "kubectl create ns ${{ env.NAMESPACE }}"
|
||||
|
||||
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # 6.2.0
|
||||
name: Install Python
|
||||
with:
|
||||
python-version: '3.x'
|
||||
|
||||
- name: Executing deploy action for pod
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
test/integration/manifests/test2.yml
|
||||
action: deploy
|
||||
private-cluster: true
|
||||
resource-group: ${{ env.NAMESPACE }}
|
||||
name: ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Checking if deployments and services were created
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py private=${{ env.NAMESPACE }} namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Cluster_Integration_Tests_-_private_cluster selectorLabels=app:nginx
|
||||
python test/integration/k8s-deploy-test.py private=${{ env.NAMESPACE }} namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Cluster_Integration_Tests_-_private_cluster selectorLabels=app:nginx
|
||||
|
||||
python test/integration/k8s-deploy-test.py private=${{ env.NAMESPACE }} namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment2 containerName=nginx:1.14.2 labels=app:nginx2,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Cluster_Integration_Tests_-_private_cluster selectorLabels=app:nginx2
|
||||
python test/integration/k8s-deploy-test.py private=${{ env.NAMESPACE }} namespace=${{ env.NAMESPACE }} kind=Service name=nginx-service2 labels=workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Cluster_Integration_Tests_-_private_cluster selectorLabels=app:nginx2
|
||||
|
||||
- name: Clean up AKS cluster
|
||||
if: ${{ always() }}
|
||||
run: |
|
||||
echo "deleting AKS cluster and resource group"
|
||||
az aks delete --yes --resource-group ${{ env.NAMESPACE }} --name ${{ env.NAMESPACE }}
|
||||
az group delete --resource-group ${{ env.NAMESPACE }} --yes
|
||||
65
.github/workflows/run-integration-tests-resource-annotation.yml
vendored
Normal file
65
.github/workflows/run-integration-tests-resource-annotation.yml
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
name: Minikube Integration Tests - resource annotation
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
run-integration-test:
|
||||
name: Resource Annotation Tests
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
KUBECONFIG: /home/runner/.kube/config
|
||||
NAMESPACE: test-${{ github.run_id }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: ./.github/actions/minikube-setup
|
||||
name: Setup Minikube Environment
|
||||
timeout-minutes: 5
|
||||
|
||||
- name: Create namespace to run tests
|
||||
run: kubectl create ns ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Cleaning any previously created items
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Service' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
python test/integration/k8s-deploy-delete.py 'Ingress' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for pod with resource annotation enabled by default
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
action: deploy
|
||||
|
||||
- name: Checking if deployments is created with additional resource annotation
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 labels=app:nginx,workflow:actions.github.com-k8s-deploy,workflowFriendlyName:Minikube_Integration_Tests_-_resource_annotation selectorLabels=app:nginx annotations=actions.github.com/k8s-deploy,deployment.kubernetes.io/revision,kubectl.kubernetes.io/last-applied-configuration
|
||||
|
||||
- name: Cleaning previously created deployment
|
||||
run: |
|
||||
python test/integration/k8s-deploy-delete.py 'Deployment' 'all' ${{ env.NAMESPACE }}
|
||||
|
||||
- name: Executing deploy action for pod with resource annotation disabled
|
||||
uses: ./
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
images: nginx:1.14.2
|
||||
manifests: |
|
||||
test/integration/manifests/test.yml
|
||||
action: deploy
|
||||
annotate-resources: false
|
||||
|
||||
- name: Checking if deployment is created without additional resource annotation
|
||||
run: |
|
||||
python test/integration/k8s-deploy-test.py namespace=${{ env.NAMESPACE }} kind=Deployment name=nginx-deployment containerName=nginx:1.14.2 selectorLabels=app:nginx annotations=deployment.kubernetes.io/revision,kubectl.kubernetes.io/last-applied-configuration
|
||||
20
.github/workflows/test.yml
vendored
20
.github/workflows/test.yml
vendored
@ -1,20 +0,0 @@
|
||||
name: "build-test"
|
||||
on: # rebuild any PRs and main branch changes
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- 'releases/*'
|
||||
|
||||
jobs:
|
||||
build: # make sure build/ci works properly
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- run: |
|
||||
npm install
|
||||
npm build
|
||||
npm test
|
||||
20
.github/workflows/unit-tests.yml
vendored
Normal file
20
.github/workflows/unit-tests.yml
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
name: 'Run unit tests.'
|
||||
on: # rebuild any PRs and main branch changes
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- 'releases/*'
|
||||
|
||||
jobs:
|
||||
build: # make sure build/ci works properly
|
||||
name: Run Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- run: |
|
||||
npm install
|
||||
npm test
|
||||
332
.gitignore
vendored
332
.gitignore
vendored
@ -1,329 +1,7 @@
|
||||
## Ignore Visual Studio temporary files, build results, and
|
||||
## files generated by popular Visual Studio add-ons.
|
||||
##
|
||||
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
|
||||
node_modules
|
||||
|
||||
# User-specific files
|
||||
*.suo
|
||||
*.user
|
||||
*.userosscache
|
||||
*.sln.docstates
|
||||
.DS_Store
|
||||
.idea
|
||||
lib/
|
||||
|
||||
# User-specific files (MonoDevelop/Xamarin Studio)
|
||||
*.userprefs
|
||||
|
||||
# Build results
|
||||
[Dd]ebug/
|
||||
[Dd]ebugPublic/
|
||||
[Rr]elease/
|
||||
[Rr]eleases/
|
||||
x64/
|
||||
x86/
|
||||
bld/
|
||||
[Bb]in/
|
||||
[Oo]bj/
|
||||
[Ll]og/
|
||||
|
||||
# Visual Studio 2015/2017 cache/options directory
|
||||
.vs/
|
||||
# Uncomment if you have tasks that create the project's static files in wwwroot
|
||||
#wwwroot/
|
||||
|
||||
# Visual Studio 2017 auto generated files
|
||||
Generated\ Files/
|
||||
|
||||
# MSTest test Results
|
||||
[Tt]est[Rr]esult*/
|
||||
[Bb]uild[Ll]og.*
|
||||
|
||||
# NUNIT
|
||||
*.VisualState.xml
|
||||
TestResult.xml
|
||||
|
||||
# Build Results of an ATL Project
|
||||
[Dd]ebugPS/
|
||||
[Rr]eleasePS/
|
||||
dlldata.c
|
||||
|
||||
# Benchmark Results
|
||||
BenchmarkDotNet.Artifacts/
|
||||
|
||||
# .NET Core
|
||||
project.lock.json
|
||||
project.fragment.lock.json
|
||||
artifacts/
|
||||
**/Properties/launchSettings.json
|
||||
|
||||
# StyleCop
|
||||
StyleCopReport.xml
|
||||
|
||||
# Files built by Visual Studio
|
||||
*_i.c
|
||||
*_p.c
|
||||
*_i.h
|
||||
*.ilk
|
||||
*.meta
|
||||
*.obj
|
||||
*.iobj
|
||||
*.pch
|
||||
*.pdb
|
||||
*.ipdb
|
||||
*.pgc
|
||||
*.pgd
|
||||
*.rsp
|
||||
*.sbr
|
||||
*.tlb
|
||||
*.tli
|
||||
*.tlh
|
||||
*.tmp
|
||||
*.tmp_proj
|
||||
*.log
|
||||
*.vspscc
|
||||
*.vssscc
|
||||
.builds
|
||||
*.pidb
|
||||
*.svclog
|
||||
*.scc
|
||||
|
||||
# Chutzpah Test files
|
||||
_Chutzpah*
|
||||
|
||||
# Visual C++ cache files
|
||||
ipch/
|
||||
*.aps
|
||||
*.ncb
|
||||
*.opendb
|
||||
*.opensdf
|
||||
*.sdf
|
||||
*.cachefile
|
||||
*.VC.db
|
||||
*.VC.VC.opendb
|
||||
|
||||
# Visual Studio profiler
|
||||
*.psess
|
||||
*.vsp
|
||||
*.vspx
|
||||
*.sap
|
||||
|
||||
# Visual Studio Trace Files
|
||||
*.e2e
|
||||
|
||||
# TFS 2012 Local Workspace
|
||||
$tf/
|
||||
|
||||
# Guidance Automation Toolkit
|
||||
*.gpState
|
||||
|
||||
# ReSharper is a .NET coding add-in
|
||||
_ReSharper*/
|
||||
*.[Rr]e[Ss]harper
|
||||
*.DotSettings.user
|
||||
|
||||
# JustCode is a .NET coding add-in
|
||||
.JustCode
|
||||
|
||||
# TeamCity is a build add-in
|
||||
_TeamCity*
|
||||
|
||||
# DotCover is a Code Coverage Tool
|
||||
*.dotCover
|
||||
|
||||
# AxoCover is a Code Coverage Tool
|
||||
.axoCover/*
|
||||
!.axoCover/settings.json
|
||||
|
||||
# Visual Studio code coverage results
|
||||
*.coverage
|
||||
*.coveragexml
|
||||
|
||||
# NCrunch
|
||||
_NCrunch_*
|
||||
.*crunch*.local.xml
|
||||
nCrunchTemp_*
|
||||
|
||||
# MightyMoose
|
||||
*.mm.*
|
||||
AutoTest.Net/
|
||||
|
||||
# Web workbench (sass)
|
||||
.sass-cache/
|
||||
|
||||
# Installshield output folder
|
||||
[Ee]xpress/
|
||||
|
||||
# DocProject is a documentation generator add-in
|
||||
DocProject/buildhelp/
|
||||
DocProject/Help/*.HxT
|
||||
DocProject/Help/*.HxC
|
||||
DocProject/Help/*.hhc
|
||||
DocProject/Help/*.hhk
|
||||
DocProject/Help/*.hhp
|
||||
DocProject/Help/Html2
|
||||
DocProject/Help/html
|
||||
|
||||
# Click-Once directory
|
||||
publish/
|
||||
|
||||
# Publish Web Output
|
||||
*.[Pp]ublish.xml
|
||||
*.azurePubxml
|
||||
# Note: Comment the next line if you want to checkin your web deploy settings,
|
||||
# but database connection strings (with potential passwords) will be unencrypted
|
||||
*.pubxml
|
||||
*.publishproj
|
||||
|
||||
# Microsoft Azure Web App publish settings. Comment the next line if you want to
|
||||
# checkin your Azure Web App publish settings, but sensitive information contained
|
||||
# in these scripts will be unencrypted
|
||||
PublishScripts/
|
||||
|
||||
# NuGet Packages
|
||||
*.nupkg
|
||||
# The packages folder can be ignored because of Package Restore
|
||||
**/[Pp]ackages/*
|
||||
# except build/, which is used as an MSBuild target.
|
||||
!**/[Pp]ackages/build/
|
||||
# Uncomment if necessary however generally it will be regenerated when needed
|
||||
#!**/[Pp]ackages/repositories.config
|
||||
# NuGet v3's project.json files produces more ignorable files
|
||||
*.nuget.props
|
||||
*.nuget.targets
|
||||
|
||||
# Microsoft Azure Build Output
|
||||
csx/
|
||||
*.build.csdef
|
||||
|
||||
# Microsoft Azure Emulator
|
||||
ecf/
|
||||
rcf/
|
||||
|
||||
# Windows Store app package directories and files
|
||||
AppPackages/
|
||||
BundleArtifacts/
|
||||
Package.StoreAssociation.xml
|
||||
_pkginfo.txt
|
||||
*.appx
|
||||
|
||||
# Visual Studio cache files
|
||||
# files ending in .cache can be ignored
|
||||
*.[Cc]ache
|
||||
# but keep track of directories ending in .cache
|
||||
!*.[Cc]ache/
|
||||
|
||||
# Others
|
||||
ClientBin/
|
||||
~$*
|
||||
*~
|
||||
*.dbmdl
|
||||
*.dbproj.schemaview
|
||||
*.jfm
|
||||
*.pfx
|
||||
*.publishsettings
|
||||
orleans.codegen.cs
|
||||
|
||||
# Including strong name files can present a security risk
|
||||
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
|
||||
#*.snk
|
||||
|
||||
# Since there are multiple workflows, uncomment next line to ignore bower_components
|
||||
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
|
||||
#bower_components/
|
||||
|
||||
# RIA/Silverlight projects
|
||||
Generated_Code/
|
||||
|
||||
# Backup & report files from converting an old project file
|
||||
# to a newer Visual Studio version. Backup files are not needed,
|
||||
# because we have git ;-)
|
||||
_UpgradeReport_Files/
|
||||
Backup*/
|
||||
UpgradeLog*.XML
|
||||
UpgradeLog*.htm
|
||||
ServiceFabricBackup/
|
||||
*.rptproj.bak
|
||||
|
||||
# SQL Server files
|
||||
*.mdf
|
||||
*.ldf
|
||||
*.ndf
|
||||
|
||||
# Business Intelligence projects
|
||||
*.rdl.data
|
||||
*.bim.layout
|
||||
*.bim_*.settings
|
||||
*.rptproj.rsuser
|
||||
|
||||
# Microsoft Fakes
|
||||
FakesAssemblies/
|
||||
|
||||
# GhostDoc plugin setting file
|
||||
*.GhostDoc.xml
|
||||
|
||||
# Node.js Tools for Visual Studio
|
||||
.ntvs_analysis.dat
|
||||
|
||||
# Visual Studio 6 build log
|
||||
*.plg
|
||||
|
||||
# Visual Studio 6 workspace options file
|
||||
*.opt
|
||||
|
||||
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
|
||||
*.vbw
|
||||
|
||||
# Visual Studio LightSwitch build output
|
||||
**/*.HTMLClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/ModelManifest.xml
|
||||
**/*.Server/GeneratedArtifacts
|
||||
**/*.Server/ModelManifest.xml
|
||||
_Pvt_Extensions
|
||||
|
||||
# Paket dependency manager
|
||||
.paket/paket.exe
|
||||
paket-files/
|
||||
|
||||
# FAKE - F# Make
|
||||
.fake/
|
||||
|
||||
# JetBrains Rider
|
||||
.idea/
|
||||
*.sln.iml
|
||||
|
||||
# CodeRush
|
||||
.cr/
|
||||
|
||||
# Python Tools for Visual Studio (PTVS)
|
||||
__pycache__/
|
||||
*.pyc
|
||||
|
||||
# Cake - Uncomment if you are using it
|
||||
# tools/**
|
||||
# !tools/packages.config
|
||||
|
||||
# Tabs Studio
|
||||
*.tss
|
||||
|
||||
# Telerik's JustMock configuration file
|
||||
*.jmconfig
|
||||
|
||||
# BizTalk build output
|
||||
*.btp.cs
|
||||
*.btm.cs
|
||||
*.odx.cs
|
||||
*.xsd.cs
|
||||
|
||||
# OpenCover UI analysis results
|
||||
OpenCover/
|
||||
|
||||
# Azure Stream Analytics local run output
|
||||
ASALocalRun/
|
||||
|
||||
# MSBuild Binary and Structured Log
|
||||
*.binlog
|
||||
|
||||
# NVidia Nsight GPU debugger configuration file
|
||||
*.nvuser
|
||||
|
||||
# MFractors (Xamarin productivity tool) working folder
|
||||
.mfractor/
|
||||
coverage/
|
||||
13
.husky/pre-commit
Normal file
13
.husky/pre-commit
Normal file
@ -0,0 +1,13 @@
|
||||
npm run typecheck || {
|
||||
echo ""
|
||||
echo "❌ Type check failed."
|
||||
echo "💡 Run 'npm run typecheck' to see errors."
|
||||
exit 1
|
||||
}
|
||||
npm test
|
||||
npm run format-check || {
|
||||
echo ""
|
||||
echo "❌ Formatting check failed."
|
||||
echo "💡 Run 'npm run format' or 'prettier --write .' to fix formatting issues."
|
||||
exit 1
|
||||
}
|
||||
4
.prettierignore
Normal file
4
.prettierignore
Normal file
@ -0,0 +1,4 @@
|
||||
# dependencies
|
||||
/node_modules
|
||||
coverage
|
||||
/lib
|
||||
8
.prettierrc.json
Normal file
8
.prettierrc.json
Normal file
@ -0,0 +1,8 @@
|
||||
{
|
||||
"trailingComma": "none",
|
||||
"bracketSpacing": false,
|
||||
"semi": false,
|
||||
"tabWidth": 3,
|
||||
"singleQuote": true,
|
||||
"printWidth": 80
|
||||
}
|
||||
88
CHANGELOG.md
Normal file
88
CHANGELOG.md
Normal file
@ -0,0 +1,88 @@
|
||||
# Changelog
|
||||
|
||||
## [6.0.0] - 2026-04-17
|
||||
|
||||
### Changed
|
||||
|
||||
- #504 [Update Node.js runtime from node20 to node24](https://github.com/Azure/k8s-deploy/pull/504)
|
||||
- #500 [Update action version references in README to latest majors](https://github.com/Azure/k8s-deploy/pull/500)
|
||||
|
||||
### Security
|
||||
|
||||
- #506 [Bump undici from 6.23.0 to 6.24.1](https://github.com/Azure/k8s-deploy/pull/506)
|
||||
- #513 [Bump vite from 8.0.3 to 8.0.5](https://github.com/Azure/k8s-deploy/pull/513)
|
||||
- #509 [Bump the actions group across 1 directory with 4 updates](https://github.com/Azure/k8s-deploy/pull/509)
|
||||
- #510 [Bump the actions group across 1 directory with 2 updates](https://github.com/Azure/k8s-deploy/pull/510)
|
||||
- #511 [Bump vitest from 4.1.1 to 4.1.2](https://github.com/Azure/k8s-deploy/pull/511)
|
||||
- #514 [Bump the actions group with 2 updates](https://github.com/Azure/k8s-deploy/pull/514)
|
||||
- #501 [Bump @types/node from 25.3.3 to 25.4.0](https://github.com/Azure/k8s-deploy/pull/501)
|
||||
|
||||
## [5.1.0] - 2026-03-03
|
||||
|
||||
### Added
|
||||
|
||||
- #458 [Ensure error messages display the correct namespace](https://github.com/Azure/k8s-deploy/pull/458)
|
||||
- #482 [docker driver](https://github.com/Azure/k8s-deploy/pull/482)
|
||||
- #492 [Migrate to esbuild/Vitest and upgrade @actions/\* to ESM-only versions](https://github.com/Azure/k8s-deploy/pull/492)
|
||||
- #498 [Add typecheck to build script](https://github.com/Azure/k8s-deploy/pull/498)
|
||||
|
||||
## [5.0.4] - 2025-08-05
|
||||
|
||||
### Added
|
||||
|
||||
- #408 [Add missing README.md and action.yml parameters](https://github.com/Azure/k8s-deploy/pull/408)
|
||||
- #414 [Fix the major update packages including Jest](https://github.com/Azure/k8s-deploy/pull/414)
|
||||
- #418 [Add husky pre-commit hook.](https://github.com/Azure/k8s-deploy/pull/418)
|
||||
- #420 [Make namespace input optional](https://github.com/Azure/k8s-deploy/pull/420)
|
||||
- #424 [add server-side option for kubectl apply commands](https://github.com/Azure/k8s-deploy/pull/424)
|
||||
- #425 [Add timeout to the rollout status](https://github.com/Azure/k8s-deploy/pull/425)
|
||||
- #428 [Added additional check in getTempdirectory function](https://github.com/Azure/k8s-deploy/pull/428)
|
||||
- #432 [Added error check for canary promote actions](https://github.com/Azure/k8s-deploy/pull/432)
|
||||
- #436 [Add support for ScaledJob](https://github.com/Azure/k8s-deploy/pull/436)
|
||||
- #440 [Add Enhanced Deployment Error Reporting and Logging](https://github.com/Azure/k8s-deploy/pull/440)
|
||||
- #441 [Added timeout input description to README](https://github.com/Azure/k8s-deploy/pull/441)
|
||||
|
||||
## [5.0.3] - 2025-04-16
|
||||
|
||||
### Added
|
||||
|
||||
- #398 case-insensitive resource type
|
||||
|
||||
## [5.0.2] - 2025-04-15
|
||||
|
||||
### Added
|
||||
|
||||
- #396 Update new resource-type input for action
|
||||
|
||||
## [5.0.1] - 2024-03-12
|
||||
|
||||
### Added
|
||||
|
||||
- #356 Add fleet support
|
||||
|
||||
## [5.0.0] - 2024-03-12
|
||||
|
||||
### Changed
|
||||
|
||||
- #309 Updated to Node20 and upgraded release workflows to @v1 tag
|
||||
- #306 update release workflow to use new prefix, remove deprecated release
|
||||
- #303 fix: ensure imageNames are not empty strings
|
||||
- #299 bump release workflow sha
|
||||
- #298 bump minikube to fix runner deps
|
||||
- #297 update release workflow
|
||||
|
||||
### Added
|
||||
|
||||
- #304 add v prefix for version tagging
|
||||
- #302 adding ncc to build
|
||||
- #301 adding release workflow artifact fix
|
||||
|
||||
## [4.10.0] - 2023-10-30
|
||||
|
||||
### Added
|
||||
|
||||
- #287 Make annotating resources optional
|
||||
- #283 Fix "Service" route-method of the Blue-Green strategy with some manifest files
|
||||
- #281 bump codeql to node 16
|
||||
- #279 upgrade codeql
|
||||
- #276 Fixes multiple namespaces bug
|
||||
@ -1,9 +1,9 @@
|
||||
# Microsoft Open Source Code of Conduct
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
|
||||
Resources:
|
||||
|
||||
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
|
||||
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
||||
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
|
||||
# Microsoft Open Source Code of Conduct
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
|
||||
Resources:
|
||||
|
||||
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
|
||||
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
||||
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
|
||||
|
||||
702
README.md
702
README.md
@ -1,199 +1,503 @@
|
||||
# Deploy manifest action for Kubernetes
|
||||
Use this action to bake and deploy manifests to Kubernetes clusters.
|
||||
|
||||
Assumes that the deployment target K8s cluster context was set earlier in the workflow by using either [`Azure/aks-set-context`](https://github.com/Azure/aks-set-context/tree/releases/v1) or [`Azure/k8s-set-context`](https://github.com/Azure/k8s-set-context/tree/releases/v1)
|
||||
|
||||
#### Artifact substitution
|
||||
The deploy action takes as input a list of container images which can be specified along with their tags or digests. The same is substituted into the non-templatized version of manifest files before applying to the cluster to ensure that the right version of the image is pulled by the cluster nodes.
|
||||
|
||||
#### Manifest stability
|
||||
Rollout status is checked for the Kubernetes objects deployed. This is done to incorporate stability checks while computing the task status as success/failure.
|
||||
|
||||
#### Secret handling
|
||||
The manifest files specified as inputs are augmented with appropriate imagePullSecrets before deploying to the cluster.
|
||||
|
||||
#### Sample YAML to run a basic deployment
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
namespace: 'myapp' # optional
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: '/manifests/*.*'
|
||||
kubectl-version: 'latest' # optional
|
||||
```
|
||||
|
||||
### Deployment Strategies
|
||||
|
||||
#### Pod Canary
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
namespace: 'myapp' # optional
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: '/manifests/*.*'
|
||||
strategy: canary
|
||||
percentage: 20
|
||||
```
|
||||
|
||||
In order to promote or reject your canary deployment, use the following:
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
namespace: 'myapp' # optional
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: '/manifests/*.*'
|
||||
strategy: canary
|
||||
percentage: 20
|
||||
action: promote # set to reject if you want to reject it
|
||||
```
|
||||
|
||||
#### SMI Canary
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
namespace: 'myapp' # optional
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: '/manifests/*.*'
|
||||
strategy: canary
|
||||
traffic-split-method: smi
|
||||
percentage: 20
|
||||
baseline-and-canary-replicas: 1
|
||||
```
|
||||
|
||||
In order to promote or reject your canary deployment, use the following:
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
namespace: 'myapp' # optional
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: '/manifests/*.*'
|
||||
strategy: canary
|
||||
traffic-split-method: smi
|
||||
percentage: 20
|
||||
baseline-and-canary-replicas: 1
|
||||
action: promote # set to reject if you want to reject it
|
||||
```
|
||||
|
||||
Refer to the action metadata file for details about all the inputs https://github.com/Azure/k8s-deploy/blob/master/action.yml
|
||||
|
||||
## End to end workflow for building container images and deploying to an Azure Kubernetes Service cluster
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
|
||||
- uses: Azure/docker-login@v1
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- run: |
|
||||
docker build . -t contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
docker push contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
|
||||
# Set the target AKS cluster.
|
||||
- uses: Azure/aks-set-context@v1
|
||||
with:
|
||||
creds: '${{ secrets.AZURE_CREDENTIALS }}'
|
||||
cluster-name: contoso
|
||||
resource-group: contoso-rg
|
||||
|
||||
- uses: Azure/k8s-create-secret@v1
|
||||
with:
|
||||
container-registry-url: contoso.azurecr.io
|
||||
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
container-registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
secret-name: demo-k8s-secret
|
||||
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
manifests: |
|
||||
manifests/deployment.yml
|
||||
manifests/service.yml
|
||||
images: |
|
||||
demo.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
imagepullsecrets: |
|
||||
demo-k8s-secret
|
||||
```
|
||||
|
||||
## End to end workflow for building container images and deploying to a Kubernetes cluster
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
|
||||
- uses: Azure/docker-login@v1
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- run: |
|
||||
docker build . -t contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
docker push contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
|
||||
- uses: Azure/k8s-set-context@v1
|
||||
with:
|
||||
kubeconfig: ${{ secrets.KUBE_CONFIG }}
|
||||
|
||||
- uses: Azure/k8s-create-secret@v1
|
||||
with:
|
||||
container-registry-url: contoso.azurecr.io
|
||||
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
container-registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
secret-name: demo-k8s-secret
|
||||
|
||||
- uses: Azure/k8s-deploy@v1
|
||||
with:
|
||||
manifests: |
|
||||
manifests/deployment.yml
|
||||
manifests/service.yml
|
||||
images: |
|
||||
demo.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
imagepullsecrets: |
|
||||
demo-k8s-secret
|
||||
```
|
||||
|
||||
# Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
# Deploy manifests action for Kubernetes
|
||||
|
||||
This action is used to deploy manifests to Kubernetes clusters. It requires that the cluster context be set earlier in the workflow by using either the [Azure/aks-set-context](https://github.com/Azure/aks-set-context/tree/releases/v4) action or the [Azure/k8s-set-context](https://github.com/Azure/k8s-set-context/tree/releases/v4) action. It also requires Kubectl to be installed (you can use the [Azure/setup-kubectl](https://github.com/Azure/setup-kubectl) action).
|
||||
|
||||
If you are looking to automate your workflows to deploy to [Azure Web Apps](https://azure.microsoft.com/en-us/services/app-service/web/) and [Azure Web App for Containers](https://azure.microsoft.com/en-us/services/app-service/containers/), consider using [`Azure/webapps-deploy`](https://github.com/Azure/webapps-deploy) action.
|
||||
|
||||
This action requires the following permissions from your workflow:
|
||||
|
||||
```yaml
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
actions: read
|
||||
```
|
||||
|
||||
## Action capabilities
|
||||
|
||||
Following are the key capabilities of this action:
|
||||
|
||||
- **Artifact substitution**: Takes a list of container images which can be specified along with their tags or digests. They are substituted into the non-templatized version of manifest files before applying to the cluster to ensure that the right version of the image is pulled by the cluster nodes.
|
||||
|
||||
- **Object stability checks**: Rollout status is checked for the Kubernetes objects deployed. This is done to incorporate stability checks while computing the action status as success/failure.
|
||||
|
||||
- **Secret handling**: The secret names specified as inputs in the action are used to augment the input manifest files with imagePullSecrets values before deploying to the cluster. Also, check out the [Azure/k8s-create-secret](https://github.com/Azure/k8s-create-secret) action for creation of generic or docker-registry secrets in the cluster.
|
||||
|
||||
- **Deployment strategy**: Supports both canary and blue-green deployment strategies
|
||||
- **Canary strategy**: Workloads suffixed with '-baseline' and '-canary' are created. There are two methods of traffic splitting supported:
|
||||
- **Service Mesh Interface**: Service Mesh Interface abstraction allows for plug-and-play configuration with service mesh providers such as [Linkerd](https://linkerd.io/) and [Istio](https://istio.io/). Meanwhile, this action takes away the hard work of mapping SMI's TrafficSplit objects to the stable, baseline and canary services during the lifecycle of the deployment strategy. Service mesh based canary deployments using this action are more accurate as service mesh providers enable granular percentage traffic split (via service registry and sidecar containers injected into pods alongside application containers).
|
||||
- **Only Kubernetes (no service mesh)**: In the absence of service mesh, while it may not be possible to achieve exact percentage split at the request level, it is still possible to perform canary deployments by deploying -baseline and -canary workload variants next to the stable variant. The service routes requests to pods of all three workload variants as the selector-label constraints are met (KubernetesManifest will honor these when creating -baseline and -canary variants). This achieves the intended effect of routing only a portion of total requests to the canary.
|
||||
- **Blue-Green strategy**: Choosing blue-green strategy with this action leads to creation of workloads suffixed with '-green'. An identified service is one that is supplied as part of the input manifest(s) and targets a workload in the supplied manifest(s). There are three route-methods supported in the action:
|
||||
- **Service route-method**: Identified services are configured to target the green deployments.
|
||||
- **Ingress route-method**: Along with deployments, new services are created with '-green' suffix (for identified services), and the ingresses are in turn updated to target the new services.
|
||||
- **SMI route-method**: A new [TrafficSplit](https://github.com/servicemeshinterface/smi-spec/blob/master/apis/traffic-split/v1alpha3/traffic-split.md) object is created for each identified service. The TrafficSplit object is updated to target the new deployments. This works only if SMI is set up in the cluster.
|
||||
|
||||
Traffic is routed to the new workloads only after the time provided as `version-switch-buffer` input has passed. The `promote` action creates workloads and services with new configurations but without any suffix. `reject` routes traffic back to the old workloads and deletes the '-green' workloads.
|
||||
|
||||
## Action inputs
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Action inputs</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tr>
|
||||
<td>action </br></br>(Required)</td>
|
||||
<td>Acceptable values: deploy/promote/reject.</br>Promote or reject actions are used to promote or reject canary/blue-green deployments. Sample YAML snippets are provided below for guidance.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>manifests </br></br>(Required)</td>
|
||||
<td>Path to the manifest files to be used for deployment. These can also be directories containing manifest files, in which case, all manifest files in the referenced directory at every depth will be deployed, or URLs to manifest files (like https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/controllers/nginx-deployment.yaml). Files and URLs not ending in .yml or .yaml will be ignored.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>strategy </br></br>(Required)</td>
|
||||
<td>Acceptable values: basic/canary/blue-green. <br>
|
||||
Default value: basic
|
||||
<br>Deployment strategy to be used while applying manifest files on the cluster.
|
||||
<br>basic - Template is force applied to all pods when deploying to cluster. NOTE: Can only be used with action == deploy
|
||||
<br>canary - Canary deployment strategy is used when deploying to the cluster.<br>blue-green - Blue-Green deployment strategy is used when deploying to cluster.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>namespace </br></br>(Optional)
|
||||
<td>Namespace within the cluster to deploy to.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>images </br></br>(Optional)</td>
|
||||
<td>Fully qualified resource URL of the image(s) to be used for substitutions on the manifest files. This multiline input accepts specifying multiple artifact substitutions in newline separated form. For example:<br>
|
||||
<code><br>images: |<br>  contosodemo.azurecr.io/foo:test1<br>  contosodemo.azurecr.io/bar:test2<br></code><br>
|
||||
In this example, all references to contosodemo.azurecr.io/foo and contosodemo.azurecr.io/bar are searched for in the image field of the input manifest files. For the matches found, the tags test1 and test2 are substituted.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>imagepullsecrets </br></br>(Optional)</td>
|
||||
<td>Multiline input where each line contains the name of a docker-registry secret that has already been setup within the cluster. Each of these secret names are added under imagePullSecrets field for the workloads found in the input manifest files</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>pull-images</br></br>(Optional)</td>
|
||||
<td>Acceptable values: true/false</br>Default value: true</br>Switch whether to pull the images from the registry before deployment to find out Dockerfile's path in order to add it to the annotations</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>traffic-split-method </br></br>(Optional)</td>
|
||||
<td>Acceptable values: pod/smi.<br> Default value: pod <br>SMI: Percentage traffic split is done at request level using service mesh. Service mesh has to be setup by cluster admin. Orchestration of <a href="https://github.com/servicemeshinterface/smi-spec/blob/master/apis/traffic-split/v1alpha3/traffic-split.md" data-raw-source="TrafficSplit](https://github.com/deislabs/smi-spec/blob/master/traffic-split.md)">TrafficSplit</a> objects of SMI is handled by this action. <br>Pod: Percentage split not possible at request level in the absence of service mesh. Percentage input is used to calculate the replicas for baseline and canary as a percentage of replicas specified in the input manifests for the stable variant.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>traffic-split-annotations </br></br>(Optional)</td>
|
||||
<td>Annotations in the form of key/value pair to be added to TrafficSplit.</td>
|
||||
<tr>
|
||||
<td>percentage </br></br>(Optional but required if strategy is canary)</td>
|
||||
<td>Used to compute the number of replicas of '-baseline' and '-canary' variants of the workloads found in manifest files. For the specified percentage input, if (percentage * numberOfDesiredReplicas)/100 is not a round number, the floor of this number is used while creating '-baseline' and '-canary'.<br/><br/>For example, if Deployment hello-world was found in the input manifest file with 'replicas: 4' and if 'strategy: canary' and 'percentage: 25' are given as inputs to the action, then the Deployments hello-world-baseline and hello-world-canary are created with 1 replica each. The '-baseline' variant is created with the same image and tag as the stable version (4 replica variant prior to deployment) while the '-canary' variant is created with the image and tag corresponding to the new changes being deployed</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>baseline-and-canary-replicas </br></br> (Optional and relevant only if strategy is canary and traffic-split-method is smi)</td>
|
||||
<td>The number of baseline and canary replicas. Percentage traffic split is controlled in the service mesh plane, the actual number of replicas for canary and baseline variants could be controlled independently of the traffic split. For example, assume that the input Deployment manifest desired 30 replicas to be used for stable and that the following inputs were specified for the action </br></br><code> strategy: canary<br> trafficSplitMethod: smi<br> percentage: 20<br> baselineAndCanaryReplicas: 1</code></br></br> In this case, stable variant will receive 80% traffic while baseline and canary variants will receive 10% each (20% split equally between baseline and canary). However, instead of creating baseline and canary with 3 replicas each, the explicit count of baseline and canary replicas is honored. That is, only 1 replica each is created for baseline and canary variants.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>route-method </br></br>(Optional and relevant only if strategy is blue-green)</td>
|
||||
<td>Acceptable values: service/ingress/smi.</br>Default value: service.</br>Traffic is routed based on this input.
|
||||
<br>Service: Service selector labels are updated to target '-green' workloads.
|
||||
<br>Ingress: Ingress backends are updated to target the new '-green' services which in turn target '-green' deployments.
|
||||
<br>SMI: A <a href="https://github.com/servicemeshinterface/smi-spec/blob/master/apis/traffic-split/v1alpha3/traffic-split.md" data-raw-source="TrafficSplit](https://github.com/deislabs/smi-spec/blob/master/traffic-split.md)">TrafficSplit</a> object is created for each required service to route traffic to new workloads.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>version-switch-buffer </br></br>(Optional and relevant only if strategy is blue-green)</td>
|
||||
<td>Acceptable values: 1-300.</br>Default value: 0.</br>Waits for the given input in minutes before routing traffic to '-green' workloads.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>private-cluster </br></br>(Optional; relevant only when deploying to a cluster with private cluster enabled)</td>
|
||||
<td>Acceptable values: true, false</br>Default value: false.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>resource-group</br></br>(Optional)</td>
|
||||
<td>Name of resource group - Only required if using private cluster</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>name</br></br>(Optional)</td>
|
||||
<td>Name of the private cluster - Only required if using private cluster</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>force</br></br>(Optional)</td>
|
||||
<td>Deploy when a previous deployment already exists. If true then '--force' argument is added to the apply command. Using '--force' argument is not recommended in production.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>server-side </br></br>(Optional)</td>
|
||||
<td>The apply command runs in the server instead of the client. If true then '--server-side' argument is added to the apply command.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>timeout</br></br>(Optional)</td>
|
||||
<td>Default value: 10m</br>Timeout for the rollout status. Accepts time units like '10m', '1h', '30s'. If only a number is provided (e.g., '30'), it is assumed to be minutes.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>annotate-resources</br></br>(Optional)</td>
|
||||
<td>Acceptable values: true/false</br>Default value: true</br>Switch whether to annotate the resources or not. If set to false all annotations are skipped completely.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>annotate-namespace</br></br>(Optional)</td>
|
||||
<td>Acceptable values: true/false</br>Default value: true</br>Switch whether to annotate the namespace resources object or not. Ignored when annotate-resources is set to false.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>skip-tls-verify</br></br>(Optional)</td>
|
||||
<td>Acceptable values: true/false</br>Default value: false</br>True if the insecure-skip-tls-verify option should be used</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>resource-type (Optional)</td>
|
||||
<td>Acceptable values: `Microsoft.ContainerService/managedClusters` (default), 'Microsoft.ContainerService/fleets'</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic deployment (without any deployment strategy)
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
manifests: |
|
||||
dir/manifestsDirectory
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
```
|
||||
|
||||
### Private cluster deployment
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
resource-group: yourResourceGroup
|
||||
name: yourClusterName
|
||||
action: deploy
|
||||
strategy: basic
|
||||
|
||||
private-cluster: true
|
||||
manifests: |
|
||||
manifests/azure-vote-backend-deployment.yaml
|
||||
manifests/azure-vote-backend-service.yaml
|
||||
manifests/azure-vote-frontend-deployment.yaml
|
||||
manifests/azure-vote-frontend-service.yaml
|
||||
images: |
|
||||
registry.azurecr.io/containername
|
||||
```
|
||||
|
||||
### Canary deployment without service mesh
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
dir/manifestsDirectory
|
||||
strategy: canary
|
||||
action: deploy
|
||||
percentage: 20
|
||||
```
|
||||
|
||||
To promote/reject the canary created by the above snippet, the following YAML snippet could be used:
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
dir/manifestsDirectory
|
||||
strategy: canary
|
||||
action: promote # substitute reject if you want to reject
|
||||
```
|
||||
|
||||
### Canary deployment based on Service Mesh Interface
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
dir/manifestsDirectory
|
||||
strategy: canary
|
||||
action: deploy
|
||||
traffic-split-method: smi
|
||||
percentage: 20
|
||||
baseline-and-canary-replicas: 1
|
||||
```
|
||||
|
||||
To promote/reject the canary created by the above snippet, the following YAML snippet could be used:
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }} '
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
dir/manifestsDirectory
|
||||
strategy: canary
|
||||
traffic-split-method: smi
|
||||
action: reject # substitute promote if you want to promote
|
||||
```
|
||||
|
||||
### Blue-Green deployment with different route methods
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
ingress.yml
|
||||
strategy: blue-green
|
||||
action: deploy
|
||||
route-method: ingress # substitute with service/smi as per need
|
||||
version-switch-buffer: 15
|
||||
```
|
||||
|
||||
To promote/reject the green workload created by the above snippet, the following YAML snippet could be used:
|
||||
|
||||
```yaml
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
namespace: 'myapp'
|
||||
images: 'contoso.azurecr.io/myapp:${{ event.run_id }}'
|
||||
imagepullsecrets: |
|
||||
image-pull-secret1
|
||||
image-pull-secret2
|
||||
manifests: |
|
||||
deployment.yaml
|
||||
service.yaml
|
||||
ingress.yml
|
||||
strategy: blue-green
|
||||
route-method: ingress # should be the same as the value when action was deploy
|
||||
action: promote # substitute reject if you want to reject
|
||||
```
|
||||
|
||||
## End to end workflows
|
||||
|
||||
Following are a few examples of not just this action, but how this action could be used along with other container and k8s related actions for building images and deploying objects onto k8s clusters:
|
||||
|
||||
### Build container image and deploy to Azure Kubernetes Service cluster
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: Azure/docker-login@v2
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- run: |
|
||||
docker build . -t contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
docker push contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
|
||||
- uses: azure/setup-kubectl@v4
|
||||
|
||||
# Set the target AKS cluster.
|
||||
- uses: Azure/aks-set-context@v4
|
||||
with:
|
||||
creds: '${{ secrets.AZURE_CREDENTIALS }}'
|
||||
cluster-name: contoso
|
||||
resource-group: contoso-rg
|
||||
|
||||
- uses: Azure/k8s-create-secret@v5
|
||||
with:
|
||||
container-registry-url: contoso.azurecr.io
|
||||
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
container-registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
secret-name: demo-k8s-secret
|
||||
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
action: deploy
|
||||
manifests: |
|
||||
manifests/deployment.yml
|
||||
manifests/service.yml
|
||||
images: |
|
||||
demo.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
imagepullsecrets: |
|
||||
demo-k8s-secret
|
||||
```
|
||||
|
||||
### Build container image and deploy to any Azure Kubernetes Service cluster
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: Azure/docker-login@v2
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- run: |
|
||||
docker build . -t contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
docker push contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
|
||||
- uses: azure/setup-kubectl@v4
|
||||
|
||||
- uses: Azure/k8s-set-context@v4
|
||||
with:
|
||||
kubeconfig: ${{ secrets.KUBE_CONFIG }}
|
||||
|
||||
- uses: Azure/k8s-create-secret@v5
|
||||
with:
|
||||
container-registry-url: contoso.azurecr.io
|
||||
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
container-registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
secret-name: demo-k8s-secret
|
||||
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
action: deploy
|
||||
manifests: |
|
||||
manifests/deployment.yml
|
||||
manifests/service.yml
|
||||
images: |
|
||||
demo.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
imagepullsecrets: |
|
||||
demo-k8s-secret
|
||||
```
|
||||
|
||||
### Build image and add `dockerfile-path` label to it
|
||||
|
||||
We can use this image in other workflows once built.
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
env:
|
||||
NAMESPACE: demo-ns2
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: Azure/docker-login@v2
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- run: |
|
||||
docker build . -t contoso.azurecr.io/k8sdemo:${{ github.sha }} --label dockerfile-path=https://github.com/${{github.repo}}/blob/${{github.sha}}/Dockerfile
|
||||
docker push contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
```
|
||||
|
||||
### Use bake action to get manifests deploying to a Kubernetes cluster
|
||||
|
||||
```yaml
|
||||
on: [push]
|
||||
env:
|
||||
NAMESPACE: demo-ns2
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: Azure/docker-login@v2
|
||||
with:
|
||||
login-server: contoso.azurecr.io
|
||||
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
|
||||
- uses: azure/setup-kubectl@v4
|
||||
|
||||
# Set the target AKS cluster.
|
||||
- uses: Azure/aks-set-context@v4
|
||||
with:
|
||||
creds: '${{ secrets.AZURE_CREDENTIALS }}'
|
||||
cluster-name: contoso
|
||||
resource-group: contoso-rg
|
||||
|
||||
- uses: Azure/k8s-create-secret@v5
|
||||
with:
|
||||
namespace: ${{ env.NAMESPACE }}
|
||||
container-registry-url: contoso.azurecr.io
|
||||
container-registry-username: ${{ secrets.REGISTRY_USERNAME }}
|
||||
container-registry-password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
secret-name: demo-k8s-secret
|
||||
|
||||
- uses: azure/k8s-bake@v3
|
||||
with:
|
||||
renderEngine: 'helm'
|
||||
helmChart: './aks-helloworld/'
|
||||
overrideFiles: './aks-helloworld/values-override.yaml'
|
||||
overrides: |
|
||||
replicas:2
|
||||
helm-version: 'latest'
|
||||
id: bake
|
||||
|
||||
- uses: Azure/k8s-deploy@v5
|
||||
with:
|
||||
action: deploy
|
||||
manifests: ${{ steps.bake.outputs.manifestsBundle }}
|
||||
images: |
|
||||
contoso.azurecr.io/k8sdemo:${{ github.sha }}
|
||||
imagepullsecrets: |
|
||||
demo-k8s-secret
|
||||
```
|
||||
|
||||
## Traceability Fields Support
|
||||
|
||||
- Environment variable `HELM_CHART_PATHS` is a list of helmchart files expected by k8s-deploy - it will be populated automatically if you are using k8s-bake to generate the manifests.
|
||||
- Use script to build image and add dockerfile-path label to it. The value expected is the link to the dockerfile: https://github.com/${{github.repo}}/blob/${{github.sha}}/Dockerfile. If your dockerfile is in the same repo and branch where the workflow is run, it can be a relative path and it will be converted to a link for traceability.
|
||||
- Run docker login action for each image registry - in case image build and image deploy are two distinct jobs in the same or separate workflows.
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
|
||||
## Support
|
||||
|
||||
k8s-deploy is an open source project that is [**not** covered by the Microsoft Azure support policy](https://support.microsoft.com/en-us/help/2941892/support-for-linux-and-open-source-technology-in-azure). [Please search open issues here](https://github.com/Azure/k8s-deploy/issues), and if your issue isn't already represented please [open a new one](https://github.com/Azure/k8s-deploy/issues/new/choose). The project maintainers will respond to the best of their abilities.
|
||||
|
||||
70
SECURITY.md
70
SECURITY.md
@ -1,35 +1,35 @@
|
||||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.1 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [many more](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets Microsoft's [definition](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)) of a security vulnerability, please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center at [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://technet.microsoft.com/en-us/security/dn606155).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
||||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.1 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [many more](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets Microsoft's [definition](<https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)>) of a security vulnerability, please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center at [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://technet.microsoft.com/en-us/security/dn606155).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
- Full paths of source file(s) related to the manifestation of the issue
|
||||
- The location of the affected source code (tag/branch/commit or direct URL)
|
||||
- Any special configuration required to reproduce the issue
|
||||
- Step-by-step instructions to reproduce the issue
|
||||
- Proof-of-concept or exploit code (if possible)
|
||||
- Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
||||
|
||||
@ -1,16 +0,0 @@
|
||||
apiVersion : apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: testapp
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: testapp
|
||||
spec:
|
||||
containers:
|
||||
- name: testapp
|
||||
image: testcr.azurecr.io/testapp
|
||||
ports:
|
||||
- containerPort: 80
|
||||
@ -1,430 +0,0 @@
|
||||
import * as KubernetesManifestUtility from '../src/utilities/manifest-stability-utility';
|
||||
import * as KubernetesObjectUtility from '../src/utilities/resource-object-utility';
|
||||
import * as action from '../src/run';
|
||||
import * as core from '@actions/core';
|
||||
import * as deployment from '../src/utilities/strategy-helpers/deployment-helper';
|
||||
import * as fs from 'fs';
|
||||
import * as io from '@actions/io';
|
||||
import * as toolCache from '@actions/tool-cache';
|
||||
import * as fileHelper from '../src/utilities/files-helper';
|
||||
import { workflowAnnotations } from '../src/constants';
|
||||
import * as inputParam from '../src/input-parameters';
|
||||
|
||||
import { Kubectl, Resource } from '../src/kubectl-object-model';
|
||||
|
||||
import { getkubectlDownloadURL } from "../src/utilities/kubectl-util";
|
||||
import { mocked } from 'ts-jest/utils';
|
||||
|
||||
var path = require('path');
|
||||
const os = require("os");
|
||||
|
||||
const coreMock = mocked(core, true);
|
||||
const ioMock = mocked(io, true);
|
||||
const inputParamMock = mocked(inputParam, true);
|
||||
|
||||
const toolCacheMock = mocked(toolCache, true);
|
||||
const fileUtility = mocked(fs, true);
|
||||
|
||||
const stableVersionUrl = 'https://storage.googleapis.com/kubernetes-release/release/stable.txt';
|
||||
|
||||
var deploymentYaml = "";
|
||||
|
||||
const getAllPodsMock = {
|
||||
'code': 0,
|
||||
'stdout': '{"apiVersion": "v1","items": [{"apiVersion": "v1","kind": "Pod","metadata": {"labels": {"app": "testapp","pod-template-hash": "776cbc86f9"},"name": "testpod-776cbc86f9-pjrb6","namespace": "testnamespace","ownerReferences": [{"apiVersion": "apps/v1","blockOwnerDeletion": true,"controller": true,"kind": "ReplicaSet","name": "testpod-776cbc86f9","uid": "de544628-6589-4354-81fe-05faf00d336a"}],"resourceVersion": "12362496","selfLink": "/api/v1/namespaces/akskodey8187/pods/akskodey-776cbc86f9-pjrb6","uid": "c7d5f4c1-11a1-4884-8a66-09b015c72f69"},"spec": {"containers": [{"image": "imageId","imagePullPolicy": "IfNotPresent","name": "containerName","ports": [{"containerPort": 80,"protocol": "TCP"}]}]},"status": {"hostIP": "10.240.0.4","phase": "Running","podIP": "10.244.0.25","qosClass": "BestEffort","startTime": "2020-06-04T07:59:42Z"}}]}'
|
||||
};
|
||||
|
||||
const getNamespaceMock = {
|
||||
'code': 0,
|
||||
'stdout': '{"apiVersion": "v1","kind": "Namespace","metadata": {"annotations": {"workflow": ".github/workflows/workflow.yml","runUri": "https://github.com/testRepo/actions/runs/12345"}},"spec": {"finalizers": ["kubernetes"]},"status": {"phase": "Active"}}'
|
||||
};
|
||||
|
||||
const resources: Resource[] = [{ type: "Deployment", name: "AppName" }];
|
||||
|
||||
beforeEach(() => {
|
||||
deploymentYaml = fs.readFileSync(path.join(__dirname, 'manifests', 'deployment.yml'), 'utf8');
|
||||
|
||||
process.env["KUBECONFIG"] = 'kubeConfig';
|
||||
process.env['GITHUB_RUN_ID'] = '12345';
|
||||
process.env['GITHUB_WORKFLOW'] = '.github/workflows/workflow.yml';
|
||||
process.env['GITHUB_JOB'] = 'build-and-deploy';
|
||||
process.env['GITHUB_ACTOR'] = 'testUser';
|
||||
process.env['GITHUB_REPOSITORY'] = 'testRepo';
|
||||
process.env['GITHUB_SHA'] = 'testCommit';
|
||||
process.env['GITHUB_REF'] = 'testBranch';
|
||||
})
|
||||
|
||||
test("setKubectlPath() - install a particular version", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
//Mocks
|
||||
coreMock.getInput = jest.fn().mockReturnValue(kubectlVersion);
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(toolCacheMock.find).toBeCalledWith('kubectl', kubectlVersion);
|
||||
expect(toolCacheMock.downloadTool).toBeCalledWith(getkubectlDownloadURL(kubectlVersion));
|
||||
});
|
||||
|
||||
test("setKubectlPath() - install a latest version", async () => {
|
||||
const kubectlVersion = 'latest'
|
||||
//Mocks
|
||||
coreMock.getInput = jest.fn().mockReturnValue(kubectlVersion);
|
||||
jest.spyOn(fs, 'readFileSync').mockImplementation(() => "");
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockResolvedValue('');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(toolCacheMock.find).toBeCalledWith('kubectl', kubectlVersion);
|
||||
expect(toolCacheMock.downloadTool).toBeCalledWith(stableVersionUrl);
|
||||
|
||||
});
|
||||
|
||||
test("setKubectlPath() - kubectl version already avilable", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
//Mock
|
||||
coreMock.getInput = jest.fn().mockReturnValue(kubectlVersion);
|
||||
toolCacheMock.find = jest.fn().mockReturnValue('validPath');
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(toolCacheMock.find).toBeCalledWith('kubectl', kubectlVersion);
|
||||
expect(toolCacheMock.downloadTool).toBeCalledTimes(0);
|
||||
});
|
||||
|
||||
test("setKubectlPath() - kubectl version not provided and kubectl avilable on machine", async () => {
|
||||
//Mock
|
||||
coreMock.getInput = jest.fn().mockReturnValue(undefined);
|
||||
ioMock.which = jest.fn().mockReturnValue('validPath');
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(ioMock.which).toBeCalledWith('kubectl', false);
|
||||
expect(toolCacheMock.downloadTool).toBeCalledTimes(0);
|
||||
});
|
||||
|
||||
test("setKubectlPath() - kubectl version not provided and kubectl not avilable on machine", async () => {
|
||||
//Mock
|
||||
coreMock.getInput = jest.fn().mockReturnValue(undefined);
|
||||
ioMock.which = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.findAllVersions = jest.fn().mockReturnValue(undefined);
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).rejects.toThrowError();
|
||||
expect(ioMock.which).toBeCalledWith('kubectl', false);
|
||||
});
|
||||
|
||||
test("run() - action not provided", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'action') {
|
||||
return undefined;
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
//Mocks
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(coreMock.setFailed).toBeCalledWith('Not a valid action. The allowed actions are deploy, promote, reject');
|
||||
});
|
||||
|
||||
test("run() - deploy - Manifiest not provided", async () => {
|
||||
//Mocks
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return undefined;
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(coreMock.setFailed).toBeCalledWith('No manifests supplied to deploy');
|
||||
});
|
||||
|
||||
test("run() - deploy - Only one manifest with no delimiters", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return "bg-smi.yml";
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
test("run() - deploy - Manifests provided by new line delimiter", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return "bg-smi.yml\n bg.yml\ndeployment.yml";
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
test("run() - deploy - Manifests provided by comma as a delimiter", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return "bg-smi.yml, bg.yml, deployment.yml";
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
test("run() - deploy - Manifests provided by both new line and comma as a delimiter", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return "bg-smi.yml\nbg.yml,deployment.yml";
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
test("run() - deploy - Manifests provided by both new line and comma and semi-colon as a delimiter", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return "bg-smi.yml\nbg.yml,deployment.yml;bg.yml";
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue(undefined);
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
test("deployment - deploy() - Invokes with no manifestfiles", async () => {
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, [], undefined)).rejects.toThrowError('ManifestFileNotFound');
|
||||
});
|
||||
|
||||
test("run() - deploy", async () => {
|
||||
const kubectlVersion = 'v1.18.0'
|
||||
//Mocks
|
||||
coreMock.getInput = jest.fn().mockImplementation((name) => {
|
||||
if (name == 'manifests') {
|
||||
return 'manifests/deployment.yaml';
|
||||
}
|
||||
if (name == 'action') {
|
||||
return 'deploy';
|
||||
}
|
||||
if (name == 'strategy') {
|
||||
return undefined;
|
||||
}
|
||||
return kubectlVersion;
|
||||
});
|
||||
|
||||
coreMock.setFailed = jest.fn();
|
||||
toolCacheMock.find = jest.fn().mockReturnValue('validPath');
|
||||
toolCacheMock.downloadTool = jest.fn().mockReturnValue('downloadpath');
|
||||
toolCacheMock.cacheFile = jest.fn().mockReturnValue('cachepath');
|
||||
fileUtility.chmodSync = jest.fn();
|
||||
const deploySpy = jest.spyOn(deployment, 'deploy').mockImplementation();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(action.run()).resolves.not.toThrow();
|
||||
expect(deploySpy).toBeCalledWith({ "ignoreSSLErrors": false, "kubectlPath": 'validPath', "namespace": "v1.18.0" }, ['manifests/deployment.yaml'], undefined);
|
||||
deploySpy.mockRestore();
|
||||
});
|
||||
|
||||
test("deployment - deploy() - Invokes with manifestfiles", async () => {
|
||||
const KubernetesManifestUtilityMock = mocked(KubernetesManifestUtility, true);
|
||||
const KubernetesObjectUtilityMock = mocked(KubernetesObjectUtility, true);
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
kubeCtl.apply = jest.fn().mockReturnValue("");
|
||||
KubernetesObjectUtilityMock.getResources = jest.fn().mockReturnValue(resources);
|
||||
kubeCtl.getResource = jest.fn().mockReturnValue(getNamespaceMock);
|
||||
kubeCtl.getAllPods = jest.fn().mockReturnValue(getAllPodsMock);
|
||||
kubeCtl.describe = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotateFiles = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotate = jest.fn().mockReturnValue("");
|
||||
KubernetesManifestUtilityMock.checkManifestStability = jest.fn().mockReturnValue("");
|
||||
|
||||
const readFileSpy = jest.spyOn(fs, 'readFileSync').mockImplementation(() => deploymentYaml);
|
||||
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, ['manifests/deployment.yaml'], undefined)).resolves.not.toThrowError();
|
||||
expect(readFileSpy).toBeCalledWith("manifests/deployment.yaml");
|
||||
expect(kubeCtl.getResource).toBeCalledWith("ingress", "AppName");
|
||||
});
|
||||
|
||||
test("deployment - deploy() - deploy force flag on", async () => {
|
||||
//Mocks
|
||||
inputParamMock.forceDeployment = true;
|
||||
const applyResMock = {
|
||||
'code': 0,
|
||||
'stderr': '',
|
||||
'error': Error(""),
|
||||
'stdout': 'changes configured'
|
||||
};
|
||||
const KubernetesManifestUtilityMock = mocked(KubernetesManifestUtility, true);
|
||||
const KubernetesObjectUtilityMock = mocked(KubernetesObjectUtility, true);
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
KubernetesObjectUtilityMock.getResources = jest.fn().mockReturnValue(resources);
|
||||
kubeCtl.getResource = jest.fn().mockReturnValue(getNamespaceMock);
|
||||
kubeCtl.getAllPods = jest.fn().mockReturnValue(getAllPodsMock);
|
||||
kubeCtl.describe = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotateFiles = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotate = jest.fn().mockReturnValue("");
|
||||
KubernetesManifestUtilityMock.checkManifestStability = jest.fn().mockReturnValue("");
|
||||
|
||||
const deploySpy = jest.spyOn(kubeCtl, 'apply').mockImplementation(() => applyResMock);
|
||||
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, ['manifests/deployment.yaml'], undefined)).resolves.not.toThrowError();
|
||||
expect(deploySpy).toBeCalledWith(expect.anything(), true);
|
||||
deploySpy.mockRestore();
|
||||
});
|
||||
|
||||
test("deployment - deploy() - Annotate resources", async () => {
|
||||
const KubernetesManifestUtilityMock = mocked(KubernetesManifestUtility, true);
|
||||
KubernetesManifestUtilityMock.checkManifestStability = jest.fn().mockReturnValue("");
|
||||
const KubernetesObjectUtilityMock = mocked(KubernetesObjectUtility, true);
|
||||
KubernetesObjectUtilityMock.getResources = jest.fn().mockReturnValue(resources);
|
||||
const fileHelperMock = mocked(fileHelper, true);
|
||||
fileHelperMock.writeObjectsToFile = jest.fn().mockReturnValue(["~/Deployment_testapp_currentTimestamp"]);
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
kubeCtl.apply = jest.fn().mockReturnValue("");
|
||||
kubeCtl.getResource = jest.fn().mockReturnValue(getNamespaceMock);
|
||||
kubeCtl.getAllPods = jest.fn().mockReturnValue(getAllPodsMock);
|
||||
kubeCtl.getNewReplicaSet = jest.fn().mockReturnValue("testpod-776cbc86f9");
|
||||
kubeCtl.annotateFiles = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotate = jest.fn().mockReturnValue("");
|
||||
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, ['manifests/deployment.yaml'], undefined)).resolves.not.toThrowError();
|
||||
expect(kubeCtl.annotateFiles).toBeCalledWith(["~/Deployment_testapp_currentTimestamp"], workflowAnnotations, true);
|
||||
expect(kubeCtl.annotate).toBeCalledTimes(2);
|
||||
});
|
||||
|
||||
test("deployment - deploy() - Skip Annotate namespace", async () => {
|
||||
process.env['GITHUB_REPOSITORY'] = 'test1Repo';
|
||||
const KubernetesManifestUtilityMock = mocked(KubernetesManifestUtility, true);
|
||||
KubernetesManifestUtilityMock.checkManifestStability = jest.fn().mockReturnValue("");
|
||||
const KubernetesObjectUtilityMock = mocked(KubernetesObjectUtility, true);
|
||||
KubernetesObjectUtilityMock.getResources = jest.fn().mockReturnValue(resources);
|
||||
const fileHelperMock = mocked(fileHelper, true);
|
||||
fileHelperMock.writeObjectsToFile = jest.fn().mockReturnValue(["~/Deployment_testapp_currentTimestamp"]);
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
kubeCtl.apply = jest.fn().mockReturnValue("");
|
||||
kubeCtl.getResource = jest.fn().mockReturnValue(getNamespaceMock);
|
||||
kubeCtl.getAllPods = jest.fn().mockReturnValue(getAllPodsMock);
|
||||
kubeCtl.getNewReplicaSet = jest.fn().mockReturnValue("testpod-776cbc86f9");
|
||||
kubeCtl.annotateFiles = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotate = jest.fn().mockReturnValue("");
|
||||
|
||||
const consoleOutputSpy = jest.spyOn(process.stdout, "write").mockImplementation();
|
||||
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, ['manifests/deployment.yaml'], undefined)).resolves.not.toThrowError();
|
||||
expect(kubeCtl.annotateFiles).toBeCalledWith(["~/Deployment_testapp_currentTimestamp"], workflowAnnotations, true);
|
||||
expect(kubeCtl.annotate).toBeCalledTimes(1);
|
||||
expect(consoleOutputSpy).toHaveBeenNthCalledWith(2, `::debug::Skipping 'annotate namespace' as namespace annotated by other workflow` + os.EOL)
|
||||
});
|
||||
|
||||
test("deployment - deploy() - Annotate resources failed", async () => {
|
||||
//Mocks
|
||||
inputParamMock.forceDeployment = true;
|
||||
const annotateMock = {
|
||||
'code': 1,
|
||||
'stderr': 'kubectl annotate failed',
|
||||
'error': Error(""),
|
||||
'stdout': ''
|
||||
};
|
||||
const KubernetesManifestUtilityMock = mocked(KubernetesManifestUtility, true);
|
||||
const KubernetesObjectUtilityMock = mocked(KubernetesObjectUtility, true);
|
||||
const kubeCtl: jest.Mocked<Kubectl> = new Kubectl("") as any;
|
||||
KubernetesObjectUtilityMock.getResources = jest.fn().mockReturnValue(resources);
|
||||
kubeCtl.apply = jest.fn().mockReturnValue("");
|
||||
kubeCtl.getResource = jest.fn().mockReturnValue(getNamespaceMock);
|
||||
kubeCtl.getAllPods = jest.fn().mockReturnValue(getAllPodsMock);
|
||||
kubeCtl.describe = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotateFiles = jest.fn().mockReturnValue("");
|
||||
kubeCtl.annotate = jest.fn().mockReturnValue(annotateMock);
|
||||
KubernetesManifestUtilityMock.checkManifestStability = jest.fn().mockReturnValue("");
|
||||
|
||||
const consoleOutputSpy = jest.spyOn(process.stdout, "write").mockImplementation();
|
||||
//Invoke and assert
|
||||
await expect(deployment.deploy(kubeCtl, ['manifests/deployment.yaml'], undefined)).resolves.not.toThrowError();
|
||||
expect(consoleOutputSpy).toHaveBeenNthCalledWith(2, '::warning::kubectl annotate failed' + os.EOL)
|
||||
});
|
||||
153
action.yml
153
action.yml
@ -1,53 +1,100 @@
|
||||
name: 'Deploy to Kubernetes cluster'
|
||||
description: 'Deploy to a Kubernetes cluster including, but not limited to Azure Kubernetes Service (AKS) clusters'
|
||||
inputs:
|
||||
# Please ensure you have used either azure/k8s-actions/aks-set-context or azure/k8s-actions/k8s-set-context in the workflow before this action
|
||||
namespace:
|
||||
description: 'Choose the target Kubernetes namespace. If the namespace is not provided, the commands will run in the default namespace.'
|
||||
required: false
|
||||
manifests:
|
||||
description: 'Path to the manifest files which will be used for deployment.'
|
||||
required: true
|
||||
default: ''
|
||||
images:
|
||||
description: 'Fully qualified resource URL of the image(s) to be used for substitutions on the manifest files Example: contosodemo.azurecr.io/helloworld:test'
|
||||
required: false
|
||||
imagepullsecrets:
|
||||
description: 'Name of a docker-registry secret that has already been set up within the cluster. Each of these secret names are added under imagePullSecrets field for the workloads found in the input manifest files'
|
||||
required: false
|
||||
kubectl-version:
|
||||
description: 'Version of kubectl. Installs a specific version of kubectl binary'
|
||||
required: false
|
||||
strategy:
|
||||
description: 'Deployment strategy to be used. Allowed values are none, canary'
|
||||
required: false
|
||||
default: 'none'
|
||||
traffic-split-method:
|
||||
description: "Traffic split method to be used. Allowed values are pod, smi"
|
||||
required: false
|
||||
default: 'pod'
|
||||
baseline-and-canary-replicas:
|
||||
description: 'Baseline and canary replicas count; valid value i.e between 0 to 100.'
|
||||
required: false
|
||||
default: 0
|
||||
percentage:
|
||||
description: 'Percentage of traffic redirect to canary deployment'
|
||||
required: false
|
||||
default: 0
|
||||
args:
|
||||
description: 'Arguments'
|
||||
required: false
|
||||
action:
|
||||
description: 'deploy/promote/reject'
|
||||
required: true
|
||||
default: 'deploy'
|
||||
force:
|
||||
description: 'Deploy when a previous deployment already exists. If true then --force argument is added to the apply command.'
|
||||
required: false
|
||||
default: false
|
||||
|
||||
branding:
|
||||
color: 'green' # optional, decorates the entry in the GitHub Marketplace
|
||||
runs:
|
||||
using: 'node12'
|
||||
main: 'lib/run.js'
|
||||
name: 'Deploy to Kubernetes cluster'
|
||||
description: 'Deploy to a Kubernetes cluster including, but not limited to Azure Kubernetes Service (AKS) clusters'
|
||||
inputs:
|
||||
# Please ensure you have used either azure/k8s-actions/aks-set-context or azure/k8s-actions/k8s-set-context in the workflow before this action
|
||||
# You also need to have kubectl installed (azure/setup-kubectl)
|
||||
namespace:
|
||||
description: 'Choose the target Kubernetes namespace. If the namespace is not provided, the commands will automatically use the namespace defined in the manifest files first or otherwise run in the default namespace.'
|
||||
required: false
|
||||
default: ''
|
||||
manifests:
|
||||
description: 'Path to the manifest files which will be used for deployment.'
|
||||
required: true
|
||||
images:
|
||||
description: 'Fully qualified resource URL of the image(s) to be used for substitutions on the manifest files Example: contosodemo.azurecr.io/helloworld:test'
|
||||
required: false
|
||||
imagepullsecrets:
|
||||
description: 'Name of a docker-registry secret that has already been set up within the cluster. Each of these secret names are added under imagePullSecrets field for the workloads found in the input manifest files'
|
||||
required: false
|
||||
pull-images:
|
||||
description: "Switch whether to pull the images from the registry before deployment to find out Dockerfile's path in order to add it to the annotations"
|
||||
required: false
|
||||
default: true
|
||||
strategy:
|
||||
description: 'Deployment strategy to be used. Allowed values are basic, canary and blue-green'
|
||||
required: true
|
||||
default: 'basic'
|
||||
route-method:
|
||||
description: 'Route based on service, ingress or SMI for blue-green strategy'
|
||||
required: false
|
||||
default: 'service'
|
||||
version-switch-buffer:
|
||||
description: 'Indicates the buffer time in minutes before the switch is made to the green version (max is 300 min ie. 5hrs)'
|
||||
required: false
|
||||
default: 0
|
||||
traffic-split-method:
|
||||
description: 'Traffic split method to be used. Allowed values are pod and smi'
|
||||
required: false
|
||||
default: 'pod'
|
||||
traffic-split-annotations:
|
||||
description: 'Annotations in the form of key/value pair to be added to TrafficSplit. Relevant only if deployement strategy is blue-green or canary'
|
||||
required: false
|
||||
baseline-and-canary-replicas:
|
||||
description: 'Baseline and canary replicas count. Valid value between 0 to 100 (inclusive)'
|
||||
required: false
|
||||
default: ''
|
||||
percentage:
|
||||
description: 'Percentage of traffic redirect to canary deployment'
|
||||
required: false
|
||||
default: 0
|
||||
action:
|
||||
description: 'deploy, promote, or reject'
|
||||
required: true
|
||||
default: 'deploy'
|
||||
force:
|
||||
description: 'Deploy when a previous deployment already exists. If true then --force argument is added to the apply command'
|
||||
required: false
|
||||
default: false
|
||||
server-side:
|
||||
description: 'The apply command runs in the server instead of the client. If true then --server-side argument is added to the apply command.'
|
||||
required: false
|
||||
default: false
|
||||
timeout:
|
||||
description: 'Timeout for the rollout status'
|
||||
required: false
|
||||
default: 10m
|
||||
token:
|
||||
description: 'Github token'
|
||||
default: ${{ github.token }}
|
||||
required: true
|
||||
annotate-resources:
|
||||
description: 'Annotate the resources. If set to false all annotations are skipped completely.'
|
||||
required: false
|
||||
default: true
|
||||
annotate-namespace:
|
||||
description: 'Annotate the target namespace. Ignored when annotate-resources is set to false or no namespace is provided.'
|
||||
required: false
|
||||
default: true
|
||||
private-cluster:
|
||||
description: 'True if cluster is AKS private cluster'
|
||||
required: false
|
||||
default: false
|
||||
resource-group:
|
||||
description: 'Name of resource group - Only required if using private cluster'
|
||||
required: false
|
||||
name:
|
||||
description: 'Name of the private cluster - Only required if using private cluster'
|
||||
required: false
|
||||
skip-tls-verify:
|
||||
description: True if the insecure-skip-tls-verify option should be used. Input should be 'true' or 'false'.
|
||||
default: false
|
||||
resource-type:
|
||||
description: Either Microsoft.ContainerService/managedClusters or Microsoft.ContainerService/fleets'.
|
||||
required: false
|
||||
default: 'Microsoft.ContainerService/managedClusters'
|
||||
|
||||
branding:
|
||||
color: 'green'
|
||||
runs:
|
||||
using: 'node24'
|
||||
main: 'lib/index.js'
|
||||
|
||||
6
babel.config.js
Normal file
6
babel.config.js
Normal file
@ -0,0 +1,6 @@
|
||||
module.exports = {
|
||||
presets: [
|
||||
['@babel/preset-env', {targets: {node: 'current'}}],
|
||||
'@babel/preset-typescript'
|
||||
]
|
||||
}
|
||||
@ -1,10 +0,0 @@
|
||||
module.exports = {
|
||||
clearMocks: true,
|
||||
moduleFileExtensions: ['js', 'ts'],
|
||||
testEnvironment: 'node',
|
||||
testMatch: ['**/*.test.ts'],
|
||||
transform: {
|
||||
'^.+\\.ts$': 'ts-jest'
|
||||
},
|
||||
verbose: true
|
||||
}
|
||||
@ -1,52 +0,0 @@
|
||||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.promote = void 0;
|
||||
const core = require("@actions/core");
|
||||
const deploymentHelper = require("../utilities/strategy-helpers/deployment-helper");
|
||||
const canaryDeploymentHelper = require("../utilities/strategy-helpers/canary-deployment-helper");
|
||||
const SMICanaryDeploymentHelper = require("../utilities/strategy-helpers/smi-canary-deployment-helper");
|
||||
const utils = require("../utilities/manifest-utilities");
|
||||
const TaskInputParameters = require("../input-parameters");
|
||||
const kubectl_object_model_1 = require("../kubectl-object-model");
|
||||
function promote(ignoreSslErrors) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const kubectl = new kubectl_object_model_1.Kubectl(yield utils.getKubectl(), TaskInputParameters.namespace, ignoreSslErrors);
|
||||
if (!canaryDeploymentHelper.isCanaryDeploymentStrategy()) {
|
||||
core.debug('Strategy is not canary deployment. Invalid request.');
|
||||
throw ('InvalidPromotetActionDeploymentStrategy');
|
||||
}
|
||||
let includeServices = false;
|
||||
if (canaryDeploymentHelper.isSMICanaryStrategy()) {
|
||||
includeServices = true;
|
||||
// In case of SMI traffic split strategy when deployment is promoted, first we will redirect traffic to
|
||||
// Canary deployment, then update stable deployment and then redirect traffic to stable deployment
|
||||
core.debug('Redirecting traffic to canary deployment');
|
||||
SMICanaryDeploymentHelper.redirectTrafficToCanaryDeployment(kubectl, TaskInputParameters.manifests);
|
||||
core.debug('Deploying input manifests with SMI canary strategy');
|
||||
yield deploymentHelper.deploy(kubectl, TaskInputParameters.manifests, 'None');
|
||||
core.debug('Redirecting traffic to stable deployment');
|
||||
SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, TaskInputParameters.manifests);
|
||||
}
|
||||
else {
|
||||
core.debug('Deploying input manifests');
|
||||
yield deploymentHelper.deploy(kubectl, TaskInputParameters.manifests, 'None');
|
||||
}
|
||||
core.debug('Deployment strategy selected is Canary. Deleting canary and baseline workloads.');
|
||||
try {
|
||||
canaryDeploymentHelper.deleteCanaryDeployment(kubectl, TaskInputParameters.manifests, includeServices);
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning('Exception occurred while deleting canary and baseline workloads. Exception: ' + ex);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.promote = promote;
|
||||
@ -1,36 +0,0 @@
|
||||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.reject = void 0;
|
||||
const core = require("@actions/core");
|
||||
const canaryDeploymentHelper = require("../utilities/strategy-helpers/canary-deployment-helper");
|
||||
const SMICanaryDeploymentHelper = require("../utilities/strategy-helpers/smi-canary-deployment-helper");
|
||||
const kubectl_object_model_1 = require("../kubectl-object-model");
|
||||
const utils = require("../utilities/manifest-utilities");
|
||||
const TaskInputParameters = require("../input-parameters");
|
||||
function reject(ignoreSslErrors) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const kubectl = new kubectl_object_model_1.Kubectl(yield utils.getKubectl(), TaskInputParameters.namespace, ignoreSslErrors);
|
||||
if (!canaryDeploymentHelper.isCanaryDeploymentStrategy()) {
|
||||
core.debug('Strategy is not canary deployment. Invalid request.');
|
||||
throw ('InvalidRejectActionDeploymentStrategy');
|
||||
}
|
||||
let includeServices = false;
|
||||
if (canaryDeploymentHelper.isSMICanaryStrategy()) {
|
||||
core.debug('Reject deployment with SMI canary strategy');
|
||||
includeServices = true;
|
||||
SMICanaryDeploymentHelper.redirectTrafficToStableDeployment(kubectl, TaskInputParameters.manifests);
|
||||
}
|
||||
core.debug('Deployment strategy selected is Canary. Deleting baseline and canary workloads.');
|
||||
canaryDeploymentHelper.deleteCanaryDeployment(kubectl, TaskInputParameters.manifests, includeServices);
|
||||
});
|
||||
}
|
||||
exports.reject = reject;
|
||||
@ -1,39 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.workflowAnnotations = exports.workloadTypesWithRolloutStatus = exports.workloadTypes = exports.deploymentTypes = exports.ServiceTypes = exports.DiscoveryAndLoadBalancerResource = exports.KubernetesWorkload = void 0;
|
||||
class KubernetesWorkload {
|
||||
}
|
||||
exports.KubernetesWorkload = KubernetesWorkload;
|
||||
KubernetesWorkload.pod = 'Pod';
|
||||
KubernetesWorkload.replicaset = 'Replicaset';
|
||||
KubernetesWorkload.deployment = 'Deployment';
|
||||
KubernetesWorkload.statefulSet = 'StatefulSet';
|
||||
KubernetesWorkload.daemonSet = 'DaemonSet';
|
||||
KubernetesWorkload.job = 'job';
|
||||
KubernetesWorkload.cronjob = 'cronjob';
|
||||
class DiscoveryAndLoadBalancerResource {
|
||||
}
|
||||
exports.DiscoveryAndLoadBalancerResource = DiscoveryAndLoadBalancerResource;
|
||||
DiscoveryAndLoadBalancerResource.service = 'service';
|
||||
DiscoveryAndLoadBalancerResource.ingress = 'ingress';
|
||||
class ServiceTypes {
|
||||
}
|
||||
exports.ServiceTypes = ServiceTypes;
|
||||
ServiceTypes.loadBalancer = 'LoadBalancer';
|
||||
ServiceTypes.nodePort = 'NodePort';
|
||||
ServiceTypes.clusterIP = 'ClusterIP';
|
||||
exports.deploymentTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset'];
|
||||
exports.workloadTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset', 'job', 'cronjob'];
|
||||
exports.workloadTypesWithRolloutStatus = ['deployment', 'daemonset', 'statefulset'];
|
||||
exports.workflowAnnotations = [
|
||||
`run=${process.env['GITHUB_RUN_ID']}`,
|
||||
`repository=${process.env['GITHUB_REPOSITORY']}`,
|
||||
`workflow=${process.env['GITHUB_WORKFLOW']}`,
|
||||
`jobName=${process.env['GITHUB_JOB']}`,
|
||||
`createdBy=${process.env['GITHUB_ACTOR']}`,
|
||||
`runUri=https://github.com/${process.env['GITHUB_REPOSITORY']}/actions/runs/${process.env['GITHUB_RUN_ID']}`,
|
||||
`commit=${process.env['GITHUB_SHA']}`,
|
||||
`branch=${process.env['GITHUB_REF']}`,
|
||||
`deployTimestamp=${Date.now()}`,
|
||||
`provider=GitHub`
|
||||
];
|
||||
@ -1,40 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.forceDeployment = exports.args = exports.baselineAndCanaryReplicas = exports.trafficSplitMethod = exports.deploymentStrategy = exports.canaryPercentage = exports.manifests = exports.imagePullSecrets = exports.containers = exports.namespace = void 0;
|
||||
const core = require("@actions/core");
|
||||
exports.namespace = core.getInput('namespace');
|
||||
exports.containers = core.getInput('images').split('\n');
|
||||
exports.imagePullSecrets = core.getInput('imagepullsecrets').split('\n').filter(secret => secret.trim().length > 0);
|
||||
exports.manifests = core.getInput('manifests').split(/[\n,;]+/).filter(manifest => manifest.trim().length > 0);
|
||||
exports.canaryPercentage = core.getInput('percentage');
|
||||
exports.deploymentStrategy = core.getInput('strategy');
|
||||
exports.trafficSplitMethod = core.getInput('traffic-split-method');
|
||||
exports.baselineAndCanaryReplicas = core.getInput('baseline-and-canary-replicas');
|
||||
exports.args = core.getInput('arguments');
|
||||
exports.forceDeployment = core.getInput('force').toLowerCase() == 'true';
|
||||
if (!exports.namespace) {
|
||||
core.debug('Namespace was not supplied; using "default" namespace instead.');
|
||||
exports.namespace = 'default';
|
||||
}
|
||||
try {
|
||||
const pe = parseInt(exports.canaryPercentage);
|
||||
if (pe < 0 || pe > 100) {
|
||||
core.setFailed('A valid percentage value is between 0 and 100');
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.setFailed("Enter a valid 'percentage' integer value ");
|
||||
process.exit(1);
|
||||
}
|
||||
try {
|
||||
const pe = parseInt(exports.baselineAndCanaryReplicas);
|
||||
if (pe < 0 || pe > 100) {
|
||||
core.setFailed('A valid baseline-and-canary-replicas value is between 0 and 100');
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.setFailed("Enter a valid 'baseline-and-canary-replicas' integer value");
|
||||
process.exit(1);
|
||||
}
|
||||
@ -1,114 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Kubectl = void 0;
|
||||
const tool_runner_1 = require("./utilities/tool-runner");
|
||||
class Kubectl {
|
||||
constructor(kubectlPath, namespace, ignoreSSLErrors) {
|
||||
this.kubectlPath = kubectlPath;
|
||||
this.ignoreSSLErrors = !!ignoreSSLErrors;
|
||||
if (!!namespace) {
|
||||
this.namespace = namespace;
|
||||
}
|
||||
else {
|
||||
this.namespace = 'default';
|
||||
}
|
||||
}
|
||||
apply(configurationPaths, force) {
|
||||
let applyArgs = ['apply', '-f', this.createInlineArray(configurationPaths)];
|
||||
if (!!force) {
|
||||
console.log("force flag is on, deployment will continue even if previous deployment already exists");
|
||||
applyArgs.push('--force');
|
||||
}
|
||||
return this.execute(applyArgs);
|
||||
}
|
||||
describe(resourceType, resourceName, silent) {
|
||||
return this.execute(['describe', resourceType, resourceName], silent);
|
||||
}
|
||||
getNewReplicaSet(deployment) {
|
||||
let newReplicaSet = '';
|
||||
const result = this.describe('deployment', deployment, true);
|
||||
if (result && result.stdout) {
|
||||
const stdout = result.stdout.split('\n');
|
||||
stdout.forEach((line) => {
|
||||
if (!!line && line.toLowerCase().indexOf('newreplicaset') > -1) {
|
||||
newReplicaSet = line.substr(14).trim().split(' ')[0];
|
||||
}
|
||||
});
|
||||
}
|
||||
return newReplicaSet;
|
||||
}
|
||||
annotate(resourceType, resourceName, annotations, overwrite) {
|
||||
let args = ['annotate', resourceType, resourceName];
|
||||
args = args.concat(annotations);
|
||||
if (!!overwrite) {
|
||||
args.push(`--overwrite`);
|
||||
}
|
||||
return this.execute(args);
|
||||
}
|
||||
annotateFiles(files, annotations, overwrite) {
|
||||
let args = ['annotate'];
|
||||
args = args.concat(['-f', this.createInlineArray(files)]);
|
||||
args = args.concat(annotations);
|
||||
if (!!overwrite) {
|
||||
args.push(`--overwrite`);
|
||||
}
|
||||
return this.execute(args);
|
||||
}
|
||||
getAllPods() {
|
||||
return this.execute(['get', 'pods', '-o', 'json'], true);
|
||||
}
|
||||
getClusterInfo() {
|
||||
return this.execute(['cluster-info'], true);
|
||||
}
|
||||
checkRolloutStatus(resourceType, name) {
|
||||
return this.execute(['rollout', 'status', resourceType + '/' + name]);
|
||||
}
|
||||
getResource(resourceType, name) {
|
||||
return this.execute(['get', resourceType + '/' + name, '-o', 'json']);
|
||||
}
|
||||
getResources(applyOutput, filterResourceTypes) {
|
||||
const outputLines = applyOutput.split('\n');
|
||||
const results = [];
|
||||
outputLines.forEach(line => {
|
||||
const words = line.split(' ');
|
||||
if (words.length > 2) {
|
||||
const resourceType = words[0].trim();
|
||||
const resourceName = JSON.parse(words[1].trim());
|
||||
if (filterResourceTypes.filter(type => !!type && resourceType.toLowerCase().startsWith(type.toLowerCase())).length > 0) {
|
||||
results.push({
|
||||
type: resourceType,
|
||||
name: resourceName
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
return results;
|
||||
}
|
||||
executeCommand(customCommand, args) {
|
||||
if (!customCommand)
|
||||
throw new Error('NullCommandForKubectl');
|
||||
return args ? this.execute([customCommand, args]) : this.execute([customCommand]);
|
||||
}
|
||||
delete(args) {
|
||||
if (typeof args === 'string')
|
||||
return this.execute(['delete', args]);
|
||||
else
|
||||
return this.execute(['delete'].concat(args));
|
||||
}
|
||||
execute(args, silent) {
|
||||
if (this.ignoreSSLErrors) {
|
||||
args.push('--insecure-skip-tls-verify');
|
||||
}
|
||||
args = args.concat(['--namespace', this.namespace]);
|
||||
const command = new tool_runner_1.ToolRunner(this.kubectlPath);
|
||||
command.arg(args);
|
||||
return command.execSync({ silent: !!silent });
|
||||
}
|
||||
createInlineArray(str) {
|
||||
if (typeof str === 'string') {
|
||||
return str;
|
||||
}
|
||||
return str.join(',');
|
||||
}
|
||||
}
|
||||
exports.Kubectl = Kubectl;
|
||||
@ -1,80 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const os = require("os");
|
||||
const path = require("path");
|
||||
const util = require("util");
|
||||
const fs = require("fs");
|
||||
const toolCache = require("@actions/tool-cache");
|
||||
const core = require("@actions/core");
|
||||
const kubectlToolName = 'kubectl';
|
||||
const stableKubectlVersion = 'v1.15.0';
|
||||
const stableVersionUrl = 'https://storage.googleapis.com/kubernetes-release/release/stable.txt';
|
||||
function getExecutableExtension() {
|
||||
if (os.type().match(/^Win/)) {
|
||||
return '.exe';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
function getkubectlDownloadURL(version) {
|
||||
switch (os.type()) {
|
||||
case 'Linux':
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/amd64/kubectl', version);
|
||||
case 'Darwin':
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/darwin/amd64/kubectl', version);
|
||||
case 'Windows_NT':
|
||||
default:
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/windows/amd64/kubectl.exe', version);
|
||||
}
|
||||
}
|
||||
function getStableKubectlVersion() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
return toolCache.downloadTool(stableVersionUrl).then((downloadPath) => {
|
||||
let version = fs.readFileSync(downloadPath, 'utf8').toString().trim();
|
||||
if (!version) {
|
||||
version = stableKubectlVersion;
|
||||
}
|
||||
return version;
|
||||
}, (error) => {
|
||||
core.debug(error);
|
||||
core.warning('GetStableVersionFailed');
|
||||
return stableKubectlVersion;
|
||||
});
|
||||
});
|
||||
}
|
||||
exports.getStableKubectlVersion = getStableKubectlVersion;
|
||||
function downloadKubectl(version) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let cachedToolpath = toolCache.find(kubectlToolName, version);
|
||||
let kubectlDownloadPath = '';
|
||||
if (!cachedToolpath) {
|
||||
try {
|
||||
kubectlDownloadPath = yield toolCache.downloadTool(getkubectlDownloadURL(version));
|
||||
}
|
||||
catch (exception) {
|
||||
throw new Error('DownloadKubectlFailed');
|
||||
}
|
||||
cachedToolpath = yield toolCache.cacheFile(kubectlDownloadPath, kubectlToolName + getExecutableExtension(), kubectlToolName, version);
|
||||
}
|
||||
const kubectlPath = path.join(cachedToolpath, kubectlToolName + getExecutableExtension());
|
||||
fs.chmodSync(kubectlPath, '777');
|
||||
return kubectlPath;
|
||||
});
|
||||
}
|
||||
exports.downloadKubectl = downloadKubectl;
|
||||
function getTrafficSplitAPIVersion(kubectl) {
|
||||
const result = kubectl.executeCommand('api-versions');
|
||||
const trafficSplitAPIVersion = result.stdout.split('\n').find(version => version.startsWith('split.smi-spec.io'));
|
||||
if (trafficSplitAPIVersion == null || typeof trafficSplitAPIVersion == 'undefined') {
|
||||
throw new Error('UnableToCreateTrafficSplitManifestFile');
|
||||
}
|
||||
return trafficSplitAPIVersion;
|
||||
}
|
||||
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
|
||||
96
lib/run.js
96
lib/run.js
@ -1,96 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.run = void 0;
|
||||
const core = require("@actions/core");
|
||||
const io = require("@actions/io");
|
||||
const path = require("path");
|
||||
const toolCache = require("@actions/tool-cache");
|
||||
const kubectl_util_1 = require("./utilities/kubectl-util");
|
||||
const utility_1 = require("./utilities/utility");
|
||||
const kubectl_object_model_1 = require("./kubectl-object-model");
|
||||
const deployment_helper_1 = require("./utilities/strategy-helpers/deployment-helper");
|
||||
const promote_1 = require("./actions/promote");
|
||||
const reject_1 = require("./actions/reject");
|
||||
let kubectlPath = "";
|
||||
function setKubectlPath() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
if (core.getInput('kubectl-version')) {
|
||||
const version = core.getInput('kubectl-version');
|
||||
kubectlPath = toolCache.find('kubectl', version);
|
||||
if (!kubectlPath) {
|
||||
kubectlPath = yield installKubectl(version);
|
||||
}
|
||||
}
|
||||
else {
|
||||
kubectlPath = yield io.which('kubectl', false);
|
||||
if (!kubectlPath) {
|
||||
const allVersions = toolCache.findAllVersions('kubectl');
|
||||
kubectlPath = allVersions.length > 0 ? toolCache.find('kubectl', allVersions[0]) : '';
|
||||
if (!kubectlPath) {
|
||||
throw new Error('Kubectl is not installed, either add install-kubectl action or provide "kubectl-version" input to download kubectl');
|
||||
}
|
||||
kubectlPath = path.join(kubectlPath, `kubectl${utility_1.getExecutableExtension()}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
function installKubectl(version) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
if (utility_1.isEqual(version, 'latest')) {
|
||||
version = yield kubectl_util_1.getStableKubectlVersion();
|
||||
}
|
||||
return yield kubectl_util_1.downloadKubectl(version);
|
||||
});
|
||||
}
|
||||
function checkClusterContext() {
|
||||
if (!process.env["KUBECONFIG"]) {
|
||||
core.warning('KUBECONFIG env is not explicitly set. Ensure cluster context is set by using k8s-set-context / aks-set-context action.');
|
||||
}
|
||||
}
|
||||
function run() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
checkClusterContext();
|
||||
yield setKubectlPath();
|
||||
let manifestsInput = core.getInput('manifests');
|
||||
if (!manifestsInput) {
|
||||
core.setFailed('No manifests supplied to deploy');
|
||||
return;
|
||||
}
|
||||
let namespace = core.getInput('namespace');
|
||||
if (!namespace) {
|
||||
namespace = 'default';
|
||||
}
|
||||
let action = core.getInput('action');
|
||||
let manifests = manifestsInput.split(/[\n,;]+/).filter(manifest => manifest.trim().length > 0);
|
||||
if (manifests.length > 0) {
|
||||
manifests = manifests.map(manifest => {
|
||||
return manifest.trim();
|
||||
});
|
||||
}
|
||||
if (action === 'deploy') {
|
||||
let strategy = core.getInput('strategy');
|
||||
console.log("strategy: ", strategy);
|
||||
yield deployment_helper_1.deploy(new kubectl_object_model_1.Kubectl(kubectlPath, namespace), manifests, strategy);
|
||||
}
|
||||
else if (action === 'promote') {
|
||||
yield promote_1.promote(true);
|
||||
}
|
||||
else if (action === 'reject') {
|
||||
yield reject_1.reject(true);
|
||||
}
|
||||
else {
|
||||
core.setFailed('Not a valid action. The allowed actions are deploy, promote, reject');
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.run = run;
|
||||
run().catch(core.setFailed);
|
||||
@ -1,78 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.writeManifestToFile = exports.writeObjectsToFile = exports.assertFileExists = exports.ensureDirExists = exports.getNewUserDirPath = exports.getTempDirectory = void 0;
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const core = require("@actions/core");
|
||||
const os = require("os");
|
||||
function getTempDirectory() {
|
||||
return process.env['runner.tempDirectory'] || os.tmpdir();
|
||||
}
|
||||
exports.getTempDirectory = getTempDirectory;
|
||||
function getNewUserDirPath() {
|
||||
let userDir = path.join(getTempDirectory(), 'kubectlTask');
|
||||
ensureDirExists(userDir);
|
||||
userDir = path.join(userDir, getCurrentTime().toString());
|
||||
ensureDirExists(userDir);
|
||||
return userDir;
|
||||
}
|
||||
exports.getNewUserDirPath = getNewUserDirPath;
|
||||
function ensureDirExists(dirPath) {
|
||||
if (!fs.existsSync(dirPath)) {
|
||||
fs.mkdirSync(dirPath);
|
||||
}
|
||||
}
|
||||
exports.ensureDirExists = ensureDirExists;
|
||||
function assertFileExists(path) {
|
||||
if (!fs.existsSync(path)) {
|
||||
core.error(`FileNotFoundException : ${path}`);
|
||||
throw new Error(`FileNotFoundException: ${path}`);
|
||||
}
|
||||
}
|
||||
exports.assertFileExists = assertFileExists;
|
||||
function writeObjectsToFile(inputObjects) {
|
||||
const newFilePaths = [];
|
||||
if (!!inputObjects) {
|
||||
inputObjects.forEach((inputObject) => {
|
||||
try {
|
||||
const inputObjectString = JSON.stringify(inputObject);
|
||||
if (!!inputObject.kind && !!inputObject.metadata && !!inputObject.metadata.name) {
|
||||
const fileName = getManifestFileName(inputObject.kind, inputObject.metadata.name);
|
||||
fs.writeFileSync(path.join(fileName), inputObjectString);
|
||||
newFilePaths.push(fileName);
|
||||
}
|
||||
else {
|
||||
core.debug('Input object is not proper K8s resource object. Object: ' + inputObjectString);
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug('Exception occurred while writing object to file : ' + inputObject + ' . Exception: ' + ex);
|
||||
}
|
||||
});
|
||||
}
|
||||
return newFilePaths;
|
||||
}
|
||||
exports.writeObjectsToFile = writeObjectsToFile;
|
||||
function writeManifestToFile(inputObjectString, kind, name) {
|
||||
if (inputObjectString) {
|
||||
try {
|
||||
const fileName = getManifestFileName(kind, name);
|
||||
fs.writeFileSync(path.join(fileName), inputObjectString);
|
||||
return fileName;
|
||||
}
|
||||
catch (ex) {
|
||||
core.debug('Exception occurred while writing object to file : ' + inputObjectString + ' . Exception: ' + ex);
|
||||
}
|
||||
}
|
||||
return '';
|
||||
}
|
||||
exports.writeManifestToFile = writeManifestToFile;
|
||||
function getManifestFileName(kind, name) {
|
||||
const filePath = kind + '_' + name + '_' + getCurrentTime().toString();
|
||||
const tempDirectory = getTempDirectory();
|
||||
const fileName = path.join(tempDirectory, path.basename(filePath));
|
||||
return fileName;
|
||||
}
|
||||
function getCurrentTime() {
|
||||
return new Date().getTime();
|
||||
}
|
||||
@ -1,84 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getTrafficSplitAPIVersion = exports.downloadKubectl = exports.getStableKubectlVersion = exports.getkubectlDownloadURL = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const os = require("os");
|
||||
const path = require("path");
|
||||
const toolCache = require("@actions/tool-cache");
|
||||
const util = require("util");
|
||||
const kubectlToolName = 'kubectl';
|
||||
const stableKubectlVersion = 'v1.15.0';
|
||||
const stableVersionUrl = 'https://storage.googleapis.com/kubernetes-release/release/stable.txt';
|
||||
const trafficSplitAPIVersionPrefix = 'split.smi-spec.io';
|
||||
function getExecutableExtension() {
|
||||
if (os.type().match(/^Win/)) {
|
||||
return '.exe';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
function getkubectlDownloadURL(version) {
|
||||
switch (os.type()) {
|
||||
case 'Linux':
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/amd64/kubectl', version);
|
||||
case 'Darwin':
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/darwin/amd64/kubectl', version);
|
||||
case 'Windows_NT':
|
||||
default:
|
||||
return util.format('https://storage.googleapis.com/kubernetes-release/release/%s/bin/windows/amd64/kubectl.exe', version);
|
||||
}
|
||||
}
|
||||
exports.getkubectlDownloadURL = getkubectlDownloadURL;
|
||||
function getStableKubectlVersion() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
return toolCache.downloadTool(stableVersionUrl).then((downloadPath) => {
|
||||
let version = fs.readFileSync(downloadPath, 'utf8').toString().trim();
|
||||
if (!version) {
|
||||
version = stableKubectlVersion;
|
||||
}
|
||||
return version;
|
||||
}, (error) => {
|
||||
core.debug(error);
|
||||
core.warning('GetStableVersionFailed');
|
||||
return stableKubectlVersion;
|
||||
});
|
||||
});
|
||||
}
|
||||
exports.getStableKubectlVersion = getStableKubectlVersion;
|
||||
function downloadKubectl(version) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let cachedToolpath = toolCache.find(kubectlToolName, version);
|
||||
let kubectlDownloadPath = '';
|
||||
if (!cachedToolpath) {
|
||||
try {
|
||||
kubectlDownloadPath = yield toolCache.downloadTool(getkubectlDownloadURL(version));
|
||||
}
|
||||
catch (exception) {
|
||||
throw new Error('DownloadKubectlFailed');
|
||||
}
|
||||
cachedToolpath = yield toolCache.cacheFile(kubectlDownloadPath, kubectlToolName + getExecutableExtension(), kubectlToolName, version);
|
||||
}
|
||||
const kubectlPath = path.join(cachedToolpath, kubectlToolName + getExecutableExtension());
|
||||
fs.chmodSync(kubectlPath, '777');
|
||||
return kubectlPath;
|
||||
});
|
||||
}
|
||||
exports.downloadKubectl = downloadKubectl;
|
||||
function getTrafficSplitAPIVersion(kubectl) {
|
||||
const result = kubectl.executeCommand('api-versions');
|
||||
const trafficSplitAPIVersion = result.stdout.split('\n').find(version => version.startsWith(trafficSplitAPIVersionPrefix));
|
||||
if (!trafficSplitAPIVersion) {
|
||||
throw new Error('UnableToCreateTrafficSplitManifestFile');
|
||||
}
|
||||
return trafficSplitAPIVersion;
|
||||
}
|
||||
exports.getTrafficSplitAPIVersion = getTrafficSplitAPIVersion;
|
||||
@ -1,158 +0,0 @@
|
||||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.checkPodStatus = exports.checkManifestStability = void 0;
|
||||
const core = require("@actions/core");
|
||||
const utils = require("./utility");
|
||||
const KubernetesConstants = require("../constants");
|
||||
function checkManifestStability(kubectl, resources) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let rolloutStatusHasErrors = false;
|
||||
const numberOfResources = resources.length;
|
||||
for (let i = 0; i < numberOfResources; i++) {
|
||||
const resource = resources[i];
|
||||
if (KubernetesConstants.workloadTypesWithRolloutStatus.indexOf(resource.type.toLowerCase()) >= 0) {
|
||||
try {
|
||||
var result = kubectl.checkRolloutStatus(resource.type, resource.name);
|
||||
utils.checkForErrors([result]);
|
||||
}
|
||||
catch (ex) {
|
||||
core.error(ex);
|
||||
kubectl.describe(resource.type, resource.name);
|
||||
rolloutStatusHasErrors = true;
|
||||
}
|
||||
}
|
||||
if (utils.isEqual(resource.type, KubernetesConstants.KubernetesWorkload.pod, true)) {
|
||||
try {
|
||||
yield checkPodStatus(kubectl, resource.name);
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning(`CouldNotDeterminePodStatus ${JSON.stringify(ex)}`);
|
||||
kubectl.describe(resource.type, resource.name);
|
||||
}
|
||||
}
|
||||
if (utils.isEqual(resource.type, KubernetesConstants.DiscoveryAndLoadBalancerResource.service, true)) {
|
||||
try {
|
||||
const service = getService(kubectl, resource.name);
|
||||
const spec = service.spec;
|
||||
const status = service.status;
|
||||
if (utils.isEqual(spec.type, KubernetesConstants.ServiceTypes.loadBalancer, true)) {
|
||||
if (!isLoadBalancerIPAssigned(status)) {
|
||||
yield waitForServiceExternalIPAssignment(kubectl, resource.name);
|
||||
}
|
||||
else {
|
||||
console.log('ServiceExternalIP', resource.name, status.loadBalancer.ingress[0].ip);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (ex) {
|
||||
core.warning(`CouldNotDetermineServiceStatus of: ${resource.name} Error: ${JSON.stringify(ex)}`);
|
||||
kubectl.describe(resource.type, resource.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (rolloutStatusHasErrors) {
|
||||
throw new Error('RolloutStatusTimedout');
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.checkManifestStability = checkManifestStability;
|
||||
function checkPodStatus(kubectl, podName) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const sleepTimeout = 10 * 1000; // 10 seconds
|
||||
const iterations = 60; // 60 * 10 seconds timeout = 10 minutes max timeout
|
||||
let podStatus;
|
||||
let kubectlDescribeNeeded = false;
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
yield utils.sleep(sleepTimeout);
|
||||
core.debug(`Polling for pod status: ${podName}`);
|
||||
podStatus = getPodStatus(kubectl, podName);
|
||||
if (podStatus.phase && podStatus.phase !== 'Pending' && podStatus.phase !== 'Unknown') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
podStatus = getPodStatus(kubectl, podName);
|
||||
switch (podStatus.phase) {
|
||||
case 'Succeeded':
|
||||
case 'Running':
|
||||
if (isPodReady(podStatus)) {
|
||||
console.log(`pod/${podName} is successfully rolled out`);
|
||||
}
|
||||
else {
|
||||
kubectlDescribeNeeded = true;
|
||||
}
|
||||
break;
|
||||
case 'Pending':
|
||||
if (!isPodReady(podStatus)) {
|
||||
core.warning(`pod/${podName} rollout status check timedout`);
|
||||
kubectlDescribeNeeded = true;
|
||||
}
|
||||
break;
|
||||
case 'Failed':
|
||||
core.error(`pod/${podName} rollout failed`);
|
||||
kubectlDescribeNeeded = true;
|
||||
break;
|
||||
default:
|
||||
core.warning(`pod/${podName} rollout status: ${podStatus.phase}`);
|
||||
}
|
||||
if (kubectlDescribeNeeded) {
|
||||
kubectl.describe('pod', podName);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.checkPodStatus = checkPodStatus;
|
||||
function getPodStatus(kubectl, podName) {
|
||||
const podResult = kubectl.getResource('pod', podName);
|
||||
utils.checkForErrors([podResult]);
|
||||
const podStatus = JSON.parse(podResult.stdout).status;
|
||||
core.debug(`Pod Status: ${JSON.stringify(podStatus)}`);
|
||||
return podStatus;
|
||||
}
|
||||
function isPodReady(podStatus) {
|
||||
let allContainersAreReady = true;
|
||||
podStatus.containerStatuses.forEach(container => {
|
||||
if (container.ready === false) {
|
||||
console.log(`'${container.name}' status: ${JSON.stringify(container.state)}`);
|
||||
allContainersAreReady = false;
|
||||
}
|
||||
});
|
||||
if (!allContainersAreReady) {
|
||||
core.warning('AllContainersNotInReadyState');
|
||||
}
|
||||
return allContainersAreReady;
|
||||
}
|
||||
function getService(kubectl, serviceName) {
|
||||
const serviceResult = kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.service, serviceName);
|
||||
utils.checkForErrors([serviceResult]);
|
||||
return JSON.parse(serviceResult.stdout);
|
||||
}
|
||||
function waitForServiceExternalIPAssignment(kubectl, serviceName) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const sleepTimeout = 10 * 1000; // 10 seconds
|
||||
const iterations = 18; // 18 * 10 seconds timeout = 3 minutes max timeout
|
||||
for (let i = 0; i < iterations; i++) {
|
||||
console.log(`waitForServiceIpAssignment : ${serviceName}`);
|
||||
yield utils.sleep(sleepTimeout);
|
||||
let status = (getService(kubectl, serviceName)).status;
|
||||
if (isLoadBalancerIPAssigned(status)) {
|
||||
console.log('ServiceExternalIP', serviceName, status.loadBalancer.ingress[0].ip);
|
||||
return;
|
||||
}
|
||||
}
|
||||
core.warning(`waitForServiceIpAssignmentTimedOut ${serviceName}`);
|
||||
});
|
||||
}
|
||||
function isLoadBalancerIPAssigned(status) {
|
||||
if (status && status.loadBalancer && status.loadBalancer.ingress && status.loadBalancer.ingress.length > 0) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -1,236 +0,0 @@
|
||||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isWorkloadEntity = exports.updateImagePullSecrets = exports.updateContainerImagesInManifestFiles = exports.substituteImageNameInSpecFile = exports.getDeleteCmdArgs = exports.createKubectlArgs = exports.getKubectl = exports.getManifestFiles = void 0;
|
||||
const core = require("@actions/core");
|
||||
const kubectlutility = require("./kubectl-util");
|
||||
const io = require("@actions/io");
|
||||
const utility_1 = require("./utility");
|
||||
function getManifestFiles(manifestFilePaths) {
|
||||
if (!manifestFilePaths) {
|
||||
core.debug('file input is not present');
|
||||
return null;
|
||||
}
|
||||
return manifestFilePaths;
|
||||
}
|
||||
exports.getManifestFiles = getManifestFiles;
|
||||
function getKubectl() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
try {
|
||||
return Promise.resolve(io.which('kubectl', true));
|
||||
}
|
||||
catch (ex) {
|
||||
return kubectlutility.downloadKubectl(yield kubectlutility.getStableKubectlVersion());
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.getKubectl = getKubectl;
|
||||
function createKubectlArgs(kinds, names) {
|
||||
let args = '';
|
||||
if (!!kinds && kinds.size > 0) {
|
||||
args = args + createInlineArray(Array.from(kinds.values()));
|
||||
}
|
||||
if (!!names && names.size > 0) {
|
||||
args = args + ' ' + Array.from(names.values()).join(' ');
|
||||
}
|
||||
return args;
|
||||
}
|
||||
exports.createKubectlArgs = createKubectlArgs;
|
||||
function getDeleteCmdArgs(argsPrefix, inputArgs) {
|
||||
let args = '';
|
||||
if (!!argsPrefix && argsPrefix.length > 0) {
|
||||
args = argsPrefix;
|
||||
}
|
||||
if (!!inputArgs && inputArgs.length > 0) {
|
||||
if (args.length > 0) {
|
||||
args = args + ' ';
|
||||
}
|
||||
args = args + inputArgs;
|
||||
}
|
||||
return args;
|
||||
}
|
||||
exports.getDeleteCmdArgs = getDeleteCmdArgs;
|
||||
/*
|
||||
For example,
|
||||
currentString: `image: "example/example-image"`
|
||||
imageName: `example/example-image`
|
||||
imageNameWithNewTag: `example/example-image:identifiertag`
|
||||
|
||||
This substituteImageNameInSpecFile function would return
|
||||
return Value: `image: "example/example-image:identifiertag"`
|
||||
*/
|
||||
function substituteImageNameInSpecFile(currentString, imageName, imageNameWithNewTag) {
|
||||
if (currentString.indexOf(imageName) < 0) {
|
||||
core.debug(`No occurence of replacement token: ${imageName} found`);
|
||||
return currentString;
|
||||
}
|
||||
return currentString.split('\n').reduce((acc, line) => {
|
||||
const imageKeyword = line.match(/^ *image:/);
|
||||
if (imageKeyword) {
|
||||
let [currentImageName, currentImageTag] = line
|
||||
.substring(imageKeyword[0].length) // consume the line from keyword onwards
|
||||
.trim()
|
||||
.replace(/[',"]/g, '') // replace allowed quotes with nothing
|
||||
.split(':');
|
||||
if (!currentImageTag && currentImageName.indexOf(' ') > 0) {
|
||||
currentImageName = currentImageName.split(' ')[0]; // Stripping off comments
|
||||
}
|
||||
if (currentImageName === imageName) {
|
||||
return acc + `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
|
||||
}
|
||||
}
|
||||
return acc + line + '\n';
|
||||
}, '');
|
||||
}
|
||||
exports.substituteImageNameInSpecFile = substituteImageNameInSpecFile;
|
||||
function createInlineArray(str) {
|
||||
if (typeof str === 'string') {
|
||||
return str;
|
||||
}
|
||||
return str.join(',');
|
||||
}
|
||||
function getImagePullSecrets(inputObject) {
|
||||
if (!inputObject || !inputObject.spec) {
|
||||
return;
|
||||
}
|
||||
if (utility_1.isEqual(inputObject.kind, 'pod')
|
||||
&& inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.imagePullSecrets) {
|
||||
return inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
else if (utility_1.isEqual(inputObject.kind, 'cronjob')
|
||||
&& inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.jobTemplate
|
||||
&& inputObject.spec.jobTemplate.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets) {
|
||||
return inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
else if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.template
|
||||
&& inputObject.spec.template.spec
|
||||
&& inputObject.spec.template.spec.imagePullSecrets) {
|
||||
return inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
function setImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
if (utility_1.isEqual(inputObject.kind, 'pod')) {
|
||||
if (inputObject
|
||||
&& inputObject.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (utility_1.isEqual(inputObject.kind, 'cronjob')) {
|
||||
if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.jobTemplate
|
||||
&& inputObject.spec.jobTemplate.spec
|
||||
&& inputObject.spec.jobTemplate.spec.template
|
||||
&& inputObject.spec.jobTemplate.spec.template.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (!!inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
if (inputObject
|
||||
&& inputObject.spec
|
||||
&& inputObject.spec.template
|
||||
&& inputObject.spec.template.spec) {
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
|
||||
}
|
||||
else {
|
||||
delete inputObject.spec.template.spec.imagePullSecrets;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
function substituteImageNameInSpecContent(currentString, imageName, imageNameWithNewTag) {
|
||||
if (currentString.indexOf(imageName) < 0) {
|
||||
core.debug(`No occurence of replacement token: ${imageName} found`);
|
||||
return currentString;
|
||||
}
|
||||
return currentString.split('\n').reduce((acc, line) => {
|
||||
const imageKeyword = line.match(/^ *image:/);
|
||||
if (imageKeyword) {
|
||||
const [currentImageName, currentImageTag] = line
|
||||
.substring(imageKeyword[0].length) // consume the line from keyword onwards
|
||||
.trim()
|
||||
.replace(/[',"]/g, '') // replace allowed quotes with nothing
|
||||
.split(':');
|
||||
if (currentImageName === imageName) {
|
||||
return acc + `${imageKeyword[0]} ${imageNameWithNewTag}\n`;
|
||||
}
|
||||
}
|
||||
return acc + line + '\n';
|
||||
}, '');
|
||||
}
|
||||
function updateContainerImagesInManifestFiles(contents, containers) {
|
||||
if (!!containers && containers.length > 0) {
|
||||
containers.forEach((container) => {
|
||||
let imageName = container.split(':')[0];
|
||||
if (imageName.indexOf('@') > 0) {
|
||||
imageName = imageName.split('@')[0];
|
||||
}
|
||||
if (contents.indexOf(imageName) > 0) {
|
||||
contents = substituteImageNameInSpecContent(contents, imageName, container);
|
||||
}
|
||||
});
|
||||
}
|
||||
return contents;
|
||||
}
|
||||
exports.updateContainerImagesInManifestFiles = updateContainerImagesInManifestFiles;
|
||||
function updateImagePullSecrets(inputObject, newImagePullSecrets) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
let newImagePullSecretsObjects;
|
||||
if (newImagePullSecrets.length > 0) {
|
||||
newImagePullSecretsObjects = Array.from(newImagePullSecrets, x => { return !!x ? { 'name': x } : null; });
|
||||
}
|
||||
else {
|
||||
newImagePullSecretsObjects = [];
|
||||
}
|
||||
let existingImagePullSecretObjects = getImagePullSecrets(inputObject);
|
||||
if (!existingImagePullSecretObjects) {
|
||||
existingImagePullSecretObjects = new Array();
|
||||
}
|
||||
existingImagePullSecretObjects = existingImagePullSecretObjects.concat(newImagePullSecretsObjects);
|
||||
setImagePullSecrets(inputObject, existingImagePullSecretObjects);
|
||||
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
const workloadTypes = ['deployment', 'replicaset', 'daemonset', 'pod', 'statefulset', 'job', 'cronjob'];
|
||||
function isWorkloadEntity(kind) {
|
||||
if (!kind) {
|
||||
core.debug('ResourceKindNotDefined');
|
||||
return false;
|
||||
}
|
||||
return workloadTypes.some((type) => {
|
||||
return utility_1.isEqual(type, kind);
|
||||
});
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
@ -1,324 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getResources = exports.updateSelectorLabels = exports.updateSpecLabels = exports.updateImageDetails = exports.updateImagePullSecrets = exports.updateObjectAnnotations = exports.updateObjectLabels = exports.getReplicaCount = exports.isServiceEntity = exports.isWorkloadEntity = exports.isDeploymentEntity = void 0;
|
||||
const fs = require("fs");
|
||||
const core = require("@actions/core");
|
||||
const yaml = require("js-yaml");
|
||||
const constants_1 = require("../constants");
|
||||
const string_comparison_1 = require("./string-comparison");
|
||||
function isDeploymentEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return constants_1.deploymentTypes.some((type) => {
|
||||
return string_comparison_1.isEqual(type, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
});
|
||||
}
|
||||
exports.isDeploymentEntity = isDeploymentEntity;
|
||||
function isWorkloadEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return constants_1.workloadTypes.some((type) => {
|
||||
return string_comparison_1.isEqual(type, kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
});
|
||||
}
|
||||
exports.isWorkloadEntity = isWorkloadEntity;
|
||||
function isServiceEntity(kind) {
|
||||
if (!kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
return string_comparison_1.isEqual("Service", kind, string_comparison_1.StringComparer.OrdinalIgnoreCase);
|
||||
}
|
||||
exports.isServiceEntity = isServiceEntity;
|
||||
function getReplicaCount(inputObject) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.kind) {
|
||||
throw ('ResourceKindNotDefined');
|
||||
}
|
||||
const kind = inputObject.kind;
|
||||
if (!string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase) && !string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.daemonSet, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
|
||||
return inputObject.spec.replicas;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
exports.getReplicaCount = getReplicaCount;
|
||||
function updateObjectLabels(inputObject, newLabels, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw ('NullInputObjectMetadata');
|
||||
}
|
||||
if (!newLabels) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.labels = newLabels;
|
||||
}
|
||||
else {
|
||||
let existingLabels = inputObject.metadata.labels;
|
||||
if (!existingLabels) {
|
||||
existingLabels = new Map();
|
||||
}
|
||||
Object.keys(newLabels).forEach(function (key) {
|
||||
existingLabels[key] = newLabels[key];
|
||||
});
|
||||
inputObject.metadata.labels = existingLabels;
|
||||
}
|
||||
}
|
||||
exports.updateObjectLabels = updateObjectLabels;
|
||||
function updateObjectAnnotations(inputObject, newAnnotations, override) {
|
||||
if (!inputObject) {
|
||||
throw ('NullInputObject');
|
||||
}
|
||||
if (!inputObject.metadata) {
|
||||
throw ('NullInputObjectMetadata');
|
||||
}
|
||||
if (!newAnnotations) {
|
||||
return;
|
||||
}
|
||||
if (override) {
|
||||
inputObject.metadata.annotations = newAnnotations;
|
||||
}
|
||||
else {
|
||||
let existingAnnotations = inputObject.metadata.annotations;
|
||||
if (!existingAnnotations) {
|
||||
existingAnnotations = new Map();
|
||||
}
|
||||
Object.keys(newAnnotations).forEach(function (key) {
|
||||
existingAnnotations[key] = newAnnotations[key];
|
||||
});
|
||||
inputObject.metadata.annotations = existingAnnotations;
|
||||
}
|
||||
}
|
||||
exports.updateObjectAnnotations = updateObjectAnnotations;
|
||||
function updateImagePullSecrets(inputObject, newImagePullSecrets, override) {
|
||||
if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
|
||||
return;
|
||||
}
|
||||
const newImagePullSecretsObjects = Array.from(newImagePullSecrets, x => { return { 'name': x }; });
|
||||
let existingImagePullSecretObjects = getImagePullSecrets(inputObject);
|
||||
if (override) {
|
||||
existingImagePullSecretObjects = newImagePullSecretsObjects;
|
||||
}
|
||||
else {
|
||||
if (!existingImagePullSecretObjects) {
|
||||
existingImagePullSecretObjects = new Array();
|
||||
}
|
||||
existingImagePullSecretObjects = existingImagePullSecretObjects.concat(newImagePullSecretsObjects);
|
||||
}
|
||||
setImagePullSecrets(inputObject, existingImagePullSecretObjects);
|
||||
}
|
||||
exports.updateImagePullSecrets = updateImagePullSecrets;
|
||||
function updateImageDetails(inputObject, containers) {
|
||||
if (!inputObject || !inputObject.spec || !containers) {
|
||||
return;
|
||||
}
|
||||
if (inputObject.spec.template && !!inputObject.spec.template.spec) {
|
||||
if (inputObject.spec.template.spec.containers) {
|
||||
updateContainers(inputObject.spec.template.spec.containers, containers);
|
||||
}
|
||||
if (inputObject.spec.template.spec.initContainers) {
|
||||
updateContainers(inputObject.spec.template.spec.initContainers, containers);
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (inputObject.spec.jobTemplate && inputObject.spec.jobTemplate.spec && inputObject.spec.jobTemplate.spec.template && inputObject.spec.jobTemplate.spec.template.spec) {
|
||||
if (inputObject.spec.jobTemplate.spec.template.spec.containers) {
|
||||
updateContainers(inputObject.spec.jobTemplate.spec.template.spec.containers, containers);
|
||||
}
|
||||
if (inputObject.spec.jobTemplate.spec.template.spec.initContainers) {
|
||||
updateContainers(inputObject.spec.jobTemplate.spec.template.spec.initContainers, containers);
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (inputObject.spec.containers) {
|
||||
updateContainers(inputObject.spec.containers, containers);
|
||||
}
|
||||
if (inputObject.spec.initContainers) {
|
||||
updateContainers(inputObject.spec.initContainers, containers);
|
||||
}
|
||||
}
|
||||
exports.updateImageDetails = updateImageDetails;
|
||||
function updateContainers(containers, images) {
|
||||
if (!containers || containers.length === 0) {
|
||||
return containers;
|
||||
}
|
||||
containers.forEach((container) => {
|
||||
const imageName = extractImageName(container.image.trim());
|
||||
images.forEach(image => {
|
||||
if (extractImageName(image) === imageName) {
|
||||
container.image = image;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
/**
 * Strips the tag (':tag') or digest ('@sha256:...') suffix from an image
 * reference, keeping any registry prefix (including registries with ports).
 * e.g. 'myacr.io/ns/app:v1'    -> 'myacr.io/ns/app'
 *      'app@sha256:abcd'       -> 'app'
 *      'localhost:5000/app:v1' -> 'localhost:5000/app'
 */
function extractImageName(imageName) {
    const slashIndex = imageName.indexOf('/');
    if (slashIndex > 0) {
        // Keep the registry part intact; ports contain ':' and must not be split.
        const registry = imageName.substring(0, slashIndex);
        const repository = stripImageReference(imageName.substring(slashIndex + 1));
        return `${registry}/${repository}`;
    }
    return stripImageReference(imageName);
}

// Removes everything from the first '@' (digest) or ':' (tag) onwards.
function stripImageReference(name) {
    const digestIndex = name.indexOf('@');
    if (digestIndex >= 0) {
        name = name.substring(0, digestIndex);
    }
    return name.split(':')[0];
}
|
||||
/**
 * Merges (or, when override is set, replaces) the labels on the resource's
 * pod-template spec. Throws 'NullInputObject' / 'ResourceKindNotDefined' on
 * invalid input; a missing newLabels map is a no-op.
 */
function updateSpecLabels(inputObject, newLabels, override) {
    if (!inputObject) {
        throw ('NullInputObject');
    }
    if (!inputObject.kind) {
        throw ('ResourceKindNotDefined');
    }
    if (!newLabels) {
        return;
    }
    let labels = getSpecLabels(inputObject);
    if (override) {
        labels = newLabels;
    }
    else {
        labels = labels || new Map();
        for (const key of Object.keys(newLabels)) {
            labels[key] = newLabels[key];
        }
    }
    setSpecLabels(inputObject, labels);
}
exports.updateSpecLabels = updateSpecLabels;
|
||||
/**
 * Merges (or replaces, when override is set) the selector labels of the
 * resource. Bare Pods carry no selector and are skipped. Throws
 * 'NullInputObject' / 'ResourceKindNotDefined' on invalid input.
 */
function updateSelectorLabels(inputObject, newLabels, override) {
    if (!inputObject) {
        throw ('NullInputObject');
    }
    if (!inputObject.kind) {
        throw ('ResourceKindNotDefined');
    }
    if (!newLabels) {
        return;
    }
    if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        return;
    }
    let labels = getSpecSelectorLabels(inputObject);
    if (override) {
        labels = newLabels;
    }
    else {
        labels = labels || new Map();
        for (const key of Object.keys(newLabels)) {
            labels[key] = newLabels[key];
        }
    }
    setSpecSelectorLabels(inputObject, labels);
}
exports.updateSelectorLabels = updateSelectorLabels;
|
||||
/**
 * Scans the given manifest files and returns { type, name } descriptors for
 * every resource whose kind appears in filterResourceTypes
 * (case-insensitive).
 * @param filePaths           manifest file paths to read
 * @param filterResourceTypes kinds to keep, e.g. ['Deployment', 'Service']
 * @returns array of { type, name } resource descriptors
 */
function getResources(filePaths, filterResourceTypes) {
    // Guard both inputs: a null filter previously threw on .filter().
    if (!filePaths || !filterResourceTypes) {
        return [];
    }
    const resources = [];
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            const inputObjectKind = inputObject ? inputObject.kind : '';
            const isMatch = filterResourceTypes.some(type => string_comparison_1.isEqual(inputObjectKind, type, string_comparison_1.StringComparer.OrdinalIgnoreCase));
            // Guard metadata: a matching kind without metadata would otherwise throw.
            if (isMatch && inputObject.metadata) {
                resources.push({
                    type: inputObject.kind,
                    name: inputObject.metadata.name
                });
            }
        });
    });
    return resources;
}
exports.getResources = getResources;
|
||||
/**
 * Returns the labels object of the resource's pod: metadata.labels for a
 * bare Pod, spec.template.metadata.labels for template-based workloads, or
 * null when neither is present.
 */
function getSpecLabels(inputObject) {
    if (!inputObject) {
        return null;
    }
    if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        // Guard metadata so a malformed Pod returns null instead of throwing.
        return inputObject.metadata ? inputObject.metadata.labels : null;
    }
    if (!!inputObject.spec && !!inputObject.spec.template && !!inputObject.spec.template.metadata) {
        return inputObject.spec.template.metadata.labels;
    }
    return null;
}
|
||||
/**
 * Reads the imagePullSecrets list from wherever the workload keeps its pod
 * spec: the jobTemplate path for CronJobs, spec directly for Pods, and
 * spec.template for other template-based workloads. Returns null when the
 * list cannot be located.
 */
function getImagePullSecrets(inputObject) {
    if (!inputObject || !inputObject.spec) {
        return null;
    }
    const kind = inputObject.kind;
    if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.cronjob, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        try {
            return inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets;
        }
        catch (ex) {
            core.debug(`Fetching imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
            return null;
        }
    }
    if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        return inputObject.spec.imagePullSecrets;
    }
    if (inputObject.spec.template && inputObject.spec.template.spec) {
        return inputObject.spec.template.spec.imagePullSecrets;
    }
    return null;
}
|
||||
/**
 * Writes newImagePullSecrets into the workload's pod spec: spec directly
 * for Pods, the jobTemplate path for CronJobs, spec.template for other
 * template-based workloads. Malformed CronJobs are logged and ignored.
 */
function setImagePullSecrets(inputObject, newImagePullSecrets) {
    if (!inputObject || !inputObject.spec || !newImagePullSecrets) {
        return;
    }
    const kind = inputObject.kind;
    if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        inputObject.spec.imagePullSecrets = newImagePullSecrets;
    }
    else if (string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.cronjob, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        try {
            inputObject.spec.jobTemplate.spec.template.spec.imagePullSecrets = newImagePullSecrets;
        }
        catch (ex) {
            core.debug(`Overriding imagePullSecrets failed due to this error: ${JSON.stringify(ex)}`);
            //Do nothing
        }
    }
    else if (inputObject.spec.template && inputObject.spec.template.spec) {
        inputObject.spec.template.spec.imagePullSecrets = newImagePullSecrets;
    }
}
|
||||
/**
 * Writes newLabels onto the resource's pod labels (metadata.labels for a
 * bare Pod, spec.template.metadata.labels otherwise).
 * BUG FIX: the previous implementation assigned to a local variable and
 * never wrote the labels back to the object, making override updates a
 * silent no-op.
 */
function setSpecLabels(inputObject, newLabels) {
    if (!inputObject || !newLabels) {
        return;
    }
    if (string_comparison_1.isEqual(inputObject.kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
        if (inputObject.metadata) {
            inputObject.metadata.labels = newLabels;
        }
        return;
    }
    if (!!inputObject.spec && !!inputObject.spec.template && !!inputObject.spec.template.metadata) {
        inputObject.spec.template.metadata.labels = newLabels;
    }
}
|
||||
/**
 * Returns the selector labels of the resource: spec.selector for services,
 * spec.selector.matchLabels for other workloads; null when absent.
 */
function getSpecSelectorLabels(inputObject) {
    if (!inputObject || !inputObject.spec || !inputObject.spec.selector) {
        return null;
    }
    return isServiceEntity(inputObject.kind)
        ? inputObject.spec.selector
        : inputObject.spec.selector.matchLabels;
}
|
||||
/**
 * Writes newLabels onto the resource's selector (spec.selector for a
 * Service, spec.selector.matchLabels otherwise).
 * BUG FIX: the previous implementation assigned to a local variable and
 * never wrote the labels back to the object.
 */
function setSpecSelectorLabels(inputObject, newLabels) {
    // Only write where a selector already exists, mirroring the read path.
    if (!inputObject || !inputObject.spec || !inputObject.spec.selector || !newLabels) {
        return;
    }
    if (isServiceEntity(inputObject.kind)) {
        inputObject.spec.selector = newLabels;
    }
    else {
        inputObject.spec.selector.matchLabels = newLabels;
    }
}
|
||||
@ -1,187 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getStableResourceName = exports.getBaselineResourceName = exports.getCanaryResourceName = exports.isSMICanaryStrategy = exports.isCanaryDeploymentStrategy = exports.fetchResource = exports.fetchCanaryResource = exports.getNewCanaryResource = exports.getNewBaselineResource = exports.getStableResource = exports.isResourceMarkedAsStable = exports.markResourceAsStable = exports.deleteCanaryDeployment = exports.STABLE_LABEL_VALUE = exports.STABLE_SUFFIX = exports.CANARY_LABEL_VALUE = exports.BASELINE_LABEL_VALUE = exports.CANARY_VERSION_LABEL = exports.TRAFFIC_SPLIT_STRATEGY = exports.CANARY_DEPLOYMENT_STRATEGY = void 0;
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const core = require("@actions/core");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const helper = require("../resource-object-utility");
|
||||
const constants_1 = require("../../constants");
|
||||
const string_comparison_1 = require("../string-comparison");
|
||||
const utility_1 = require("../utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
exports.CANARY_DEPLOYMENT_STRATEGY = 'CANARY';
|
||||
exports.TRAFFIC_SPLIT_STRATEGY = 'SMI';
|
||||
exports.CANARY_VERSION_LABEL = 'workflow/version';
|
||||
const BASELINE_SUFFIX = '-baseline';
|
||||
exports.BASELINE_LABEL_VALUE = 'baseline';
|
||||
const CANARY_SUFFIX = '-canary';
|
||||
exports.CANARY_LABEL_VALUE = 'canary';
|
||||
exports.STABLE_SUFFIX = '-stable';
|
||||
exports.STABLE_LABEL_VALUE = 'stable';
|
||||
/**
 * Deletes the canary and baseline variants of the workloads declared in the
 * given manifest files. Throws 'ManifestFileNotFound' when no manifests
 * resolve from the provided paths.
 */
function deleteCanaryDeployment(kubectl, manifestFilePaths, includeServices) {
    const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
    if (!inputManifestFiles || inputManifestFiles.length === 0) {
        throw new Error('ManifestFileNotFound');
    }
    cleanUpCanary(kubectl, inputManifestFiles, includeServices);
}
exports.deleteCanaryDeployment = deleteCanaryDeployment;
|
||||
/**
 * Returns a deep copy of the resource labelled as the stable variant.
 * Resources already marked stable are returned unchanged.
 */
function markResourceAsStable(inputObject) {
    if (isResourceMarkedAsStable(inputObject)) {
        return inputObject;
    }
    // Deep-clone so the caller's object is left untouched.
    const stableCopy = JSON.parse(JSON.stringify(inputObject));
    addCanaryLabelsAndAnnotations(stableCopy, exports.STABLE_LABEL_VALUE);
    core.debug("Added stable label: " + JSON.stringify(stableCopy));
    return stableCopy;
}
exports.markResourceAsStable = markResourceAsStable;
|
||||
/**
 * True when the resource already carries the canary-version label with the
 * 'stable' value.
 */
function isResourceMarkedAsStable(inputObject) {
    if (!inputObject || !inputObject.metadata || !inputObject.metadata.labels) {
        return false;
    }
    return inputObject.metadata.labels[exports.CANARY_VERSION_LABEL] == exports.STABLE_LABEL_VALUE;
}
exports.isResourceMarkedAsStable = isResourceMarkedAsStable;
|
||||
/**
 * Builds the '-stable' variant of the given resource, preserving its
 * declared replica count for workloads that support replicas (0 otherwise).
 * BUG FIX: the replica count lives at spec.replicas, not metadata.replicas
 * (metadata.replicas never exists on Kubernetes objects, so the stable copy
 * always got undefined replicas).
 */
function getStableResource(inputObject) {
    const replicaCount = isSpecContainsReplicas(inputObject.kind) && inputObject.spec
        ? inputObject.spec.replicas
        : 0;
    return getNewCanaryObject(inputObject, replicaCount, exports.STABLE_LABEL_VALUE);
}
exports.getStableResource = getStableResource;
|
||||
// Builds the '-baseline' variant of a stable resource with the given replica count.
function getNewBaselineResource(stableObject, replicas) {
    return getNewCanaryObject(stableObject, replicas, exports.BASELINE_LABEL_VALUE);
}
exports.getNewBaselineResource = getNewBaselineResource;
|
||||
// Builds the '-canary' variant of a resource with the given replica count.
function getNewCanaryResource(inputObject, replicas) {
    return getNewCanaryObject(inputObject, replicas, exports.CANARY_LABEL_VALUE);
}
exports.getNewCanaryResource = getNewCanaryResource;
|
||||
// Fetches the '-canary' variant of the named resource from the cluster.
function fetchCanaryResource(kubectl, kind, name) {
    return fetchResource(kubectl, kind, getCanaryResourceName(name));
}
exports.fetchCanaryResource = fetchCanaryResource;
|
||||
/**
 * Fetches a resource from the cluster via kubectl and strips cluster-
 * specific fields from it. Returns null when the resource is missing,
 * kubectl reported an error, or the output could not be processed.
 */
function fetchResource(kubectl, kind, name) {
    const result = kubectl.getResource(kind, name);
    if (!result || result.stderr) {
        return null;
    }
    if (result.stdout) {
        const resource = JSON.parse(result.stdout);
        try {
            UnsetsClusterSpecficDetails(resource);
            return resource;
        }
        catch (ex) {
            core.debug('Exception occurred while Parsing ' + resource + ' in Json object');
            core.debug(`Exception:${ex}`);
        }
    }
    return null;
}
exports.fetchResource = fetchResource;
|
||||
// True when the workflow's deployment-strategy input is 'canary' (any case).
function isCanaryDeploymentStrategy() {
    const strategy = TaskInputParameters.deploymentStrategy;
    return strategy && strategy.toUpperCase() === exports.CANARY_DEPLOYMENT_STRATEGY;
}
exports.isCanaryDeploymentStrategy = isCanaryDeploymentStrategy;
|
||||
// True when the canary strategy is active AND the traffic-split-method input is 'smi'.
function isSMICanaryStrategy() {
    const splitMethod = TaskInputParameters.trafficSplitMethod;
    return isCanaryDeploymentStrategy() && splitMethod && splitMethod.toUpperCase() === exports.TRAFFIC_SPLIT_STRATEGY;
}
exports.isSMICanaryStrategy = isSMICanaryStrategy;
|
||||
// Appends the canary suffix to a resource name.
function getCanaryResourceName(name) {
    return `${name}${CANARY_SUFFIX}`;
}
exports.getCanaryResourceName = getCanaryResourceName;
|
||||
// Appends the baseline suffix to a resource name.
function getBaselineResourceName(name) {
    return `${name}${BASELINE_SUFFIX}`;
}
exports.getBaselineResourceName = getBaselineResourceName;
|
||||
// Appends the stable suffix to a resource name.
function getStableResourceName(name) {
    return `${name}${exports.STABLE_SUFFIX}`;
}
exports.getStableResourceName = getStableResourceName;
|
||||
/**
 * Strips cluster-managed state from a fetched resource so it can be
 * re-applied: metadata is reduced to annotations/labels/name, and status is
 * emptied. No-op for null/undefined input.
 */
function UnsetsClusterSpecficDetails(resource) {
    if (!resource) {
        return;
    }
    const { metadata, status } = resource;
    if (metadata) {
        resource.metadata = {
            'annotations': metadata.annotations,
            'labels': metadata.labels,
            'name': metadata.name
        };
    }
    if (status) {
        resource.status = {};
    }
}
|
||||
/**
 * Clones the resource and turns it into a canary / baseline / stable
 * variant: renames it with the matching suffix, stamps version labels and
 * annotations, and sets the replica count where applicable.
 */
function getNewCanaryObject(inputObject, replicas, type) {
    const newObject = JSON.parse(JSON.stringify(inputObject));
    // Pick the suffixed name for the requested variant.
    switch (type) {
        case exports.CANARY_LABEL_VALUE:
            newObject.metadata.name = getCanaryResourceName(inputObject.metadata.name);
            break;
        case exports.STABLE_LABEL_VALUE:
            newObject.metadata.name = getStableResourceName(inputObject.metadata.name);
            break;
        default:
            newObject.metadata.name = getBaselineResourceName(inputObject.metadata.name);
            break;
    }
    // Adding labels and annotations.
    addCanaryLabelsAndAnnotations(newObject, type);
    // Updating no. of replicas
    if (isSpecContainsReplicas(newObject.kind)) {
        newObject.spec.replicas = replicas;
    }
    return newObject;
}
|
||||
// Replica counts apply to everything except Pods, DaemonSets and Services.
function isSpecContainsReplicas(kind) {
    const isPod = string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.pod, string_comparison_1.StringComparer.OrdinalIgnoreCase);
    const isDaemonSet = string_comparison_1.isEqual(kind, constants_1.KubernetesWorkload.daemonSet, string_comparison_1.StringComparer.OrdinalIgnoreCase);
    return !isPod && !isDaemonSet && !helper.isServiceEntity(kind);
}
|
||||
// Stamps the canary-version label (value = canary/baseline/stable) onto the
// object's labels, annotations, selector labels and, for non-service
// resources, the pod-template labels.
function addCanaryLabelsAndAnnotations(inputObject, type) {
    const versionLabels = new Map();
    versionLabels[exports.CANARY_VERSION_LABEL] = type;
    helper.updateObjectLabels(inputObject, versionLabels, false);
    helper.updateObjectAnnotations(inputObject, versionLabels, false);
    helper.updateSelectorLabels(inputObject, versionLabels, false);
    if (!helper.isServiceEntity(inputObject.kind)) {
        helper.updateSpecLabels(inputObject, versionLabels, false);
    }
}
|
||||
// Deletes the canary and baseline variants of every deployment (and,
// optionally, service) found in the given manifest files. Deletion errors
// for objects that do not exist are swallowed.
function cleanUpCanary(kubectl, files, includeServices) {
    const deleteObject = (kind, name) => {
        try {
            const result = kubectl.delete([kind, name]);
            utility_1.checkForErrors([result]);
        }
        catch (ex) {
            // Ignore failures of delete if doesn't exist
        }
    };
    files.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isDeploymentEntity(kind) || (includeServices && helper.isServiceEntity(kind))) {
                deleteObject(kind, getCanaryResourceName(name));
                deleteObject(kind, getBaselineResourceName(name));
            }
        });
    });
}
|
||||
@ -1,159 +0,0 @@
|
||||
'use strict';
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deploy = void 0;
|
||||
const fs = require("fs");
|
||||
const core = require("@actions/core");
|
||||
const yaml = require("js-yaml");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const KubernetesObjectUtility = require("../resource-object-utility");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const models = require("../../constants");
|
||||
const fileHelper = require("../files-helper");
|
||||
const utils = require("../manifest-utilities");
|
||||
const KubernetesManifestUtility = require("../manifest-stability-utility");
|
||||
const KubernetesConstants = require("../../constants");
|
||||
const string_comparison_1 = require("./../string-comparison");
|
||||
const pod_canary_deployment_helper_1 = require("./pod-canary-deployment-helper");
|
||||
const smi_canary_deployment_helper_1 = require("./smi-canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
/**
 * Core deployment flow: resolves manifest files, substitutes image details
 * and imagePullSecrets, applies the manifests (canary-aware), waits for
 * rollout stability, surfaces ingress resources and annotates everything
 * that was deployed.
 */
function deploy(kubectl, manifestFilePaths, deploymentStrategy) {
    return __awaiter(this, void 0, void 0, function* () {
        // Resolve and transform the input manifests (image + secret substitution).
        const rawManifestFiles = getManifestFiles(manifestFilePaths);
        const inputManifestFiles = updateResourceObjects(rawManifestFiles, TaskInputParameters.imagePullSecrets, TaskInputParameters.containers);
        // Apply the manifests, honouring the configured deployment strategy.
        const deployedManifestFiles = deployManifests(inputManifestFiles, kubectl, isCanaryDeploymentStrategy(deploymentStrategy));
        // Wait until every deployed workload and service reports ready.
        const resourceTypes = KubernetesObjectUtility.getResources(deployedManifestFiles, models.deploymentTypes.concat([KubernetesConstants.DiscoveryAndLoadBalancerResource.service]));
        yield checkManifestStability(kubectl, resourceTypes);
        // Surface ingress endpoints in the logs.
        const ingressResources = KubernetesObjectUtility.getResources(deployedManifestFiles, [KubernetesConstants.DiscoveryAndLoadBalancerResource.ingress]);
        for (const ingressResource of ingressResources) {
            kubectl.getResource(KubernetesConstants.DiscoveryAndLoadBalancerResource.ingress, ingressResource.name);
        }
        // Annotate resources and their child pods with workflow metadata.
        let allPods;
        try {
            allPods = JSON.parse(kubectl.getAllPods().stdout);
        }
        catch (e) {
            core.debug("Unable to parse pods; Error: " + e);
        }
        annotateResources(deployedManifestFiles, kubectl, resourceTypes, allPods);
    });
}
exports.deploy = deploy;
|
||||
/**
 * Resolves the manifest input into concrete file paths; throws
 * 'ManifestFileNotFound' when nothing matches.
 */
function getManifestFiles(manifestFilePaths) {
    const files = utils.getManifestFiles(manifestFilePaths);
    if (!files || files.length === 0) {
        throw new Error(`ManifestFileNotFound : ${manifestFilePaths}`);
    }
    return files;
}
|
||||
/**
 * Applies the manifest files. Under a canary strategy the canary/baseline
 * objects are generated first (SMI or pod-based); otherwise the manifests
 * are applied directly, with stable-version labels appended when SMI
 * traffic-splitting is configured.
 * @returns the file paths that were actually applied
 */
function deployManifests(files, kubectl, isCanaryDeploymentStrategy) {
    let result;
    if (isCanaryDeploymentStrategy) {
        const canaryDeploymentOutput = canaryDeploymentHelper.isSMICanaryStrategy()
            ? smi_canary_deployment_helper_1.deploySMICanary(kubectl, files)
            : pod_canary_deployment_helper_1.deployPodCanary(kubectl, files);
        result = canaryDeploymentOutput.result;
        files = canaryDeploymentOutput.newFilePaths;
    }
    else if (canaryDeploymentHelper.isSMICanaryStrategy()) {
        const updatedManifests = appendStableVersionLabelToResource(files, kubectl);
        result = kubectl.apply(updatedManifests, TaskInputParameters.forceDeployment);
    }
    else {
        result = kubectl.apply(files, TaskInputParameters.forceDeployment);
    }
    utility_1.checkForErrors([result]);
    return files;
}
|
||||
/**
 * For SMI canary: rewrites every deployment entity in the given files with
 * the stable-version label and writes the updated objects to fresh files.
 * Files containing non-deployment objects are passed through untouched.
 */
function appendStableVersionLabelToResource(files, kubectl) {
    const manifestFiles = [];
    const newObjectsList = [];
    files.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (KubernetesObjectUtility.isDeploymentEntity(inputObject.kind)) {
                newObjectsList.push(canaryDeploymentHelper.markResourceAsStable(inputObject));
            }
            else {
                manifestFiles.push(filePath);
            }
        });
    });
    manifestFiles.push(...fileHelper.writeObjectsToFile(newObjectsList));
    return manifestFiles;
}
|
||||
// Waits until every resource in the list reports a stable rollout.
function checkManifestStability(kubectl, resources) {
    return __awaiter(this, void 0, void 0, function* () {
        yield KubernetesManifestUtility.checkManifestStability(kubectl, resources);
    });
}
|
||||
/**
 * Annotates the namespace, the deployed manifest files and the child pods
 * of every non-Pod resource with workflow metadata; errors are reported as
 * warnings rather than failures.
 */
function annotateResources(files, kubectl, resourceTypes, allPods) {
    const annotateResults = [];
    annotateResults.push(utility_1.annotateNamespace(kubectl, TaskInputParameters.namespace));
    annotateResults.push(kubectl.annotateFiles(files, models.workflowAnnotations, true));
    for (const resource of resourceTypes) {
        if (resource.type.toUpperCase() !== models.KubernetesWorkload.pod.toUpperCase()) {
            const childResults = utility_1.annotateChildPods(kubectl, resource.type, resource.name, allPods);
            childResults.forEach(execResult => annotateResults.push(execResult));
        }
    }
    utility_1.checkForErrors(annotateResults, true);
}
|
||||
/**
 * Loads every object from the given manifest files, injects imagePullSecrets
 * and substitutes container images on workload entities (including members
 * of 'List' objects), then writes the updated objects to new files.
 * @returns paths of the rewritten manifest files
 */
function updateResourceObjects(filePaths, imagePullSecrets, containers) {
    const newObjectsList = [];
    const applySubstitutions = (inputObject) => {
        if (imagePullSecrets && imagePullSecrets.length > 0) {
            KubernetesObjectUtility.updateImagePullSecrets(inputObject, imagePullSecrets, false);
        }
        if (containers && containers.length > 0) {
            KubernetesObjectUtility.updateImageDetails(inputObject, containers);
        }
    };
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath).toString();
        yaml.safeLoadAll(fileContents, (inputObject) => {
            if (inputObject && inputObject.kind) {
                const kind = inputObject.kind;
                if (KubernetesObjectUtility.isWorkloadEntity(kind)) {
                    applySubstitutions(inputObject);
                }
                else if (string_comparison_1.isEqual(kind, 'list', string_comparison_1.StringComparer.OrdinalIgnoreCase)) {
                    const items = inputObject.items;
                    if (items.length > 0) {
                        items.forEach((item) => applySubstitutions(item));
                    }
                }
                newObjectsList.push(inputObject);
            }
        });
    });
    core.debug('New K8s objects after adding imagePullSecrets are :' + JSON.stringify(newObjectsList));
    const newFilePaths = fileHelper.writeObjectsToFile(newObjectsList);
    return newFilePaths;
}
|
||||
// Case-insensitive check that the given strategy string selects canary deployment.
function isCanaryDeploymentStrategy(deploymentStrategy) {
    if (deploymentStrategy == null) {
        return false;
    }
    return deploymentStrategy.toUpperCase() === canaryDeploymentHelper.CANARY_DEPLOYMENT_STRATEGY.toUpperCase();
}
|
||||
@ -1,58 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.deployPodCanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
/**
 * Pod-based canary: for each deployment entity, creates a canary object
 * (and, when a stable version exists in the cluster, a baseline object)
 * scaled to canary-percentage of the declared replicas, then applies all
 * generated and pass-through objects.
 * @returns { result, newFilePaths } from the kubectl apply
 */
function deployPodCanary(kubectl, filePaths) {
    const newObjectsList = [];
    const percentage = parseInt(TaskInputParameters.canaryPercentage);
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (!helper.isDeploymentEntity(kind)) {
                // Updating non deployment entity as it is.
                newObjectsList.push(inputObject);
                return;
            }
            core.debug('Calculating replica count for canary');
            const canaryReplicaCount = calculateReplicaCountForCanary(inputObject, percentage);
            core.debug('Replica count is ' + canaryReplicaCount);
            // Get stable object
            core.debug('Querying stable object');
            const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
            if (!stableObject) {
                core.debug('Stable object not found. Creating only canary object');
                // If stable object not found, create canary deployment.
                const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                newObjectsList.push(newCanaryObject);
            }
            else {
                core.debug('Stable object found. Creating canary and baseline objects');
                // If canary object not found, create canary and baseline object.
                const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                core.debug('New baseline object is: ' + JSON.stringify(newBaselineObject));
                newObjectsList.push(newCanaryObject);
                newObjectsList.push(newBaselineObject);
            }
        });
    });
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    return { 'result': result, 'newFilePaths': manifestFiles };
}
exports.deployPodCanary = deployPodCanary;
|
||||
// Computes round(percentage% of the declared replica count) for the canary.
function calculateReplicaCountForCanary(inputObject, percentage) {
    const declaredReplicas = helper.getReplicaCount(inputObject);
    return Math.round((declaredReplicas * percentage) / 100);
}
|
||||
@ -1,199 +0,0 @@
|
||||
'use strict';
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.redirectTrafficToStableDeployment = exports.redirectTrafficToCanaryDeployment = exports.deploySMICanary = void 0;
|
||||
const core = require("@actions/core");
|
||||
const fs = require("fs");
|
||||
const yaml = require("js-yaml");
|
||||
const util = require("util");
|
||||
const TaskInputParameters = require("../../input-parameters");
|
||||
const fileHelper = require("../files-helper");
|
||||
const helper = require("../resource-object-utility");
|
||||
const utils = require("../manifest-utilities");
|
||||
const kubectlUtils = require("../kubectl-util");
|
||||
const canaryDeploymentHelper = require("./canary-deployment-helper");
|
||||
const utility_1 = require("../utility");
|
||||
const TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX = '-workflow-rollout';
|
||||
const TRAFFIC_SPLIT_OBJECT = 'TrafficSplit';
|
||||
let trafficSplitAPIVersion = "";
|
||||
/**
 * SMI canary: creates canary and baseline deployments (fixed replica count
 * from the baseline-and-canary-replicas input), applies them together with
 * all non-deployment objects, then provisions the canary/baseline/stable
 * services and TrafficSplit object. Throws 'StableSpecSelectorNotExist'
 * when a cluster resource exists but is not marked stable.
 * @returns { result, newFilePaths } from the kubectl apply
 */
function deploySMICanary(kubectl, filePaths) {
    const newObjectsList = [];
    const canaryReplicaCount = parseInt(TaskInputParameters.baselineAndCanaryReplicas);
    core.debug('Replica count is ' + canaryReplicaCount);
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, (inputObject) => {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (!helper.isDeploymentEntity(kind)) {
                // Updating non deployment entity as it is.
                newObjectsList.push(inputObject);
                return;
            }
            // Get stable object
            core.debug('Querying stable object');
            const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, name);
            if (!stableObject) {
                core.debug('Stable object not found. Creating only canary object');
                // If stable object not found, create canary deployment.
                const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                newObjectsList.push(newCanaryObject);
            }
            else {
                if (!canaryDeploymentHelper.isResourceMarkedAsStable(stableObject)) {
                    throw (`StableSpecSelectorNotExist : ${name}`);
                }
                core.debug('Stable object found. Creating canary and baseline objects');
                // If canary object not found, create canary and baseline object.
                const newCanaryObject = canaryDeploymentHelper.getNewCanaryResource(inputObject, canaryReplicaCount);
                const newBaselineObject = canaryDeploymentHelper.getNewBaselineResource(stableObject, canaryReplicaCount);
                core.debug('New canary object is: ' + JSON.stringify(newCanaryObject));
                core.debug('New baseline object is: ' + JSON.stringify(newBaselineObject));
                newObjectsList.push(newCanaryObject);
                newObjectsList.push(newBaselineObject);
            }
        });
    });
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    createCanaryService(kubectl, filePaths);
    return { 'result': result, 'newFilePaths': manifestFiles };
}
exports.deploySMICanary = deploySMICanary;
|
||||
/**
 * Creates the canary/baseline/stable Service variants for every service in
 * the given manifests and provisions or updates the SMI TrafficSplit object
 * routing traffic between them. All generated objects are applied at once.
 * Fixes: corrected the typo'd 'objcet' debug message and a duplicated,
 * misleading debug line that re-logged the manifest path as a service name.
 */
function createCanaryService(kubectl, filePaths) {
    const newObjectsList = [];
    const trafficObjectsList = [];
    filePaths.forEach((filePath) => {
        const fileContents = fs.readFileSync(filePath);
        yaml.safeLoadAll(fileContents, function (inputObject) {
            const name = inputObject.metadata.name;
            const kind = inputObject.kind;
            if (helper.isServiceEntity(kind)) {
                const newCanaryServiceObject = canaryDeploymentHelper.getNewCanaryResource(inputObject);
                core.debug('New canary service object is: ' + JSON.stringify(newCanaryServiceObject));
                newObjectsList.push(newCanaryServiceObject);
                const newBaselineServiceObject = canaryDeploymentHelper.getNewBaselineResource(inputObject);
                core.debug('New baseline object is: ' + JSON.stringify(newBaselineServiceObject));
                newObjectsList.push(newBaselineServiceObject);
                core.debug('Querying for stable service object');
                const stableObject = canaryDeploymentHelper.fetchResource(kubectl, kind, canaryDeploymentHelper.getStableResourceName(name));
                if (!stableObject) {
                    // First rollout: create the stable service and a TrafficSplit
                    // manifest sending full weight (1000) to it.
                    const newStableServiceObject = canaryDeploymentHelper.getStableResource(inputObject);
                    core.debug('New stable service object is: ' + JSON.stringify(newStableServiceObject));
                    newObjectsList.push(newStableServiceObject);
                    core.debug('Creating the traffic object for service: ' + name);
                    const trafficObject = createTrafficSplitManifestFile(kubectl, name, 0, 0, 1000);
                    core.debug('Created traffic object manifest: ' + trafficObject);
                    trafficObjectsList.push(trafficObject);
                }
                else {
                    // Skip the update when the existing TrafficSplit already routes
                    // full weight (1000m) to the canary service.
                    let updateTrafficObject = true;
                    const trafficObject = canaryDeploymentHelper.fetchResource(kubectl, TRAFFIC_SPLIT_OBJECT, getTrafficSplitResourceName(name));
                    if (trafficObject) {
                        const trafficJObject = JSON.parse(JSON.stringify(trafficObject));
                        if (trafficJObject && trafficJObject.spec && trafficJObject.spec.backends) {
                            trafficJObject.spec.backends.forEach((s) => {
                                if (s.service === canaryDeploymentHelper.getCanaryResourceName(name) && s.weight === "1000m") {
                                    core.debug('Update traffic object not required');
                                    updateTrafficObject = false;
                                }
                            });
                        }
                    }
                    if (updateTrafficObject) {
                        core.debug('Stable service object present so updating the traffic object for service: ' + name);
                        trafficObjectsList.push(updateTrafficSplitObject(kubectl, name));
                    }
                }
            }
        });
    });
    const manifestFiles = fileHelper.writeObjectsToFile(newObjectsList);
    manifestFiles.push(...trafficObjectsList);
    const result = kubectl.apply(manifestFiles, TaskInputParameters.forceDeployment);
    utility_1.checkForErrors([result]);
}
|
||||
function redirectTrafficToCanaryDeployment(kubectl, manifestFilePaths) {
|
||||
adjustTraffic(kubectl, manifestFilePaths, 0, 1000);
|
||||
}
|
||||
exports.redirectTrafficToCanaryDeployment = redirectTrafficToCanaryDeployment;
|
||||
function redirectTrafficToStableDeployment(kubectl, manifestFilePaths) {
|
||||
adjustTraffic(kubectl, manifestFilePaths, 1000, 0);
|
||||
}
|
||||
exports.redirectTrafficToStableDeployment = redirectTrafficToStableDeployment;
|
||||
function adjustTraffic(kubectl, manifestFilePaths, stableWeight, canaryWeight) {
|
||||
// get manifest files
|
||||
const inputManifestFiles = utils.getManifestFiles(manifestFilePaths);
|
||||
if (inputManifestFiles == null || inputManifestFiles.length == 0) {
|
||||
return;
|
||||
}
|
||||
const trafficSplitManifests = [];
|
||||
const serviceObjects = [];
|
||||
inputManifestFiles.forEach((filePath) => {
|
||||
const fileContents = fs.readFileSync(filePath);
|
||||
yaml.safeLoadAll(fileContents, function (inputObject) {
|
||||
const name = inputObject.metadata.name;
|
||||
const kind = inputObject.kind;
|
||||
if (helper.isServiceEntity(kind)) {
|
||||
trafficSplitManifests.push(createTrafficSplitManifestFile(kubectl, name, stableWeight, 0, canaryWeight));
|
||||
serviceObjects.push(name);
|
||||
}
|
||||
});
|
||||
});
|
||||
if (trafficSplitManifests.length <= 0) {
|
||||
return;
|
||||
}
|
||||
const result = kubectl.apply(trafficSplitManifests, TaskInputParameters.forceDeployment);
|
||||
core.debug('serviceObjects:' + serviceObjects.join(',') + ' result:' + result);
|
||||
utility_1.checkForErrors([result]);
|
||||
}
|
||||
function updateTrafficSplitObject(kubectl, serviceName) {
|
||||
const percentage = parseInt(TaskInputParameters.canaryPercentage) * 10;
|
||||
const baselineAndCanaryWeight = percentage / 2;
|
||||
const stableDeploymentWeight = 1000 - percentage;
|
||||
core.debug('Creating the traffic object with canary weight: ' + baselineAndCanaryWeight + ',baseling weight: ' + baselineAndCanaryWeight + ',stable: ' + stableDeploymentWeight);
|
||||
return createTrafficSplitManifestFile(kubectl, serviceName, stableDeploymentWeight, baselineAndCanaryWeight, baselineAndCanaryWeight);
|
||||
}
|
||||
function createTrafficSplitManifestFile(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight) {
|
||||
const smiObjectString = getTrafficSplitObject(kubectl, serviceName, stableWeight, baselineWeight, canaryWeight);
|
||||
const manifestFile = fileHelper.writeManifestToFile(smiObjectString, TRAFFIC_SPLIT_OBJECT, serviceName);
|
||||
if (!manifestFile) {
|
||||
throw new Error('UnableToCreateTrafficSplitManifestFile');
|
||||
}
|
||||
return manifestFile;
|
||||
}
|
||||
function getTrafficSplitObject(kubectl, name, stableWeight, baselineWeight, canaryWeight) {
|
||||
if (!trafficSplitAPIVersion) {
|
||||
trafficSplitAPIVersion = kubectlUtils.getTrafficSplitAPIVersion(kubectl);
|
||||
}
|
||||
const trafficSplitObjectJson = `{
|
||||
"apiVersion": "${trafficSplitAPIVersion}",
|
||||
"kind": "TrafficSplit",
|
||||
"metadata": {
|
||||
"name": "%s"
|
||||
},
|
||||
"spec": {
|
||||
"backends": [
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
},
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
},
|
||||
{
|
||||
"service": "%s",
|
||||
"weight": "%sm"
|
||||
}
|
||||
],
|
||||
"service": "%s"
|
||||
}
|
||||
}`;
|
||||
const trafficSplitObject = util.format(trafficSplitObjectJson, getTrafficSplitResourceName(name), canaryDeploymentHelper.getStableResourceName(name), stableWeight, canaryDeploymentHelper.getBaselineResourceName(name), baselineWeight, canaryDeploymentHelper.getCanaryResourceName(name), canaryWeight, name);
|
||||
return trafficSplitObject;
|
||||
}
|
||||
function getTrafficSplitResourceName(name) {
|
||||
return name + TRAFFIC_SPLIT_OBJECT_NAME_SUFFIX;
|
||||
}
|
||||
@ -1,26 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.isEqual = exports.StringComparer = void 0;
|
||||
var StringComparer;
|
||||
(function (StringComparer) {
|
||||
StringComparer[StringComparer["Ordinal"] = 0] = "Ordinal";
|
||||
StringComparer[StringComparer["OrdinalIgnoreCase"] = 1] = "OrdinalIgnoreCase";
|
||||
})(StringComparer = exports.StringComparer || (exports.StringComparer = {}));
|
||||
function isEqual(str1, str2, stringComparer) {
|
||||
if (str1 == null && str2 == null) {
|
||||
return true;
|
||||
}
|
||||
if (str1 == null) {
|
||||
return false;
|
||||
}
|
||||
if (str2 == null) {
|
||||
return false;
|
||||
}
|
||||
if (stringComparer == StringComparer.OrdinalIgnoreCase) {
|
||||
return str1.toUpperCase() === str2.toUpperCase();
|
||||
}
|
||||
else {
|
||||
return str1 === str2;
|
||||
}
|
||||
}
|
||||
exports.isEqual = isEqual;
|
||||
@ -1,527 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.ToolRunner = void 0;
|
||||
const os = require("os");
|
||||
const events = require("events");
|
||||
const child = require("child_process");
|
||||
const core = require("@actions/core");
|
||||
class ToolRunner extends events.EventEmitter {
|
||||
constructor(toolPath) {
|
||||
super();
|
||||
if (!toolPath) {
|
||||
throw new Error('Parameter \'toolPath\' cannot be null or empty.');
|
||||
}
|
||||
this.toolPath = toolPath;
|
||||
this.args = [];
|
||||
core.debug('toolRunner toolPath: ' + toolPath);
|
||||
}
|
||||
_debug(message) {
|
||||
this.emit('debug', message);
|
||||
}
|
||||
_argStringToArray(argString) {
|
||||
var args = [];
|
||||
var inQuotes = false;
|
||||
var escaped = false;
|
||||
var lastCharWasSpace = true;
|
||||
var arg = '';
|
||||
var append = function (c) {
|
||||
// we only escape double quotes.
|
||||
if (escaped && c !== '"') {
|
||||
arg += '\\';
|
||||
}
|
||||
arg += c;
|
||||
escaped = false;
|
||||
};
|
||||
for (var i = 0; i < argString.length; i++) {
|
||||
var c = argString.charAt(i);
|
||||
if (c === ' ' && !inQuotes) {
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg);
|
||||
arg = '';
|
||||
}
|
||||
lastCharWasSpace = true;
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (c === '"') {
|
||||
if (!escaped) {
|
||||
inQuotes = !inQuotes;
|
||||
}
|
||||
else {
|
||||
append(c);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && escaped) {
|
||||
append(c);
|
||||
continue;
|
||||
}
|
||||
if (c === "\\" && inQuotes) {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
append(c);
|
||||
lastCharWasSpace = false;
|
||||
}
|
||||
if (!lastCharWasSpace) {
|
||||
args.push(arg.trim());
|
||||
}
|
||||
return args;
|
||||
}
|
||||
_getCommandString(options, noPrefix) {
|
||||
let toolPath = this._getSpawnFileName();
|
||||
let args = this._getSpawnArgs(options);
|
||||
let cmd = noPrefix ? '' : '[command]'; // omit prefix when piped to a second tool
|
||||
if (process.platform == 'win32') {
|
||||
// Windows + cmd file
|
||||
if (this._isCmdFile()) {
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows + verbatim
|
||||
else if (options.windowsVerbatimArguments) {
|
||||
cmd += `"${toolPath}"`;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// Windows (regular)
|
||||
else {
|
||||
cmd += this._windowsQuoteCmdArg(toolPath);
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${this._windowsQuoteCmdArg(a)}`;
|
||||
});
|
||||
}
|
||||
}
|
||||
else {
|
||||
// OSX/Linux - this can likely be improved with some form of quoting.
|
||||
// creating processes on Unix is fundamentally different than Windows.
|
||||
// on Unix, execvp() takes an arg array.
|
||||
cmd += toolPath;
|
||||
args.forEach((a) => {
|
||||
cmd += ` ${a}`;
|
||||
});
|
||||
}
|
||||
// append second tool
|
||||
if (this.pipeOutputToTool) {
|
||||
cmd += ' | ' + this.pipeOutputToTool._getCommandString(options, /*noPrefix:*/ true);
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
_getSpawnFileName() {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
return process.env['COMSPEC'] || 'cmd.exe';
|
||||
}
|
||||
}
|
||||
return this.toolPath;
|
||||
}
|
||||
_getSpawnArgs(options) {
|
||||
if (process.platform == 'win32') {
|
||||
if (this._isCmdFile()) {
|
||||
let argline = `/D /S /C "${this._windowsQuoteCmdArg(this.toolPath)}`;
|
||||
for (let i = 0; i < this.args.length; i++) {
|
||||
argline += ' ';
|
||||
argline += options.windowsVerbatimArguments ? this.args[i] : this._windowsQuoteCmdArg(this.args[i]);
|
||||
}
|
||||
argline += '"';
|
||||
return [argline];
|
||||
}
|
||||
if (options.windowsVerbatimArguments) {
|
||||
// note, in Node 6.x options.argv0 can be used instead of overriding args.slice and args.unshift.
|
||||
// for more details, refer to https://github.com/nodejs/node/blob/v6.x/lib/child_process.js
|
||||
let args = this.args.slice(0); // copy the array
|
||||
// override slice to prevent Node from creating a copy of the arg array.
|
||||
// we need Node to use the "unshift" override below.
|
||||
args.slice = function () {
|
||||
if (arguments.length != 1 || arguments[0] != 0) {
|
||||
throw new Error('Unexpected arguments passed to args.slice when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return args;
|
||||
};
|
||||
// override unshift
|
||||
//
|
||||
// when using the windowsVerbatimArguments option, Node does not quote the tool path when building
|
||||
// the cmdline parameter for the win32 function CreateProcess(). an unquoted space in the tool path
|
||||
// causes problems for tools when attempting to parse their own command line args. tools typically
|
||||
// assume their arguments begin after arg 0.
|
||||
//
|
||||
// by hijacking unshift, we can quote the tool path when it pushed onto the args array. Node builds
|
||||
// the cmdline parameter from the args array.
|
||||
//
|
||||
// note, we can't simply pass a quoted tool path to Node for multiple reasons:
|
||||
// 1) Node verifies the file exists (calls win32 function GetFileAttributesW) and the check returns
|
||||
// false if the path is quoted.
|
||||
// 2) Node passes the tool path as the application parameter to CreateProcess, which expects the
|
||||
// path to be unquoted.
|
||||
//
|
||||
// also note, in addition to the tool path being embedded within the cmdline parameter, Node also
|
||||
// passes the tool path to CreateProcess via the application parameter (optional parameter). when
|
||||
// present, Windows uses the application parameter to determine which file to run, instead of
|
||||
// interpreting the file from the cmdline parameter.
|
||||
args.unshift = function () {
|
||||
if (arguments.length != 1) {
|
||||
throw new Error('Unexpected arguments passed to args.unshift when windowsVerbatimArguments flag is set.');
|
||||
}
|
||||
return Array.prototype.unshift.call(args, `"${arguments[0]}"`); // quote the file name
|
||||
};
|
||||
return args;
|
||||
}
|
||||
}
|
||||
return this.args;
|
||||
}
|
||||
_isCmdFile() {
|
||||
let upperToolPath = this.toolPath.toUpperCase();
|
||||
return this._endsWith(upperToolPath, '.CMD') || this._endsWith(upperToolPath, '.BAT');
|
||||
}
|
||||
_endsWith(str, end) {
|
||||
return str.slice(-end.length) == end;
|
||||
}
|
||||
_windowsQuoteCmdArg(arg) {
|
||||
// for .exe, apply the normal quoting rules that libuv applies
|
||||
if (!this._isCmdFile()) {
|
||||
return this._uv_quote_cmd_arg(arg);
|
||||
}
|
||||
// otherwise apply quoting rules specific to the cmd.exe command line parser.
|
||||
// the libuv rules are generic and are not designed specifically for cmd.exe
|
||||
// command line parser.
|
||||
//
|
||||
// for a detailed description of the cmd.exe command line parser, refer to
|
||||
// http://stackoverflow.com/questions/4094699/how-does-the-windows-command-interpreter-cmd-exe-parse-scripts/7970912#7970912
|
||||
// need quotes for empty arg
|
||||
if (!arg) {
|
||||
return '""';
|
||||
}
|
||||
// determine whether the arg needs to be quoted
|
||||
const cmdSpecialChars = [' ', '\t', '&', '(', ')', '[', ']', '{', '}', '^', '=', ';', '!', '\'', '+', ',', '`', '~', '|', '<', '>', '"'];
|
||||
let needsQuotes = false;
|
||||
for (let char of arg) {
|
||||
if (cmdSpecialChars.some(x => x == char)) {
|
||||
needsQuotes = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// short-circuit if quotes not needed
|
||||
if (!needsQuotes) {
|
||||
return arg;
|
||||
}
|
||||
// the following quoting rules are very similar to the rules that by libuv applies.
|
||||
//
|
||||
// 1) wrap the string in quotes
|
||||
//
|
||||
// 2) double-up quotes - i.e. " => ""
|
||||
//
|
||||
// this is different from the libuv quoting rules. libuv replaces " with \", which unfortunately
|
||||
// doesn't work well with a cmd.exe command line.
|
||||
//
|
||||
// note, replacing " with "" also works well if the arg is passed to a downstream .NET console app.
|
||||
// for example, the command line:
|
||||
// foo.exe "myarg:""my val"""
|
||||
// is parsed by a .NET console app into an arg array:
|
||||
// [ "myarg:\"my val\"" ]
|
||||
// which is the same end result when applying libuv quoting rules. although the actual
|
||||
// command line from libuv quoting rules would look like:
|
||||
// foo.exe "myarg:\"my val\""
|
||||
//
|
||||
// 3) double-up slashes that preceed a quote,
|
||||
// e.g. hello \world => "hello \world"
|
||||
// hello\"world => "hello\\""world"
|
||||
// hello\\"world => "hello\\\\""world"
|
||||
// hello world\ => "hello world\\"
|
||||
//
|
||||
// technically this is not required for a cmd.exe command line, or the batch argument parser.
|
||||
// the reasons for including this as a .cmd quoting rule are:
|
||||
//
|
||||
// a) this is optimized for the scenario where the argument is passed from the .cmd file to an
|
||||
// external program. many programs (e.g. .NET console apps) rely on the slash-doubling rule.
|
||||
//
|
||||
// b) it's what we've been doing previously (by deferring to node default behavior) and we
|
||||
// haven't heard any complaints about that aspect.
|
||||
//
|
||||
// note, a weakness of the quoting rules chosen here, is that % is not escaped. in fact, % cannot be
|
||||
// escaped when used on the command line directly - even though within a .cmd file % can be escaped
|
||||
// by using %%.
|
||||
//
|
||||
// the saving grace is, on the command line, %var% is left as-is if var is not defined. this contrasts
|
||||
// the line parsing rules within a .cmd file, where if var is not defined it is replaced with nothing.
|
||||
//
|
||||
// one option that was explored was replacing % with ^% - i.e. %var% => ^%var^%. this hack would
|
||||
// often work, since it is unlikely that var^ would exist, and the ^ character is removed when the
|
||||
// variable is used. the problem, however, is that ^ is not removed when %* is used to pass the args
|
||||
// to an external program.
|
||||
//
|
||||
// an unexplored potential solution for the % escaping problem, is to create a wrapper .cmd file.
|
||||
// % can be escaped within a .cmd file.
|
||||
let reverse = '"';
|
||||
let quote_hit = true;
|
||||
for (let i = arg.length; i > 0; i--) { // walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quote_hit && arg[i - 1] == '\\') {
|
||||
reverse += '\\'; // double the slash
|
||||
}
|
||||
else if (arg[i - 1] == '"') {
|
||||
quote_hit = true;
|
||||
reverse += '"'; // double the quote
|
||||
}
|
||||
else {
|
||||
quote_hit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse.split('').reverse().join('');
|
||||
}
|
||||
_uv_quote_cmd_arg(arg) {
|
||||
// Tool runner wraps child_process.spawn() and needs to apply the same quoting as
|
||||
// Node in certain cases where the undocumented spawn option windowsVerbatimArguments
|
||||
// is used.
|
||||
//
|
||||
// Since this function is a port of quote_cmd_arg from Node 4.x (technically, lib UV,
|
||||
// see https://github.com/nodejs/node/blob/v4.x/deps/uv/src/win/process.c for details),
|
||||
// pasting copyright notice from Node within this function:
|
||||
//
|
||||
// Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
if (!arg) {
|
||||
// Need double quotation for empty argument
|
||||
return '""';
|
||||
}
|
||||
if (arg.indexOf(' ') < 0 && arg.indexOf('\t') < 0 && arg.indexOf('"') < 0) {
|
||||
// No quotation needed
|
||||
return arg;
|
||||
}
|
||||
if (arg.indexOf('"') < 0 && arg.indexOf('\\') < 0) {
|
||||
// No embedded double quotes or backslashes, so I can just wrap
|
||||
// quote marks around the whole thing.
|
||||
return `"${arg}"`;
|
||||
}
|
||||
// Expected input/output:
|
||||
// input : hello"world
|
||||
// output: "hello\"world"
|
||||
// input : hello""world
|
||||
// output: "hello\"\"world"
|
||||
// input : hello\world
|
||||
// output: hello\world
|
||||
// input : hello\\world
|
||||
// output: hello\\world
|
||||
// input : hello\"world
|
||||
// output: "hello\\\"world"
|
||||
// input : hello\\"world
|
||||
// output: "hello\\\\\"world"
|
||||
// input : hello world\
|
||||
// output: "hello world\\" - note the comment in libuv actually reads "hello world\"
|
||||
// but it appears the comment is wrong, it should be "hello world\\"
|
||||
let reverse = '"';
|
||||
let quote_hit = true;
|
||||
for (let i = arg.length; i > 0; i--) { // walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quote_hit && arg[i - 1] == '\\') {
|
||||
reverse += '\\';
|
||||
}
|
||||
else if (arg[i - 1] == '"') {
|
||||
quote_hit = true;
|
||||
reverse += '\\';
|
||||
}
|
||||
else {
|
||||
quote_hit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse.split('').reverse().join('');
|
||||
}
|
||||
_cloneExecOptions(options) {
|
||||
options = options || {};
|
||||
let result = {
|
||||
cwd: options.cwd || process.cwd(),
|
||||
env: options.env || process.env,
|
||||
silent: options.silent || false,
|
||||
failOnStdErr: options.failOnStdErr || false,
|
||||
ignoreReturnCode: options.ignoreReturnCode || false,
|
||||
windowsVerbatimArguments: options.windowsVerbatimArguments || false
|
||||
};
|
||||
result.outStream = options.outStream || process.stdout;
|
||||
result.errStream = options.errStream || process.stderr;
|
||||
return result;
|
||||
}
|
||||
_getSpawnSyncOptions(options) {
|
||||
let result = {};
|
||||
result.cwd = options.cwd;
|
||||
result.env = options.env;
|
||||
result['windowsVerbatimArguments'] = options.windowsVerbatimArguments || this._isCmdFile();
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Add argument
|
||||
* Append an argument or an array of arguments
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
arg(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
if (val instanceof Array) {
|
||||
core.debug(this.toolPath + ' arg: ' + JSON.stringify(val));
|
||||
this.args = this.args.concat(val);
|
||||
}
|
||||
else if (typeof (val) === 'string') {
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(val.trim());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Parses an argument line into one or more arguments
|
||||
* e.g. .line('"arg one" two -z') is equivalent to .arg(['arg one', 'two', '-z'])
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param val string argument line
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
line(val) {
|
||||
if (!val) {
|
||||
return this;
|
||||
}
|
||||
core.debug(this.toolPath + ' arg: ' + val);
|
||||
this.args = this.args.concat(this._argStringToArray(val));
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Add argument(s) if a condition is met
|
||||
* Wraps arg(). See arg for details
|
||||
* returns ToolRunner for chaining
|
||||
*
|
||||
* @param condition boolean condition
|
||||
* @param val string cmdline or array of strings
|
||||
* @returns ToolRunner
|
||||
*/
|
||||
argIf(condition, val) {
|
||||
if (condition) {
|
||||
this.arg(val);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Pipe output of exec() to another tool
|
||||
* @param tool
|
||||
* @param file optional filename to additionally stream the output to.
|
||||
* @returns {ToolRunner}
|
||||
*/
|
||||
pipeExecOutputToTool(tool, file) {
|
||||
this.pipeOutputToTool = tool;
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* Exec a tool synchronously.
|
||||
* Output will be *not* be streamed to the live console. It will be returned after execution is complete.
|
||||
* Appropriate for short running tools
|
||||
* Returns IExecSyncResult with output and return code
|
||||
*
|
||||
* @param tool path to tool to exec
|
||||
* @param options optional exec options. See IExecSyncOptions
|
||||
* @returns IExecSyncResult
|
||||
*/
|
||||
execSync(options) {
|
||||
core.debug('exec tool: ' + this.toolPath);
|
||||
core.debug('arguments:');
|
||||
this.args.forEach((arg) => {
|
||||
core.debug(' ' + arg);
|
||||
});
|
||||
options = this._cloneExecOptions(options);
|
||||
if (!options.silent) {
|
||||
options.outStream.write(this._getCommandString(options) + os.EOL);
|
||||
}
|
||||
var r = child.spawnSync(this._getSpawnFileName(), this._getSpawnArgs(options), this._getSpawnSyncOptions(options));
|
||||
var res = { code: r.status, error: r.error };
|
||||
if (!options.silent && r.stdout && r.stdout.length > 0) {
|
||||
options.outStream.write(r.stdout);
|
||||
}
|
||||
if (!options.silent && r.stderr && r.stderr.length > 0) {
|
||||
options.errStream.write(r.stderr);
|
||||
}
|
||||
res.stdout = (r.stdout) ? r.stdout.toString() : '';
|
||||
res.stderr = (r.stderr) ? r.stderr.toString() : '';
|
||||
return res;
|
||||
}
|
||||
}
|
||||
exports.ToolRunner = ToolRunner;
|
||||
class ExecState extends events.EventEmitter {
|
||||
constructor(options, toolPath) {
|
||||
super();
|
||||
this.delay = 10000; // 10 seconds
|
||||
this.timeout = null;
|
||||
if (!toolPath) {
|
||||
throw new Error('toolPath must not be empty');
|
||||
}
|
||||
this.options = options;
|
||||
this.toolPath = toolPath;
|
||||
let delay = process.env['TASKLIB_TEST_TOOLRUNNER_EXITDELAY'];
|
||||
if (delay) {
|
||||
this.delay = parseInt(delay);
|
||||
}
|
||||
}
|
||||
CheckComplete() {
|
||||
if (this.done) {
|
||||
return;
|
||||
}
|
||||
if (this.processClosed) {
|
||||
this._setResult();
|
||||
}
|
||||
else if (this.processExited) {
|
||||
this.timeout = setTimeout(ExecState.HandleTimeout, this.delay, this);
|
||||
}
|
||||
}
|
||||
_setResult() {
|
||||
// determine whether there is an error
|
||||
let error;
|
||||
if (this.processExited) {
|
||||
if (this.processError) {
|
||||
error = new Error(`LIB_ProcessError: \n tool: ${this.toolPath} \n error: ${this.processError}`);
|
||||
}
|
||||
else if (this.processExitCode != 0 && !this.options.ignoreReturnCode) {
|
||||
error = new Error(`LIB_ProcessExitCode\n tool: ${this.toolPath} \n Exit Code: ${this.processExitCode}`);
|
||||
}
|
||||
else if (this.processStderr && this.options.failOnStdErr) {
|
||||
error = new Error(`LIB_ProcessStderr', ${this.toolPath}`);
|
||||
}
|
||||
}
|
||||
// clear the timeout
|
||||
if (this.timeout) {
|
||||
clearTimeout(this.timeout);
|
||||
this.timeout = null;
|
||||
}
|
||||
this.done = true;
|
||||
this.emit('done', error, this.processExitCode);
|
||||
}
|
||||
static HandleTimeout(state) {
|
||||
if (state.done) {
|
||||
return;
|
||||
}
|
||||
if (!state.processClosed && state.processExited) {
|
||||
core.debug(`LIB_StdioNotClosed`);
|
||||
}
|
||||
state._setResult();
|
||||
}
|
||||
}
|
||||
@ -1,105 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.getCurrentTime = exports.getRandomInt = exports.sleep = exports.annotateNamespace = exports.annotateChildPods = exports.checkForErrors = exports.isEqual = exports.getExecutableExtension = void 0;
|
||||
const os = require("os");
|
||||
const core = require("@actions/core");
|
||||
const constants_1 = require("../constants");
|
||||
function getExecutableExtension() {
|
||||
if (os.type().match(/^Win/)) {
|
||||
return '.exe';
|
||||
}
|
||||
return '';
|
||||
}
|
||||
exports.getExecutableExtension = getExecutableExtension;
|
||||
function isEqual(str1, str2, ignoreCase) {
|
||||
if (str1 == null && str2 == null) {
|
||||
return true;
|
||||
}
|
||||
if (str1 == null || str2 == null) {
|
||||
return false;
|
||||
}
|
||||
if (ignoreCase) {
|
||||
return str1.toUpperCase() === str2.toUpperCase();
|
||||
}
|
||||
else {
|
||||
return str1 === str2;
|
||||
}
|
||||
}
|
||||
exports.isEqual = isEqual;
|
||||
function checkForErrors(execResults, warnIfError) {
|
||||
if (execResults.length !== 0) {
|
||||
let stderr = '';
|
||||
execResults.forEach(result => {
|
||||
if (result && result.stderr) {
|
||||
if (result.code !== 0) {
|
||||
stderr += result.stderr + '\n';
|
||||
}
|
||||
else {
|
||||
core.warning(result.stderr);
|
||||
}
|
||||
}
|
||||
});
|
||||
if (stderr.length > 0) {
|
||||
if (warnIfError) {
|
||||
core.warning(stderr.trim());
|
||||
}
|
||||
else {
|
||||
throw new Error(stderr.trim());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.checkForErrors = checkForErrors;
|
||||
function annotateChildPods(kubectl, resourceType, resourceName, allPods) {
|
||||
const commandExecutionResults = [];
|
||||
let owner = resourceName;
|
||||
if (resourceType.toLowerCase().indexOf('deployment') > -1) {
|
||||
owner = kubectl.getNewReplicaSet(resourceName);
|
||||
}
|
||||
if (allPods && allPods.items && allPods.items.length > 0) {
|
||||
allPods.items.forEach((pod) => {
|
||||
const owners = pod.metadata.ownerReferences;
|
||||
if (owners) {
|
||||
owners.forEach(ownerRef => {
|
||||
if (ownerRef.name === owner) {
|
||||
commandExecutionResults.push(kubectl.annotate('pod', pod.metadata.name, constants_1.workflowAnnotations, true));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
return commandExecutionResults;
|
||||
}
|
||||
exports.annotateChildPods = annotateChildPods;
|
||||
function annotateNamespace(kubectl, namespaceName) {
|
||||
const result = kubectl.getResource('namespace', namespaceName);
|
||||
if (!result) {
|
||||
return { code: -1, stderr: 'Failed to get resource' };
|
||||
}
|
||||
else if (result && result.stderr) {
|
||||
return result;
|
||||
}
|
||||
if (result && result.stdout) {
|
||||
const annotationsSet = JSON.parse(result.stdout).metadata.annotations;
|
||||
if (annotationsSet && annotationsSet.runUri) {
|
||||
if (annotationsSet.runUri.indexOf(process.env['GITHUB_REPOSITORY']) == -1) {
|
||||
core.debug(`Skipping 'annotate namespace' as namespace annotated by other workflow`);
|
||||
return { code: 0, stdout: '' };
|
||||
}
|
||||
}
|
||||
return kubectl.annotate('namespace', namespaceName, constants_1.workflowAnnotations, true);
|
||||
}
|
||||
}
|
||||
exports.annotateNamespace = annotateNamespace;
|
||||
function sleep(timeout) {
|
||||
return new Promise(resolve => setTimeout(resolve, timeout));
|
||||
}
|
||||
exports.sleep = sleep;
|
||||
function getRandomInt(max) {
|
||||
return Math.floor(Math.random() * Math.floor(max));
|
||||
}
|
||||
exports.getRandomInt = getRandomInt;
|
||||
function getCurrentTime() {
|
||||
return new Date().getTime();
|
||||
}
|
||||
exports.getCurrentTime = getCurrentTime;
|
||||
15
node_modules/.bin/esparse
generated
vendored
15
node_modules/.bin/esparse
generated
vendored
@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
|
||||
|
||||
case `uname` in
|
||||
*CYGWIN*) basedir=`cygpath -w "$basedir"`;;
|
||||
esac
|
||||
|
||||
if [ -x "$basedir/node" ]; then
|
||||
"$basedir/node" "$basedir/../esprima/bin/esparse.js" "$@"
|
||||
ret=$?
|
||||
else
|
||||
node "$basedir/../esprima/bin/esparse.js" "$@"
|
||||
ret=$?
|
||||
fi
|
||||
exit $ret
|
||||
7
node_modules/.bin/esparse.cmd
generated
vendored
7
node_modules/.bin/esparse.cmd
generated
vendored
@ -1,7 +0,0 @@
|
||||
@IF EXIST "%~dp0\node.exe" (
|
||||
"%~dp0\node.exe" "%~dp0\..\esprima\bin\esparse.js" %*
|
||||
) ELSE (
|
||||
@SETLOCAL
|
||||
@SET PATHEXT=%PATHEXT:;.JS;=;%
|
||||
node "%~dp0\..\esprima\bin\esparse.js" %*
|
||||
)
|
||||
15
node_modules/.bin/esvalidate
generated
vendored
15
node_modules/.bin/esvalidate
generated
vendored
@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
|
||||
|
||||
case `uname` in
|
||||
*CYGWIN*) basedir=`cygpath -w "$basedir"`;;
|
||||
esac
|
||||
|
||||
if [ -x "$basedir/node" ]; then
|
||||
"$basedir/node" "$basedir/../esprima/bin/esvalidate.js" "$@"
|
||||
ret=$?
|
||||
else
|
||||
node "$basedir/../esprima/bin/esvalidate.js" "$@"
|
||||
ret=$?
|
||||
fi
|
||||
exit $ret
|
||||
7
node_modules/.bin/esvalidate.cmd
generated
vendored
7
node_modules/.bin/esvalidate.cmd
generated
vendored
@ -1,7 +0,0 @@
|
||||
@IF EXIST "%~dp0\node.exe" (
|
||||
"%~dp0\node.exe" "%~dp0\..\esprima\bin\esvalidate.js" %*
|
||||
) ELSE (
|
||||
@SETLOCAL
|
||||
@SET PATHEXT=%PATHEXT:;.JS;=;%
|
||||
node "%~dp0\..\esprima\bin\esvalidate.js" %*
|
||||
)
|
||||
15
node_modules/.bin/js-yaml
generated
vendored
15
node_modules/.bin/js-yaml
generated
vendored
@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
|
||||
|
||||
case `uname` in
|
||||
*CYGWIN*) basedir=`cygpath -w "$basedir"`;;
|
||||
esac
|
||||
|
||||
if [ -x "$basedir/node" ]; then
|
||||
"$basedir/node" "$basedir/../js-yaml/bin/js-yaml.js" "$@"
|
||||
ret=$?
|
||||
else
|
||||
node "$basedir/../js-yaml/bin/js-yaml.js" "$@"
|
||||
ret=$?
|
||||
fi
|
||||
exit $ret
|
||||
7
node_modules/.bin/js-yaml.cmd
generated
vendored
7
node_modules/.bin/js-yaml.cmd
generated
vendored
@ -1,7 +0,0 @@
|
||||
@IF EXIST "%~dp0\node.exe" (
|
||||
"%~dp0\node.exe" "%~dp0\..\js-yaml\bin\js-yaml.js" %*
|
||||
) ELSE (
|
||||
@SETLOCAL
|
||||
@SET PATHEXT=%PATHEXT:;.JS;=;%
|
||||
node "%~dp0\..\js-yaml\bin\js-yaml.js" %*
|
||||
)
|
||||
15
node_modules/.bin/semver
generated
vendored
15
node_modules/.bin/semver
generated
vendored
@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
|
||||
|
||||
case `uname` in
|
||||
*CYGWIN*) basedir=`cygpath -w "$basedir"`;;
|
||||
esac
|
||||
|
||||
if [ -x "$basedir/node" ]; then
|
||||
"$basedir/node" "$basedir/../semver/bin/semver.js" "$@"
|
||||
ret=$?
|
||||
else
|
||||
node "$basedir/../semver/bin/semver.js" "$@"
|
||||
ret=$?
|
||||
fi
|
||||
exit $ret
|
||||
7
node_modules/.bin/semver.cmd
generated
vendored
7
node_modules/.bin/semver.cmd
generated
vendored
@ -1,7 +0,0 @@
|
||||
@IF EXIST "%~dp0\node.exe" (
|
||||
"%~dp0\node.exe" "%~dp0\..\semver\bin\semver.js" %*
|
||||
) ELSE (
|
||||
@SETLOCAL
|
||||
@SET PATHEXT=%PATHEXT:;.JS;=;%
|
||||
node "%~dp0\..\semver\bin\semver.js" %*
|
||||
)
|
||||
15
node_modules/.bin/uuid
generated
vendored
15
node_modules/.bin/uuid
generated
vendored
@ -1,15 +0,0 @@
|
||||
#!/bin/sh
|
||||
basedir=$(dirname "$(echo "$0" | sed -e 's,\\,/,g')")
|
||||
|
||||
case `uname` in
|
||||
*CYGWIN*) basedir=`cygpath -w "$basedir"`;;
|
||||
esac
|
||||
|
||||
if [ -x "$basedir/node" ]; then
|
||||
"$basedir/node" "$basedir/../uuid/bin/uuid" "$@"
|
||||
ret=$?
|
||||
else
|
||||
node "$basedir/../uuid/bin/uuid" "$@"
|
||||
ret=$?
|
||||
fi
|
||||
exit $ret
|
||||
7
node_modules/.bin/uuid.cmd
generated
vendored
7
node_modules/.bin/uuid.cmd
generated
vendored
@ -1,7 +0,0 @@
|
||||
@IF EXIST "%~dp0\node.exe" (
|
||||
"%~dp0\node.exe" "%~dp0\..\uuid\bin\uuid" %*
|
||||
) ELSE (
|
||||
@SETLOCAL
|
||||
@SET PATHEXT=%PATHEXT:;.JS;=;%
|
||||
node "%~dp0\..\uuid\bin\uuid" %*
|
||||
)
|
||||
9
node_modules/@actions/core/LICENSE.md
generated
vendored
9
node_modules/@actions/core/LICENSE.md
generated
vendored
@ -1,9 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright 2019 GitHub
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
147
node_modules/@actions/core/README.md
generated
vendored
147
node_modules/@actions/core/README.md
generated
vendored
@ -1,147 +0,0 @@
|
||||
# `@actions/core`
|
||||
|
||||
> Core functions for setting results, logging, registering secrets and exporting variables across actions
|
||||
|
||||
## Usage
|
||||
|
||||
### Import the package
|
||||
|
||||
```js
|
||||
// javascript
|
||||
const core = require('@actions/core');
|
||||
|
||||
// typescript
|
||||
import * as core from '@actions/core';
|
||||
```
|
||||
|
||||
#### Inputs/Outputs
|
||||
|
||||
Action inputs can be read with `getInput`. Outputs can be set with `setOutput` which makes them available to be mapped into inputs of other actions to ensure they are decoupled.
|
||||
|
||||
```js
|
||||
const myInput = core.getInput('inputName', { required: true });
|
||||
|
||||
core.setOutput('outputKey', 'outputVal');
|
||||
```
|
||||
|
||||
#### Exporting variables
|
||||
|
||||
Since each step runs in a separate process, you can use `exportVariable` to add it to this step and future steps environment blocks.
|
||||
|
||||
```js
|
||||
core.exportVariable('envVar', 'Val');
|
||||
```
|
||||
|
||||
#### Setting a secret
|
||||
|
||||
Setting a secret registers the secret with the runner to ensure it is masked in logs.
|
||||
|
||||
```js
|
||||
core.setSecret('myPassword');
|
||||
```
|
||||
|
||||
#### PATH Manipulation
|
||||
|
||||
To make a tool's path available in the path for the remainder of the job (without altering the machine or containers state), use `addPath`. The runner will prepend the path given to the jobs PATH.
|
||||
|
||||
```js
|
||||
core.addPath('/path/to/mytool');
|
||||
```
|
||||
|
||||
#### Exit codes
|
||||
|
||||
You should use this library to set the failing exit code for your action. If status is not set and the script runs to completion, that will lead to a success.
|
||||
|
||||
```js
|
||||
const core = require('@actions/core');
|
||||
|
||||
try {
|
||||
// Do stuff
|
||||
}
|
||||
catch (err) {
|
||||
// setFailed logs the message and sets a failing exit code
|
||||
core.setFailed(`Action failed with error ${err}`);
|
||||
}
|
||||
|
||||
Note that `setNeutral` is not yet implemented in actions V2 but equivalent functionality is being planned.
|
||||
|
||||
```
|
||||
|
||||
#### Logging
|
||||
|
||||
Finally, this library provides some utilities for logging. Note that debug logging is hidden from the logs by default. This behavior can be toggled by enabling the [Step Debug Logs](../../docs/action-debugging.md#step-debug-logs).
|
||||
|
||||
```js
|
||||
const core = require('@actions/core');
|
||||
|
||||
const myInput = core.getInput('input');
|
||||
try {
|
||||
core.debug('Inside try block');
|
||||
|
||||
if (!myInput) {
|
||||
core.warning('myInput was not set');
|
||||
}
|
||||
|
||||
if (core.isDebug()) {
|
||||
// curl -v https://github.com
|
||||
} else {
|
||||
// curl https://github.com
|
||||
}
|
||||
|
||||
// Do stuff
|
||||
core.info('Output to the actions build log')
|
||||
}
|
||||
catch (err) {
|
||||
core.error(`Error ${err}, action may still succeed though`);
|
||||
}
|
||||
```
|
||||
|
||||
This library can also wrap chunks of output in foldable groups.
|
||||
|
||||
```js
|
||||
const core = require('@actions/core')
|
||||
|
||||
// Manually wrap output
|
||||
core.startGroup('Do some function')
|
||||
doSomeFunction()
|
||||
core.endGroup()
|
||||
|
||||
// Wrap an asynchronous function call
|
||||
const result = await core.group('Do something async', async () => {
|
||||
const response = await doSomeHTTPRequest()
|
||||
return response
|
||||
})
|
||||
```
|
||||
|
||||
#### Action state
|
||||
|
||||
You can use this library to save state and get state for sharing information between a given wrapper action:
|
||||
|
||||
**action.yml**
|
||||
```yaml
|
||||
name: 'Wrapper action sample'
|
||||
inputs:
|
||||
name:
|
||||
default: 'GitHub'
|
||||
runs:
|
||||
using: 'node12'
|
||||
main: 'main.js'
|
||||
post: 'cleanup.js'
|
||||
```
|
||||
|
||||
In action's `main.js`:
|
||||
|
||||
```js
|
||||
const core = require('@actions/core');
|
||||
|
||||
core.saveState("pidToKill", 12345);
|
||||
```
|
||||
|
||||
In action's `cleanup.js`:
|
||||
```js
|
||||
const core = require('@actions/core');
|
||||
|
||||
var pid = core.getState("pidToKill");
|
||||
|
||||
process.kill(pid);
|
||||
```
|
||||
16
node_modules/@actions/core/lib/command.d.ts
generated
vendored
16
node_modules/@actions/core/lib/command.d.ts
generated
vendored
@ -1,16 +0,0 @@
|
||||
interface CommandProperties {
|
||||
[key: string]: any;
|
||||
}
|
||||
/**
|
||||
* Commands
|
||||
*
|
||||
* Command Format:
|
||||
* ::name key=value,key=value::message
|
||||
*
|
||||
* Examples:
|
||||
* ::warning::This is the message
|
||||
* ::set-env name=MY_VAR::some value
|
||||
*/
|
||||
export declare function issueCommand(command: string, properties: CommandProperties, message: any): void;
|
||||
export declare function issue(name: string, message?: string): void;
|
||||
export {};
|
||||
79
node_modules/@actions/core/lib/command.js
generated
vendored
79
node_modules/@actions/core/lib/command.js
generated
vendored
@ -1,79 +0,0 @@
|
||||
"use strict";
|
||||
var __importStar = (this && this.__importStar) || function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
|
||||
result["default"] = mod;
|
||||
return result;
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const os = __importStar(require("os"));
|
||||
const utils_1 = require("./utils");
|
||||
/**
|
||||
* Commands
|
||||
*
|
||||
* Command Format:
|
||||
* ::name key=value,key=value::message
|
||||
*
|
||||
* Examples:
|
||||
* ::warning::This is the message
|
||||
* ::set-env name=MY_VAR::some value
|
||||
*/
|
||||
function issueCommand(command, properties, message) {
|
||||
const cmd = new Command(command, properties, message);
|
||||
process.stdout.write(cmd.toString() + os.EOL);
|
||||
}
|
||||
exports.issueCommand = issueCommand;
|
||||
function issue(name, message = '') {
|
||||
issueCommand(name, {}, message);
|
||||
}
|
||||
exports.issue = issue;
|
||||
const CMD_STRING = '::';
|
||||
class Command {
|
||||
constructor(command, properties, message) {
|
||||
if (!command) {
|
||||
command = 'missing.command';
|
||||
}
|
||||
this.command = command;
|
||||
this.properties = properties;
|
||||
this.message = message;
|
||||
}
|
||||
toString() {
|
||||
let cmdStr = CMD_STRING + this.command;
|
||||
if (this.properties && Object.keys(this.properties).length > 0) {
|
||||
cmdStr += ' ';
|
||||
let first = true;
|
||||
for (const key in this.properties) {
|
||||
if (this.properties.hasOwnProperty(key)) {
|
||||
const val = this.properties[key];
|
||||
if (val) {
|
||||
if (first) {
|
||||
first = false;
|
||||
}
|
||||
else {
|
||||
cmdStr += ',';
|
||||
}
|
||||
cmdStr += `${key}=${escapeProperty(val)}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
cmdStr += `${CMD_STRING}${escapeData(this.message)}`;
|
||||
return cmdStr;
|
||||
}
|
||||
}
|
||||
function escapeData(s) {
|
||||
return utils_1.toCommandValue(s)
|
||||
.replace(/%/g, '%25')
|
||||
.replace(/\r/g, '%0D')
|
||||
.replace(/\n/g, '%0A');
|
||||
}
|
||||
function escapeProperty(s) {
|
||||
return utils_1.toCommandValue(s)
|
||||
.replace(/%/g, '%25')
|
||||
.replace(/\r/g, '%0D')
|
||||
.replace(/\n/g, '%0A')
|
||||
.replace(/:/g, '%3A')
|
||||
.replace(/,/g, '%2C');
|
||||
}
|
||||
//# sourceMappingURL=command.js.map
|
||||
1
node_modules/@actions/core/lib/command.js.map
generated
vendored
1
node_modules/@actions/core/lib/command.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"command.js","sourceRoot":"","sources":["../src/command.ts"],"names":[],"mappings":";;;;;;;;;AAAA,uCAAwB;AACxB,mCAAsC;AAWtC;;;;;;;;;GASG;AACH,SAAgB,YAAY,CAC1B,OAAe,EACf,UAA6B,EAC7B,OAAY;IAEZ,MAAM,GAAG,GAAG,IAAI,OAAO,CAAC,OAAO,EAAE,UAAU,EAAE,OAAO,CAAC,CAAA;IACrD,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,GAAG,CAAC,CAAA;AAC/C,CAAC;AAPD,oCAOC;AAED,SAAgB,KAAK,CAAC,IAAY,EAAE,UAAkB,EAAE;IACtD,YAAY,CAAC,IAAI,EAAE,EAAE,EAAE,OAAO,CAAC,CAAA;AACjC,CAAC;AAFD,sBAEC;AAED,MAAM,UAAU,GAAG,IAAI,CAAA;AAEvB,MAAM,OAAO;IAKX,YAAY,OAAe,EAAE,UAA6B,EAAE,OAAe;QACzE,IAAI,CAAC,OAAO,EAAE;YACZ,OAAO,GAAG,iBAAiB,CAAA;SAC5B;QAED,IAAI,CAAC,OAAO,GAAG,OAAO,CAAA;QACtB,IAAI,CAAC,UAAU,GAAG,UAAU,CAAA;QAC5B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAA;IACxB,CAAC;IAED,QAAQ;QACN,IAAI,MAAM,GAAG,UAAU,GAAG,IAAI,CAAC,OAAO,CAAA;QAEtC,IAAI,IAAI,CAAC,UAAU,IAAI,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC,MAAM,GAAG,CAAC,EAAE;YAC9D,MAAM,IAAI,GAAG,CAAA;YACb,IAAI,KAAK,GAAG,IAAI,CAAA;YAChB,KAAK,MAAM,GAAG,IAAI,IAAI,CAAC,UAAU,EAAE;gBACjC,IAAI,IAAI,CAAC,UAAU,CAAC,cAAc,CAAC,GAAG,CAAC,EAAE;oBACvC,MAAM,GAAG,GAAG,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,CAAA;oBAChC,IAAI,GAAG,EAAE;wBACP,IAAI,KAAK,EAAE;4BACT,KAAK,GAAG,KAAK,CAAA;yBACd;6BAAM;4BACL,MAAM,IAAI,GAAG,CAAA;yBACd;wBAED,MAAM,IAAI,GAAG,GAAG,IAAI,cAAc,CAAC,GAAG,CAAC,EAAE,CAAA;qBAC1C;iBACF;aACF;SACF;QAED,MAAM,IAAI,GAAG,UAAU,GAAG,UAAU,CAAC,IAAI,CAAC,OAAO,CAAC,EAAE,CAAA;QACpD,OAAO,MAAM,CAAA;IACf,CAAC;CACF;AAED,SAAS,UAAU,CAAC,CAAM;IACxB,OAAO,sBAAc,CAAC,CAAC,CAAC;SACrB,OAAO,CAAC,IAAI,EAAE,KAAK,CAAC;SACpB,OAAO,CAAC,KAAK,EAAE,KAAK,CAAC;SACrB,OAAO,CAAC,KAAK,EAAE,KAAK,CAAC,CAAA;AAC1B,CAAC;AAED,SAAS,cAAc,CAAC,CAAM;IAC5B,OAAO,sBAAc,CAAC,CAAC,CAAC;SACrB,OAAO,CAAC,IAAI,EAAE,KAAK,CAAC;SACpB,OAAO,CAAC,KAAK,EAAE,KAAK,CAAC;SACrB,OAAO,CAAC,KAAK,EAAE,KAAK,CAAC;SACrB,OAAO,CAAC,IAAI,EAAE,KAAK,CAAC;SACpB,OAAO,CAAC,IAAI,EAAE,KAAK,CAAC,CAAA;AACzB,CAAC"}
|
||||
122
node_modules/@actions/core/lib/core.d.ts
generated
vendored
122
node_modules/@actions/core/lib/core.d.ts
generated
vendored
@ -1,122 +0,0 @@
|
||||
/**
|
||||
* Interface for getInput options
|
||||
*/
|
||||
export interface InputOptions {
|
||||
/** Optional. Whether the input is required. If required and not present, will throw. Defaults to false */
|
||||
required?: boolean;
|
||||
}
|
||||
/**
|
||||
* The code to exit an action
|
||||
*/
|
||||
export declare enum ExitCode {
|
||||
/**
|
||||
* A code indicating that the action was successful
|
||||
*/
|
||||
Success = 0,
|
||||
/**
|
||||
* A code indicating that the action was a failure
|
||||
*/
|
||||
Failure = 1
|
||||
}
|
||||
/**
|
||||
* Sets env variable for this action and future actions in the job
|
||||
* @param name the name of the variable to set
|
||||
* @param val the value of the variable. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
export declare function exportVariable(name: string, val: any): void;
|
||||
/**
|
||||
* Registers a secret which will get masked from logs
|
||||
* @param secret value of the secret
|
||||
*/
|
||||
export declare function setSecret(secret: string): void;
|
||||
/**
|
||||
* Prepends inputPath to the PATH (for this action and future actions)
|
||||
* @param inputPath
|
||||
*/
|
||||
export declare function addPath(inputPath: string): void;
|
||||
/**
|
||||
* Gets the value of an input. The value is also trimmed.
|
||||
*
|
||||
* @param name name of the input to get
|
||||
* @param options optional. See InputOptions.
|
||||
* @returns string
|
||||
*/
|
||||
export declare function getInput(name: string, options?: InputOptions): string;
|
||||
/**
|
||||
* Sets the value of an output.
|
||||
*
|
||||
* @param name name of the output to set
|
||||
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
export declare function setOutput(name: string, value: any): void;
|
||||
/**
|
||||
* Enables or disables the echoing of commands into stdout for the rest of the step.
|
||||
* Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set.
|
||||
*
|
||||
*/
|
||||
export declare function setCommandEcho(enabled: boolean): void;
|
||||
/**
|
||||
* Sets the action status to failed.
|
||||
* When the action exits it will be with an exit code of 1
|
||||
* @param message add error issue message
|
||||
*/
|
||||
export declare function setFailed(message: string | Error): void;
|
||||
/**
|
||||
* Gets whether Actions Step Debug is on or not
|
||||
*/
|
||||
export declare function isDebug(): boolean;
|
||||
/**
|
||||
* Writes debug message to user log
|
||||
* @param message debug message
|
||||
*/
|
||||
export declare function debug(message: string): void;
|
||||
/**
|
||||
* Adds an error issue
|
||||
* @param message error issue message. Errors will be converted to string via toString()
|
||||
*/
|
||||
export declare function error(message: string | Error): void;
|
||||
/**
|
||||
* Adds an warning issue
|
||||
* @param message warning issue message. Errors will be converted to string via toString()
|
||||
*/
|
||||
export declare function warning(message: string | Error): void;
|
||||
/**
|
||||
* Writes info to log with console.log.
|
||||
* @param message info message
|
||||
*/
|
||||
export declare function info(message: string): void;
|
||||
/**
|
||||
* Begin an output group.
|
||||
*
|
||||
* Output until the next `groupEnd` will be foldable in this group
|
||||
*
|
||||
* @param name The name of the output group
|
||||
*/
|
||||
export declare function startGroup(name: string): void;
|
||||
/**
|
||||
* End an output group.
|
||||
*/
|
||||
export declare function endGroup(): void;
|
||||
/**
|
||||
* Wrap an asynchronous function call in a group.
|
||||
*
|
||||
* Returns the same type as the function itself.
|
||||
*
|
||||
* @param name The name of the group
|
||||
* @param fn The function to wrap in the group
|
||||
*/
|
||||
export declare function group<T>(name: string, fn: () => Promise<T>): Promise<T>;
|
||||
/**
|
||||
* Saves state for current action, the state can only be retrieved by this action's post job execution.
|
||||
*
|
||||
* @param name name of the state to store
|
||||
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
export declare function saveState(name: string, value: any): void;
|
||||
/**
|
||||
* Gets the value of an state set by this action's main execution.
|
||||
*
|
||||
* @param name name of the state to get
|
||||
* @returns string
|
||||
*/
|
||||
export declare function getState(name: string): string;
|
||||
238
node_modules/@actions/core/lib/core.js
generated
vendored
238
node_modules/@actions/core/lib/core.js
generated
vendored
@ -1,238 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
var __importStar = (this && this.__importStar) || function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
|
||||
result["default"] = mod;
|
||||
return result;
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const command_1 = require("./command");
|
||||
const file_command_1 = require("./file-command");
|
||||
const utils_1 = require("./utils");
|
||||
const os = __importStar(require("os"));
|
||||
const path = __importStar(require("path"));
|
||||
/**
|
||||
* The code to exit an action
|
||||
*/
|
||||
var ExitCode;
|
||||
(function (ExitCode) {
|
||||
/**
|
||||
* A code indicating that the action was successful
|
||||
*/
|
||||
ExitCode[ExitCode["Success"] = 0] = "Success";
|
||||
/**
|
||||
* A code indicating that the action was a failure
|
||||
*/
|
||||
ExitCode[ExitCode["Failure"] = 1] = "Failure";
|
||||
})(ExitCode = exports.ExitCode || (exports.ExitCode = {}));
|
||||
//-----------------------------------------------------------------------
|
||||
// Variables
|
||||
//-----------------------------------------------------------------------
|
||||
/**
|
||||
* Sets env variable for this action and future actions in the job
|
||||
* @param name the name of the variable to set
|
||||
* @param val the value of the variable. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
function exportVariable(name, val) {
|
||||
const convertedVal = utils_1.toCommandValue(val);
|
||||
process.env[name] = convertedVal;
|
||||
const filePath = process.env['GITHUB_ENV'] || '';
|
||||
if (filePath) {
|
||||
const delimiter = '_GitHubActionsFileCommandDelimeter_';
|
||||
const commandValue = `${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}`;
|
||||
file_command_1.issueCommand('ENV', commandValue);
|
||||
}
|
||||
else {
|
||||
command_1.issueCommand('set-env', { name }, convertedVal);
|
||||
}
|
||||
}
|
||||
exports.exportVariable = exportVariable;
|
||||
/**
|
||||
* Registers a secret which will get masked from logs
|
||||
* @param secret value of the secret
|
||||
*/
|
||||
function setSecret(secret) {
|
||||
command_1.issueCommand('add-mask', {}, secret);
|
||||
}
|
||||
exports.setSecret = setSecret;
|
||||
/**
|
||||
* Prepends inputPath to the PATH (for this action and future actions)
|
||||
* @param inputPath
|
||||
*/
|
||||
function addPath(inputPath) {
|
||||
const filePath = process.env['GITHUB_PATH'] || '';
|
||||
if (filePath) {
|
||||
file_command_1.issueCommand('PATH', inputPath);
|
||||
}
|
||||
else {
|
||||
command_1.issueCommand('add-path', {}, inputPath);
|
||||
}
|
||||
process.env['PATH'] = `${inputPath}${path.delimiter}${process.env['PATH']}`;
|
||||
}
|
||||
exports.addPath = addPath;
|
||||
/**
|
||||
* Gets the value of an input. The value is also trimmed.
|
||||
*
|
||||
* @param name name of the input to get
|
||||
* @param options optional. See InputOptions.
|
||||
* @returns string
|
||||
*/
|
||||
function getInput(name, options) {
|
||||
const val = process.env[`INPUT_${name.replace(/ /g, '_').toUpperCase()}`] || '';
|
||||
if (options && options.required && !val) {
|
||||
throw new Error(`Input required and not supplied: ${name}`);
|
||||
}
|
||||
return val.trim();
|
||||
}
|
||||
exports.getInput = getInput;
|
||||
/**
|
||||
* Sets the value of an output.
|
||||
*
|
||||
* @param name name of the output to set
|
||||
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
function setOutput(name, value) {
|
||||
command_1.issueCommand('set-output', { name }, value);
|
||||
}
|
||||
exports.setOutput = setOutput;
|
||||
/**
|
||||
* Enables or disables the echoing of commands into stdout for the rest of the step.
|
||||
* Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set.
|
||||
*
|
||||
*/
|
||||
function setCommandEcho(enabled) {
|
||||
command_1.issue('echo', enabled ? 'on' : 'off');
|
||||
}
|
||||
exports.setCommandEcho = setCommandEcho;
|
||||
//-----------------------------------------------------------------------
|
||||
// Results
|
||||
//-----------------------------------------------------------------------
|
||||
/**
|
||||
* Sets the action status to failed.
|
||||
* When the action exits it will be with an exit code of 1
|
||||
* @param message add error issue message
|
||||
*/
|
||||
function setFailed(message) {
|
||||
process.exitCode = ExitCode.Failure;
|
||||
error(message);
|
||||
}
|
||||
exports.setFailed = setFailed;
|
||||
//-----------------------------------------------------------------------
|
||||
// Logging Commands
|
||||
//-----------------------------------------------------------------------
|
||||
/**
|
||||
* Gets whether Actions Step Debug is on or not
|
||||
*/
|
||||
function isDebug() {
|
||||
return process.env['RUNNER_DEBUG'] === '1';
|
||||
}
|
||||
exports.isDebug = isDebug;
|
||||
/**
|
||||
* Writes debug message to user log
|
||||
* @param message debug message
|
||||
*/
|
||||
function debug(message) {
|
||||
command_1.issueCommand('debug', {}, message);
|
||||
}
|
||||
exports.debug = debug;
|
||||
/**
|
||||
* Adds an error issue
|
||||
* @param message error issue message. Errors will be converted to string via toString()
|
||||
*/
|
||||
function error(message) {
|
||||
command_1.issue('error', message instanceof Error ? message.toString() : message);
|
||||
}
|
||||
exports.error = error;
|
||||
/**
|
||||
* Adds an warning issue
|
||||
* @param message warning issue message. Errors will be converted to string via toString()
|
||||
*/
|
||||
function warning(message) {
|
||||
command_1.issue('warning', message instanceof Error ? message.toString() : message);
|
||||
}
|
||||
exports.warning = warning;
|
||||
/**
|
||||
* Writes info to log with console.log.
|
||||
* @param message info message
|
||||
*/
|
||||
function info(message) {
|
||||
process.stdout.write(message + os.EOL);
|
||||
}
|
||||
exports.info = info;
|
||||
/**
|
||||
* Begin an output group.
|
||||
*
|
||||
* Output until the next `groupEnd` will be foldable in this group
|
||||
*
|
||||
* @param name The name of the output group
|
||||
*/
|
||||
function startGroup(name) {
|
||||
command_1.issue('group', name);
|
||||
}
|
||||
exports.startGroup = startGroup;
|
||||
/**
|
||||
* End an output group.
|
||||
*/
|
||||
function endGroup() {
|
||||
command_1.issue('endgroup');
|
||||
}
|
||||
exports.endGroup = endGroup;
|
||||
/**
|
||||
* Wrap an asynchronous function call in a group.
|
||||
*
|
||||
* Returns the same type as the function itself.
|
||||
*
|
||||
* @param name The name of the group
|
||||
* @param fn The function to wrap in the group
|
||||
*/
|
||||
function group(name, fn) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
startGroup(name);
|
||||
let result;
|
||||
try {
|
||||
result = yield fn();
|
||||
}
|
||||
finally {
|
||||
endGroup();
|
||||
}
|
||||
return result;
|
||||
});
|
||||
}
|
||||
exports.group = group;
|
||||
//-----------------------------------------------------------------------
|
||||
// Wrapper action state
|
||||
//-----------------------------------------------------------------------
|
||||
/**
|
||||
* Saves state for current action, the state can only be retrieved by this action's post job execution.
|
||||
*
|
||||
* @param name name of the state to store
|
||||
* @param value value to store. Non-string values will be converted to a string via JSON.stringify
|
||||
*/
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
function saveState(name, value) {
|
||||
command_1.issueCommand('save-state', { name }, value);
|
||||
}
|
||||
exports.saveState = saveState;
|
||||
/**
|
||||
* Gets the value of an state set by this action's main execution.
|
||||
*
|
||||
* @param name name of the state to get
|
||||
* @returns string
|
||||
*/
|
||||
function getState(name) {
|
||||
return process.env[`STATE_${name}`] || '';
|
||||
}
|
||||
exports.getState = getState;
|
||||
//# sourceMappingURL=core.js.map
|
||||
1
node_modules/@actions/core/lib/core.js.map
generated
vendored
1
node_modules/@actions/core/lib/core.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"core.js","sourceRoot":"","sources":["../src/core.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;AAAA,uCAA6C;AAC7C,iDAA+D;AAC/D,mCAAsC;AAEtC,uCAAwB;AACxB,2CAA4B;AAU5B;;GAEG;AACH,IAAY,QAUX;AAVD,WAAY,QAAQ;IAClB;;OAEG;IACH,6CAAW,CAAA;IAEX;;OAEG;IACH,6CAAW,CAAA;AACb,CAAC,EAVW,QAAQ,GAAR,gBAAQ,KAAR,gBAAQ,QAUnB;AAED,yEAAyE;AACzE,YAAY;AACZ,yEAAyE;AAEzE;;;;GAIG;AACH,8DAA8D;AAC9D,SAAgB,cAAc,CAAC,IAAY,EAAE,GAAQ;IACnD,MAAM,YAAY,GAAG,sBAAc,CAAC,GAAG,CAAC,CAAA;IACxC,OAAO,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,YAAY,CAAA;IAEhC,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,YAAY,CAAC,IAAI,EAAE,CAAA;IAChD,IAAI,QAAQ,EAAE;QACZ,MAAM,SAAS,GAAG,qCAAqC,CAAA;QACvD,MAAM,YAAY,GAAG,GAAG,IAAI,KAAK,SAAS,GAAG,EAAE,CAAC,GAAG,GAAG,YAAY,GAAG,EAAE,CAAC,GAAG,GAAG,SAAS,EAAE,CAAA;QACzF,2BAAgB,CAAC,KAAK,EAAE,YAAY,CAAC,CAAA;KACtC;SAAM;QACL,sBAAY,CAAC,SAAS,EAAE,EAAC,IAAI,EAAC,EAAE,YAAY,CAAC,CAAA;KAC9C;AACH,CAAC;AAZD,wCAYC;AAED;;;GAGG;AACH,SAAgB,SAAS,CAAC,MAAc;IACtC,sBAAY,CAAC,UAAU,EAAE,EAAE,EAAE,MAAM,CAAC,CAAA;AACtC,CAAC;AAFD,8BAEC;AAED;;;GAGG;AACH,SAAgB,OAAO,CAAC,SAAiB;IACvC,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,aAAa,CAAC,IAAI,EAAE,CAAA;IACjD,IAAI,QAAQ,EAAE;QACZ,2BAAgB,CAAC,MAAM,EAAE,SAAS,CAAC,CAAA;KACpC;SAAM;QACL,sBAAY,CAAC,UAAU,EAAE,EAAE,EAAE,SAAS,CAAC,CAAA;KACxC;IACD,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,GAAG,SAAS,GAAG,IAAI,CAAC,SAAS,GAAG,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,EAAE,CAAA;AAC7E,CAAC;AARD,0BAQC;AAED;;;;;;GAMG;AACH,SAAgB,QAAQ,CAAC,IAAY,EAAE,OAAsB;IAC3D,MAAM,GAAG,GACP,OAAO,CAAC,GAAG,CAAC,SAAS,IAAI,CAAC,OAAO,CAAC,IAAI,EAAE,GAAG,CAAC,CAAC,WAAW,EAAE,EAAE,CAAC,IAAI,EAAE,CAAA;IACrE,IAAI,OAAO,IAAI,OAAO,CAAC,QAAQ,IAAI,CAAC,GAAG,EAAE;QACvC,MAAM,IAAI,KAAK,CAAC,oCAAoC,IAAI,EAAE,CAAC,CAAA;KAC5D;IAED,OAAO,GAAG,CAAC,IAAI,EAAE,CAAA;AACnB,CAAC;AARD,4BAQC;AAED;;;;;GAKG;AACH,8DAA8D;AAC9D,SAAgB,SAAS,CAAC,IAAY,EAAE,KAAU;IAChD,sBAAY,CAAC,YAAY,EAAE,EAAC,IAAI,EAAC,EAAE,KAAK,CAAC,CAAA;AAC3C,CAAC;AAFD,8BAEC;AAED;;;;GAIG;AACH,SAAgB,cAAc,CAAC,OAAgB;IAC7C,eAAK,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,KAAK,CAAC,CAAA;AACv
C,CAAC;AAFD,wCAEC;AAED,yEAAyE;AACzE,UAAU;AACV,yEAAyE;AAEzE;;;;GAIG;AACH,SAAgB,SAAS,CAAC,OAAuB;IAC/C,OAAO,CAAC,QAAQ,GAAG,QAAQ,CAAC,OAAO,CAAA;IAEnC,KAAK,CAAC,OAAO,CAAC,CAAA;AAChB,CAAC;AAJD,8BAIC;AAED,yEAAyE;AACzE,mBAAmB;AACnB,yEAAyE;AAEzE;;GAEG;AACH,SAAgB,OAAO;IACrB,OAAO,OAAO,CAAC,GAAG,CAAC,cAAc,CAAC,KAAK,GAAG,CAAA;AAC5C,CAAC;AAFD,0BAEC;AAED;;;GAGG;AACH,SAAgB,KAAK,CAAC,OAAe;IACnC,sBAAY,CAAC,OAAO,EAAE,EAAE,EAAE,OAAO,CAAC,CAAA;AACpC,CAAC;AAFD,sBAEC;AAED;;;GAGG;AACH,SAAgB,KAAK,CAAC,OAAuB;IAC3C,eAAK,CAAC,OAAO,EAAE,OAAO,YAAY,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,CAAA;AACzE,CAAC;AAFD,sBAEC;AAED;;;GAGG;AACH,SAAgB,OAAO,CAAC,OAAuB;IAC7C,eAAK,CAAC,SAAS,EAAE,OAAO,YAAY,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,CAAA;AAC3E,CAAC;AAFD,0BAEC;AAED;;;GAGG;AACH,SAAgB,IAAI,CAAC,OAAe;IAClC,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,OAAO,GAAG,EAAE,CAAC,GAAG,CAAC,CAAA;AACxC,CAAC;AAFD,oBAEC;AAED;;;;;;GAMG;AACH,SAAgB,UAAU,CAAC,IAAY;IACrC,eAAK,CAAC,OAAO,EAAE,IAAI,CAAC,CAAA;AACtB,CAAC;AAFD,gCAEC;AAED;;GAEG;AACH,SAAgB,QAAQ;IACtB,eAAK,CAAC,UAAU,CAAC,CAAA;AACnB,CAAC;AAFD,4BAEC;AAED;;;;;;;GAOG;AACH,SAAsB,KAAK,CAAI,IAAY,EAAE,EAAoB;;QAC/D,UAAU,CAAC,IAAI,CAAC,CAAA;QAEhB,IAAI,MAAS,CAAA;QAEb,IAAI;YACF,MAAM,GAAG,MAAM,EAAE,EAAE,CAAA;SACpB;gBAAS;YACR,QAAQ,EAAE,CAAA;SACX;QAED,OAAO,MAAM,CAAA;IACf,CAAC;CAAA;AAZD,sBAYC;AAED,yEAAyE;AACzE,uBAAuB;AACvB,yEAAyE;AAEzE;;;;;GAKG;AACH,8DAA8D;AAC9D,SAAgB,SAAS,CAAC,IAAY,EAAE,KAAU;IAChD,sBAAY,CAAC,YAAY,EAAE,EAAC,IAAI,EAAC,EAAE,KAAK,CAAC,CAAA;AAC3C,CAAC;AAFD,8BAEC;AAED;;;;;GAKG;AACH,SAAgB,QAAQ,CAAC,IAAY;IACnC,OAAO,OAAO,CAAC,GAAG,CAAC,SAAS,IAAI,EAAE,CAAC,IAAI,EAAE,CAAA;AAC3C,CAAC;AAFD,4BAEC"}
|
||||
1
node_modules/@actions/core/lib/file-command.d.ts
generated
vendored
1
node_modules/@actions/core/lib/file-command.d.ts
generated
vendored
@ -1 +0,0 @@
|
||||
export declare function issueCommand(command: string, message: any): void;
|
||||
29
node_modules/@actions/core/lib/file-command.js
generated
vendored
29
node_modules/@actions/core/lib/file-command.js
generated
vendored
@ -1,29 +0,0 @@
|
||||
"use strict";
|
||||
// For internal use, subject to change.
|
||||
var __importStar = (this && this.__importStar) || function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
|
||||
result["default"] = mod;
|
||||
return result;
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
// We use any as a valid input type
|
||||
/* eslint-disable @typescript-eslint/no-explicit-any */
|
||||
const fs = __importStar(require("fs"));
|
||||
const os = __importStar(require("os"));
|
||||
const utils_1 = require("./utils");
|
||||
function issueCommand(command, message) {
|
||||
const filePath = process.env[`GITHUB_${command}`];
|
||||
if (!filePath) {
|
||||
throw new Error(`Unable to find environment variable for file command ${command}`);
|
||||
}
|
||||
if (!fs.existsSync(filePath)) {
|
||||
throw new Error(`Missing file at path: ${filePath}`);
|
||||
}
|
||||
fs.appendFileSync(filePath, `${utils_1.toCommandValue(message)}${os.EOL}`, {
|
||||
encoding: 'utf8'
|
||||
});
|
||||
}
|
||||
exports.issueCommand = issueCommand;
|
||||
//# sourceMappingURL=file-command.js.map
|
||||
1
node_modules/@actions/core/lib/file-command.js.map
generated
vendored
1
node_modules/@actions/core/lib/file-command.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"file-command.js","sourceRoot":"","sources":["../src/file-command.ts"],"names":[],"mappings":";AAAA,uCAAuC;;;;;;;;;AAEvC,mCAAmC;AACnC,uDAAuD;AAEvD,uCAAwB;AACxB,uCAAwB;AACxB,mCAAsC;AAEtC,SAAgB,YAAY,CAAC,OAAe,EAAE,OAAY;IACxD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,UAAU,OAAO,EAAE,CAAC,CAAA;IACjD,IAAI,CAAC,QAAQ,EAAE;QACb,MAAM,IAAI,KAAK,CACb,wDAAwD,OAAO,EAAE,CAClE,CAAA;KACF;IACD,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,QAAQ,CAAC,EAAE;QAC5B,MAAM,IAAI,KAAK,CAAC,yBAAyB,QAAQ,EAAE,CAAC,CAAA;KACrD;IAED,EAAE,CAAC,cAAc,CAAC,QAAQ,EAAE,GAAG,sBAAc,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,GAAG,EAAE,EAAE;QACjE,QAAQ,EAAE,MAAM;KACjB,CAAC,CAAA;AACJ,CAAC;AAdD,oCAcC"}
|
||||
5
node_modules/@actions/core/lib/utils.d.ts
generated
vendored
5
node_modules/@actions/core/lib/utils.d.ts
generated
vendored
@ -1,5 +0,0 @@
|
||||
/**
|
||||
* Sanitizes an input into a string so it can be passed into issueCommand safely
|
||||
* @param input input to sanitize into a string
|
||||
*/
|
||||
export declare function toCommandValue(input: any): string;
|
||||
19
node_modules/@actions/core/lib/utils.js
generated
vendored
19
node_modules/@actions/core/lib/utils.js
generated
vendored
@ -1,19 +0,0 @@
|
||||
"use strict";
|
||||
// We use any as a valid input type
|
||||
/* eslint-disable @typescript-eslint/no-explicit-any */
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
/**
|
||||
* Sanitizes an input into a string so it can be passed into issueCommand safely
|
||||
* @param input input to sanitize into a string
|
||||
*/
|
||||
function toCommandValue(input) {
|
||||
if (input === null || input === undefined) {
|
||||
return '';
|
||||
}
|
||||
else if (typeof input === 'string' || input instanceof String) {
|
||||
return input;
|
||||
}
|
||||
return JSON.stringify(input);
|
||||
}
|
||||
exports.toCommandValue = toCommandValue;
|
||||
//# sourceMappingURL=utils.js.map
|
||||
1
node_modules/@actions/core/lib/utils.js.map
generated
vendored
1
node_modules/@actions/core/lib/utils.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"utils.js","sourceRoot":"","sources":["../src/utils.ts"],"names":[],"mappings":";AAAA,mCAAmC;AACnC,uDAAuD;;AAEvD;;;GAGG;AACH,SAAgB,cAAc,CAAC,KAAU;IACvC,IAAI,KAAK,KAAK,IAAI,IAAI,KAAK,KAAK,SAAS,EAAE;QACzC,OAAO,EAAE,CAAA;KACV;SAAM,IAAI,OAAO,KAAK,KAAK,QAAQ,IAAI,KAAK,YAAY,MAAM,EAAE;QAC/D,OAAO,KAAe,CAAA;KACvB;IACD,OAAO,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,CAAA;AAC9B,CAAC;AAPD,wCAOC"}
|
||||
68
node_modules/@actions/core/package.json
generated
vendored
68
node_modules/@actions/core/package.json
generated
vendored
@ -1,68 +0,0 @@
|
||||
{
|
||||
"_from": "@actions/core@^1.2.6",
|
||||
"_id": "@actions/core@1.2.6",
|
||||
"_inBundle": false,
|
||||
"_integrity": "sha512-ZQYitnqiyBc3D+k7LsgSBmMDVkOVidaagDG7j3fOym77jNunWRuYx7VSHa9GNfFZh+zh61xsCjRj4JxMZlDqTA==",
|
||||
"_location": "/@actions/core",
|
||||
"_phantomChildren": {},
|
||||
"_requested": {
|
||||
"type": "range",
|
||||
"registry": true,
|
||||
"raw": "@actions/core@^1.2.6",
|
||||
"name": "@actions/core",
|
||||
"escapedName": "@actions%2fcore",
|
||||
"scope": "@actions",
|
||||
"rawSpec": "^1.2.6",
|
||||
"saveSpec": null,
|
||||
"fetchSpec": "^1.2.6"
|
||||
},
|
||||
"_requiredBy": [
|
||||
"/",
|
||||
"/@actions/tool-cache"
|
||||
],
|
||||
"_resolved": "https://registry.npmjs.org/@actions/core/-/core-1.2.6.tgz",
|
||||
"_shasum": "a78d49f41a4def18e88ce47c2cac615d5694bf09",
|
||||
"_spec": "@actions/core@^1.2.6",
|
||||
"_where": "C:\\Users\\kodey\\workspace\\github-actions\\k8s-deploy",
|
||||
"bugs": {
|
||||
"url": "https://github.com/actions/toolkit/issues"
|
||||
},
|
||||
"bundleDependencies": false,
|
||||
"deprecated": false,
|
||||
"description": "Actions core lib",
|
||||
"devDependencies": {
|
||||
"@types/node": "^12.0.2"
|
||||
},
|
||||
"directories": {
|
||||
"lib": "lib",
|
||||
"test": "__tests__"
|
||||
},
|
||||
"files": [
|
||||
"lib",
|
||||
"!.DS_Store"
|
||||
],
|
||||
"homepage": "https://github.com/actions/toolkit/tree/main/packages/core",
|
||||
"keywords": [
|
||||
"github",
|
||||
"actions",
|
||||
"core"
|
||||
],
|
||||
"license": "MIT",
|
||||
"main": "lib/core.js",
|
||||
"name": "@actions/core",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/actions/toolkit.git",
|
||||
"directory": "packages/core"
|
||||
},
|
||||
"scripts": {
|
||||
"audit-moderate": "npm install && npm audit --json --audit-level=moderate > audit.json",
|
||||
"test": "echo \"Error: run tests from root\" && exit 1",
|
||||
"tsc": "tsc"
|
||||
},
|
||||
"types": "lib/core.d.ts",
|
||||
"version": "1.2.6"
|
||||
}
|
||||
7
node_modules/@actions/exec/LICENSE.md
generated
vendored
7
node_modules/@actions/exec/LICENSE.md
generated
vendored
@ -1,7 +0,0 @@
|
||||
Copyright 2019 GitHub
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
60
node_modules/@actions/exec/README.md
generated
vendored
60
node_modules/@actions/exec/README.md
generated
vendored
@ -1,60 +0,0 @@
|
||||
# `@actions/exec`
|
||||
|
||||
## Usage
|
||||
|
||||
#### Basic
|
||||
|
||||
You can use this package to execute your tools on the command line in a cross platform way:
|
||||
|
||||
```js
|
||||
const exec = require('@actions/exec');
|
||||
|
||||
await exec.exec('node index.js');
|
||||
```
|
||||
|
||||
#### Args
|
||||
|
||||
You can also pass in arg arrays:
|
||||
|
||||
```js
|
||||
const exec = require('@actions/exec');
|
||||
|
||||
await exec.exec('node', ['index.js', 'foo=bar']);
|
||||
```
|
||||
|
||||
#### Output/options
|
||||
|
||||
Capture output or specify [other options](https://github.com/actions/toolkit/blob/d9347d4ab99fd507c0b9104b2cf79fb44fcc827d/packages/exec/src/interfaces.ts#L5):
|
||||
|
||||
```js
|
||||
const exec = require('@actions/exec');
|
||||
|
||||
let myOutput = '';
|
||||
let myError = '';
|
||||
|
||||
const options = {};
|
||||
options.listeners = {
|
||||
stdout: (data: Buffer) => {
|
||||
myOutput += data.toString();
|
||||
},
|
||||
stderr: (data: Buffer) => {
|
||||
myError += data.toString();
|
||||
}
|
||||
};
|
||||
options.cwd = './lib';
|
||||
|
||||
await exec.exec('node', ['index.js', 'foo=bar'], options);
|
||||
```
|
||||
|
||||
#### Exec tools not in the PATH
|
||||
|
||||
You can use it in conjunction with the `which` function from `@actions/io` to execute tools that are not in the PATH:
|
||||
|
||||
```js
|
||||
const exec = require('@actions/exec');
|
||||
const io = require('@actions/io');
|
||||
|
||||
const pythonPath: string = await io.which('python', true)
|
||||
|
||||
await exec.exec(`"${pythonPath}"`, ['main.py']);
|
||||
```
|
||||
12
node_modules/@actions/exec/lib/exec.d.ts
generated
vendored
12
node_modules/@actions/exec/lib/exec.d.ts
generated
vendored
@ -1,12 +0,0 @@
|
||||
import * as im from './interfaces';
|
||||
/**
|
||||
* Exec a command.
|
||||
* Output will be streamed to the live console.
|
||||
* Returns promise with return code
|
||||
*
|
||||
* @param commandLine command to execute (can include additional args). Must be correctly escaped.
|
||||
* @param args optional arguments for tool. Escaping is handled by the lib.
|
||||
* @param options optional exec options. See ExecOptions
|
||||
* @returns Promise<number> exit code
|
||||
*/
|
||||
export declare function exec(commandLine: string, args?: string[], options?: im.ExecOptions): Promise<number>;
|
||||
37
node_modules/@actions/exec/lib/exec.js
generated
vendored
37
node_modules/@actions/exec/lib/exec.js
generated
vendored
@ -1,37 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const tr = require("./toolrunner");
|
||||
/**
|
||||
* Exec a command.
|
||||
* Output will be streamed to the live console.
|
||||
* Returns promise with return code
|
||||
*
|
||||
* @param commandLine command to execute (can include additional args). Must be correctly escaped.
|
||||
* @param args optional arguments for tool. Escaping is handled by the lib.
|
||||
* @param options optional exec options. See ExecOptions
|
||||
* @returns Promise<number> exit code
|
||||
*/
|
||||
function exec(commandLine, args, options) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const commandArgs = tr.argStringToArray(commandLine);
|
||||
if (commandArgs.length === 0) {
|
||||
throw new Error(`Parameter 'commandLine' cannot be null or empty.`);
|
||||
}
|
||||
// Path to tool to execute should be first arg
|
||||
const toolPath = commandArgs[0];
|
||||
args = commandArgs.slice(1).concat(args || []);
|
||||
const runner = new tr.ToolRunner(toolPath, args, options);
|
||||
return runner.exec();
|
||||
});
|
||||
}
|
||||
exports.exec = exec;
|
||||
//# sourceMappingURL=exec.js.map
|
||||
1
node_modules/@actions/exec/lib/exec.js.map
generated
vendored
1
node_modules/@actions/exec/lib/exec.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"exec.js","sourceRoot":"","sources":["../src/exec.ts"],"names":[],"mappings":";;;;;;;;;;;AACA,mCAAkC;AAElC;;;;;;;;;GASG;AACH,SAAsB,IAAI,CACxB,WAAmB,EACnB,IAAe,EACf,OAAwB;;QAExB,MAAM,WAAW,GAAG,EAAE,CAAC,gBAAgB,CAAC,WAAW,CAAC,CAAA;QACpD,IAAI,WAAW,CAAC,MAAM,KAAK,CAAC,EAAE;YAC5B,MAAM,IAAI,KAAK,CAAC,kDAAkD,CAAC,CAAA;SACpE;QACD,8CAA8C;QAC9C,MAAM,QAAQ,GAAG,WAAW,CAAC,CAAC,CAAC,CAAA;QAC/B,IAAI,GAAG,WAAW,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,IAAI,EAAE,CAAC,CAAA;QAC9C,MAAM,MAAM,GAAkB,IAAI,EAAE,CAAC,UAAU,CAAC,QAAQ,EAAE,IAAI,EAAE,OAAO,CAAC,CAAA;QACxE,OAAO,MAAM,CAAC,IAAI,EAAE,CAAA;IACtB,CAAC;CAAA;AAdD,oBAcC"}
|
||||
35
node_modules/@actions/exec/lib/interfaces.d.ts
generated
vendored
35
node_modules/@actions/exec/lib/interfaces.d.ts
generated
vendored
@ -1,35 +0,0 @@
|
||||
/// <reference types="node" />
|
||||
import * as stream from 'stream';
|
||||
/**
|
||||
* Interface for exec options
|
||||
*/
|
||||
export interface ExecOptions {
|
||||
/** optional working directory. defaults to current */
|
||||
cwd?: string;
|
||||
/** optional envvar dictionary. defaults to current process's env */
|
||||
env?: {
|
||||
[key: string]: string;
|
||||
};
|
||||
/** optional. defaults to false */
|
||||
silent?: boolean;
|
||||
/** optional out stream to use. Defaults to process.stdout */
|
||||
outStream?: stream.Writable;
|
||||
/** optional err stream to use. Defaults to process.stderr */
|
||||
errStream?: stream.Writable;
|
||||
/** optional. whether to skip quoting/escaping arguments if needed. defaults to false. */
|
||||
windowsVerbatimArguments?: boolean;
|
||||
/** optional. whether to fail if output to stderr. defaults to false */
|
||||
failOnStdErr?: boolean;
|
||||
/** optional. defaults to failing on non zero. ignore will not fail leaving it up to the caller */
|
||||
ignoreReturnCode?: boolean;
|
||||
/** optional. How long in ms to wait for STDIO streams to close after the exit event of the process before terminating. defaults to 10000 */
|
||||
delay?: number;
|
||||
/** optional. Listeners for output. Callback functions that will be called on these events */
|
||||
listeners?: {
|
||||
stdout?: (data: Buffer) => void;
|
||||
stderr?: (data: Buffer) => void;
|
||||
stdline?: (data: string) => void;
|
||||
errline?: (data: string) => void;
|
||||
debug?: (data: string) => void;
|
||||
};
|
||||
}
|
||||
3
node_modules/@actions/exec/lib/interfaces.js
generated
vendored
3
node_modules/@actions/exec/lib/interfaces.js
generated
vendored
@ -1,3 +0,0 @@
|
||||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
//# sourceMappingURL=interfaces.js.map
|
||||
1
node_modules/@actions/exec/lib/interfaces.js.map
generated
vendored
1
node_modules/@actions/exec/lib/interfaces.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"interfaces.js","sourceRoot":"","sources":["../src/interfaces.ts"],"names":[],"mappings":""}
|
||||
37
node_modules/@actions/exec/lib/toolrunner.d.ts
generated
vendored
37
node_modules/@actions/exec/lib/toolrunner.d.ts
generated
vendored
@ -1,37 +0,0 @@
|
||||
/// <reference types="node" />
|
||||
import * as events from 'events';
|
||||
import * as im from './interfaces';
|
||||
export declare class ToolRunner extends events.EventEmitter {
|
||||
constructor(toolPath: string, args?: string[], options?: im.ExecOptions);
|
||||
private toolPath;
|
||||
private args;
|
||||
private options;
|
||||
private _debug;
|
||||
private _getCommandString;
|
||||
private _processLineBuffer;
|
||||
private _getSpawnFileName;
|
||||
private _getSpawnArgs;
|
||||
private _endsWith;
|
||||
private _isCmdFile;
|
||||
private _windowsQuoteCmdArg;
|
||||
private _uvQuoteCmdArg;
|
||||
private _cloneExecOptions;
|
||||
private _getSpawnOptions;
|
||||
/**
|
||||
* Exec a tool.
|
||||
* Output will be streamed to the live console.
|
||||
* Returns promise with return code
|
||||
*
|
||||
* @param tool path to tool to exec
|
||||
* @param options optional exec options. See ExecOptions
|
||||
* @returns number
|
||||
*/
|
||||
exec(): Promise<number>;
|
||||
}
|
||||
/**
|
||||
* Convert an arg string to an array of args. Handles escaping
|
||||
*
|
||||
* @param argString string of arguments
|
||||
* @returns string[] array of arguments
|
||||
*/
|
||||
export declare function argStringToArray(argString: string): string[];
|
||||
574
node_modules/@actions/exec/lib/toolrunner.js
generated
vendored
574
node_modules/@actions/exec/lib/toolrunner.js
generated
vendored
@ -1,574 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const os = require("os");
|
||||
const events = require("events");
|
||||
const child = require("child_process");
|
||||
/* eslint-disable @typescript-eslint/unbound-method */
|
||||
const IS_WINDOWS = process.platform === 'win32';
|
||||
/*
|
||||
* Class for running command line tools. Handles quoting and arg parsing in a platform agnostic way.
|
||||
*/
|
||||
class ToolRunner extends events.EventEmitter {
|
||||
constructor(toolPath, args, options) {
|
||||
super();
|
||||
if (!toolPath) {
|
||||
throw new Error("Parameter 'toolPath' cannot be null or empty.");
|
||||
}
|
||||
this.toolPath = toolPath;
|
||||
this.args = args || [];
|
||||
this.options = options || {};
|
||||
}
|
||||
_debug(message) {
|
||||
if (this.options.listeners && this.options.listeners.debug) {
|
||||
this.options.listeners.debug(message);
|
||||
}
|
||||
}
|
||||
_getCommandString(options, noPrefix) {
|
||||
const toolPath = this._getSpawnFileName();
|
||||
const args = this._getSpawnArgs(options);
|
||||
let cmd = noPrefix ? '' : '[command]'; // omit prefix when piped to a second tool
|
||||
if (IS_WINDOWS) {
|
||||
// Windows + cmd file
|
||||
if (this._isCmdFile()) {
|
||||
cmd += toolPath;
|
||||
for (const a of args) {
|
||||
cmd += ` ${a}`;
|
||||
}
|
||||
}
|
||||
// Windows + verbatim
|
||||
else if (options.windowsVerbatimArguments) {
|
||||
cmd += `"${toolPath}"`;
|
||||
for (const a of args) {
|
||||
cmd += ` ${a}`;
|
||||
}
|
||||
}
|
||||
// Windows (regular)
|
||||
else {
|
||||
cmd += this._windowsQuoteCmdArg(toolPath);
|
||||
for (const a of args) {
|
||||
cmd += ` ${this._windowsQuoteCmdArg(a)}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
// OSX/Linux - this can likely be improved with some form of quoting.
|
||||
// creating processes on Unix is fundamentally different than Windows.
|
||||
// on Unix, execvp() takes an arg array.
|
||||
cmd += toolPath;
|
||||
for (const a of args) {
|
||||
cmd += ` ${a}`;
|
||||
}
|
||||
}
|
||||
return cmd;
|
||||
}
|
||||
_processLineBuffer(data, strBuffer, onLine) {
|
||||
try {
|
||||
let s = strBuffer + data.toString();
|
||||
let n = s.indexOf(os.EOL);
|
||||
while (n > -1) {
|
||||
const line = s.substring(0, n);
|
||||
onLine(line);
|
||||
// the rest of the string ...
|
||||
s = s.substring(n + os.EOL.length);
|
||||
n = s.indexOf(os.EOL);
|
||||
}
|
||||
strBuffer = s;
|
||||
}
|
||||
catch (err) {
|
||||
// streaming lines to console is best effort. Don't fail a build.
|
||||
this._debug(`error processing line. Failed with error ${err}`);
|
||||
}
|
||||
}
|
||||
_getSpawnFileName() {
|
||||
if (IS_WINDOWS) {
|
||||
if (this._isCmdFile()) {
|
||||
return process.env['COMSPEC'] || 'cmd.exe';
|
||||
}
|
||||
}
|
||||
return this.toolPath;
|
||||
}
|
||||
_getSpawnArgs(options) {
|
||||
if (IS_WINDOWS) {
|
||||
if (this._isCmdFile()) {
|
||||
let argline = `/D /S /C "${this._windowsQuoteCmdArg(this.toolPath)}`;
|
||||
for (const a of this.args) {
|
||||
argline += ' ';
|
||||
argline += options.windowsVerbatimArguments
|
||||
? a
|
||||
: this._windowsQuoteCmdArg(a);
|
||||
}
|
||||
argline += '"';
|
||||
return [argline];
|
||||
}
|
||||
}
|
||||
return this.args;
|
||||
}
|
||||
_endsWith(str, end) {
|
||||
return str.endsWith(end);
|
||||
}
|
||||
_isCmdFile() {
|
||||
const upperToolPath = this.toolPath.toUpperCase();
|
||||
return (this._endsWith(upperToolPath, '.CMD') ||
|
||||
this._endsWith(upperToolPath, '.BAT'));
|
||||
}
|
||||
_windowsQuoteCmdArg(arg) {
|
||||
// for .exe, apply the normal quoting rules that libuv applies
|
||||
if (!this._isCmdFile()) {
|
||||
return this._uvQuoteCmdArg(arg);
|
||||
}
|
||||
// otherwise apply quoting rules specific to the cmd.exe command line parser.
|
||||
// the libuv rules are generic and are not designed specifically for cmd.exe
|
||||
// command line parser.
|
||||
//
|
||||
// for a detailed description of the cmd.exe command line parser, refer to
|
||||
// http://stackoverflow.com/questions/4094699/how-does-the-windows-command-interpreter-cmd-exe-parse-scripts/7970912#7970912
|
||||
// need quotes for empty arg
|
||||
if (!arg) {
|
||||
return '""';
|
||||
}
|
||||
// determine whether the arg needs to be quoted
|
||||
const cmdSpecialChars = [
|
||||
' ',
|
||||
'\t',
|
||||
'&',
|
||||
'(',
|
||||
')',
|
||||
'[',
|
||||
']',
|
||||
'{',
|
||||
'}',
|
||||
'^',
|
||||
'=',
|
||||
';',
|
||||
'!',
|
||||
"'",
|
||||
'+',
|
||||
',',
|
||||
'`',
|
||||
'~',
|
||||
'|',
|
||||
'<',
|
||||
'>',
|
||||
'"'
|
||||
];
|
||||
let needsQuotes = false;
|
||||
for (const char of arg) {
|
||||
if (cmdSpecialChars.some(x => x === char)) {
|
||||
needsQuotes = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// short-circuit if quotes not needed
|
||||
if (!needsQuotes) {
|
||||
return arg;
|
||||
}
|
||||
// the following quoting rules are very similar to the rules that by libuv applies.
|
||||
//
|
||||
// 1) wrap the string in quotes
|
||||
//
|
||||
// 2) double-up quotes - i.e. " => ""
|
||||
//
|
||||
// this is different from the libuv quoting rules. libuv replaces " with \", which unfortunately
|
||||
// doesn't work well with a cmd.exe command line.
|
||||
//
|
||||
// note, replacing " with "" also works well if the arg is passed to a downstream .NET console app.
|
||||
// for example, the command line:
|
||||
// foo.exe "myarg:""my val"""
|
||||
// is parsed by a .NET console app into an arg array:
|
||||
// [ "myarg:\"my val\"" ]
|
||||
// which is the same end result when applying libuv quoting rules. although the actual
|
||||
// command line from libuv quoting rules would look like:
|
||||
// foo.exe "myarg:\"my val\""
|
||||
//
|
||||
// 3) double-up slashes that precede a quote,
|
||||
// e.g. hello \world => "hello \world"
|
||||
// hello\"world => "hello\\""world"
|
||||
// hello\\"world => "hello\\\\""world"
|
||||
// hello world\ => "hello world\\"
|
||||
//
|
||||
// technically this is not required for a cmd.exe command line, or the batch argument parser.
|
||||
// the reasons for including this as a .cmd quoting rule are:
|
||||
//
|
||||
// a) this is optimized for the scenario where the argument is passed from the .cmd file to an
|
||||
// external program. many programs (e.g. .NET console apps) rely on the slash-doubling rule.
|
||||
//
|
||||
// b) it's what we've been doing previously (by deferring to node default behavior) and we
|
||||
// haven't heard any complaints about that aspect.
|
||||
//
|
||||
// note, a weakness of the quoting rules chosen here, is that % is not escaped. in fact, % cannot be
|
||||
// escaped when used on the command line directly - even though within a .cmd file % can be escaped
|
||||
// by using %%.
|
||||
//
|
||||
// the saving grace is, on the command line, %var% is left as-is if var is not defined. this contrasts
|
||||
// the line parsing rules within a .cmd file, where if var is not defined it is replaced with nothing.
|
||||
//
|
||||
// one option that was explored was replacing % with ^% - i.e. %var% => ^%var^%. this hack would
|
||||
// often work, since it is unlikely that var^ would exist, and the ^ character is removed when the
|
||||
// variable is used. the problem, however, is that ^ is not removed when %* is used to pass the args
|
||||
// to an external program.
|
||||
//
|
||||
// an unexplored potential solution for the % escaping problem, is to create a wrapper .cmd file.
|
||||
// % can be escaped within a .cmd file.
|
||||
let reverse = '"';
|
||||
let quoteHit = true;
|
||||
for (let i = arg.length; i > 0; i--) {
|
||||
// walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quoteHit && arg[i - 1] === '\\') {
|
||||
reverse += '\\'; // double the slash
|
||||
}
|
||||
else if (arg[i - 1] === '"') {
|
||||
quoteHit = true;
|
||||
reverse += '"'; // double the quote
|
||||
}
|
||||
else {
|
||||
quoteHit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse
|
||||
.split('')
|
||||
.reverse()
|
||||
.join('');
|
||||
}
|
||||
_uvQuoteCmdArg(arg) {
|
||||
// Tool runner wraps child_process.spawn() and needs to apply the same quoting as
|
||||
// Node in certain cases where the undocumented spawn option windowsVerbatimArguments
|
||||
// is used.
|
||||
//
|
||||
// Since this function is a port of quote_cmd_arg from Node 4.x (technically, lib UV,
|
||||
// see https://github.com/nodejs/node/blob/v4.x/deps/uv/src/win/process.c for details),
|
||||
// pasting copyright notice from Node within this function:
|
||||
//
|
||||
// Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to
|
||||
// deal in the Software without restriction, including without limitation the
|
||||
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
// sell copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
// IN THE SOFTWARE.
|
||||
if (!arg) {
|
||||
// Need double quotation for empty argument
|
||||
return '""';
|
||||
}
|
||||
if (!arg.includes(' ') && !arg.includes('\t') && !arg.includes('"')) {
|
||||
// No quotation needed
|
||||
return arg;
|
||||
}
|
||||
if (!arg.includes('"') && !arg.includes('\\')) {
|
||||
// No embedded double quotes or backslashes, so I can just wrap
|
||||
// quote marks around the whole thing.
|
||||
return `"${arg}"`;
|
||||
}
|
||||
// Expected input/output:
|
||||
// input : hello"world
|
||||
// output: "hello\"world"
|
||||
// input : hello""world
|
||||
// output: "hello\"\"world"
|
||||
// input : hello\world
|
||||
// output: hello\world
|
||||
// input : hello\\world
|
||||
// output: hello\\world
|
||||
// input : hello\"world
|
||||
// output: "hello\\\"world"
|
||||
// input : hello\\"world
|
||||
// output: "hello\\\\\"world"
|
||||
// input : hello world\
|
||||
// output: "hello world\\" - note the comment in libuv actually reads "hello world\"
|
||||
// but it appears the comment is wrong, it should be "hello world\\"
|
||||
let reverse = '"';
|
||||
let quoteHit = true;
|
||||
for (let i = arg.length; i > 0; i--) {
|
||||
// walk the string in reverse
|
||||
reverse += arg[i - 1];
|
||||
if (quoteHit && arg[i - 1] === '\\') {
|
||||
reverse += '\\';
|
||||
}
|
||||
else if (arg[i - 1] === '"') {
|
||||
quoteHit = true;
|
||||
reverse += '\\';
|
||||
}
|
||||
else {
|
||||
quoteHit = false;
|
||||
}
|
||||
}
|
||||
reverse += '"';
|
||||
return reverse
|
||||
.split('')
|
||||
.reverse()
|
||||
.join('');
|
||||
}
|
||||
_cloneExecOptions(options) {
|
||||
options = options || {};
|
||||
const result = {
|
||||
cwd: options.cwd || process.cwd(),
|
||||
env: options.env || process.env,
|
||||
silent: options.silent || false,
|
||||
windowsVerbatimArguments: options.windowsVerbatimArguments || false,
|
||||
failOnStdErr: options.failOnStdErr || false,
|
||||
ignoreReturnCode: options.ignoreReturnCode || false,
|
||||
delay: options.delay || 10000
|
||||
};
|
||||
result.outStream = options.outStream || process.stdout;
|
||||
result.errStream = options.errStream || process.stderr;
|
||||
return result;
|
||||
}
|
||||
_getSpawnOptions(options, toolPath) {
|
||||
options = options || {};
|
||||
const result = {};
|
||||
result.cwd = options.cwd;
|
||||
result.env = options.env;
|
||||
result['windowsVerbatimArguments'] =
|
||||
options.windowsVerbatimArguments || this._isCmdFile();
|
||||
if (options.windowsVerbatimArguments) {
|
||||
result.argv0 = `"${toolPath}"`;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
/**
|
||||
* Exec a tool.
|
||||
* Output will be streamed to the live console.
|
||||
* Returns promise with return code
|
||||
*
|
||||
* @param tool path to tool to exec
|
||||
* @param options optional exec options. See ExecOptions
|
||||
* @returns number
|
||||
*/
|
||||
exec() {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
return new Promise((resolve, reject) => {
|
||||
this._debug(`exec tool: ${this.toolPath}`);
|
||||
this._debug('arguments:');
|
||||
for (const arg of this.args) {
|
||||
this._debug(` ${arg}`);
|
||||
}
|
||||
const optionsNonNull = this._cloneExecOptions(this.options);
|
||||
if (!optionsNonNull.silent && optionsNonNull.outStream) {
|
||||
optionsNonNull.outStream.write(this._getCommandString(optionsNonNull) + os.EOL);
|
||||
}
|
||||
const state = new ExecState(optionsNonNull, this.toolPath);
|
||||
state.on('debug', (message) => {
|
||||
this._debug(message);
|
||||
});
|
||||
const fileName = this._getSpawnFileName();
|
||||
const cp = child.spawn(fileName, this._getSpawnArgs(optionsNonNull), this._getSpawnOptions(this.options, fileName));
|
||||
const stdbuffer = '';
|
||||
if (cp.stdout) {
|
||||
cp.stdout.on('data', (data) => {
|
||||
if (this.options.listeners && this.options.listeners.stdout) {
|
||||
this.options.listeners.stdout(data);
|
||||
}
|
||||
if (!optionsNonNull.silent && optionsNonNull.outStream) {
|
||||
optionsNonNull.outStream.write(data);
|
||||
}
|
||||
this._processLineBuffer(data, stdbuffer, (line) => {
|
||||
if (this.options.listeners && this.options.listeners.stdline) {
|
||||
this.options.listeners.stdline(line);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
const errbuffer = '';
|
||||
if (cp.stderr) {
|
||||
cp.stderr.on('data', (data) => {
|
||||
state.processStderr = true;
|
||||
if (this.options.listeners && this.options.listeners.stderr) {
|
||||
this.options.listeners.stderr(data);
|
||||
}
|
||||
if (!optionsNonNull.silent &&
|
||||
optionsNonNull.errStream &&
|
||||
optionsNonNull.outStream) {
|
||||
const s = optionsNonNull.failOnStdErr
|
||||
? optionsNonNull.errStream
|
||||
: optionsNonNull.outStream;
|
||||
s.write(data);
|
||||
}
|
||||
this._processLineBuffer(data, errbuffer, (line) => {
|
||||
if (this.options.listeners && this.options.listeners.errline) {
|
||||
this.options.listeners.errline(line);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
cp.on('error', (err) => {
|
||||
state.processError = err.message;
|
||||
state.processExited = true;
|
||||
state.processClosed = true;
|
||||
state.CheckComplete();
|
||||
});
|
||||
cp.on('exit', (code) => {
|
||||
state.processExitCode = code;
|
||||
state.processExited = true;
|
||||
this._debug(`Exit code ${code} received from tool '${this.toolPath}'`);
|
||||
state.CheckComplete();
|
||||
});
|
||||
cp.on('close', (code) => {
|
||||
state.processExitCode = code;
|
||||
state.processExited = true;
|
||||
state.processClosed = true;
|
||||
this._debug(`STDIO streams have closed for tool '${this.toolPath}'`);
|
||||
state.CheckComplete();
|
||||
});
|
||||
state.on('done', (error, exitCode) => {
|
||||
if (stdbuffer.length > 0) {
|
||||
this.emit('stdline', stdbuffer);
|
||||
}
|
||||
if (errbuffer.length > 0) {
|
||||
this.emit('errline', errbuffer);
|
||||
}
|
||||
cp.removeAllListeners();
|
||||
if (error) {
|
||||
reject(error);
|
||||
}
|
||||
else {
|
||||
resolve(exitCode);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.ToolRunner = ToolRunner;
|
||||
/**
|
||||
* Convert an arg string to an array of args. Handles escaping
|
||||
*
|
||||
* @param argString string of arguments
|
||||
* @returns string[] array of arguments
|
||||
*/
|
||||
function argStringToArray(argString) {
|
||||
const args = [];
|
||||
let inQuotes = false;
|
||||
let escaped = false;
|
||||
let arg = '';
|
||||
function append(c) {
|
||||
// we only escape double quotes.
|
||||
if (escaped && c !== '"') {
|
||||
arg += '\\';
|
||||
}
|
||||
arg += c;
|
||||
escaped = false;
|
||||
}
|
||||
for (let i = 0; i < argString.length; i++) {
|
||||
const c = argString.charAt(i);
|
||||
if (c === '"') {
|
||||
if (!escaped) {
|
||||
inQuotes = !inQuotes;
|
||||
}
|
||||
else {
|
||||
append(c);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (c === '\\' && escaped) {
|
||||
append(c);
|
||||
continue;
|
||||
}
|
||||
if (c === '\\' && inQuotes) {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
if (c === ' ' && !inQuotes) {
|
||||
if (arg.length > 0) {
|
||||
args.push(arg);
|
||||
arg = '';
|
||||
}
|
||||
continue;
|
||||
}
|
||||
append(c);
|
||||
}
|
||||
if (arg.length > 0) {
|
||||
args.push(arg.trim());
|
||||
}
|
||||
return args;
|
||||
}
|
||||
exports.argStringToArray = argStringToArray;
|
||||
class ExecState extends events.EventEmitter {
|
||||
constructor(options, toolPath) {
|
||||
super();
|
||||
this.processClosed = false; // tracks whether the process has exited and stdio is closed
|
||||
this.processError = '';
|
||||
this.processExitCode = 0;
|
||||
this.processExited = false; // tracks whether the process has exited
|
||||
this.processStderr = false; // tracks whether stderr was written to
|
||||
this.delay = 10000; // 10 seconds
|
||||
this.done = false;
|
||||
this.timeout = null;
|
||||
if (!toolPath) {
|
||||
throw new Error('toolPath must not be empty');
|
||||
}
|
||||
this.options = options;
|
||||
this.toolPath = toolPath;
|
||||
if (options.delay) {
|
||||
this.delay = options.delay;
|
||||
}
|
||||
}
|
||||
CheckComplete() {
|
||||
if (this.done) {
|
||||
return;
|
||||
}
|
||||
if (this.processClosed) {
|
||||
this._setResult();
|
||||
}
|
||||
else if (this.processExited) {
|
||||
this.timeout = setTimeout(ExecState.HandleTimeout, this.delay, this);
|
||||
}
|
||||
}
|
||||
_debug(message) {
|
||||
this.emit('debug', message);
|
||||
}
|
||||
_setResult() {
|
||||
// determine whether there is an error
|
||||
let error;
|
||||
if (this.processExited) {
|
||||
if (this.processError) {
|
||||
error = new Error(`There was an error when attempting to execute the process '${this.toolPath}'. This may indicate the process failed to start. Error: ${this.processError}`);
|
||||
}
|
||||
else if (this.processExitCode !== 0 && !this.options.ignoreReturnCode) {
|
||||
error = new Error(`The process '${this.toolPath}' failed with exit code ${this.processExitCode}`);
|
||||
}
|
||||
else if (this.processStderr && this.options.failOnStdErr) {
|
||||
error = new Error(`The process '${this.toolPath}' failed because one or more lines were written to the STDERR stream`);
|
||||
}
|
||||
}
|
||||
// clear the timeout
|
||||
if (this.timeout) {
|
||||
clearTimeout(this.timeout);
|
||||
this.timeout = null;
|
||||
}
|
||||
this.done = true;
|
||||
this.emit('done', error, this.processExitCode);
|
||||
}
|
||||
static HandleTimeout(state) {
|
||||
if (state.done) {
|
||||
return;
|
||||
}
|
||||
if (!state.processClosed && state.processExited) {
|
||||
const message = `The STDIO streams did not close within ${state.delay /
|
||||
1000} seconds of the exit event from process '${state.toolPath}'. This may indicate a child process inherited the STDIO streams and has not yet exited.`;
|
||||
state._debug(message);
|
||||
}
|
||||
state._setResult();
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=toolrunner.js.map
|
||||
1
node_modules/@actions/exec/lib/toolrunner.js.map
generated
vendored
1
node_modules/@actions/exec/lib/toolrunner.js.map
generated
vendored
File diff suppressed because one or more lines are too long
68
node_modules/@actions/exec/package.json
generated
vendored
68
node_modules/@actions/exec/package.json
generated
vendored
@ -1,68 +0,0 @@
|
||||
{
|
||||
"_args": [
|
||||
[
|
||||
"@actions/exec@1.0.1",
|
||||
"E:\\k8s-actions\\deploy\\k8s-deploy"
|
||||
]
|
||||
],
|
||||
"_from": "@actions/exec@1.0.1",
|
||||
"_id": "@actions/exec@1.0.1",
|
||||
"_inBundle": false,
|
||||
"_integrity": "sha512-nvFkxwiicvpzNiCBF4wFBDfnBvi7xp/as7LE1hBxBxKG2L29+gkIPBiLKMVORL+Hg3JNf07AKRfl0V5djoypjQ==",
|
||||
"_location": "/@actions/exec",
|
||||
"_phantomChildren": {},
|
||||
"_requested": {
|
||||
"type": "version",
|
||||
"registry": true,
|
||||
"raw": "@actions/exec@1.0.1",
|
||||
"name": "@actions/exec",
|
||||
"escapedName": "@actions%2fexec",
|
||||
"scope": "@actions",
|
||||
"rawSpec": "1.0.1",
|
||||
"saveSpec": null,
|
||||
"fetchSpec": "1.0.1"
|
||||
},
|
||||
"_requiredBy": [
|
||||
"/",
|
||||
"/@actions/tool-cache"
|
||||
],
|
||||
"_resolved": "https://registry.npmjs.org/@actions/exec/-/exec-1.0.1.tgz",
|
||||
"_spec": "1.0.1",
|
||||
"_where": "E:\\k8s-actions\\deploy\\k8s-deploy",
|
||||
"bugs": {
|
||||
"url": "https://github.com/actions/toolkit/issues"
|
||||
},
|
||||
"description": "Actions exec lib",
|
||||
"devDependencies": {
|
||||
"@actions/io": "^1.0.1"
|
||||
},
|
||||
"directories": {
|
||||
"lib": "lib",
|
||||
"test": "__tests__"
|
||||
},
|
||||
"files": [
|
||||
"lib"
|
||||
],
|
||||
"gitHead": "a2ab4bcf78e4f7080f0d45856e6eeba16f0bbc52",
|
||||
"homepage": "https://github.com/actions/toolkit/tree/master/packages/exec",
|
||||
"keywords": [
|
||||
"github",
|
||||
"actions",
|
||||
"exec"
|
||||
],
|
||||
"license": "MIT",
|
||||
"main": "lib/exec.js",
|
||||
"name": "@actions/exec",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/actions/toolkit.git"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: run tests from root\" && exit 1",
|
||||
"tsc": "tsc"
|
||||
},
|
||||
"version": "1.0.1"
|
||||
}
|
||||
7
node_modules/@actions/io/LICENSE.md
generated
vendored
7
node_modules/@actions/io/LICENSE.md
generated
vendored
@ -1,7 +0,0 @@
|
||||
Copyright 2019 GitHub
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
53
node_modules/@actions/io/README.md
generated
vendored
53
node_modules/@actions/io/README.md
generated
vendored
@ -1,53 +0,0 @@
|
||||
# `@actions/io`
|
||||
|
||||
> Core functions for cli filesystem scenarios
|
||||
|
||||
## Usage
|
||||
|
||||
#### mkdir -p
|
||||
|
||||
Recursively make a directory. Follows rules specified in [man mkdir](https://linux.die.net/man/1/mkdir) with the `-p` option specified:
|
||||
|
||||
```js
|
||||
const io = require('@actions/io');
|
||||
|
||||
await io.mkdirP('path/to/make');
|
||||
```
|
||||
|
||||
#### cp/mv
|
||||
|
||||
Copy or move files or folders. Follows rules specified in [man cp](https://linux.die.net/man/1/cp) and [man mv](https://linux.die.net/man/1/mv):
|
||||
|
||||
```js
|
||||
const io = require('@actions/io');
|
||||
|
||||
// Recursive must be true for directories
|
||||
const options = { recursive: true, force: false }
|
||||
|
||||
await io.cp('path/to/directory', 'path/to/dest', options);
|
||||
await io.mv('path/to/file', 'path/to/dest');
|
||||
```
|
||||
|
||||
#### rm -rf
|
||||
|
||||
Remove a file or folder recursively. Follows rules specified in [man rm](https://linux.die.net/man/1/rm) with the `-r` and `-f` rules specified.
|
||||
|
||||
```js
|
||||
const io = require('@actions/io');
|
||||
|
||||
await io.rmRF('path/to/directory');
|
||||
await io.rmRF('path/to/file');
|
||||
```
|
||||
|
||||
#### which
|
||||
|
||||
Get the path to a tool and resolves via paths. Follows the rules specified in [man which](https://linux.die.net/man/1/which).
|
||||
|
||||
```js
|
||||
const exec = require('@actions/exec');
|
||||
const io = require('@actions/io');
|
||||
|
||||
const pythonPath: string = await io.which('python', true)
|
||||
|
||||
await exec.exec(`"${pythonPath}"`, ['main.py']);
|
||||
```
|
||||
29
node_modules/@actions/io/lib/io-util.d.ts
generated
vendored
29
node_modules/@actions/io/lib/io-util.d.ts
generated
vendored
@ -1,29 +0,0 @@
|
||||
/// <reference types="node" />
|
||||
import * as fs from 'fs';
|
||||
export declare const chmod: typeof fs.promises.chmod, copyFile: typeof fs.promises.copyFile, lstat: typeof fs.promises.lstat, mkdir: typeof fs.promises.mkdir, readdir: typeof fs.promises.readdir, readlink: typeof fs.promises.readlink, rename: typeof fs.promises.rename, rmdir: typeof fs.promises.rmdir, stat: typeof fs.promises.stat, symlink: typeof fs.promises.symlink, unlink: typeof fs.promises.unlink;
|
||||
export declare const IS_WINDOWS: boolean;
|
||||
export declare function exists(fsPath: string): Promise<boolean>;
|
||||
export declare function isDirectory(fsPath: string, useStat?: boolean): Promise<boolean>;
|
||||
/**
|
||||
* On OSX/Linux, true if path starts with '/'. On Windows, true for paths like:
|
||||
* \, \hello, \\hello\share, C:, and C:\hello (and corresponding alternate separator cases).
|
||||
*/
|
||||
export declare function isRooted(p: string): boolean;
|
||||
/**
|
||||
* Recursively create a directory at `fsPath`.
|
||||
*
|
||||
* This implementation is optimistic, meaning it attempts to create the full
|
||||
* path first, and backs up the path stack from there.
|
||||
*
|
||||
* @param fsPath The path to create
|
||||
* @param maxDepth The maximum recursion depth
|
||||
* @param depth The current recursion depth
|
||||
*/
|
||||
export declare function mkdirP(fsPath: string, maxDepth?: number, depth?: number): Promise<void>;
|
||||
/**
|
||||
* Best effort attempt to determine whether a file exists and is executable.
|
||||
* @param filePath file path to check
|
||||
* @param extensions additional file extensions to try
|
||||
* @return if file exists and is executable, returns the file path. otherwise empty string.
|
||||
*/
|
||||
export declare function tryGetExecutablePath(filePath: string, extensions: string[]): Promise<string>;
|
||||
195
node_modules/@actions/io/lib/io-util.js
generated
vendored
195
node_modules/@actions/io/lib/io-util.js
generated
vendored
@ -1,195 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
var _a;
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const assert_1 = require("assert");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
_a = fs.promises, exports.chmod = _a.chmod, exports.copyFile = _a.copyFile, exports.lstat = _a.lstat, exports.mkdir = _a.mkdir, exports.readdir = _a.readdir, exports.readlink = _a.readlink, exports.rename = _a.rename, exports.rmdir = _a.rmdir, exports.stat = _a.stat, exports.symlink = _a.symlink, exports.unlink = _a.unlink;
|
||||
exports.IS_WINDOWS = process.platform === 'win32';
|
||||
function exists(fsPath) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
try {
|
||||
yield exports.stat(fsPath);
|
||||
}
|
||||
catch (err) {
|
||||
if (err.code === 'ENOENT') {
|
||||
return false;
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
}
|
||||
exports.exists = exists;
|
||||
function isDirectory(fsPath, useStat = false) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const stats = useStat ? yield exports.stat(fsPath) : yield exports.lstat(fsPath);
|
||||
return stats.isDirectory();
|
||||
});
|
||||
}
|
||||
exports.isDirectory = isDirectory;
|
||||
/**
|
||||
* On OSX/Linux, true if path starts with '/'. On Windows, true for paths like:
|
||||
* \, \hello, \\hello\share, C:, and C:\hello (and corresponding alternate separator cases).
|
||||
*/
|
||||
function isRooted(p) {
|
||||
p = normalizeSeparators(p);
|
||||
if (!p) {
|
||||
throw new Error('isRooted() parameter "p" cannot be empty');
|
||||
}
|
||||
if (exports.IS_WINDOWS) {
|
||||
return (p.startsWith('\\') || /^[A-Z]:/i.test(p) // e.g. \ or \hello or \\hello
|
||||
); // e.g. C: or C:\hello
|
||||
}
|
||||
return p.startsWith('/');
|
||||
}
|
||||
exports.isRooted = isRooted;
|
||||
/**
|
||||
* Recursively create a directory at `fsPath`.
|
||||
*
|
||||
* This implementation is optimistic, meaning it attempts to create the full
|
||||
* path first, and backs up the path stack from there.
|
||||
*
|
||||
* @param fsPath The path to create
|
||||
* @param maxDepth The maximum recursion depth
|
||||
* @param depth The current recursion depth
|
||||
*/
|
||||
function mkdirP(fsPath, maxDepth = 1000, depth = 1) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
assert_1.ok(fsPath, 'a path argument must be provided');
|
||||
fsPath = path.resolve(fsPath);
|
||||
if (depth >= maxDepth)
|
||||
return exports.mkdir(fsPath);
|
||||
try {
|
||||
yield exports.mkdir(fsPath);
|
||||
return;
|
||||
}
|
||||
catch (err) {
|
||||
switch (err.code) {
|
||||
case 'ENOENT': {
|
||||
yield mkdirP(path.dirname(fsPath), maxDepth, depth + 1);
|
||||
yield exports.mkdir(fsPath);
|
||||
return;
|
||||
}
|
||||
default: {
|
||||
let stats;
|
||||
try {
|
||||
stats = yield exports.stat(fsPath);
|
||||
}
|
||||
catch (err2) {
|
||||
throw err;
|
||||
}
|
||||
if (!stats.isDirectory())
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.mkdirP = mkdirP;
|
||||
/**
|
||||
* Best effort attempt to determine whether a file exists and is executable.
|
||||
* @param filePath file path to check
|
||||
* @param extensions additional file extensions to try
|
||||
* @return if file exists and is executable, returns the file path. otherwise empty string.
|
||||
*/
|
||||
function tryGetExecutablePath(filePath, extensions) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
let stats = undefined;
|
||||
try {
|
||||
// test file exists
|
||||
stats = yield exports.stat(filePath);
|
||||
}
|
||||
catch (err) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`);
|
||||
}
|
||||
}
|
||||
if (stats && stats.isFile()) {
|
||||
if (exports.IS_WINDOWS) {
|
||||
// on Windows, test for valid extension
|
||||
const upperExt = path.extname(filePath).toUpperCase();
|
||||
if (extensions.some(validExt => validExt.toUpperCase() === upperExt)) {
|
||||
return filePath;
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (isUnixExecutable(stats)) {
|
||||
return filePath;
|
||||
}
|
||||
}
|
||||
}
|
||||
// try each extension
|
||||
const originalFilePath = filePath;
|
||||
for (const extension of extensions) {
|
||||
filePath = originalFilePath + extension;
|
||||
stats = undefined;
|
||||
try {
|
||||
stats = yield exports.stat(filePath);
|
||||
}
|
||||
catch (err) {
|
||||
if (err.code !== 'ENOENT') {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`);
|
||||
}
|
||||
}
|
||||
if (stats && stats.isFile()) {
|
||||
if (exports.IS_WINDOWS) {
|
||||
// preserve the case of the actual file (since an extension was appended)
|
||||
try {
|
||||
const directory = path.dirname(filePath);
|
||||
const upperName = path.basename(filePath).toUpperCase();
|
||||
for (const actualName of yield exports.readdir(directory)) {
|
||||
if (upperName === actualName.toUpperCase()) {
|
||||
filePath = path.join(directory, actualName);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`Unexpected error attempting to determine the actual case of the file '${filePath}': ${err}`);
|
||||
}
|
||||
return filePath;
|
||||
}
|
||||
else {
|
||||
if (isUnixExecutable(stats)) {
|
||||
return filePath;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return '';
|
||||
});
|
||||
}
|
||||
exports.tryGetExecutablePath = tryGetExecutablePath;
|
||||
function normalizeSeparators(p) {
|
||||
p = p || '';
|
||||
if (exports.IS_WINDOWS) {
|
||||
// convert slashes on Windows
|
||||
p = p.replace(/\//g, '\\');
|
||||
// remove redundant slashes
|
||||
return p.replace(/\\\\+/g, '\\');
|
||||
}
|
||||
// remove redundant slashes
|
||||
return p.replace(/\/\/+/g, '/');
|
||||
}
|
||||
// on Mac/Linux, test the execute bit
|
||||
// R W X R W X R W X
|
||||
// 256 128 64 32 16 8 4 2 1
|
||||
function isUnixExecutable(stats) {
|
||||
return ((stats.mode & 1) > 0 ||
|
||||
((stats.mode & 8) > 0 && stats.gid === process.getgid()) ||
|
||||
((stats.mode & 64) > 0 && stats.uid === process.getuid()));
|
||||
}
|
||||
//# sourceMappingURL=io-util.js.map
|
||||
1
node_modules/@actions/io/lib/io-util.js.map
generated
vendored
1
node_modules/@actions/io/lib/io-util.js.map
generated
vendored
@ -1 +0,0 @@
|
||||
{"version":3,"file":"io-util.js","sourceRoot":"","sources":["../src/io-util.ts"],"names":[],"mappings":";;;;;;;;;;;;AAAA,mCAAyB;AACzB,yBAAwB;AACxB,6BAA4B;AAEf,gBAYE,qTAAA;AAEF,QAAA,UAAU,GAAG,OAAO,CAAC,QAAQ,KAAK,OAAO,CAAA;AAEtD,SAAsB,MAAM,CAAC,MAAc;;QACzC,IAAI;YACF,MAAM,YAAI,CAAC,MAAM,CAAC,CAAA;SACnB;QAAC,OAAO,GAAG,EAAE;YACZ,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBACzB,OAAO,KAAK,CAAA;aACb;YAED,MAAM,GAAG,CAAA;SACV;QAED,OAAO,IAAI,CAAA;IACb,CAAC;CAAA;AAZD,wBAYC;AAED,SAAsB,WAAW,CAC/B,MAAc,EACd,UAAmB,KAAK;;QAExB,MAAM,KAAK,GAAG,OAAO,CAAC,CAAC,CAAC,MAAM,YAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,aAAK,CAAC,MAAM,CAAC,CAAA;QAChE,OAAO,KAAK,CAAC,WAAW,EAAE,CAAA;IAC5B,CAAC;CAAA;AAND,kCAMC;AAED;;;GAGG;AACH,SAAgB,QAAQ,CAAC,CAAS;IAChC,CAAC,GAAG,mBAAmB,CAAC,CAAC,CAAC,CAAA;IAC1B,IAAI,CAAC,CAAC,EAAE;QACN,MAAM,IAAI,KAAK,CAAC,0CAA0C,CAAC,CAAA;KAC5D;IAED,IAAI,kBAAU,EAAE;QACd,OAAO,CACL,CAAC,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,UAAU,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,8BAA8B;SACxE,CAAA,CAAC,sBAAsB;KACzB;IAED,OAAO,CAAC,CAAC,UAAU,CAAC,GAAG,CAAC,CAAA;AAC1B,CAAC;AAbD,4BAaC;AAED;;;;;;;;;GASG;AACH,SAAsB,MAAM,CAC1B,MAAc,EACd,WAAmB,IAAI,EACvB,QAAgB,CAAC;;QAEjB,WAAE,CAAC,MAAM,EAAE,kCAAkC,CAAC,CAAA;QAE9C,MAAM,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAA;QAE7B,IAAI,KAAK,IAAI,QAAQ;YAAE,OAAO,aAAK,CAAC,MAAM,CAAC,CAAA;QAE3C,IAAI;YACF,MAAM,aAAK,CAAC,MAAM,CAAC,CAAA;YACnB,OAAM;SACP;QAAC,OAAO,GAAG,EAAE;YACZ,QAAQ,GAAG,CAAC,IAAI,EAAE;gBAChB,KAAK,QAAQ,CAAC,CAAC;oBACb,MAAM,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,QAAQ,EAAE,KAAK,GAAG,CAAC,CAAC,CAAA;oBACvD,MAAM,aAAK,CAAC,MAAM,CAAC,CAAA;oBACnB,OAAM;iBACP;gBACD,OAAO,CAAC,CAAC;oBACP,IAAI,KAAe,CAAA;oBAEnB,IAAI;wBACF,KAAK,GAAG,MAAM,YAAI,CAAC,MAAM,CAAC,CAAA;qBAC3B;oBAAC,OAAO,IAAI,EAAE;wBACb,MAAM,GAAG,CAAA;qBACV;oBAED,IAAI,CAAC,KAAK,CAAC,WAAW,EAAE;wBAAE,MAAM,GAAG,CAAA;iBACpC;aACF;SACF;IACH,CAAC;CAAA;AAlCD,wBAkCC;AAED;;;;;GAKG;AACH,SAAsB,oBAAoB,CACxC,QAAgB,EAChB,UAAoB;;QAEpB,IAAI,KAAK,GAAyB,SAAS,CAAA;QAC3C,IAAI;YACF,mBAAmB;YACnB,KAAK,GAAG,MAAM,YAAI,CAAC,QAAQ,CAAC,CAAA;SAC7B;QAAC,OAAO,GAAG,EAAE
;YACZ,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;gBACzB,sCAAsC;gBACtC,OAAO,CAAC,GAAG,CACT,uEAAuE,QAAQ,MAAM,GAAG,EAAE,CAC3F,CAAA;aACF;SACF;QACD,IAAI,KAAK,IAAI,KAAK,CAAC,MAAM,EAAE,EAAE;YAC3B,IAAI,kBAAU,EAAE;gBACd,uCAAuC;gBACvC,MAAM,QAAQ,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,WAAW,EAAE,CAAA;gBACrD,IAAI,UAAU,CAAC,IAAI,CAAC,QAAQ,CAAC,EAAE,CAAC,QAAQ,CAAC,WAAW,EAAE,KAAK,QAAQ,CAAC,EAAE;oBACpE,OAAO,QAAQ,CAAA;iBAChB;aACF;iBAAM;gBACL,IAAI,gBAAgB,CAAC,KAAK,CAAC,EAAE;oBAC3B,OAAO,QAAQ,CAAA;iBAChB;aACF;SACF;QAED,qBAAqB;QACrB,MAAM,gBAAgB,GAAG,QAAQ,CAAA;QACjC,KAAK,MAAM,SAAS,IAAI,UAAU,EAAE;YAClC,QAAQ,GAAG,gBAAgB,GAAG,SAAS,CAAA;YAEvC,KAAK,GAAG,SAAS,CAAA;YACjB,IAAI;gBACF,KAAK,GAAG,MAAM,YAAI,CAAC,QAAQ,CAAC,CAAA;aAC7B;YAAC,OAAO,GAAG,EAAE;gBACZ,IAAI,GAAG,CAAC,IAAI,KAAK,QAAQ,EAAE;oBACzB,sCAAsC;oBACtC,OAAO,CAAC,GAAG,CACT,uEAAuE,QAAQ,MAAM,GAAG,EAAE,CAC3F,CAAA;iBACF;aACF;YAED,IAAI,KAAK,IAAI,KAAK,CAAC,MAAM,EAAE,EAAE;gBAC3B,IAAI,kBAAU,EAAE;oBACd,yEAAyE;oBACzE,IAAI;wBACF,MAAM,SAAS,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAA;wBACxC,MAAM,SAAS,GAAG,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC,WAAW,EAAE,CAAA;wBACvD,KAAK,MAAM,UAAU,IAAI,MAAM,eAAO,CAAC,SAAS,CAAC,EAAE;4BACjD,IAAI,SAAS,KAAK,UAAU,CAAC,WAAW,EAAE,EAAE;gCAC1C,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,UAAU,CAAC,CAAA;gCAC3C,MAAK;6BACN;yBACF;qBACF;oBAAC,OAAO,GAAG,EAAE;wBACZ,sCAAsC;wBACtC,OAAO,CAAC,GAAG,CACT,yEAAyE,QAAQ,MAAM,GAAG,EAAE,CAC7F,CAAA;qBACF;oBAED,OAAO,QAAQ,CAAA;iBAChB;qBAAM;oBACL,IAAI,gBAAgB,CAAC,KAAK,CAAC,EAAE;wBAC3B,OAAO,QAAQ,CAAA;qBAChB;iBACF;aACF;SACF;QAED,OAAO,EAAE,CAAA;IACX,CAAC;CAAA;AA5ED,oDA4EC;AAED,SAAS,mBAAmB,CAAC,CAAS;IACpC,CAAC,GAAG,CAAC,IAAI,EAAE,CAAA;IACX,IAAI,kBAAU,EAAE;QACd,6BAA6B;QAC7B,CAAC,GAAG,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE,IAAI,CAAC,CAAA;QAE1B,2BAA2B;QAC3B,OAAO,CAAC,CAAC,OAAO,CAAC,QAAQ,EAAE,IAAI,CAAC,CAAA;KACjC;IAED,2BAA2B;IAC3B,OAAO,CAAC,CAAC,OAAO,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAA;AACjC,CAAC;AAED,qCAAqC;AACrC,6BAA6B;AAC7B,6BAA6B;AAC7B,SAAS,gBAAgB,CAAC,KAAe;IACvC,OAAO,CACL,CAAC,KAAK,CAAC,IAAI,GAAG,CAAC,CAAC,GAAG,CAAC;QACpB,CAAC,CAAC,KAAK,CAAC
,IAAI,GAAG,CAAC,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,GAAG,KAAK,OAAO,CAAC,MAAM,EAAE,CAAC;QACxD,CAAC,CAAC,KAAK,CAAC,IAAI,GAAG,EAAE,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,GAAG,KAAK,OAAO,CAAC,MAAM,EAAE,CAAC,CAC1D,CAAA;AACH,CAAC"}
|
||||
56
node_modules/@actions/io/lib/io.d.ts
generated
vendored
56
node_modules/@actions/io/lib/io.d.ts
generated
vendored
@ -1,56 +0,0 @@
|
||||
/**
|
||||
* Interface for cp/mv options
|
||||
*/
|
||||
export interface CopyOptions {
|
||||
/** Optional. Whether to recursively copy all subdirectories. Defaults to false */
|
||||
recursive?: boolean;
|
||||
/** Optional. Whether to overwrite existing files in the destination. Defaults to true */
|
||||
force?: boolean;
|
||||
}
|
||||
/**
|
||||
* Interface for cp/mv options
|
||||
*/
|
||||
export interface MoveOptions {
|
||||
/** Optional. Whether to overwrite existing files in the destination. Defaults to true */
|
||||
force?: boolean;
|
||||
}
|
||||
/**
|
||||
* Copies a file or folder.
|
||||
* Based off of shelljs - https://github.com/shelljs/shelljs/blob/9237f66c52e5daa40458f94f9565e18e8132f5a6/src/cp.js
|
||||
*
|
||||
* @param source source path
|
||||
* @param dest destination path
|
||||
* @param options optional. See CopyOptions.
|
||||
*/
|
||||
export declare function cp(source: string, dest: string, options?: CopyOptions): Promise<void>;
|
||||
/**
|
||||
* Moves a path.
|
||||
*
|
||||
* @param source source path
|
||||
* @param dest destination path
|
||||
* @param options optional. See MoveOptions.
|
||||
*/
|
||||
export declare function mv(source: string, dest: string, options?: MoveOptions): Promise<void>;
|
||||
/**
|
||||
* Remove a path recursively with force
|
||||
*
|
||||
* @param inputPath path to remove
|
||||
*/
|
||||
export declare function rmRF(inputPath: string): Promise<void>;
|
||||
/**
|
||||
* Make a directory. Creates the full path with folders in between
|
||||
* Will throw if it fails
|
||||
*
|
||||
* @param fsPath path to create
|
||||
* @returns Promise<void>
|
||||
*/
|
||||
export declare function mkdirP(fsPath: string): Promise<void>;
|
||||
/**
|
||||
* Returns path of a tool had the tool actually been invoked. Resolves via paths.
|
||||
* If you check and the tool does not exist, it will throw.
|
||||
*
|
||||
* @param tool name of the tool
|
||||
* @param check whether to check if tool exists
|
||||
* @returns Promise<string> path to tool
|
||||
*/
|
||||
export declare function which(tool: string, check?: boolean): Promise<string>;
|
||||
290
node_modules/@actions/io/lib/io.js
generated
vendored
290
node_modules/@actions/io/lib/io.js
generated
vendored
@ -1,290 +0,0 @@
|
||||
"use strict";
|
||||
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
||||
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
||||
return new (P || (P = Promise))(function (resolve, reject) {
|
||||
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
||||
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
||||
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
||||
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
||||
});
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const childProcess = require("child_process");
|
||||
const path = require("path");
|
||||
const util_1 = require("util");
|
||||
const ioUtil = require("./io-util");
|
||||
const exec = util_1.promisify(childProcess.exec);
|
||||
/**
|
||||
* Copies a file or folder.
|
||||
* Based off of shelljs - https://github.com/shelljs/shelljs/blob/9237f66c52e5daa40458f94f9565e18e8132f5a6/src/cp.js
|
||||
*
|
||||
* @param source source path
|
||||
* @param dest destination path
|
||||
* @param options optional. See CopyOptions.
|
||||
*/
|
||||
function cp(source, dest, options = {}) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
const { force, recursive } = readCopyOptions(options);
|
||||
const destStat = (yield ioUtil.exists(dest)) ? yield ioUtil.stat(dest) : null;
|
||||
// Dest is an existing file, but not forcing
|
||||
if (destStat && destStat.isFile() && !force) {
|
||||
return;
|
||||
}
|
||||
// If dest is an existing directory, should copy inside.
|
||||
const newDest = destStat && destStat.isDirectory()
|
||||
? path.join(dest, path.basename(source))
|
||||
: dest;
|
||||
if (!(yield ioUtil.exists(source))) {
|
||||
throw new Error(`no such file or directory: ${source}`);
|
||||
}
|
||||
const sourceStat = yield ioUtil.stat(source);
|
||||
if (sourceStat.isDirectory()) {
|
||||
if (!recursive) {
|
||||
throw new Error(`Failed to copy. ${source} is a directory, but tried to copy without recursive flag.`);
|
||||
}
|
||||
else {
|
||||
yield cpDirRecursive(source, newDest, 0, force);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (path.relative(source, newDest) === '') {
|
||||
// a file cannot be copied to itself
|
||||
throw new Error(`'${newDest}' and '${source}' are the same file`);
|
||||
}
|
||||
yield copyFile(source, newDest, force);
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.cp = cp;
|
||||
/**
|
||||
* Moves a path.
|
||||
*
|
||||
* @param source source path
|
||||
* @param dest destination path
|
||||
* @param options optional. See MoveOptions.
|
||||
*/
|
||||
function mv(source, dest, options = {}) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
if (yield ioUtil.exists(dest)) {
|
||||
let destExists = true;
|
||||
if (yield ioUtil.isDirectory(dest)) {
|
||||
// If dest is directory copy src into dest
|
||||
dest = path.join(dest, path.basename(source));
|
||||
destExists = yield ioUtil.exists(dest);
|
||||
}
|
||||
if (destExists) {
|
||||
if (options.force == null || options.force) {
|
||||
yield rmRF(dest);
|
||||
}
|
||||
else {
|
||||
throw new Error('Destination already exists');
|
||||
}
|
||||
}
|
||||
}
|
||||
yield mkdirP(path.dirname(dest));
|
||||
yield ioUtil.rename(source, dest);
|
||||
});
|
||||
}
|
||||
exports.mv = mv;
|
||||
/**
|
||||
* Remove a path recursively with force
|
||||
*
|
||||
* @param inputPath path to remove
|
||||
*/
|
||||
function rmRF(inputPath) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
if (ioUtil.IS_WINDOWS) {
|
||||
// Node doesn't provide a delete operation, only an unlink function. This means that if the file is being used by another
|
||||
// program (e.g. antivirus), it won't be deleted. To address this, we shell out the work to rd/del.
|
||||
try {
|
||||
if (yield ioUtil.isDirectory(inputPath, true)) {
|
||||
yield exec(`rd /s /q "${inputPath}"`);
|
||||
}
|
||||
else {
|
||||
yield exec(`del /f /a "${inputPath}"`);
|
||||
}
|
||||
}
|
||||
catch (err) {
|
||||
// if you try to delete a file that doesn't exist, desired result is achieved
|
||||
// other errors are valid
|
||||
if (err.code !== 'ENOENT')
|
||||
throw err;
|
||||
}
|
||||
// Shelling out fails to remove a symlink folder with missing source, this unlink catches that
|
||||
try {
|
||||
yield ioUtil.unlink(inputPath);
|
||||
}
|
||||
catch (err) {
|
||||
// if you try to delete a file that doesn't exist, desired result is achieved
|
||||
// other errors are valid
|
||||
if (err.code !== 'ENOENT')
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
else {
|
||||
let isDir = false;
|
||||
try {
|
||||
isDir = yield ioUtil.isDirectory(inputPath);
|
||||
}
|
||||
catch (err) {
|
||||
// if you try to delete a file that doesn't exist, desired result is achieved
|
||||
// other errors are valid
|
||||
if (err.code !== 'ENOENT')
|
||||
throw err;
|
||||
return;
|
||||
}
|
||||
if (isDir) {
|
||||
yield exec(`rm -rf "${inputPath}"`);
|
||||
}
|
||||
else {
|
||||
yield ioUtil.unlink(inputPath);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
exports.rmRF = rmRF;
|
||||
/**
|
||||
* Make a directory. Creates the full path with folders in between
|
||||
* Will throw if it fails
|
||||
*
|
||||
* @param fsPath path to create
|
||||
* @returns Promise<void>
|
||||
*/
|
||||
function mkdirP(fsPath) {
|
||||
return __awaiter(this, void 0, void 0, function* () {
|
||||
yield ioUtil.mkdirP(fsPath);
|
||||
});
|
||||
}
|
||||
exports.mkdirP = mkdirP;
|
||||
/**
 * Returns the path of a tool had the tool actually been invoked. Resolves via PATH.
 * If `check` is true and the tool cannot be found, throws.
 *
 * @param tool name of the tool
 * @param check whether to throw if the tool does not exist
 * @returns Promise<string> path to tool, or '' when not found and check is false
 */
function which(tool, check) {
    return __awaiter(this, void 0, void 0, function* () {
        if (!tool) {
            throw new Error("parameter 'tool' is required");
        }
        // recursive when check=true: resolve via the non-checking path, then validate
        if (check) {
            const result = yield which(tool, false);
            if (!result) {
                if (ioUtil.IS_WINDOWS) {
                    throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also verify the file has a valid extension for an executable file.`);
                }
                else {
                    throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also check the file mode to verify the file is executable.`);
                }
            }
            // Fix: return the path already found above instead of falling through
            // and repeating the entire PATH search a second time.
            return result;
        }
        try {
            // build the list of extensions to try (Windows only, from PATHEXT)
            const extensions = [];
            if (ioUtil.IS_WINDOWS && process.env.PATHEXT) {
                for (const extension of process.env.PATHEXT.split(path.delimiter)) {
                    if (extension) {
                        extensions.push(extension);
                    }
                }
            }
            // if it's rooted, return it if it exists. otherwise return empty.
            if (ioUtil.isRooted(tool)) {
                const filePath = yield ioUtil.tryGetExecutablePath(tool, extensions);
                if (filePath) {
                    return filePath;
                }
                return '';
            }
            // if tool contains any path separators but isn't rooted, return empty
            if (tool.includes('/') || (ioUtil.IS_WINDOWS && tool.includes('\\'))) {
                return '';
            }
            // build the list of directories
            //
            // Note, technically "where" checks the current directory on Windows. From a toolkit perspective,
            // it feels like we should not do this. Checking the current directory seems like more of a use
            // case of a shell, and the which() function exposed by the toolkit should strive for consistency
            // across platforms.
            const directories = [];
            if (process.env.PATH) {
                for (const p of process.env.PATH.split(path.delimiter)) {
                    if (p) {
                        directories.push(p);
                    }
                }
            }
            // return the first match
            for (const directory of directories) {
                const filePath = yield ioUtil.tryGetExecutablePath(directory + path.sep + tool, extensions);
                if (filePath) {
                    return filePath;
                }
            }
            return '';
        }
        catch (err) {
            throw new Error(`which failed with message ${err.message}`);
        }
    });
}
exports.which = which;
|
||||
// Normalize user-supplied copy options:
// - `force` defaults to true only when unset (null/undefined); any other
//   value — including falsy ones like 0 or false — is passed through as-is.
// - `recursive` is coerced to a strict boolean.
function readCopyOptions(options) {
    let force = true;
    if (options.force != null) {
        force = options.force;
    }
    return { force: force, recursive: !!options.recursive };
}
|
||||
// Recursively copy sourceDir into destDir, preserving the directory's mode.
// `currentDepth` caps the recursion so cycles can't run away.
function cpDirRecursive(sourceDir, destDir, currentDepth, force) {
    return __awaiter(this, void 0, void 0, function* () {
        // Hard cap on recursion depth to avoid a runaway recursive copy.
        if (currentDepth >= 255) {
            return;
        }
        const nextDepth = currentDepth + 1;
        yield mkdirP(destDir);
        const entries = yield ioUtil.readdir(sourceDir);
        for (const entry of entries) {
            const from = `${sourceDir}/${entry}`;
            const to = `${destDir}/${entry}`;
            const fromStat = yield ioUtil.lstat(from);
            if (fromStat.isDirectory()) {
                // Descend into subdirectories.
                yield cpDirRecursive(from, to, nextDepth, force);
            }
            else {
                yield copyFile(from, to, force);
            }
        }
        // Mirror the source directory's mode onto the newly created directory.
        yield ioUtil.chmod(destDir, (yield ioUtil.stat(sourceDir)).mode);
    });
}
|
||||
// Buffered file copy. Regular files are copied directly (honoring `force`);
// symlinks are recreated at the destination rather than dereferenced.
function copyFile(srcFile, destFile, force) {
    return __awaiter(this, void 0, void 0, function* () {
        const srcStat = yield ioUtil.lstat(srcFile);
        if (!srcStat.isSymbolicLink()) {
            // Regular file: copy unless the destination exists and force is off.
            if (!(yield ioUtil.exists(destFile)) || force) {
                yield ioUtil.copyFile(srcFile, destFile);
            }
            return;
        }
        // Symlink: remove any existing destination first, then re-link it.
        try {
            yield ioUtil.lstat(destFile);
            yield ioUtil.unlink(destFile);
        }
        catch (e) {
            if (e.code === 'EPERM') {
                // Try to override file permission, then retry the delete.
                yield ioUtil.chmod(destFile, '0666');
                yield ioUtil.unlink(destFile);
            }
            // other errors = it doesn't exist, no work to do
        }
        const linkTarget = yield ioUtil.readlink(srcFile);
        yield ioUtil.symlink(linkTarget, destFile, ioUtil.IS_WINDOWS ? 'junction' : null);
    });
}
|
||||
//# sourceMappingURL=io.js.map
|
||||
1
node_modules/@actions/io/lib/io.js.map
generated
vendored
1
node_modules/@actions/io/lib/io.js.map
generated
vendored
File diff suppressed because one or more lines are too long
65
node_modules/@actions/io/package.json
generated
vendored
65
node_modules/@actions/io/package.json
generated
vendored
@ -1,65 +0,0 @@
|
||||
{
|
||||
"_args": [
|
||||
[
|
||||
"@actions/io@1.0.1",
|
||||
"E:\\k8s-actions\\deploy\\k8s-deploy"
|
||||
]
|
||||
],
|
||||
"_from": "@actions/io@1.0.1",
|
||||
"_id": "@actions/io@1.0.1",
|
||||
"_inBundle": false,
|
||||
"_integrity": "sha512-rhq+tfZukbtaus7xyUtwKfuiCRXd1hWSfmJNEpFgBQJ4woqPEpsBw04awicjwz9tyG2/MVhAEMfVn664Cri5zA==",
|
||||
"_location": "/@actions/io",
|
||||
"_phantomChildren": {},
|
||||
"_requested": {
|
||||
"type": "version",
|
||||
"registry": true,
|
||||
"raw": "@actions/io@1.0.1",
|
||||
"name": "@actions/io",
|
||||
"escapedName": "@actions%2fio",
|
||||
"scope": "@actions",
|
||||
"rawSpec": "1.0.1",
|
||||
"saveSpec": null,
|
||||
"fetchSpec": "1.0.1"
|
||||
},
|
||||
"_requiredBy": [
|
||||
"/",
|
||||
"/@actions/tool-cache"
|
||||
],
|
||||
"_resolved": "https://registry.npmjs.org/@actions/io/-/io-1.0.1.tgz",
|
||||
"_spec": "1.0.1",
|
||||
"_where": "E:\\k8s-actions\\deploy\\k8s-deploy",
|
||||
"bugs": {
|
||||
"url": "https://github.com/actions/toolkit/issues"
|
||||
},
|
||||
"description": "Actions io lib",
|
||||
"directories": {
|
||||
"lib": "lib",
|
||||
"test": "__tests__"
|
||||
},
|
||||
"files": [
|
||||
"lib"
|
||||
],
|
||||
"gitHead": "a2ab4bcf78e4f7080f0d45856e6eeba16f0bbc52",
|
||||
"homepage": "https://github.com/actions/toolkit/tree/master/packages/io",
|
||||
"keywords": [
|
||||
"github",
|
||||
"actions",
|
||||
"io"
|
||||
],
|
||||
"license": "MIT",
|
||||
"main": "lib/io.js",
|
||||
"name": "@actions/io",
|
||||
"publishConfig": {
|
||||
"access": "public"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/actions/toolkit.git"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: run tests from root\" && exit 1",
|
||||
"tsc": "tsc"
|
||||
},
|
||||
"version": "1.0.1"
|
||||
}
|
||||
7
node_modules/@actions/tool-cache/LICENSE.md
generated
vendored
7
node_modules/@actions/tool-cache/LICENSE.md
generated
vendored
@ -1,7 +0,0 @@
|
||||
Copyright 2019 GitHub
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user