diff --git a/.github/workflows/branch-spellcheck.yml b/.github/workflows/branch-spellcheck.yml index 732e531f99..a03537163c 100644 --- a/.github/workflows/branch-spellcheck.yml +++ b/.github/workflows/branch-spellcheck.yml @@ -24,6 +24,7 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: + version: 10.13.1 run_install: false - name: Setup Node @@ -33,6 +34,19 @@ jobs: cache: 'pnpm' cache-dependency-path: 'pnpm-lock.yaml' + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + - name: PNPM install run: | echo; echo "cd to workspace" diff --git a/.github/workflows/branch-validation-full.yml b/.github/workflows/branch-validation-full.yml new file mode 100644 index 0000000000..feb4b11981 --- /dev/null +++ b/.github/workflows/branch-validation-full.yml @@ -0,0 +1,123 @@ +name: Deploy to Channel Full Refresh + +on: + workflow_dispatch: + +# Allow this job to clone the repo and create a page deployment +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + branch_tests: + runs-on: ubuntu-latest + strategy: + fail-fast: true + + env: + SHOULD_DEPLOY: ${{ secrets.OCTOPUSSERVERURL != '' || '' }} + OCTOPUS_URL: ${{ secrets.OCTOPUSSERVERURL }} + OCTOPUS_API_KEY: ${{ secrets.OCTOPUSSERVERAPIKEY }} + OCTOPUS_SPACE: "DevOps Microsite" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.13.1 + run_install: false + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 'pnpm' + cache-dependency-path: 'pnpm-lock.yaml' + + - name: Get pnpm store directory + shell: bash + 
run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: PNPM and Playwright install + run: | + echo; echo "cd to workspace" + cd $GITHUB_WORKSPACE + echo; echo "listing" + ls + echo; echo "NPM install" + pnpm install + + - name: Astro build and test + run: | + export NODE_OPTIONS=--max_old_space_size=4096 + pnpm dev:img + pnpm test + pnpm crawl + + - name: Set Version + run: | + echo "PACKAGE_VERSION=$(date +'%Y.%m.%d').$GITHUB_RUN_NUMBER" >> $GITHUB_ENV + + - name: Check output + run: | + echo; echo "cd to workspace" + cd $GITHUB_WORKSPACE + echo; echo "listing" + ls + + - name: Create a Zip package 🐙 + id: package + uses: OctopusDeploy/create-zip-package-action@v3 + with: + package_id: 'DocsMicrosite' + version: "${{ env.PACKAGE_VERSION }}-pullrequest.full" + base_path: "./dist" + output_folder: "./artifacts" + files: | + **/*.* + + - name: Push a package to Octopus Deploy 🐙 + uses: OctopusDeploy/push-package-action@v3.0.2 + if: ${{ env.SHOULD_DEPLOY }} + with: + packages: ${{ steps.package.outputs.package_file_path }} + + - name: Let people download package + uses: actions/upload-artifact@v4 + if: ${{ ! 
env.SHOULD_DEPLOY }} + with: + name: docs-microsite + path: ${{ steps.package.outputs.package_file_path }} + + - name: Push build information to Octopus Deploy 🐙 + uses: OctopusDeploy/push-build-information-action@v3 + if: ${{ env.SHOULD_DEPLOY }} + with: + version: "${{ env.PACKAGE_VERSION }}-pullrequest.full" + packages: ${{ steps.package.outputs.package_file_path }} + + - name: Create a release in Octopus Deploy 🐙 + id: "create_release" + uses: OctopusDeploy/create-release-action@v3 + if: ${{ env.SHOULD_DEPLOY }} + with: + project: "Docs Microsite" + package_version: "${{ env.PACKAGE_VERSION }}-pullrequest.full" + git_ref: ${{ github.ref }} diff --git a/.github/workflows/branch-validation.yml b/.github/workflows/branch-validation.yml index 2c6bc75cd9..79381ed303 100644 --- a/.github/workflows/branch-validation.yml +++ b/.github/workflows/branch-validation.yml @@ -22,7 +22,7 @@ jobs: SHOULD_DEPLOY: ${{ secrets.OCTOPUSSERVERURL != '' || '' }} OCTOPUS_URL: ${{ secrets.OCTOPUSSERVERURL }} OCTOPUS_API_KEY: ${{ secrets.OCTOPUSSERVERAPIKEY }} - OCTOPUS_SPACE: 'DevOps Microsite' + OCTOPUS_SPACE: "DevOps Microsite" steps: - name: Checkout @@ -31,6 +31,7 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: + version: 10.13.1 run_install: false - name: Setup Node @@ -40,6 +41,19 @@ jobs: cache: 'pnpm' cache-dependency-path: 'pnpm-lock.yaml' + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + - name: PNPM and Playwright install run: | echo; echo "cd to workspace" @@ -54,7 +68,8 @@ jobs: export NODE_OPTIONS=--max_old_space_size=4096 pnpm dev:img pnpm test - node ./src/themes/octopus/utilities/page-report.js + pnpm crawl + pnpm thin - name: Set Version run: | @@ -72,9 +87,9 @@ jobs: 
uses: OctopusDeploy/create-zip-package-action@v3 with: package_id: 'DocsMicrosite' - version: '${{ env.PACKAGE_VERSION }}-pullrequest' - base_path: './dist' - output_folder: './artifacts' + version: "${{ env.PACKAGE_VERSION }}-pullrequest" + base_path: "./dist" + output_folder: "./artifacts" files: | **/*.* @@ -95,14 +110,14 @@ jobs: uses: OctopusDeploy/push-build-information-action@v3 if: ${{ env.SHOULD_DEPLOY }} with: - version: '${{ env.PACKAGE_VERSION }}-pullrequest' + version: "${{ env.PACKAGE_VERSION }}-pullrequest" packages: ${{ steps.package.outputs.package_file_path }} - name: Create a release in Octopus Deploy 🐙 - id: 'create_release' + id: "create_release" uses: OctopusDeploy/create-release-action@v3 if: ${{ env.SHOULD_DEPLOY }} with: - project: 'Docs Microsite' - package_version: '${{ env.PACKAGE_VERSION }}-pullrequest' + project: "Docs Microsite" + package_version: "${{ env.PACKAGE_VERSION }}-pullrequest" git_ref: ${{ github.ref }} diff --git a/.github/workflows/build-astro-full.yml b/.github/workflows/build-astro-full.yml new file mode 100644 index 0000000000..5f13ea588a --- /dev/null +++ b/.github/workflows/build-astro-full.yml @@ -0,0 +1,123 @@ +name: Deploy Full Refresh + +on: + workflow_dispatch: + +# Allow this job to clone the repo and create a page deployment +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_and_deploy: + runs-on: ubuntu-latest + strategy: + fail-fast: true + + env: + SHOULD_DEPLOY: ${{ secrets.OCTOPUSSERVERURL != '' || '' }} + OCTOPUS_URL: ${{ secrets.OCTOPUSSERVERURL }} + OCTOPUS_API_KEY: ${{ secrets.OCTOPUSSERVERAPIKEY }} + OCTOPUS_SPACE: "DevOps Microsite" + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.13.1 + run_install: false + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 
'pnpm' + cache-dependency-path: 'pnpm-lock.yaml' + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: PNPM and Playwright install + run: | + echo; echo "cd to workspace" + cd $GITHUB_WORKSPACE + echo; echo "listing" + ls + echo; echo "PNPM install" + pnpm install + + - name: Astro build and test + run: | + export NODE_OPTIONS=--max_old_space_size=4096 + pnpm dev:img + pnpm test + pnpm crawl + + - name: Set Version + run: | + echo "PACKAGE_VERSION=$(date +'%Y.%m.%d').$GITHUB_RUN_NUMBER" >> $GITHUB_ENV + + - name: Check output + run: | + echo; echo "cd to workspace" + cd $GITHUB_WORKSPACE + echo; echo "listing" + ls + + - name: Create a Zip package 🐙 + id: package + uses: OctopusDeploy/create-zip-package-action@v3 + with: + package_id: 'DocsMicrosite' + version: "${{ env.PACKAGE_VERSION }}-full" + base_path: "./dist" + output_folder: "./artifacts" + files: | + **/*.* + + - name: Push a package to Octopus Deploy 🐙 + uses: OctopusDeploy/push-package-action@v3.0.2 + if: ${{ env.SHOULD_DEPLOY }} + with: + packages: ${{ steps.package.outputs.package_file_path }} + + - name: Let people download package + uses: actions/upload-artifact@v4 + if: ${{ ! 
env.SHOULD_DEPLOY }} + with: + name: docs-microsite + path: ${{ steps.package.outputs.package_file_path }} + + - name: Push build information to Octopus Deploy 🐙 + uses: OctopusDeploy/push-build-information-action@v3 + if: ${{ env.SHOULD_DEPLOY }} + with: + version: "${{ env.PACKAGE_VERSION }}-full" + packages: ${{ steps.package.outputs.package_file_path }} + + - name: Create a release in Octopus Deploy 🐙 + id: "create_release" + uses: OctopusDeploy/create-release-action@v3 + if: ${{ env.SHOULD_DEPLOY }} + with: + project: "Docs Microsite" + package_version: "${{ env.PACKAGE_VERSION }}-full" + git_ref: ${{ github.ref }} diff --git a/.github/workflows/build-astro.yml b/.github/workflows/build-astro.yml index 9e0a69cc7f..6f9997b9c0 100644 --- a/.github/workflows/build-astro.yml +++ b/.github/workflows/build-astro.yml @@ -1,11 +1,10 @@ name: Deploy on: - # Disable on push temporarily while we're doing the migration workflow_dispatch: push: - branches: [main] - + branches: [ main ] + # Allow this job to clone the repo and create a page deployment permissions: contents: read @@ -26,7 +25,7 @@ jobs: SHOULD_DEPLOY: ${{ secrets.OCTOPUSSERVERURL != '' || '' }} OCTOPUS_URL: ${{ secrets.OCTOPUSSERVERURL }} OCTOPUS_API_KEY: ${{ secrets.OCTOPUSSERVERAPIKEY }} - OCTOPUS_SPACE: 'DevOps Microsite' + OCTOPUS_SPACE: "DevOps Microsite" steps: - name: Checkout @@ -35,6 +34,7 @@ jobs: - name: Install pnpm uses: pnpm/action-setup@v4 with: + version: 10.13.1 run_install: false - name: Setup Node @@ -44,13 +44,26 @@ jobs: cache: 'pnpm' cache-dependency-path: 'pnpm-lock.yaml' + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - uses: actions/cache@v3 + name: Setup pnpm cache + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + - name: PNPM and Playwright install run: | echo; echo "cd to workspace" cd 
$GITHUB_WORKSPACE echo; echo "listing" ls - echo; echo "NPM install" + echo; echo "PNPM install" pnpm install - name: Astro build and test @@ -58,7 +71,8 @@ jobs: export NODE_OPTIONS=--max_old_space_size=4096 pnpm dev:img pnpm test - node ./src/themes/octopus/utilities/page-report.js + pnpm crawl + pnpm thin - name: Set Version run: | @@ -76,9 +90,9 @@ jobs: uses: OctopusDeploy/create-zip-package-action@v3 with: package_id: 'DocsMicrosite' - version: '${{ env.PACKAGE_VERSION }}' - base_path: './dist' - output_folder: './artifacts' + version: "${{ env.PACKAGE_VERSION }}" + base_path: "./dist" + output_folder: "./artifacts" files: | **/*.* @@ -99,14 +113,14 @@ jobs: uses: OctopusDeploy/push-build-information-action@v3 if: ${{ env.SHOULD_DEPLOY }} with: - version: '${{ env.PACKAGE_VERSION }}' + version: "${{ env.PACKAGE_VERSION }}" packages: ${{ steps.package.outputs.package_file_path }} - name: Create a release in Octopus Deploy 🐙 - id: 'create_release' + id: "create_release" uses: OctopusDeploy/create-release-action@v3 if: ${{ env.SHOULD_DEPLOY }} with: - project: 'Docs Microsite' - package_version: '${{ env.PACKAGE_VERSION }}' + project: "Docs Microsite" + package_version: "${{ env.PACKAGE_VERSION }}" git_ref: ${{ github.ref }} diff --git a/docs/credits.md b/docs/credits.md index f372c8f490..e314a66021 100644 --- a/docs/credits.md +++ b/docs/credits.md @@ -258,7 +258,7 @@ Octopus Deploy is made possible thanks to the following great third-party produc | @swc/core-linux-x64-musl | 강동윤 | [https://github.com/swc-project/swc](https://github.com/swc-project/swc) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) [MIT](https://spdx.org/licenses/MIT.html) | | @swc/counter | 강동윤 | [https://github.com/swc-project/pkgs](https://github.com/swc-project/pkgs) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) | | @swc/jest | 강동윤 | [https://github.com/swc-project/jest](https://github.com/swc-project/jest) | [MIT](https://spdx.org/licenses/MIT.html) | -| 
@swc/plugin-emotion | 강동윤 | [+https://github.com/swc-project/plugins](+https://github.com/swc-project/plugins) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) | +| @swc/plugin-emotion | 강동윤 | [https://github.com/swc-project/plugins](https://github.com/swc-project/plugins) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) | +| @swc/register | 강동윤 | [https://github.com/swc-project/register](https://github.com/swc-project/register) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) [MIT](https://spdx.org/licenses/MIT.html) | +| @swc/types | 강동윤 | [https://github.com/swc-project/swc](https://github.com/swc-project/swc) | [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) | +| @testing-library/dom | Kent C. Dodds | [https://github.com/testing-library/dom-testing-library](https://github.com/testing-library/dom-testing-library) | [MIT](https://spdx.org/licenses/MIT.html) | diff --git a/package.json b/package.json index c4f7cc5334..52962b88f6 100644 --- a/package.json +++ b/package.json @@ -13,6 +13,7 @@ "dev:img": "node ./src/themes/octopus/utilities/img.mjs", "dev:astro": "astro dev", "dev:watch": "pnpm watch", + "crawl": "linkinator ./dist --skip \"^(?!http://localhost)\" --recurse --verbosity error", "test": "astro build && npx playwright install --with-deps && npx playwright test", "spellcheck": "git fetch origin main:refs/remotes/origin/main && git diff origin/main --name-only --diff-filter=ACMRTUXB | cspell --no-must-find-files --file-list stdin", "spellcheck-all": "cspell \"**/*.{md,mdx}\"", @@ -21,11 +22,11 @@ "astro": "astro", "css": "robocopy ./public/docs/css/ ./dist/docs/css/ /mir && robocopy ./public/docs/js/ ./dist/docs/js/ /mir", "format": "prettier --write --plugin=prettier-plugin-astro 'src/**/*.{js,mjs,ts,astro,css}' 'public/docs/js/**/*.js' 'public/docs/css/**/*.css'", - "watch": "onchange 'src/**/*.{js,mjs,ts,astro,css}' 'public/docs/js/**/*.js' 'public/docs/css/**/*.css' -- prettier --write --plugin=prettier-plugin-astro 
{{changed}}" + "watch": "onchange 'src/**/*.{js,mjs,ts,astro,css}' 'public/docs/js/**/*.js' 'public/docs/css/**/*.css' -- prettier --write --plugin=prettier-plugin-astro {{changed}}", + "thin": "node ./src/themes/octopus/utilities/dist-thinner.mjs" }, "dependencies": { "@astrojs/mdx": "^4.3.3", - "@pnpm/exe": "^10.14.0", "astro": "^5.12.9", "astro-accelerator-utils": "^0.3.50", "cspell": "^8.19.4", @@ -34,12 +35,14 @@ "hast-util-from-selector": "^3.0.1", "html-to-text": "^9.0.5", "keyword-extractor": "^0.0.28", + "linkinator": "^6.1.4", "optional": "^0.1.4", "remark-directive": "^4.0.0", "remark-heading-id": "^1.0.1", "sharp": "^0.33.5" }, "devDependencies": { + "@pnpm/exe": "^10.14.0", "@playwright/test": "^1.54.2", "csv-parse": "^5.6.0", "npm-run-all": "^4.1.5", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 470638d72b..63456d017e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -16,9 +16,6 @@ importers: '@astrojs/mdx': specifier: ^4.3.3 version: 4.3.3(astro@5.12.9(@types/node@24.2.1)(rollup@4.46.2)(typescript@5.8.2)(yaml@2.8.1)) - '@pnpm/exe': - specifier: ^10.14.0 - version: 10.14.0 astro: specifier: ^5.12.9 version: 5.12.9(@types/node@24.2.1)(rollup@4.46.2)(typescript@5.8.2)(yaml@2.8.1) @@ -43,6 +40,9 @@ importers: keyword-extractor: specifier: ^0.0.28 version: 0.0.28 + linkinator: + specifier: ^6.1.4 + version: 6.1.4 optional: specifier: ^0.1.4 version: 0.1.4 @@ -59,6 +59,9 @@ importers: '@playwright/test': specifier: ^1.54.2 version: 1.54.2 + '@pnpm/exe': + specifier: ^10.14.0 + version: 10.14.0 csv-parse: specifier: ^5.6.0 version: 5.6.0 @@ -630,6 +633,10 @@ packages: '@oslojs/encoding@1.1.0': resolution: {integrity: sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ==} + '@pkgjs/parseargs@0.11.0': + resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + '@playwright/test@1.54.2': resolution: {integrity: 
sha512-A+znathYxPf+72riFd1r1ovOLqsIIB0jKIoPjyK2kqEIe30/6jF6BC7QNluHuwUmsD2tv1XZVugN8GqfTMOxsA==} engines: {node: '>=18'} @@ -860,6 +867,10 @@ packages: engines: {node: '>=0.4.0'} hasBin: true + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + ansi-align@3.0.1: resolution: {integrity: sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==} @@ -964,6 +975,9 @@ packages: brace-expansion@1.1.12: resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} + brace-expansion@2.0.2: + resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + braces@3.0.3: resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} engines: {node: '>=8'} @@ -1320,6 +1334,9 @@ packages: engines: {node: '>=18'} hasBin: true + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + escape-string-regexp@1.0.5: resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==} engines: {node: '>=0.8.0'} @@ -1438,6 +1455,10 @@ packages: functions-have-names@1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + gaxios@6.7.1: + resolution: {integrity: sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==} + engines: {node: '>=14'} + gensequence@7.0.0: resolution: {integrity: sha512-47Frx13aZh01afHJTB3zTtKIlFI6vWY+MYCN9Qpew6i52rfKjnhCF/l1YlC8UmEMvvntZZ6z4PiCcmyuedR2aQ==} engines: {node: '>=18'} @@ -1465,6 +1486,10 @@ packages: resolution: {integrity: 
sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} + glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} + hasBin: true + glob@11.0.3: resolution: {integrity: sha512-2Nim7dha1KVkaiF4q6Dj+ngPPMdfvLJEOpZk/jKiUAkqKebpGAWQXAq9z1xu9HKu5lWfqw/FASuccEjyznjPaA==} engines: {node: 20 || >=22} @@ -1575,12 +1600,19 @@ packages: html-void-elements@3.0.0: resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + htmlparser2@10.0.0: + resolution: {integrity: sha512-TwAZM+zE5Tq3lrEHvOlvwgj1XLWQCtaaibSN11Q+gGBAS7Y1uZSWwXXRe4iF6OXnaq1riyQAPFOBtYc77Mxq0g==} + htmlparser2@8.0.2: resolution: {integrity: sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==} http-cache-semantics@4.2.0: resolution: {integrity: sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==} + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + ignore@5.3.2: resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} @@ -1726,6 +1758,10 @@ packages: resolution: {integrity: sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==} engines: {node: '>= 0.4'} + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + is-string@1.1.1: resolution: {integrity: sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==} engines: {node: '>= 0.4'} @@ -1760,6 +1796,9 @@ packages: isexe@2.0.0: resolution: {integrity: 
sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + jackspeak@4.1.1: resolution: {integrity: sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ==} engines: {node: 20 || >=22} @@ -1800,6 +1839,11 @@ packages: leac@0.6.0: resolution: {integrity: sha512-y+SqErxb8h7nE/fiEX07jsbuhrpO9lL8eca7/Y1nuWV2moNlXhyd59iDGcRf6moVyDMbmTNzL40SUyrFU/yDpg==} + linkinator@6.1.4: + resolution: {integrity: sha512-7DXjwFiJ6rqye8OawwWi/CyDdKdIb69HLCbPhRI6tGSNnGruWFw8qucNsoWFXybel/I960UujFHefjvprhhvYA==} + engines: {node: '>=18'} + hasBin: true + load-json-file@4.0.0: resolution: {integrity: sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==} engines: {node: '>=4'} @@ -1830,6 +1874,11 @@ packages: markdown-table@3.0.4: resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + marked@13.0.3: + resolution: {integrity: sha512-rqRix3/TWzE9rIoFGIn8JmsVfhiuC8VIQ8IdX5TfzmeBucdY05/0UlzKaw0eVtpcN/OdVFpBk7CjKGo9iHJ/zA==} + engines: {node: '>= 18'} + hasBin: true + math-intrinsics@1.1.0: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} @@ -1895,6 +1944,10 @@ packages: resolution: {integrity: sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==} engines: {node: '>= 0.10.0'} + meow@13.2.0: + resolution: {integrity: sha512-pxQJQzB6djGPXh08dacEloMFopsOqGVRKFPYvPOt9XDZ1HasbgDZA74CJGreSU4G3Ak7EFJGoiH2auq+yXISgA==} + engines: {node: '>=18'} + micromark-core-commonmark@2.0.3: resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} @@ -2003,6 +2056,11 @@ packages: 
micromark@4.0.2: resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==} + mime@4.0.7: + resolution: {integrity: sha512-2OfDPL+e03E0LrXaGYOtTFIYhiuzep94NSsuhrNULq+stylcJedcHdzHtz0atMUuGwJfFYs0YL5xeC/Ca2x0eQ==} + engines: {node: '>=16'} + hasBin: true + minimatch@10.0.3: resolution: {integrity: sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw==} engines: {node: 20 || >=22} @@ -2010,6 +2068,10 @@ packages: minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + minipass@7.1.2: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} @@ -2154,6 +2216,10 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + path-scurry@1.11.1: + resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} + engines: {node: '>=16 || 14 >=14.18'} + path-scurry@2.0.0: resolution: {integrity: sha512-ypGJsmGtdXUOeM5u93TyeIEfEhM6s+ljAhrk5vAvSx8uyY/02OvrZnA0YNGUrPXfpJMgI1ODd3nwz8Npx4O4cg==} engines: {node: 20 || >=22} @@ -2384,6 +2450,9 @@ packages: engines: {node: '>=10'} hasBin: true + server-destroy@1.0.1: + resolution: {integrity: sha512-rb+9B5YBIEzYcD6x2VKidaa+cqYBJQKnU4oe4E3ANwRRN56yk/ua1YCJT1n21NTS8w6CcOclAKNP3PhdCXKYtQ==} + set-function-length@1.2.2: resolution: {integrity: sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==} engines: {node: '>= 0.4'} @@ -2479,6 +2548,10 @@ packages: sprintf-js@1.0.3: resolution: 
{integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + srcset@5.0.1: + resolution: {integrity: sha512-/P1UYbGfJVlxZag7aABNRrulEXAwCSDo7fklafOQrantuPTDmYgijJMks2zusPCVzgW9+4P69mq7w6pYuZpgxw==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + stop-iteration-iterator@1.1.0: resolution: {integrity: sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==} engines: {node: '>= 0.4'} @@ -2738,6 +2811,10 @@ packages: uploadthing: optional: true + uuid@9.0.1: + resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + hasBin: true + validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} @@ -3413,6 +3490,9 @@ snapshots: '@oslojs/encoding@1.1.0': {} + '@pkgjs/parseargs@0.11.0': + optional: true + '@playwright/test@1.54.2': dependencies: playwright: 1.54.2 @@ -3600,6 +3680,8 @@ snapshots: acorn@8.15.0: {} + agent-base@7.1.4: {} + ansi-align@3.0.1: dependencies: string-width: 4.2.3 @@ -3793,6 +3875,10 @@ snapshots: balanced-match: 1.0.2 concat-map: 0.0.1 + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + braces@3.0.3: dependencies: fill-range: 7.1.1 @@ -4272,6 +4358,8 @@ snapshots: '@esbuild/win32-ia32': 0.25.8 '@esbuild/win32-x64': 0.25.8 + escape-html@1.0.3: {} + escape-string-regexp@1.0.5: {} escape-string-regexp@5.0.0: {} @@ -4393,6 +4481,17 @@ snapshots: functions-have-names@1.2.3: {} + gaxios@6.7.1: + dependencies: + extend: 3.0.2 + https-proxy-agent: 7.0.6 + is-stream: 2.0.1 + node-fetch: 2.7.0 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + - supports-color + gensequence@7.0.0: {} get-east-asian-width@1.3.0: {} @@ -4427,6 +4526,15 @@ snapshots: dependencies: is-glob: 4.0.3 + glob@10.4.5: + dependencies: + foreground-child: 3.3.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 
7.1.2 + package-json-from-dist: 1.0.1 + path-scurry: 1.11.1 + glob@11.0.3: dependencies: foreground-child: 3.3.1 @@ -4641,6 +4749,13 @@ snapshots: html-void-elements@3.0.0: {} + htmlparser2@10.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.2.2 + entities: 6.0.1 + htmlparser2@8.0.2: dependencies: domelementtype: 2.3.0 @@ -4650,6 +4765,13 @@ snapshots: http-cache-semantics@4.2.0: {} + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.1 + transitivePeerDependencies: + - supports-color + ignore@5.3.2: {} import-fresh@3.3.1: @@ -4783,6 +4905,8 @@ snapshots: dependencies: call-bound: 1.0.4 + is-stream@2.0.1: {} + is-string@1.1.1: dependencies: call-bound: 1.0.4 @@ -4817,6 +4941,12 @@ snapshots: isexe@2.0.0: {} + jackspeak@3.4.3: + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + jackspeak@4.1.1: dependencies: '@isaacs/cliui': 8.0.2 @@ -4848,6 +4978,22 @@ snapshots: leac@0.6.0: {} + linkinator@6.1.4: + dependencies: + chalk: 5.5.0 + escape-html: 1.0.3 + gaxios: 6.7.1 + glob: 10.4.5 + htmlparser2: 10.0.0 + marked: 13.0.3 + meow: 13.2.0 + mime: 4.0.7 + server-destroy: 1.0.1 + srcset: 5.0.1 + transitivePeerDependencies: + - encoding + - supports-color + load-json-file@4.0.0: dependencies: graceful-fs: 4.2.11 @@ -4877,6 +5023,8 @@ snapshots: markdown-table@3.0.4: {} + marked@13.0.3: {} + math-intrinsics@1.1.0: {} mdast-util-definitions@6.0.0: @@ -5066,6 +5214,8 @@ snapshots: memorystream@0.3.1: {} + meow@13.2.0: {} + micromark-core-commonmark@2.0.3: dependencies: decode-named-character-reference: 1.2.0 @@ -5340,6 +5490,8 @@ snapshots: transitivePeerDependencies: - supports-color + mime@4.0.7: {} + minimatch@10.0.3: dependencies: '@isaacs/brace-expansion': 5.0.0 @@ -5348,6 +5500,10 @@ snapshots: dependencies: brace-expansion: 1.1.12 + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + minipass@7.1.2: {} mrmime@2.0.1: {} @@ -5504,6 +5660,11 @@ snapshots: 
path-parse@1.0.7: {} + path-scurry@1.11.1: + dependencies: + lru-cache: 10.4.3 + minipass: 7.1.2 + path-scurry@2.0.0: dependencies: lru-cache: 11.1.0 @@ -5831,6 +5992,8 @@ snapshots: semver@7.7.2: {} + server-destroy@1.0.1: {} + set-function-length@1.2.2: dependencies: define-data-property: 1.1.4 @@ -5964,6 +6127,8 @@ snapshots: sprintf-js@1.0.3: {} + srcset@5.0.1: {} + stop-iteration-iterator@1.1.0: dependencies: es-errors: 1.3.0 @@ -6224,6 +6389,8 @@ snapshots: ofetch: 1.4.1 ufo: 1.6.1 + uuid@9.0.1: {} + validate-npm-package-license@3.0.4: dependencies: spdx-correct: 3.2.0 diff --git a/src/config.ts b/src/config.ts index 2c2b5c2903..6e2a0c02b4 100644 --- a/src/config.ts +++ b/src/config.ts @@ -57,8 +57,6 @@ export const OPEN_GRAPH = { }; export const HEADER_SCRIPTS = ` - - diff --git a/src/pages/docs/administration/migrate-spaces-with-octoterra/index.md b/src/pages/docs/administration/migrate-spaces-with-octoterra/index.md index 01957ed632..159dda1cca 100644 --- a/src/pages/docs/administration/migrate-spaces-with-octoterra/index.md +++ b/src/pages/docs/administration/migrate-spaces-with-octoterra/index.md @@ -28,9 +28,9 @@ The [Import/Export tool](https://octopus.com/docs/projects/export-import) is bui Typically, you would choose the Import/Export tool to perform a migration. However, there are cases where the Import/Export tool is not suitable: -* You wish to migrate Config-as-Code (CaC) projects, as the Import/Export tool does not support CaC projects. -* You wish to recreate targets, as the Import/Export tool does not migrate targets. -* You wish to "own" or modify the intermediate format used for the migration, as the Import/Export tool uses an undocumented JSON format. +- You wish to migrate Config-as-Code (CaC) projects, as the Import/Export tool does not support CaC projects. +- You wish to recreate targets, as the Import/Export tool does not migrate targets. 
+- You wish to "own" or modify the intermediate format used for the migration, as the Import/Export tool uses an undocumented JSON format. ## Limitations of Octoterra and migrating projects between instances @@ -68,35 +68,35 @@ The recommended solution is to convert the projects in the destination space to The following is a non-exhaustive list of settings that are not exported by Octoterra: -* Users, teams, and roles -* Authentication settings -* Packages in the built-in feed -* Audit logs -* Releases and deployments -* Runbook runs -* Subscriptions -* API Keys -* SEIM settings -* ITSM settings -* GitHub app connections -* License details -* Node configuration -* SMTP settings -* Insights dashboards -* OIDC accounts +- Users, teams, and roles +- Authentication settings +- Packages in the built-in feed +- Audit logs +- Releases and deployments +- Runbook runs +- Subscriptions +- API Keys +- SEIM settings +- ITSM settings +- GitHub app connections +- License details +- Node configuration +- SMTP settings +- Insights dashboards +- OIDC accounts ## Prerequisites These are the prerequisites for migrating an Octopus space with the Octoterra Wizard: -* [Backup](https://octopus.com/docs/administration/data/backup-and-restore) and [update](https://octopus.com/docs/administration/upgrading) your Octopus instance. -* [Backup](https://octopus.com/docs/administration/data/backup-and-restore) your Octopus instance again before the migration. -* Download the Octoterra Wizard from [GitHub](https://github.com/OctopusSolutionsEngineering/OctoterraWizard). -* Install [Terraform](https://developer.hashicorp.com/terraform/install) on your local workstation. -* [Create an API key](https://octopus.com/docs/octopus-rest-api/how-to-create-an-api-key) for the source Octopus instance. -* [Create an API key](https://octopus.com/docs/octopus-rest-api/how-to-create-an-api-key) for the destination Octopus instance. 
-* Create a remote [Terraform backend](https://developer.hashicorp.com/terraform/language/settings/backends/configuration) to maintain the state of the Terraform resources. [AWS S3](https://developer.hashicorp.com/terraform/language/settings/backends/s3) and [Azure Storage Accounts](https://developer.hashicorp.com/terraform/language/settings/backends/azurerm) are supported. -* Install any required local tools. See the "Local Tools vs Container Images" section for more details. +- [Backup](https://octopus.com/docs/administration/data/backup-and-restore) and [update](https://octopus.com/docs/administration/upgrading) your Octopus instance. +- [Backup](https://octopus.com/docs/administration/data/backup-and-restore) your Octopus instance again before the migration. +- Download the Octoterra Wizard from [GitHub](https://github.com/OctopusSolutionsEngineering/OctoterraWizard). +- Install [Terraform](https://developer.hashicorp.com/terraform/install) on your local workstation. +- [Create an API key](https://octopus.com/docs/octopus-rest-api/how-to-create-an-api-key) for the source Octopus instance. +- [Create an API key](https://octopus.com/docs/octopus-rest-api/how-to-create-an-api-key) for the destination Octopus instance. +- Create a remote [Terraform backend](https://developer.hashicorp.com/terraform/language/settings/backends/configuration) to maintain the state of the Terraform resources. [AWS S3](https://developer.hashicorp.com/terraform/language/settings/backends/s3) and [Azure Storage Accounts](https://developer.hashicorp.com/terraform/language/settings/backends/azurerm) are supported. +- Install any required local tools. See the "Local Tools vs Container Images" section for more details. 
## Running the wizard @@ -150,9 +150,9 @@ If you are migrating from an on-premises Windows server, you will likely select If you select the `Local tools` option, your on-premises server or default worker pool must have the following tools installed: -* [Terraform](https://developer.hashicorp.com/terraform/install) -* [Python](https://www.python.org/downloads/) -* [PowerShell Core](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) +- [Terraform](https://developer.hashicorp.com/terraform/install) +- [Python](https://www.python.org/downloads/) +- [PowerShell Core](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) ## Space vs project level resources @@ -181,9 +181,9 @@ Projects can typically be migrated independently of each other. However, some st Because the Octoterra Wizard serializes Octopus resources to Terraform modules, we can use Terraform's functionality to implement a number of strategies for migrating spaces: -* Big bang migration, where the migration is done all at once. -* Incremental migration, where projects are migrated over time. -* Continual migration, where the destination server is updated as changes are made to the source server. +- Big bang migration, where the migration is done all at once. +- Incremental migration, where projects are migrated over time. +- Continual migration, where the destination server is updated as changes are made to the source server. ### Big bang migration @@ -193,9 +193,9 @@ To perform a big bang migration, run the wizard to completion. This will migrate Consider a big bang migration strategy when: -* You can migrate the space and project level resources in one operation. -* You are confident that the migrated resources work as expected. -* You can perform all the post-migration steps before the destination server is put into operation. +- You can migrate the space and project level resources in one operation. 
+- You are confident that the migrated resources work as expected. +- You can perform all the post-migration steps before the destination server is put into operation. ### Incremental migration @@ -209,9 +209,9 @@ You may consider disabling the project on the source server once it has been mig Consider an incremental migration strategy when: -* You need to break down the migration into multiple steps. -* Your projects have different risk profiles i.e. you have low risk projects you can migrate first, and only when they are successful can you migrate high risk projects. -* You wish to delegate the process of migrating projects to different teams who will perform the migration on their own schedule. +- You need to break down the migration into multiple steps. +- Your projects have different risk profiles i.e. you have low risk projects you can migrate first, and only when they are successful can you migrate high risk projects. +- You wish to delegate the process of migrating projects to different teams who will perform the migration on their own schedule. ### Continual migration @@ -225,9 +225,9 @@ The source server is considered the source of truth for space and project level Consider a continual migration strategy when: -* You wish to perform the bulk of the migration up front. -* You need to test the destination server while the source server is still actively used. -* You need to update the destination server with any changes made to the source server while testing the migration. +- You wish to perform the bulk of the migration up front. +- You need to test the destination server while the source server is still actively used. +- You need to update the destination server with any changes made to the source server while testing the migration. 
#### Limitations of continual migration @@ -262,7 +262,7 @@ The second approach is to delete any projects on the destination server and recr :::div{.hint} Projects are configured to ignore changes to the `project_group_id` and `name` with the following [lifecycle meta-argument](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle): -``` +```ruby lifecycle { ignore_changes = ["`project_group_id`", "name"] } @@ -270,10 +270,10 @@ Projects are configured to ignore changes to the `project_group_id` and `name` w This allows projects on the destination server to be moved to a new project group and have their name updated while allowing other settings to be updated. This means you must do one of the following to reflect a change to a project group or project name on the source server: -* Manually move the projects on the destination server to reflect the changes on the source server. -* Manually update the project name to reflect the changes on the source server. -* Use the second approach where the project on the destination server is deleted and recreated. -* Manually edit the Terraform module to remove `project_group_id` from the list of ignored changes. +- Manually move the projects on the destination server to reflect the changes on the source server. +- Manually update the project name to reflect the changes on the source server. +- Use the second approach where the project on the destination server is deleted and recreated. +- Manually edit the Terraform module to remove `project_group_id` from the list of ignored changes. ::: @@ -293,9 +293,9 @@ You must determine if concurrent deployments have the potential to cause issues, There are a number of strategies you can implement to prevent or manage concurrent deployments: -* Use a [named mutex](https://octopus.com/docs/administration/managing-infrastructure/run-multiple-processes-on-a-target-simultaneously#named-mutex-for-shared-resources). 
-* Disable projects to ensure only the source or destination server can run a migrated project. -* Disable targets to ensure only the source or destination server can interact with a migrated target. +- Use a [named mutex](https://octopus.com/docs/administration/managing-infrastructure/run-multiple-processes-on-a-target-simultaneously#named-mutex-for-shared-resources). +- Disable projects to ensure only the source or destination server can run a migrated project. +- Disable targets to ensure only the source or destination server can interact with a migrated target. ### Duplicated triggers @@ -309,12 +309,12 @@ The migration of space and project level resources will transfer most, but not a A number of sensitive values can not be migrated by Octoterra including: -* Account credentials -* Feed credentials -* Git credentials -* Certificates -* Secret values define in steps such as the `Deploy to IIS` and `Deploy to Tomcat` steps -* Sensitive values defined for sensitive step templates parameters +- Account credentials +- Feed credentials +- Git credentials +- Certificates +- Secret values define in steps such as the `Deploy to IIS` and `Deploy to Tomcat` steps +- Sensitive values defined for sensitive step templates parameters All these values must be manually reconfigured on the destination server. @@ -442,7 +442,7 @@ You must manually configure the [SMTP settings](https://octopus.com/docs/project You will increase the count of resources that impact licensing during the migration, including projects, tenants, and machines. -Contact [sales@octopus.com](sales@octopus.com) to discuss any impacts to licensing while performing a migration. +Contact [sales@octopus.com](mailto:sales@octopus.com) to discuss any impacts to licensing while performing a migration. ## FAQ @@ -473,4 +473,3 @@ A: No, Octoterra only supports the supported LTS versions of Octopus. Q: How do I fix the `unexpected token while parsing list: IDENT` error when applying Terraform modules. 
A: This is most likely caused by running an old version of Terraform. In particular, you will see this error if you rely on the version of Terraform bundled with Octopus (version 0.11.15) which is too old to apply the Terraform modules created by Octoterra. The Octopus logs capture the Terraform version used for the deployment and will display a message like `Your version of Terraform is out of date!` if using an old Terraform version. - diff --git a/src/pages/docs/deployments/kubernetes/automatically-track-third-party-helm-charts/index.md b/src/pages/docs/deployments/kubernetes/automatically-track-third-party-helm-charts/index.md index 5de6b851eb..23005818a4 100644 --- a/src/pages/docs/deployments/kubernetes/automatically-track-third-party-helm-charts/index.md +++ b/src/pages/docs/deployments/kubernetes/automatically-track-third-party-helm-charts/index.md @@ -1,7 +1,7 @@ --- layout: src/layouts/Redirect.astro title: Redirect -redirect: \docs\kubernetes\tutorials\automatically-track-third-party-helm-charts.md +redirect: /docs/kubernetes/tutorials/automatically-track-third-party-helm-charts pubDate: 2024-07-29 navSearch: false navSitemap: false diff --git a/src/pages/docs/deployments/nginx/configure-octopus-deploy-project.md b/src/pages/docs/deployments/nginx/configure-octopus-deploy-project.md index eedec581c6..cca5785d46 100644 --- a/src/pages/docs/deployments/nginx/configure-octopus-deploy-project.md +++ b/src/pages/docs/deployments/nginx/configure-octopus-deploy-project.md @@ -59,7 +59,7 @@ To configure NGINX to send traffic to your application you need to fill in a few | **Bindings** | Specify any number of HTTP/HTTPS bindings that should be added to the NGINX virtual server. | | | | **Locations** | Specify any number of locations that NGINX should test request URIs against to send traffic to your application. | | | -When defining **locations** you can configure NGINX to deliver files from the file system , or proxy requests to another server. 
For our sample application we want requests to `http:///` to deliver the `index.html` file from the `WWWRoot` folder of our ASP.NET Core project and requests to `http:///api/` to be proxied to our ASP.NET Core project running on [http://localhost:5000](http://localhost:5000). +When defining **locations** you can configure NGINX to deliver files from the file system , or proxy requests to another server. For our sample application we want requests to `http:///` to deliver the `index.html` file from the `WWWRoot` folder of our ASP.NET Core project and requests to `http:///api/` to be proxied to our ASP.NET Core project running on `http://localhost:5000`. :::figure ![](/docs/deployments/nginx/images/deployment_process_nginx_feature.png) diff --git a/src/pages/docs/deployments/node-js/node-on-linux.md b/src/pages/docs/deployments/node-js/node-on-linux.md index 096ca36747..b036499d34 100644 --- a/src/pages/docs/deployments/node-js/node-on-linux.md +++ b/src/pages/docs/deployments/node-js/node-on-linux.md @@ -107,7 +107,7 @@ npm run build npm start ``` -If the site runs correctly, when you navigate to [http://localhost:8081](http://localhost:8081) you should see a page with words that appear to be missing. These will be populated in the config files during the deployment. +If the site runs correctly, when you navigate to `http://localhost:8081` you should see a page with words that appear to be missing. These will be populated in the config files during the deployment. :::figure ![App with missing variables](/docs/deployments/node-js/images/missing-variables.png) diff --git a/src/pages/docs/getting-started/reference-architectures.md b/src/pages/docs/getting-started/reference-architectures.md index a6382dad84..c0ffe7696b 100644 --- a/src/pages/docs/getting-started/reference-architectures.md +++ b/src/pages/docs/getting-started/reference-architectures.md @@ -19,5 +19,5 @@ The reference architecture steps are typically run from a runbook. 
The runbook r ## Reference architecture steps -* [AWS EKS](reference-architectures/eks-reference-architecture) -* [Azure Web Apps](reference-architectures/webapp-reference-architecture) +- [AWS EKS](/docs/getting-started/reference-architectures/eks-reference-architecture) +- [Azure Web Apps](/docs/getting-started/reference-architectures/webapp-reference-architecture) diff --git a/src/pages/docs/infrastructure/workers/kubernetes-worker/automated-installation.md b/src/pages/docs/infrastructure/workers/kubernetes-worker/automated-installation.md index 834b8a328b..5361309683 100644 --- a/src/pages/docs/infrastructure/workers/kubernetes-worker/automated-installation.md +++ b/src/pages/docs/infrastructure/workers/kubernetes-worker/automated-installation.md @@ -8,28 +8,32 @@ navOrder: 50 --- ## Automated installation via Terraform + The Kubernetes Worker can be installed and managed using a combination of the [Helm chart >= v2.2.1](https://hub.docker.com/r/octopusdeploy/kubernetes-agent), [Octopus Deploy >= v0.30.0 Terraform provider](https://registry.terraform.io/providers/OctopusDeployLabs/octopusdeploy/latest) and/or [Helm Terraform provider](https://registry.terraform.io/providers/hashicorp/helm). ### Octopus Deploy & Helm -Using a combination of the Octopus Deploy and Helm providers you can completely manage the Kubernetes Worker via Terraform. + +Using a combination of the Octopus Deploy and Helm providers you can completely manage the Kubernetes Worker via Terraform. 
:::div{.info} + To ensure that the Kubernetes Worker is correctly installed in Octopus, certain criteria must hold for the following Terraform resource properties: -| **Kubernetes Worker resource** | | **Helm resource (chart value)** | -|----------|----------|----------| -| `octopusdeploy_kubernetes_agent_worker.name` | must be the same value as | `agent.name` | -| `octopusdeploy_kubernetes_agent_worker.uri` | must be the same value as | `agent.serverSubscriptionId` | -| `octopusdeploy_kubernetes_agent_worker.thumbprint` | is the thumbprint calculated from the certificate used in | `agent.certificate` | +| **Kubernetes Worker resource** | | **Helm resource (chart value)** | +| -------------------------------------------------- | --------------------------------------------------------- | ------------------------------- | +| `octopusdeploy_kubernetes_agent_worker.name` | must be the same value as | `agent.name` | +| `octopusdeploy_kubernetes_agent_worker.uri` | must be the same value as | `agent.serverSubscriptionId` | +| `octopusdeploy_kubernetes_agent_worker.thumbprint` | is the thumbprint calculated from the certificate used in | `agent.certificate` | + ::: :::div{.warning} Always specify the major version in the **version** property on the **helm_release** resource (e.g. `version = "2.*.*"`) to prevent Terraform from defaulting to the latest Helm chart version. This is important, as a newer major version of the Kubernetes Worker Helm chart could introduce breaking changes. -When upgrading to a new major version of the Kubernetes Worker, create a separate resource to ensure the Helm values match the updated schema. [Automatic upgrade support](../../../kubernetes/targets/kubernetes-agent/upgrading#automatic-updates-coming-in-20234) is expected in version 2023.4. +When upgrading to a new major version of the Kubernetes Worker, create a separate resource to ensure the Helm values match the updated schema. 
[Automatic upgrade support](/docs/kubernetes/targets/kubernetes-agent/upgrading#automatic-updates-coming-in-20234) is expected in version 2023.4. ::: -```hcl +```ruby terraform { required_providers { octopusdeploy = { @@ -147,11 +151,12 @@ resource "helm_release" "kubernetes_worker" { ``` ### Helm -The Kubernetes Worker can be installed using just the Helm provider alone. However, the associated worker that is created in Octopus cannot be managed solely using the Helm provider. This is because the Helm chart values relating to the worker are only used on initial installation. Any further modifications to them will not trigger an update to the worker unless you perform a complete reinstall of the worker. + +The Kubernetes Worker can be installed using just the Helm provider alone. However, the associated worker that is created in Octopus cannot be managed solely using the Helm provider. This is because the Helm chart values relating to the worker are only used on initial installation. Any further modifications to them will not trigger an update to the worker unless you perform a complete reinstall of the worker. If you don't intend to manage the Kubernetes Worker configuration through Terraform (choosing to handle it via the Octopus Portal or API instead), this option will be beneficial to you as it is simpler to set up. 
-```hcl +```ruby terraform { required_providers { helm = { @@ -222,4 +227,4 @@ resource "helm_release" "kubernetes_worker" { value = ["WorkerPools-1"] } } -``` \ No newline at end of file +``` diff --git a/src/pages/docs/infrastructure/workers/kubernetes-worker/index.md b/src/pages/docs/infrastructure/workers/kubernetes-worker/index.md index bbd286275b..cfa297626c 100644 --- a/src/pages/docs/infrastructure/workers/kubernetes-worker/index.md +++ b/src/pages/docs/infrastructure/workers/kubernetes-worker/index.md @@ -5,6 +5,7 @@ modDate: 2024-08-22 title: Kubernetes Worker navOrder: 10 --- + The Kubernetes Worker allows worker operations to be executed within a Kubernetes cluster in a scalable manner. This allows compute resources used during the execution of a Deployment process (or runbook) are released when the Deployment completes. @@ -19,12 +20,14 @@ The Octopus Web portal provides a wizard which constructs guides you through the which installs the Kubernetes Worker in your cluster. Once installed, the Kubernetes Worker functions as a standard Octopus worker: -* It must be included in 1 or more worker pools -* Supports deployments to any deployment target -* Will be kept up to date via machine health checks & updates -* Can execute operations in custom containers (as defined on the deployment step) + +- It must be included in 1 or more worker pools +- Supports deployments to any deployment target +- Will be kept up to date via machine health checks & updates +- Can execute operations in custom containers (as defined on the deployment step) ## Default Behavior + The web portal's [installation process](/docs/infrastructure/workers#installing-a-kubernetes-worker) installs a worker which will work for a majority of workloads. 
When the Kubernetes worker executes a deployment step, it executes the operation within a [worker-tools](https://hub.docker.com/r/octopusdeploy/worker-tools) container, @@ -34,16 +37,17 @@ If a step requires specific tooling, you are able to set the desired container o Worker honours this setting as per other worker types. ## Customizations + The behavior of the Kubernetes Worker can be modified through [Helm chart](https://github.com/OctopusDeploy/helm-charts/tree/main/charts/kubernetes-agent) `Values`. These values can be set during installation (by editing the Octopus Server supplied command line), or at any time via a Helm upgrade. Of note: -| Value | Purpose | -| --- |---------------------------------------------------------------------------| -| scriptPods.worker.image | Specifies the docker container image to be used when running an operation | -| scriptPods.resources.requests | Specifies the average cpu/memory usage required to execute an operation | +| Value | Purpose | +| ----------------------------- | ------------------------------------------------------------------------- | +| scriptPods.worker.image | Specifies the docker container image to be used when running an operation | +| scriptPods.resources.requests | Specifies the average cpu/memory usage required to execute an operation | If you are experiencing difficulties with your Kubernetes Cluster's autoscaling, modifying `scriptPods.resources.requests.*` may provide a solution. @@ -53,6 +57,7 @@ Too large (i.e. higher than actual usage) then the cluster will scale too early, pods pending for longer than necessary. ## Permissions + The Kubernetes Worker is limited to modifying its local namespace, preventing it from polluting the cluster at large. The Kubernetes Worker is permitted unfettered access to its local namespace, ensuring it is able to update itself, and @@ -61,9 +66,11 @@ create new pods for each requested operation. 
The Kubernetes Worker allows execution permissions to be overwritten in the same way as the [Kubernetes Agent](/docs/kubernetes/targets/kubernetes-agent/permissions). ## Limitations + Being securely hosted inside a kubernetes cluster comes with some limitations - the primary of which is the lack of `Docker`. Which means certain operations which are typically valid, may not be possible. Specifically: -* Creating an [inline execution container](../../projects/steps/execution-containers-for-workers#inline-execution-containers) -* Fetching docker images (when used as secondary packages) -* Arbitrary scripts which use docker \ No newline at end of file + +- Creating an [inline execution container](/docs/projects/steps/execution-containers-for-workers#inline-execution-containers) +- Fetching docker images (when used as secondary packages) +- Arbitrary scripts which use docker diff --git a/src/pages/docs/infrastructure/workers/kubernetes-worker/troubleshooting.md b/src/pages/docs/infrastructure/workers/kubernetes-worker/troubleshooting.md index 68feeee7c9..99a63b6963 100644 --- a/src/pages/docs/infrastructure/workers/kubernetes-worker/troubleshooting.md +++ b/src/pages/docs/infrastructure/workers/kubernetes-worker/troubleshooting.md @@ -9,4 +9,4 @@ description: How to troubleshoot common Kubernetes Worker issues navOrder: 60 --- -For troubleshooting common issues, please refer to the Kubernetes Agent [troubleshooting page](../../../kubernetes/targets/kubernetes-agent/troubleshooting), as the Agent and Worker are based on the same underlying technology. \ No newline at end of file +For troubleshooting common issues, please refer to the Kubernetes Agent [troubleshooting page](/docs/kubernetes/targets/kubernetes-agent/troubleshooting), as the Agent and Worker are based on the same underlying technology. 
diff --git a/src/pages/docs/kubernetes/targets/kubernetes-agent/automated-installation.md b/src/pages/docs/kubernetes/targets/kubernetes-agent/automated-installation.md index 8fd9e35e5c..2b42d73a20 100644 --- a/src/pages/docs/kubernetes/targets/kubernetes-agent/automated-installation.md +++ b/src/pages/docs/kubernetes/targets/kubernetes-agent/automated-installation.md @@ -8,28 +8,32 @@ navOrder: 40 --- ## Automated installation via Terraform + The Kubernetes Agent can be installed and managed using a combination of the Kubernetes Agent [Helm chart >= v2.2.1](https://hub.docker.com/r/octopusdeploy/kubernetes-agent), [Octopus Deploy >= v0.30.0 Terraform provider](https://registry.terraform.io/providers/OctopusDeployLabs/octopusdeploy/latest) and/or [Helm Terraform provider](https://registry.terraform.io/providers/hashicorp/helm). ### Octopus Deploy & Helm -Using a combination of the Octopus Deploy and Helm providers you can completely manage the Kubernetes Agent via Terraform. + +Using a combination of the Octopus Deploy and Helm providers you can completely manage the Kubernetes Agent via Terraform. 
:::div{.info} + To ensure that the Kubernetes Agent is correctly installed as a deployment target in Octopus, certain criteria must hold for the following Terraform resource properties: -| **Kubernetes Agent resource** | | **Helm resource (chart value)** | -|----------|----------|----------| -| `octopusdeploy_kubernetes_agent_deployment_target.name` | must be the same value as | `agent.name` | -| `octopusdeploy_kubernetes_agent_deployment_target.uri` | must be the same value as | `agent.serverSubscriptionId` | -| `octopusdeploy_kubernetes_agent_deployment_target.thumbprint` | is the thumbprint calculated from the certificate used in | `agent.certificate` | +| **Kubernetes Agent resource** | | **Helm resource (chart value)** | +| ------------------------------------------------------------- | --------------------------------------------------------- | ------------------------------- | +| `octopusdeploy_kubernetes_agent_deployment_target.name` | must be the same value as | `agent.name` | +| `octopusdeploy_kubernetes_agent_deployment_target.uri` | must be the same value as | `agent.serverSubscriptionId` | +| `octopusdeploy_kubernetes_agent_deployment_target.thumbprint` | is the thumbprint calculated from the certificate used in | `agent.certificate` | + ::: :::div{.warning} Always specify the major version in the **version** property on the **helm_release** resource (e.g. `version = "2.*.*"`) to prevent Terraform from defaulting to the latest Helm chart version. This is important, as a newer major version of the Agent Helm chart could introduce breaking changes. -When upgrading to a new major version of the Agent, create a separate resource to ensure the Helm values match the updated schema. [Automatic upgrade support](./upgrading#automatic-updates-coming-in-20234) is expected in version 2023.4. +When upgrading to a new major version of the Agent, create a separate resource to ensure the Helm values match the updated schema. 
[Automatic upgrade support](/docs/kubernetes/targets/kubernetes-agent/upgrading#automatic-updates-coming-in-20234) is expected in version 2023.4. ::: -```hcl +```ruby terraform { required_providers { octopusdeploy = { @@ -150,11 +154,12 @@ resource "helm_release" "octopus_agent" { ``` ### Helm -The Kubernetes Agent can be installed using just the Helm provider alone. However, the associated deployment target that is created in Octopus cannot be managed solely using the Helm provider. This is because the Helm chart values relating to the agent are only used on initial installation. Any further modifications to them will not trigger an update to the deployment target unless you perform a complete reinstall of the agent. + +The Kubernetes Agent can be installed using just the Helm provider alone. However, the associated deployment target that is created in Octopus cannot be managed solely using the Helm provider. This is because the Helm chart values relating to the agent are only used on initial installation. Any further modifications to them will not trigger an update to the deployment target unless you perform a complete reinstall of the agent. If you don't intend to manage the Kubernetes Agent configuration through Terraform (choosing to handle it via the Octopus Portal or API instead), this option will be beneficial to you as it is simpler to set up. 
-```hcl +```ruby terraform { required_providers { helm = { @@ -230,4 +235,4 @@ resource "helm_release" "octopus_agent" { value = ["Role-1"] } } -``` \ No newline at end of file +``` diff --git a/src/pages/docs/kubernetes/targets/kubernetes-agent/kubernetes-monitor.md b/src/pages/docs/kubernetes/targets/kubernetes-agent/kubernetes-monitor.md index 8a9c643213..91647e0e80 100644 --- a/src/pages/docs/kubernetes/targets/kubernetes-agent/kubernetes-monitor.md +++ b/src/pages/docs/kubernetes/targets/kubernetes-agent/kubernetes-monitor.md @@ -7,18 +7,17 @@ description: How to manage the Kubernetes monitor component navOrder: 25 --- -The Kubernetes monitor is a component that runs alongside Tentacle in the cluster. The Kubernetes monitor tracks the health of resources deployed to the cluster via Octopus Server. +The Kubernetes monitor is a component that runs alongside Tentacle in the cluster. The Kubernetes monitor tracks the health of resources deployed to the cluster via Octopus Server. ## How it works The Kubernetes monitor communicates with Octopus Server over gRPC on a new port (8443) to send back object information to Octopus Deploy. Communications are initiated by the Kubernetes monitor, so no endpoints on the Kubernetes cluster need to be exposed. -The monitor process utilizes the [Argo project gitops engine project](https://github.com/argoproj/gitops-engine) to internally keep track of the resources running on your cluster and react to changes as they occur. - +The monitor process uses the [Argo project gitops engine project](https://github.com/argoproj/gitops-engine) to internally keep track of the resources running on your cluster and react to changes as they occur. ## Required Kubernetes permissions -### Registration +### Registration During registration, the Kubernetes monitor manages a secret to store it's authentication information. 
@@ -36,8 +35,8 @@ To enabled this a `ClusterRole` is created for use by the Kubernetes monitor wit The Kubernetes monitor's upgrade process is directly tied to the Kubernetes agent. -See [how upgrades work for the Kubernetes agent here](./upgrading) +See [how upgrades work for the Kubernetes agent here](/docs/kubernetes/targets/kubernetes-agent/upgrading) ## Troubleshooting -See [Kubernetes Live Object Status troubleshooting](../../live-object-status/troubleshooting) \ No newline at end of file +See [Kubernetes Live Object Status troubleshooting](/docs/kubernetes/live-object-status/troubleshooting) diff --git a/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/index.md b/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/index.md index 0997da871f..6834d4e51b 100644 --- a/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/index.md +++ b/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/index.md @@ -20,23 +20,24 @@ The generated helm commands use the [`--atomic`](https://helm.sh/docs/helm/helm_ If the helm command fails, then it may print an error message containing context deadline exceeded This indicates that the timeout was exceeded and the Kubernetes resources did not correctly start. -To help diagnose these issues, the `kubectl` commands [`describe`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_describe/) and [`logs`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_logs/) can be used _while the helm command is executing_ to help debug any issues. +To help diagnose these issues, the `kubectl` commands [`describe`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_describe/) and [`logs`](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_logs/) can be used *while the helm command is executing* to help debug any issues. 
#### NFS CSI driver install command -``` +```bash kubectl describe pods -l app.kubernetes.io/name=csi-driver-nfs -n kube-system ``` #### Agent install command -``` +```bash # To get pod information kubectl describe pods -l app.kubernetes.io/name=octopus-agent -n [NAMESPACE] # To get pod logs kubectl logs -l app.kubernetes.io/name=octopus-agent -n [NAMESPACE] ``` -_Replace `[NAMESPACE]` with the namespace in the agent installation command_ + +Replace `[NAMESPACE]` with the namespace in the agent installation command. If the Agent install command fails with a timeout error, it could be that: @@ -47,6 +48,7 @@ If the Agent install command fails with a timeout error, it could be that: - (if using a custom Storage Class) the Storage Class name doesn't match #### Setting scriptPod Service Account annotations + To add an annotation to the Service Account for the `scriptPods`, use the following syntax ```bash @@ -61,9 +63,9 @@ To add an annotation to the Service Account for the `scriptPods`, use the follow ## Script Execution Issues -### `Unexpected Script Pod log line number, expected: expected-line-no, actual: actual-line-no` +### `Unexpected Script Pod log line number, expected: expected-line-no, actual: actual-line-no` -This error indicates that the logs from the script pods are incomplete or malformed. +This error indicates that the logs from the script pods are incomplete or malformed. When scripts are executed, any outputs or logs are stored in the script pod's container logs. The Tentacle pod then reads from the container logs to feed back to Octopus Server. 
@@ -138,7 +140,7 @@ In version `2024.3.11946` onwards and all `2024.4` versions, Octopus Server uses This means, that if your version of Octopus Server is trying to use that service account, but the installed agent is on version before the version it was added, you will receive an error like -``` +```text Operation returned an invalid status code 'Forbidden', response body {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"pods \"octopus-script-xxx\" is forbidden: error looking up service account octopus-agent-XXX/octopus-agent-auto-upgrader: serviceaccount \"octopus-agent-auto-upgrader\" not found","reason":"Forbidden","details":{"name":"octopus-script-xxx","kind":"pods"},"code":403} ``` @@ -166,6 +168,6 @@ Executing this command in a terminal connected to the Kubernetes cluster will re This error indicates that the agent was unable to complete the initial SSL handshake with the Octopus Server. -There are various reasons why this error may occur, but a likely cause is incompatibility with the SSL certificate configuration. Specifically, the agent **does not support SHA1RSA certificates when the Octopus Server is running on Windows Server 2012 R2**. If your setup matches this configuration and the inner exception in the error stack includes a message like `error:0A00042E:SSL routines::tlsv1 alert protocol version`, this likely indicates that the SSL connection issue is due to the certificate incompatibility. +There are various reasons why this error may occur, but a likely cause is incompatibility with the SSL certificate configuration. Specifically, the agent **does not support SHA1RSA certificates when the Octopus Server is running on Windows Server 2012 R2**. If your setup matches this configuration and the inner exception in the error stack includes a message like `error:0A00042E:SSL routines::tlsv1 alert protocol version`, this likely indicates that the SSL connection issue is due to the certificate incompatibility. 
-For detailed instructions on diagnosing and resolving this issue, please refer to the guide on this [page](troubleshooting/sha1-certificate-incompatibility). \ No newline at end of file +For detailed instructions on diagnosing and resolving this issue, please refer to the guide on this [page](/docs/kubernetes/targets/kubernetes-agent/troubleshooting/sha1-certificate-incompatibility). diff --git a/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/sha1-certificate-incompatibility.md b/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/sha1-certificate-incompatibility.md index 6e73050075..d56152e223 100644 --- a/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/sha1-certificate-incompatibility.md +++ b/src/pages/docs/kubernetes/targets/kubernetes-agent/troubleshooting/sha1-certificate-incompatibility.md @@ -8,17 +8,21 @@ navOrder: 71 --- ## Background -[Since 2017](../../../../security/cve/shattered-and-octopus-deploy), Octopus Server no longer supports SHA1 certificates due to their inherent security vulnerabilities. SHA1 is an outdated cryptographic hash algorithm that has been replaced by the more secure SHA256 standard, in line with industry best practices. +[Since 2017](/docs/security/cve/shattered-and-octopus-deploy), Octopus Server no longer supports SHA1 certificates due to their inherent security vulnerabilities. SHA1 is an outdated cryptographic hash algorithm that has been replaced by the more secure SHA256 standard, in line with industry best practices. ## Compatibility Issue: SHA1 on Windows Server 2012 R2 + If your Octopus Server is still using a SHA1 certificate and is running on Windows Server 2012 R2, you may encounter issues when trying to connect with the Kubernetes Agent. This is due to a known incompatibility between the agent and older Windows systems using SHA1 certificates. 
## Why This Happens + Windows Server 2012 R2 lacks support for certain modern cipher suites and hash algorithms required by the Kubernetes agent. Specifically, the agent fails during SSL negotiation because it cannot find the necessary SHA1RSA signature hash algorithm in the system's cryptographic libraries. This results in a failure to establish a secure connection between the Tentacle agent and the Octopus Server. ## Diagnosis + To confirm that the issue is caused by SHA1 compatibility, check each of the following signs: + 1. **Verify the OS**: Ensure the Octopus Server is running on Windows Server 2012 R2. 2. **Check the Certificate Type**: Confirm that the Octopus Server is using a SHA1 certificate. Verify that the Octopus Server is using a SHA1 certificate. The simplest method is to query the Octopus REST API endpoint `/api/certificates/certificate-global` and inspect the **SignatureAlgorithm** field. 3. **Enable Schannel Event Logging**: On the Windows Server, [enable Schannel event logging](https://learn.microsoft.com/en-us/troubleshoot/developer/webapps/iis/health-diagnostic-performance/enable-schannel-event-logging). Look for an error event stating: @@ -30,17 +34,21 @@ This error will occur when the agent attempts to connect to the server, typicall ::: ## Solutions + To resolve this issue and continue using the Kubernetes agent with your Octopus Server, you have two options, both of which are recommended regardless of the current issue: ### Option 1: Rotate the SSL certificate + Replace the current SHA1 certificate with a more secure **SHA256** certificate. This will ensure that the Kubernetes agent can successfully complete the SSL handshake with the server. #### Steps to rotate the certificate + 1. Generate a new self-signed certificate using the SHA256RSA algorithm. 2. Install the new certificate on your Octopus Server. 3. Update all the associated Tentacles to trust the new certificate. 
-Refer to this [documentation page](../../../../security/octopus-tentacle-communication/custom-certificates-with-octopus-server-and-tentacle) for detailed instructions on how to set up certificates with Octopus Server and Tentacle. +Refer to this [documentation page](/docs/security/octopus-tentacle-communication/custom-certificates-with-octopus-server-and-tentacle) for detailed instructions on how to set up certificates with Octopus Server and Tentacle. ### Option 2: Upgrade the Windows Server Version -Upgrade the Octopus server from Windows Server 2012 R2 to a newer version, such as **Windows Server 2016 or later**, which supports the necessary cryptographic standards. Since Windows Server 2012 R2 has already reached [end-of-support](https://azure.microsoft.com/en-au/updates/windows-server-2012r2-reaches-end-of-support) and no longer receives updates from Microsoft, it remains vulnerable to security risks. Moving to a supported version will not only ensure compatibility with modern SSL/TLS protocols but also protect your system by receiving ongoing security updates and enhancements. + +Upgrade the Octopus server from Windows Server 2012 R2 to a newer version, such as **Windows Server 2016 or later**, which supports the necessary cryptographic standards. Since Windows Server 2012 R2 has already reached [end-of-support](https://azure.microsoft.com/en-au/updates/windows-server-2012r2-reaches-end-of-support) and no longer receives updates from Microsoft, it remains vulnerable to security risks. Moving to a supported version will not only ensure compatibility with modern SSL/TLS protocols but also protect your system by receiving ongoing security updates and enhancements. 
diff --git a/src/pages/docs/kubernetes/targets/kubernetes-agent/upgrading.md b/src/pages/docs/kubernetes/targets/kubernetes-agent/upgrading.md
index 0ad7cbf3b9..90de19962e 100644
--- a/src/pages/docs/kubernetes/targets/kubernetes-agent/upgrading.md
+++ b/src/pages/docs/kubernetes/targets/kubernetes-agent/upgrading.md
@@ -13,24 +13,24 @@ The Kubernetes agent is automatically kept up to date by Octopus Server when run
 
 ## Disabling automatic upgrades
 
-Automatic upgrades can be disabled by updating the machine updates settings in your applied [machine policy](../../../infrastructure/machine-policies)
+Automatic upgrades can be disabled by updating the machine updates settings in your applied [machine policy](/docs/infrastructure/deployment-targets/machine-policies)
 
 ## V1
 
 Changes to the Kubernetes agent Helm Chart necessitated a breaking change.
 
-The version of a Kubernetes agent is found by going to **Infrastructure** then into **DeploymentTargets**; from there click on the
-**Kubernetes agent** of interest; on its **Connectivity** sub-page you will see 'Current Version'.
+The version of a Kubernetes agent is found by going to **Infrastructure** then into **DeploymentTargets**; from there click on the **Kubernetes agent** of interest; on its **Connectivity** sub-page you will see 'Current Version'.
 
 :::figure
 ![Kubernetes agent default namespace](/docs/infrastructure/deployment-targets/kubernetes/kubernetes-agent/kubernetes-agent-upgrade-version.png)
 :::
 
-Installed v1 instances will continue to operate as expected, however they will receive no further updates other than security updates. 
+Installed v1 instances will continue to operate as expected, however they will receive no further updates other than security updates.
+
 While you may continue to use v1 of the helm-chart, it is highly recommended to perform an upgrade to v2 so that you receive ongoing functional and security updates.
As of Octopus Server 2024.4, version 1 Helm charts can be automatically upgraded to version 2 without manual intervention. For older versions of Octopus Server you can manually upgrade a v1 instance following the guide in the Kubernetes agent [documentation](https://github.com/OctopusDeploy/helm-charts/blob/main/charts/kubernetes-agent/migrations). -Alternatively, existing v1 Kubernetes agents can be deleted from your server instance, and recreated as v2 agents via the installation workflow available in Octopus Server. \ No newline at end of file +Alternatively, existing v1 Kubernetes agents can be deleted from your server instance, and recreated as v2 agents via the installation workflow available in Octopus Server. diff --git a/src/pages/docs/octopus-ai-assistant/cookbook/index.md b/src/pages/docs/octopus-ai-assistant/cookbook/index.md index 1a8dc68221..72765b5a6f 100644 --- a/src/pages/docs/octopus-ai-assistant/cookbook/index.md +++ b/src/pages/docs/octopus-ai-assistant/cookbook/index.md @@ -13,30 +13,29 @@ This cookbook includes ready-to-use prompts that help automate and optimize your ## Available Recipes -- [Analyze step template usage](./cookbook/analyze-step-template-usage) -- [Audit environment naming and counts](./cookbook/audit-environment-naming-and-counts) -- [Audit PCI deployments](./cookbook/audit-pci-deployments) -- [Audit target role distribution](./cookbook/audit-target-role-distribution) -- [Check retention policy consistency](./cookbook/check-retention-policy-consistency) -- [Create a .NET Azure App deployment process](./cookbook/create-a-net-azure-app-deployment-process) -- [Detect overlapping variable names](./cookbook/detect-overlapping-variable-names) -- [Detect unused variables](./cookbook/detect-unused-variables) -- [Evaluate deployment frequency](./cookbook/evaluate-deployment-frequency) -- [Fix variable binding errors](./cookbook/fix-variable-binding-errors) -- [Generate deployment rollback 
plan](./cookbook/generate-deployment-rollback-plan) -- [Improve multi-tenant deployments](./cookbook/improve-multi-tenant-deployments) -- [Investigate production deployment failure](./cookbook/investigate-production-deployment-failure) -- [Kubernetes deployment pipeline](./cookbook/kubernetes-deployment-pipeline) -- [List failed deployments](./cookbook/list-failed-deployments) -- [List perpetual API keys](./cookbook/list-perpetual-api-keys) -- [Recommend variable scoping](./cookbook/recommend-variable-scoping) -- [Report runbook scheduling](./cookbook/report-runbook-scheduling) -- [Report skipped steps](./cookbook/report-skipped-steps) -- [Resolve rolling deployment timeouts](./cookbook/resolve-rolling-deployment-timeouts) -- [Restart Windows services Runbook](./cookbook/restart-windows-services-runbook) -- [Review runbook usage](./cookbook/review-runbook-usage) -- [Security best practices check](./cookbook/security-best-practices-check) -- [Speed up Lifecycle phases](./cookbook/speed-up-lifecycle-phases) -- [Summarize Ops Runbooks](./cookbook/summarize-ops-runbooks) -- [Summarize tag set coverage](./cookbook/summarize-tag-set-coverage) -- [Summarize worker pool health](./cookbook/summarize-worker-pool-health) +- [Analyze step template usage](/docs/octopus-ai-assistant/cookbook/analyze-step-template-usage) +- [Audit environment naming and counts](/docs/octopus-ai-assistant/cookbook/audit-environment-naming-and-counts) +- [Audit PCI deployments](/docs/octopus-ai-assistant/cookbook/audit-pci-deployments) +- [Audit target role distribution](/docs/octopus-ai-assistant/cookbook/audit-target-role-distribution) +- [Check retention policy consistency](/docs/octopus-ai-assistant/cookbook/check-retention-policy-consistency) +- [Create a .NET Azure App deployment process](/docs/octopus-ai-assistant/cookbook/create-a-net-azure-app-deployment-process) +- [Detect overlapping variable names](/docs/octopus-ai-assistant/cookbook/detect-overlapping-variable-names) +- [Detect unused 
variables](/docs/octopus-ai-assistant/cookbook/detect-unused-variables) +- [Evaluate deployment frequency](/docs/octopus-ai-assistant/cookbook/evaluate-deployment-frequency) +- [Fix variable binding errors](/docs/octopus-ai-assistant/cookbook/fix-variable-binding-errors) +- [Generate deployment rollback plan](/docs/octopus-ai-assistant/cookbook/generate-deployment-rollback-plan) +- [Improve multi-tenant deployments](/docs/octopus-ai-assistant/cookbook/improve-multi-tenant-deployments) +- [Investigate production deployment failure](/docs/octopus-ai-assistant/cookbook/investigate-production-deployment-failure) +- [Kubernetes deployment pipeline](/docs/octopus-ai-assistant/cookbook/kubernetes-deployment-pipeline) +- [List failed deployments](/docs/octopus-ai-assistant/cookbook/list-failed-deployments) +- [Recommend variable scoping](/docs/octopus-ai-assistant/cookbook/recommend-variable-scoping) +- [Report runbook scheduling](/docs/octopus-ai-assistant/cookbook/report-runbook-scheduling) +- [Report skipped steps](/docs/octopus-ai-assistant/cookbook/report-skipped-steps) +- [Resolve rolling deployment timeouts](/docs/octopus-ai-assistant/cookbook/resolve-rolling-deployment-timeouts) +- [Restart Windows services Runbook](/docs/octopus-ai-assistant/cookbook/restart-windows-services-runbook) +- [Review runbook usage](/docs/octopus-ai-assistant/cookbook/review-runbook-usage) +- [Security best practices check](/docs/octopus-ai-assistant/cookbook/security-best-practices-check) +- [Speed up Lifecycle phases](/docs/octopus-ai-assistant/cookbook/speed-up-lifecycle-phases) +- [Summarize Ops Runbooks](/docs/octopus-ai-assistant/cookbook/summarize-ops-runbooks) +- [Summarize tag set coverage](/docs/octopus-ai-assistant/cookbook/summarize-tag-set-coverage) +- [Summarize worker pool health](/docs/octopus-ai-assistant/cookbook/summarize-worker-pool-health) diff --git a/src/pages/docs/platform-engineering/enterprise-patterns.md 
b/src/pages/docs/platform-engineering/enterprise-patterns.md index bdc25560a7..0f6d0dc2fa 100644 --- a/src/pages/docs/platform-engineering/enterprise-patterns.md +++ b/src/pages/docs/platform-engineering/enterprise-patterns.md @@ -33,14 +33,14 @@ This pattern is very easy to implement, as it often involves little more than cr However, spaces do have some limitations. Because spaces belong to a single Octopus installation, and Octopus installations need a low latency connection to the database, spaces do not let you co-locate Octopus with geographically dispersed teams. Plus, all tasks initiated by spaces use a shared task queue. When projects in a space queue many tasks, other spaces have to wait for their deployments to be processed. This is commonly known as the "noisy neighbor" problem. -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | ✓ | -| Task execution guarantees for business unit/application | ✕ | -| Shared authentication settings | ✓ | -| Synchronized projects, runbooks, dashboards etc | ✕ | -| Supports geographically disperse business units | ✕ | -| Robust RBAC support | ✓ | +| Feature | Solves | +| ------------------------------------------------------- | ------ | +| Independent projects, runbooks, dashboards etc | ✓ | +| Task execution guarantees for business unit/application | ✕ | +| Shared authentication settings | ✓ | +| Synchronized projects, runbooks, dashboards etc | ✕ | +| Supports geographically disperse business units | ✕ | +| Robust RBAC support | ✓ | ## Independent instance per business unit/region @@ -58,14 +58,14 @@ Like the independent space pattern, the independent instance pattern is easy to - Audit log streaming - And more -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | ✓ | -| Task execution guarantees for team/customer | ✓ | -| Shared authentication settings | ✕ | -| Synchronized projects, runbooks, dashboards etc | ✕ | -| Supports geographically disperse 
teams/customers | ✓ | -| Robust RBAC support | ✓ | +| Feature | Solves | +| ------------------------------------------------ | ------ | +| Independent projects, runbooks, dashboards etc | ✓ | +| Task execution guarantees for team/customer | ✓ | +| Shared authentication settings | ✕ | +| Synchronized projects, runbooks, dashboards etc | ✕ | +| Supports geographically disperse teams/customers | ✓ | +| Robust RBAC support | ✓ | ## Tenant per customer @@ -79,14 +79,14 @@ However, the RBAC controls around tenants are not expressive enough to isolate c You can find more information about [tenants in our documentation](https://octopus.com/docs/tenants). -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | ✕ | -| Task execution guarantees for team/customer | ✕ | -| Shared authentication settings | ✓| -| Synchronized projects, runbooks, dashboards etc |✓ | -| Supports geographically disperse teams/customers | ✕ | -| Robust RBAC support |✕ | +| Feature | Solves | +| ------------------------------------------------ | ------ | +| Independent projects, runbooks, dashboards etc | ✕ | +| Task execution guarantees for team/customer | ✕ | +| Shared authentication settings | ✓ | +| Synchronized projects, runbooks, dashboards etc | ✓ | +| Supports geographically disperse teams/customers | ✕ | +| Robust RBAC support | ✕ | ## Managed space per business unit/application @@ -96,15 +96,14 @@ This solution represents a typical "hub and spoke", or [platform engineering](ht A tenant represents each space in the management space, also known as the upstream space. And deployment projects or runbooks configure the managed spaces, also known as downstream spaces. You can use the Terraform provider or raw API scripting to push configuration for shared resources, like template projects, to the managed spaces. 
- -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | ✓ | -| Task execution guarantees for team/customer | ✕ | -| Shared authentication settings | ✓| -| Synchronized projects, runbooks, dashboards etc |✓ | -| Supports geographically disperse teams/customers | ✕ | -| Robust RBAC support |✓ | +| Feature | Solves | +| ------------------------------------------------ | ------ | +| Independent projects, runbooks, dashboards etc | ✓ | +| Task execution guarantees for team/customer | ✕ | +| Shared authentication settings | ✓ | +| Synchronized projects, runbooks, dashboards etc | ✓ | +| Supports geographically disperse teams/customers | ✕ | +| Robust RBAC support | ✓ | ## Managed instance per business unit/region @@ -114,14 +113,14 @@ Like the "managed space per business unit/application" pattern, this represents A tenant represents each managed Octopus instance in the management (or upstream) space. And deployment projects or runbooks configure the managed (or downstream) Octopus instances. You can use the Terraform provider or raw API scripting to push configuration for shared resources, like template projects, to the managed instances. -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | ✓ | -| Task execution guarantees for team/customer | ✓ | -| Shared authentication settings | ✓| -| Synchronized projects, runbooks, dashboards etc |✓ | -| Supports geographically disperse teams/customers | ✓ | -| Robust RBAC support |✓ | +| Feature | Solves | +| ------------------------------------------------ | ------ | +| Independent projects, runbooks, dashboards etc | ✓ | +| Task execution guarantees for team/customer | ✓ | +| Shared authentication settings | ✓ | +| Synchronized projects, runbooks, dashboards etc | ✓ | +| Supports geographically disperse teams/customers | ✓ | +| Robust RBAC support | ✓ | ## Facade space per customer @@ -131,14 +130,14 @@ This pattern provides each customer with their own space. 
Each customer space ha This approach has the benefit of only requiring you to create very simple projects in each managed space. A tenant represents each customer in the management space, taking advantage of the built-in features of tenants. Customers log into their own space, providing a high degree of security. -| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | not required| -| Task execution guarantees for team/customer | ✕ | -| Shared authentication settings | ✓| -| Synchronized projects, runbooks, dashboards etc | not required | -| Supports geographically disperse teams/customers | ✕ | -| Robust RBAC support |✓ | +| Feature | Solves | +| ------------------------------------------------ | ------------ | +| Independent projects, runbooks, dashboards etc | not required | +| Task execution guarantees for team/customer | ✕ | +| Shared authentication settings | ✓ | +| Synchronized projects, runbooks, dashboards etc | not required | +| Supports geographically disperse teams/customers | ✕ | +| Robust RBAC support | ✓ | ## Custom UI over Octopus Installation @@ -152,15 +151,14 @@ This solution also allows orchestrating deployments across multiple Octopus inst You can find more information about the [Octopus REST API in our documentation](https://octopus.com/docs/octopus-rest-api). 
-| Feature | Solves |
-|---|---|
-| Independent projects, runbooks, dashboards etc | ✓|
-| Task execution guarantees for team/customer | ✓ |
-| Shared authentication settings | ✓|
-| Synchronized projects, runbooks, dashboards etc | not required |
-| Supports geographically disperse teams/customers | ✓ |
-| Robust RBAC support |✓ |
-
+| Feature                                          | Solves       |
+| ------------------------------------------------ | ------------ |
+| Independent projects, runbooks, dashboards etc   | ✓            |
+| Task execution guarantees for team/customer      | ✓            |
+| Shared authentication settings                   | ✓            |
+| Synchronized projects, runbooks, dashboards etc  | not required |
+| Supports geographically disperse teams/customers | ✓            |
+| Robust RBAC support                              | ✓            |
 
 ## Managed instance per environment
 
@@ -175,14 +173,14 @@ Unlike the previous patterns, this pattern is less concerned with providing the
 
 This pattern may also be used to isolate Octopus installations for compliance reasons, such as PCI. Having a separate Octopus installation for the production environment makes it easy to demonstrate access controls and other security measures when undertaking security audits.
-| Feature | Solves | -|---|---| -| Independent projects, runbooks, dashboards etc | N/A | -| Task execution guarantees for team/customer | N/A | -| Shared authentication settings | N/A| -| Synchronized projects, runbooks, dashboards etc | ✓ | -| Supports geographically disperse teams/customers | N/A | -| Robust RBAC support | N/A | +| Feature | Solves | +| ------------------------------------------------ | ------ | +| Independent projects, runbooks, dashboards etc | N/A | +| Task execution guarantees for team/customer | N/A | +| Shared authentication settings | N/A | +| Synchronized projects, runbooks, dashboards etc | ✓ | +| Supports geographically disperse teams/customers | N/A | +| Robust RBAC support | N/A | ## Conclusion @@ -197,7 +195,7 @@ Some of these patterns require little effort to deploy or are deeply embedded in The "custom UI over Octopus installation" is an advanced pattern that requires a dedicated development team to build a web application that consumes the Octopus REST API. You can refer to the [API documentation](https://octopus.com/docs/octopus-rest-api) for more information if you're interested in this pattern. 
-The following patterns are implemented using the strategies documented in the [managing space resources](managing-space-resources) and [managing project resources](managing-project-resources) sections: +The following patterns are implemented using the strategies documented in the [managing space resources](/docs/platform-engineering/managing-space-resources) and [managing project resources](/docs/platform-engineering/managing-project-resources) sections: - Managed space per business unit/application - Managed instance per business unit/region diff --git a/src/pages/docs/platform-engineering/listing-downstream-drift.md b/src/pages/docs/platform-engineering/listing-downstream-drift.md index 5dd13ab13c..e2dc8c55ea 100644 --- a/src/pages/docs/platform-engineering/listing-downstream-drift.md +++ b/src/pages/docs/platform-engineering/listing-downstream-drift.md @@ -7,7 +7,7 @@ description: Learn how to scan downstream CaC repos for drift navOrder: 8 --- -When upstream and downstream projects are [configured with CaC and backed by forked repositories](forking-git-repos) it becomes possible to track drift. +When upstream and downstream projects are [configured with CaC and backed by forked repositories](/docs/platform-engineering/forking-git-repos) it becomes possible to track drift. The `Octopus - Find CaC Updates` steps detect drift by: diff --git a/src/pages/docs/platform-engineering/managing-project-resources.md b/src/pages/docs/platform-engineering/managing-project-resources.md index 2ad8abb55a..b3d4b7852a 100644 --- a/src/pages/docs/platform-engineering/managing-project-resources.md +++ b/src/pages/docs/platform-engineering/managing-project-resources.md @@ -18,22 +18,22 @@ Space level resources are shared by projects and do not tend to change as freque Managed, or downstream, spaces (i.e. 
spaces with centrally managed resources) are implemented by deploying space and project level resources as separate processes: -* Space level resources are deployed first to support one or more projects -* Project level resources are deployed second referencing the space level resources +- Space level resources are deployed first to support one or more projects +- Project level resources are deployed second referencing the space level resources There are two ways to manage project level resources: -* Define database backed projects, complete with all deployment steps, with Terraform -* Define the configuration of a [Config-as-code](/docs/projects/version-control) (CaC) project with Terraform, while deferring the configuration of CaC managed settings like the deployment process, non-secret variables, and some project settings to configuration stored in Git +- Define database backed projects, complete with all deployment steps, with Terraform +- Define the configuration of a [Config-as-code](/docs/projects/version-control) (CaC) project with Terraform, while deferring the configuration of CaC managed settings like the deployment process, non-secret variables, and some project settings to configuration stored in Git -Defining database backed projects in Terraform is useful for [centralized responsibility](levels-of-responsibility) projects where the customer has little or no ability to modify the project, or [customer responsibility](levels-of-responsibility) projects where projects are not centrally updated after they are created. +Defining database backed projects in Terraform is useful for [centralized responsibility](/docs/platform-engineering/levels-of-responsibility) projects where the customer has little or no ability to modify the project, or [customer responsibility](/docs/platform-engineering/levels-of-responsibility) projects where projects are not centrally updated after they are created. 
-Defining CaC projects is useful for [shared responsibility](levels-of-responsibility) projects where deployment processes can be modified by customers and the platform team, with differences reconciled with Git merges. +Defining CaC projects is useful for [shared responsibility](/docs/platform-engineering/levels-of-responsibility) projects where deployment processes can be modified by customers and the platform team, with differences reconciled with Git merges. Project level resources can be defined in a Terraform module in two ways: -* Write the module by hand -* Serialize an existing project to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) +- Write the module by hand +- Serialize an existing project to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) ## Writing by hand @@ -167,9 +167,9 @@ The steps documented below are best run on the `Hosted Ubuntu` worker pools for Executing the runbook will: -* Export the project to a Terraform module -* Zip the resulting files -* Upload the zip file to the built-in feed of the current space or the space defined in the `Octopus Upload Space ID` field +- Export the project to a Terraform module +- Zip the resulting files +- Upload the zip file to the built-in feed of the current space or the space defined in the `Octopus Upload Space ID` field The zip file has one directory called `space_population` which contains a Terraform module to populate a space with the exported resources. @@ -201,4 +201,4 @@ Executing the runbook will create a new project in an existing space. Any space ### Updating project resources -The runbooks `__ 1. Serialize Project` and `__ 2. Deploy Project` can be run as needed to serialize any changes to the upstream project and deploy the changes to downstream projects. 
The Terraform module zip file pushed to the built-in feed is versioned with a unique value each time, so you can also revert changes by redeploying an older package. In this way you can use Octopus to deploy Octopus projects using the same processes you use Octopus to deploy applications. \ No newline at end of file +The runbooks `__ 1. Serialize Project` and `__ 2. Deploy Project` can be run as needed to serialize any changes to the upstream project and deploy the changes to downstream projects. The Terraform module zip file pushed to the built-in feed is versioned with a unique value each time, so you can also revert changes by redeploying an older package. In this way you can use Octopus to deploy Octopus projects using the same processes you use Octopus to deploy applications. diff --git a/src/pages/docs/platform-engineering/managing-runbook-resources.md b/src/pages/docs/platform-engineering/managing-runbook-resources.md index 94aaec8449..f891f31960 100644 --- a/src/pages/docs/platform-engineering/managing-runbook-resources.md +++ b/src/pages/docs/platform-engineering/managing-runbook-resources.md @@ -21,12 +21,12 @@ Runbooks are not managed by Config-as-code. Runbooks can be defined in a Terraform module in two ways: -* Write the module by hand -* Serialize an existing project to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) +- Write the module by hand +- Serialize an existing project to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) ## Writing by hand -The process of defining a runbook in Terraform is much the same as defining a project. Both a runbook and a project have the concept of a deployment process that defines the steps to be run. See the [Managing project resources](managing-project-resources) section for more information on defining steps in Terraform by hand. 
+The process of defining a runbook in Terraform is much the same as defining a project. Both a runbook and a project have the concept of a deployment process that defines the steps to be run. See the [Managing project resources](/docs/platform-engineering/managing-project-resources) section for more information on defining steps in Terraform by hand. ## Serializing with octoterra @@ -55,9 +55,9 @@ The steps documented below are best run on the `Hosted Ubuntu` worker pools for Executing the runbook will: -* Export the runbook to a Terraform module -* Zip the resulting files -* Upload the zip file to the built-in feed of the current space or the space defined in the `Octopus Upload Space ID` field +- Export the runbook to a Terraform module +- Zip the resulting files +- Upload the zip file to the built-in feed of the current space or the space defined in the `Octopus Upload Space ID` field The zip file has one directory called `space_population` which contains a Terraform module to populate a space with the exported resources. @@ -69,7 +69,7 @@ Many of the exported resources expose values, like resource names, as Terraform The exported module defines only the runbook and the runbook deployment process. It does not define other project level resources like project variables or variable sets. Any project that the exported runbook is added to is expected to define all the variables referenced by the runbook. -Any project level variables required by the runbook can be defined as Terraform resources and deployed alongside the exported runbook module. The instructions documented in the [Managing project resources](managing-project-resources) section can be used to export a project to a Terraform module. The project level variables can be copied from the exported project module and placed in their own module as needed. +Any project level variables required by the runbook can be defined as Terraform resources and deployed alongside the exported runbook module. 
The instructions documented in the [Managing project resources](/docs/platform-engineering/managing-project-resources) section can be used to export a project to a Terraform module. The project level variables can be copied from the exported project module and placed in their own module as needed. ## Importing a runbook @@ -84,9 +84,9 @@ The following steps create a runbook in an existing project with the Terraform m 5. Set the `Octopus API Key` field to the [API key](/docs/octopus-rest-api/how-to-create-an-api-key) used when accessing the instance defined in the `Octopus Server URL` field. 6. Set the `Octopus Space ID` field to the ID of an existing space where the project will be created. 7. Set the `Octopus Project Name` field to the name of the project to deploy the runbook into. - 7. Set the `Terraform Additional Apply Params` field to a list of additional arguments to pass to the `terraform apply` command. This field is typically used to override the name of the runbook e.g. `"-var=runbook_eks_octopub_audits____describe_pods_name=The New Runbook Name"`. Leave this field blank if you do not wish to customize the deployed runbook. - 8. Set the `Terraform Additional Init Params` field to a list of additional arguments to pass to the `terraform init` command. Leave this field blank unless you have a specific reason to pass an argument to Terraform. - 9. Each `Octopus - Add Runbook to Project` step exposes values relating to their specific Terraform backend that must be configured. For example, the `Octopus - Octopus - Add Runbook to Project (S3 Backend)` step exposes fields to configure the S3 bucket, key, and region where the Terraform state is saved. Other steps have similar fields. + 8. Set the `Terraform Additional Apply Params` field to a list of additional arguments to pass to the `terraform apply` command. This field is typically used to override the name of the runbook e.g. `"-var=runbook_eks_octopub_audits____describe_pods_name=The New Runbook Name"`. 
Leave this field blank if you do not wish to customize the deployed runbook. + 9. Set the `Terraform Additional Init Params` field to a list of additional arguments to pass to the `terraform init` command. Leave this field blank unless you have a specific reason to pass an argument to Terraform. + 10. Each `Octopus - Add Runbook to Project` step exposes values relating to their specific Terraform backend that must be configured. For example, the `Octopus - Octopus - Add Runbook to Project (S3 Backend)` step exposes fields to configure the S3 bucket, key, and region where the Terraform state is saved. Other steps have similar fields. Typically, downstream spaces are represented by tenants in the upstream space. For example, the space called `Acme` is represented by a tenant wth the same name. Configuring the `__ Deploy Runbook` runbook to run against a tenant allows you to manage the creation and updates of downstream projects with a typical tenant based deployment process. @@ -96,4 +96,4 @@ Executing the runbook will create a new runbook in an existing project. Any spac ### Updating project resources -The runbooks `__ Serialize Runbook` and `__ Deploy Runbook` can be run as needed to serialize any changes to the upstream runbook and deploy the changes to downstream runbooks. The Terraform module zip file pushed to the built-in feed is versioned with a unique value each time, so you can also revert changes by redeploying an older package. In this way, you can use Octopus to deploy Octopus runbooks using the same processes you use Octopus to deploy applications. \ No newline at end of file +The runbooks `__ Serialize Runbook` and `__ Deploy Runbook` can be run as needed to serialize any changes to the upstream runbook and deploy the changes to downstream runbooks. The Terraform module zip file pushed to the built-in feed is versioned with a unique value each time, so you can also revert changes by redeploying an older package. 
In this way, you can use Octopus to deploy Octopus runbooks using the same processes you use Octopus to deploy applications. diff --git a/src/pages/docs/platform-engineering/managing-space-resources.md b/src/pages/docs/platform-engineering/managing-space-resources.md index 297aeb0003..fa6c8ad24c 100644 --- a/src/pages/docs/platform-engineering/managing-space-resources.md +++ b/src/pages/docs/platform-engineering/managing-space-resources.md @@ -18,8 +18,8 @@ Space level resources are shared by projects and do not tend to change as freque Managed, or downstream, spaces (i.e. spaces with centrally managed resources) are implemented by deploying space and project level resources as separate processes: -* Space level resources are deployed first to support one or more projects -* Project level resources are deployed second referencing the space level resources +- Space level resources are deployed first to support one or more projects +- Project level resources are deployed second referencing the space level resources Space level resources are best managed with the [Octopus Terraform provider](https://registry.terraform.io/providers/OctopusDeployLabs/octopusdeploy/latest/docs). @@ -29,8 +29,8 @@ Space level resources are best managed with the [Octopus Terraform provider](htt Space level resources can be defined in a Terraform module in two ways: -* Write the module by hand -* Serialize an existing space to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) +- Write the module by hand +- Serialize an existing space to a Terraform module with [octoterra](https://github.com/OctopusSolutionsEngineering/OctopusTerraformExport) ## Writing by hand @@ -69,13 +69,14 @@ The following process serializes a space to a Terraform module: Executing the runbook will: -* Export space level resources (i.e. 
everything but projects) to a Terraform module -* Zip the resulting Terraform module files into a package named after the current space -* Upload the zip file to the built-in feed of the current space, or the space defined in the `Octopus Upload Space ID` field +- Export space level resources (i.e. everything but projects) to a Terraform module +- Zip the resulting Terraform module files into a package named after the current space +- Upload the zip file to the built-in feed of the current space, or the space defined in the `Octopus Upload Space ID` field The package has two directories: -* `space_creation`, which contains a Terraform module to create a new space -* `space_population`, which contains a Terraform module to populate a space with the exported resources. + +- `space_creation`, which contains a Terraform module to create a new space +- `space_population`, which contains a Terraform module to populate a space with the exported resources. :::div{.hint} Many of the exported resources expose values, like resource names, as Terraform variables with default values. You can override these variables when applying the module to customize the resources, or leave the Terraform variables with their default value to recreate the resources with their original names. diff --git a/src/pages/docs/platform-engineering/merging-downstream.md b/src/pages/docs/platform-engineering/merging-downstream.md index 8a9c55c922..bf02f2732c 100644 --- a/src/pages/docs/platform-engineering/merging-downstream.md +++ b/src/pages/docs/platform-engineering/merging-downstream.md @@ -7,7 +7,7 @@ description: Learn how to merge changes to downstream repos navOrder: 9 --- -When upstream and downstream projects are [configured with CaC and backed by forked repositories](forking-git-repos) it becomes possible to merge changes from upstream to downstream repositories. 
+When upstream and downstream projects are [configured with CaC and backed by forked repositories](/docs/platform-engineering/forking-git-repos) it becomes possible to merge changes from upstream to downstream repositories. The `Octopus - Merge CaC Updates` steps merges changes by: @@ -34,4 +34,4 @@ The `Octopus - Merge CaC Updates` steps are typically defined in a runbook attac 9. Set the `Git Template Repo` field to the Git repository hosting the upstream project. 10. Each `Octopus - Merge CaC Updates` step then defines additional fields related to the specific Terraform backend. For example, the `Octopus - Merge CaC Updates (S3 Backend)` step has fields for AWS credentials, region, bucket, and key. -Executing the runbook will merge upstream changes into downstream repositories or print instructions on manually resolving merge conflicts in the verbose logs. \ No newline at end of file +Executing the runbook will merge upstream changes into downstream repositories or print instructions on manually resolving merge conflicts in the verbose logs. diff --git a/src/pages/docs/platform-engineering/validating-cac-prs.md b/src/pages/docs/platform-engineering/validating-cac-prs.md index 894beb3c36..b791b295a3 100644 --- a/src/pages/docs/platform-engineering/validating-cac-prs.md +++ b/src/pages/docs/platform-engineering/validating-cac-prs.md @@ -7,7 +7,7 @@ description: Learn how to automatically validate pull requests in a CaC Git repo navOrder: 10 --- -One of the challenges when implementing the [shared responsibility (or eventual consistency) model](levels-of-responsibility) is the potential for complex conflicts to be introduced to the downstream repositories. Without any controls on what changes can be made to a downstream project, it may become impractical to continue to push changes downstream. 
+One of the challenges when implementing the [shared responsibility (or eventual consistency) model](/docs/platform-engineering/levels-of-responsibility) is the potential for complex conflicts to be introduced to the downstream repositories. Without any controls on what changes can be made to a downstream project, it may become impractical to continue to push changes downstream. One way to constrain the changes introduced to downstream CaC Git repositories is to automatically validate changes during a pull request (PR). This allows the platform team to introduce minimum requirements that all downstream CaC projects must adhere to while also allowing internal customers to customize their projects. @@ -261,4 +261,4 @@ This example uses the [`lodash`](https://lodash.com/) library to clone the wrapp console.log(error.matcherResult.message) process.exit(1) } -``` \ No newline at end of file +``` diff --git a/src/pages/docs/projects/project-triggers/external-feed-triggers.md b/src/pages/docs/projects/project-triggers/external-feed-triggers.md index c1d7c98c62..251817599f 100644 --- a/src/pages/docs/projects/project-triggers/external-feed-triggers.md +++ b/src/pages/docs/projects/project-triggers/external-feed-triggers.md @@ -21,7 +21,7 @@ The details of these container images and Helm Charts are already known in Octop Create releases when any referenced images used in your Helm charts are updated. -- [Tracking third party Helm charts](/docs/deployments/kubernetes/automatically-track-third-party-helm-charts) +- [Tracking third party Helm charts](/docs/kubernetes/tutorials/automatically-track-third-party-helm-charts) Create releases whenever a third party releases a new Helm chart. 
diff --git a/src/pages/docs/runbooks/runbook-examples/routine/restarting-tomcat.md b/src/pages/docs/runbooks/runbook-examples/routine/restarting-tomcat.md index f12ba06704..316c903aa7 100644 --- a/src/pages/docs/runbooks/runbook-examples/routine/restarting-tomcat.md +++ b/src/pages/docs/runbooks/runbook-examples/routine/restarting-tomcat.md @@ -22,12 +22,13 @@ To create a runbook to restart Tomcat: | Parameter | Description | Example | | ------------- | ------------- | ------------- | -| Tomcat Manage URL | URL of the Tomcat Manager | http://localhost:8080/manager | -| Management user | Name of the management account | tomcat | -| Management password | Password for the management account | MySecretPassword!!! | -| Context path | The relative URL to your application | /myapp | -| Deployment version | Version number of your application | 1.0.0.1 | +| Tomcat Manage URL | URL of the Tomcat Manager | `http://localhost:8080/manager` | +| Management user | Name of the management account | `tomcat` | +| Management password | Password for the management account | `MySecretPassword!!!` | +| Context path | The relative URL to your application | `/my-app` | +| Deployment version | Version number of your application | `1.0.0.1` | The last option under `Advanced Options` is a radio button with two options: + - Leave the application running (default). Note, This option will start the app if in a stopped state. 
-- Stop the application \ No newline at end of file +- Stop the application diff --git a/src/themes/octopus/utilities/dist-thinner.mjs b/src/themes/octopus/utilities/dist-thinner.mjs new file mode 100644 index 0000000000..805ad6ff7c --- /dev/null +++ b/src/themes/octopus/utilities/dist-thinner.mjs @@ -0,0 +1,135 @@ +/** + * This javascript file comes from Astro Accelerator + * Edits will be overwritten if you change the file locally + * + * @format + */ + +import fs from 'fs'; +import path from 'path'; + +const workingDirectory = process.cwd(); + +const imageSize = await import( + 'file://' + path.join(workingDirectory, 'src/data/image-size.mjs') +); +const imageModule = await import( + 'file://' + path.join(workingDirectory, 'src/data/images.mjs') +); +const size = imageSize.size; +const imagePaths = imageModule.imagePaths; + +const imagePath = path.join('dist', imagePaths.src); +const outputPath = path.join('dist', imagePaths.dest); +const imageDirectory = path.join(workingDirectory, imagePath); + + const filesToProcess = []; + +function getDestinationFilePathless(source, s) { + let destination = path.join( + workingDirectory, + outputPath, + s.toString(), + source + ); + destination = destination.replace(path.parse(destination).ext, ''); + return destination; +} + +async function recurseFiles(directory) { + const f = await fs.promises.readdir(path.join(imageDirectory, directory), { + withFileTypes: true, + }); + + for (const file of f) { + if (file.isDirectory()) { + const nextDirectory = path.join(directory, file.name); + await recurseFiles(nextDirectory); + } else { + const ext = path.parse(file.name).ext; + + switch (ext) { + case '.jpg': + case '.jpeg': + case '.png': + case '.webp': + const sourcePath = path.join(directory, file.name); + + const webP = sourcePath.replace( + /.jpg$|.jpeg$|.png$/, + '.webp' + ); + const info = { + path: sourcePath, + webP: webP, + }; + + // Only processes images where there is no json metadata file + const metaPath = 
path.join( + workingDirectory, + imagePath, + sourcePath + '.json' + ); + + + if (fs.existsSync(metaPath)) { + const data = fs.readFileSync(metaPath, 'utf8'); + const jsonData = JSON.parse(data); + const date90DaysAgo = new Date( + Date.now() - 14 /* <- days */ * 24 * 60 * 60 * 1000 + ); + + //console.log('Checking:', metaPath); + + if (jsonData.updated && new Date(jsonData.updated) < date90DaysAgo) { + console.log('Processing:', metaPath); + filesToProcess.push(info); + } + } + + break; + } + } + } +} + +await recurseFiles(''); + +for (const file of filesToProcess) { + const source = path.join(imageDirectory, file.path); + const destination = getDestinationFilePathless(file.path, 'x'); + + const ext = path.parse(source).ext; + + // Delete original file + fs.unlinkSync(source); + + // Delete the fallback file + switch (ext) { + case '.png': + fs.unlinkSync(destination + '.png'); + break; + case '.jpg': + case '.jpeg': + fs.unlinkSync(destination + '.jpg'); + break; + case '.webp': + fs.unlinkSync(destination + '.webp'); + break; + } + + const metaFile = source + '.json'; + + // Delete metadata file + fs.unlinkSync(metaFile); + + // Delete resized images + for (const key in size) { + const resizeDestination = getDestinationFilePathless( + file.path, + size[key] + ); + + fs.unlinkSync(resizeDestination + '.webp'); + } +} \ No newline at end of file diff --git a/tests/home.spec.ts b/tests/home.spec.ts deleted file mode 100644 index 44d0c7f812..0000000000 --- a/tests/home.spec.ts +++ /dev/null @@ -1,103 +0,0 @@ -import { test, expect } from '@playwright/test'; -import { unified } from 'unified'; -import { visit } from 'unist-util-visit' -import rehypeParse from 'rehype-parse'; -import rehypeStringify from 'rehype-stringify'; - -const baseUrl = 'http://[::1]:3000'; -const startPath = '/docs'; -const crawled: string[] = []; -let discoveredLinks: string[] = []; -let discoveredImages: string[] = []; - -test('Crawl for bad URIs', async () => { - - async function crawl(url: 
string, foundOn: string = '') { - if (crawled.includes(url)) { - return; - } - - crawled.push(url); - - try { - const response = await fetch(url); - expect(response.status, `Expected a 200 OK response for page ${url} found on ${foundOn}`).toBe(200); - - const text = await response.text(); - await handleHtmlDocument(text); - } catch(error) { - expect(`Failed to fetch ${url} due to ${error}`).toBe(''); - } - - await crawlImages(url); - - const links = [...new Set(discoveredLinks)]; - discoveredLinks = []; - - for (let i = 0; i < links.length; i++) { - await crawl(links[i], url); - } - }; - - // Kick off the crawl - await crawl(baseUrl + startPath, 'First Page'); - console.log('Crawl checked', crawled.length); -}); - -function handleHtmlDocument(text: string) { - return unified() - .use(rehypeParse) - .use(rehypeStringify) - .use(findUris) - .process(text) -} - -async function crawlImages(foundOn: string) { - const images = [...new Set(discoveredImages)]; - discoveredImages = []; - - for (let i = 0; i < images.length; i++) { - const response = await fetch(images[i]); - - if (response.status != 200) { - console.log(images[i]); - } - - expect(response.status, `Expected a 200 OK response for image ${images[i]} found on ${foundOn}`).toBe(200); - } -} - -function addUri(collection: string[], uri: string) { - if (uri.substring(0, 1) == '/') { - collection.push(baseUrl + uri); - } - - if (uri.indexOf(baseUrl) == 0) { - collection.push(uri.split('#')[0]); - } -} - -function isString(s: string | any) : s is string { - return typeof s === 'string'; -} - -function findUris(options = {}) { - return (tree: any) => { - visit(tree, 'element', (node) => { - if (node.tagName === 'a' && node.properties && isString(node.properties.href)) { - addUri(discoveredLinks, node.properties.href); - } else if (node.tagName === 'img' && node.properties) { - - if (isString(node.properties.src)) { - addUri(discoveredImages, node.properties.src); - } - - if (isString(node.properties.srcSet)) { - 
(node.properties.srcSet.split(',')) - .map(s => s.split(' ')[0]) - .forEach(s => addUri(discoveredImages, s)); - } - } - }) - } -} \ No newline at end of file