diff --git a/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml index b963417f..58105d4e 100644 --- a/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml +++ b/.github/workflows/empty-worksapce-smoke-test-on-minikube-ubi9.yaml @@ -19,6 +19,8 @@ on: - LICENSE env: + # Use repository variable if set, otherwise fallback to default registry + REGISTRY: ${{ vars.REGISTRY || 'quay.io/devfile' }} USERSTORY: CloneGitRepoAPI TS_API_TEST_KUBERNETES_COMMAND_LINE_TOOL: kubectl DEPLOYMENT_TIMEOUT: 90s @@ -71,12 +73,12 @@ jobs: - name: Build base image run: | eval $(minikube docker-env) - cd base/ubi9 && docker build -t quay.io/devfile/base-developer-image:ubi9-latest . + cd base/ubi9 && docker build -t ${{ env.REGISTRY }}/base-developer-image:ubi9-latest . - name: Build universal image run: | eval $(minikube docker-env) - cd universal/ubi9 && docker build -t quay.io/devfile/universal-developer-image:${{ env.PR_NUMBER }} . + cd universal/ubi9 && docker build -t ${{ env.REGISTRY }}/universal-developer-image:${{ env.PR_NUMBER }} . - name: Checkout DWO uses: actions/checkout@master @@ -104,7 +106,7 @@ jobs: - name: Check that UDI is present in the image list run: | # we used it for the build above and do not need it anymore. It saves the disk space - minikube image rm quay.io/devfile/base-developer-image:ubi9-latest + minikube image rm ${{ env.REGISTRY }}/base-developer-image:ubi9-latest minikube image list --format table - name: Install NodeJs @@ -159,7 +161,7 @@ jobs: cpuRequest: 30m - name: che-code-runtime-description container: - image: quay.io/devfile/universal-developer-image:${{ env.PR_NUMBER }} + image: ${{ env.REGISTRY }}/universal-developer-image:${{ env.PR_NUMBER }} memoryLimit: 1024Mi memoryRequest: 256Mi cpuLimit: 500m @@ -242,7 +244,7 @@ jobs: - name: Run Empty workspace smoke test run: | - export TS_API_TEST_UDI_IMAGE=quay.io/devfile/universal-developer-image:${{ env.PR_NUMBER }} + export TS_API_TEST_UDI_IMAGE=${{ env.REGISTRY }}/universal-developer-image:${{ env.PR_NUMBER }} export TS_API_TEST_CHE_CODE_EDITOR_DEVFILE_URI=http://file-service.default.svc:80/devfile.yaml cd che/tests/e2e npm i diff --git a/.github/workflows/pr-check-ubi10.yaml b/.github/workflows/pr-check-ubi10.yaml new file mode 100644 index 00000000..414196e5 --- /dev/null +++ b/.github/workflows/pr-check-ubi10.yaml @@ -0,0 +1,193 @@ +# +# Copyright (c) 2021-2025 Red Hat, Inc. 
+# This program and the accompanying materials are made +# available under the terms of the Eclipse Public License 2.0 +# which is available at https://www.eclipse.org/legal/epl-2.0/ +# +# SPDX-License-Identifier: EPL-2.0 +# + +name: Pull Request Check UBI 10 + +# Trigger the workflow on pull request +on: [pull_request] + +env: + # Use repository variable if set, otherwise fallback to default registry + REGISTRY: ${{ vars.REGISTRY || 'quay.io/devfile' }} + +jobs: + build-base-image: + name: Build base image + strategy: + fail-fast: false + matrix: + runners: ['ubuntu-22.04', 'ubuntu-22.04-arm'] + runs-on: ${{matrix.runners}} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set arch environment variable + run: | + if [[ ${{matrix.runners}} == 'ubuntu-22.04' ]]; then + echo arch="amd64" >> $GITHUB_ENV + else + echo arch="arm64" >> $GITHUB_ENV + fi + - name: Free runner space + run: sudo rm -rf /usr/local/lib/android + - name: Cleanup docker images + run: docker system prune -af + - name: Build base image + run: | + cd base/ubi10 && docker buildx build \ + --platform linux/${{env.arch}} \ + --progress=plain \ + -t base-developer-image-${{env.arch}} . + - name: Display docker images + run: docker images + - name: Login to Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} + # TODO: Remove this step once UDI on UBI 10 is ready and we have proper PR workflow for UDI + # This temporary step pushes base image to registry for testing purposes + - name: Tag and push base image for testing + run: | + docker tag base-developer-image-${{env.arch}} ${{ env.REGISTRY }}/base-developer-image:${{env.arch}}-ubi10-pr-${{github.event.number}} + docker push ${{ env.REGISTRY }}/base-developer-image:${{env.arch}}-ubi10-pr-${{github.event.number}} + - name: Compress image to a file + run: docker save base-developer-image-${{env.arch}} | gzip > base-developer-image-${{env.arch}}.tgz + - name: Upload image artifact + uses: actions/upload-artifact@v4 + with: + name: base-developer-image-${{env.arch}} + path: base-developer-image-${{env.arch}}.tgz + + publish-base-image: + name: Publish base image + runs-on: ubuntu-22.04 + needs: build-base-image + steps: + - name: Login to Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} + # TODO: Remove this job once UDI on UBI 10 is ready and we have proper PR workflow for UDI + # This temporary job publishes base image manifest for testing purposes + - name: publish base image manifest + run: | + docker manifest create ${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}} \ + --amend ${{ env.REGISTRY }}/base-developer-image:amd64-ubi10-pr-${{github.event.number}} \ + --amend ${{ env.REGISTRY }}/base-developer-image:arm64-ubi10-pr-${{github.event.number}} + + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}} \ + ${{ env.REGISTRY }}/base-developer-image:amd64-ubi10-pr-${{github.event.number}} \ + --os linux --arch amd64 + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}} \ + ${{ env.REGISTRY }}/base-developer-image:arm64-ubi10-pr-${{github.event.number}} \ + --os linux --arch arm64 + + docker manifest push ${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}} + - name: 'Comment PR' + uses: 
actions/github-script@v7 + with: + script: | + const { repo: { owner, repo } } = context; + await github.rest.issues.createComment({ + issue_number: ${{github.event.number}}, + owner: context.repo.owner, + repo: context.repo.repo, + body: `Pull Request UBI 10 images published ✨\n\nBase Image: [${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}}](https://${{ env.REGISTRY }}/base-developer-image:ubi10-pr-${{github.event.number}})` + }) + + # build-udi: + # name: Build udi + # strategy: + # fail-fast: false + # matrix: + # runners: ['ubuntu-22.04', 'ubuntu-22.04-arm'] + # runs-on: ${{matrix.runners}} + # needs: build-base-image + # steps: + # - name: Set arch environment variable + # run: | + # if [[ ${{matrix.runners}} == 'ubuntu-22.04' ]]; then + # echo arch="amd64" >> $GITHUB_ENV + # else + # echo arch="arm64" >> $GITHUB_ENV + # fi + # - name: Checkout + # uses: actions/checkout@v4 + # - name: Free runner space + # run: sudo rm -rf /usr/local/lib/android + # - name: Cleanup docker images + # run: docker system prune -af + # - name: Download BDI artifacts + # uses: actions/download-artifact@v4 + # with: + # pattern: base-developer-image-* + # merge-multiple: true + # path: . + # - name: List downloaded files + # run: ls -lah + # - name: Load docker images + # run: docker load -i base-developer-image-${{env.arch}}.tgz + # - name: Display docker images + # run: docker images + # - name: Update UDI Dockerfile + # run: sed "s|${{ env.REGISTRY }}/base-developer-image:ubi10-latest|base-developer-image-${{env.arch}}|" -i "universal/ubi10/Dockerfile" + # - name: Login to Registry + # uses: docker/login-action@v3 + # with: + # registry: ${{ env.REGISTRY }} + # username: ${{ secrets.QUAY_USERNAME }} + # password: ${{ secrets.QUAY_PASSWORD }} + # - name: Build udi + # run: | + # cd universal/ubi10 && docker buildx build \ + # --platform linux/${{env.arch}} \ + # --progress=plain \ + # --push \ + # -t ${{ env.REGISTRY }}/universal-developer-image:${{env.arch}}-ubi10-pr-${{github.event.number}} . 
+ + # publish-udi: + # name: Publish udi + # runs-on: ubuntu-22.04 + # needs: build-udi + # steps: + # - name: Login to Registry + # uses: docker/login-action@v3 + # with: + # registry: ${{ env.REGISTRY }} + # username: ${{ secrets.QUAY_USERNAME }} + # password: ${{ secrets.QUAY_PASSWORD }} + # - name: publish + # run: | + # docker manifest create ${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}} \ + # --amend ${{ env.REGISTRY }}/universal-developer-image:amd64-ubi10-pr-${{github.event.number}} \ + # --amend ${{ env.REGISTRY }}/universal-developer-image:arm64-ubi10-pr-${{github.event.number}} + # + # docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}} \ + # ${{ env.REGISTRY }}/universal-developer-image:amd64-ubi10-pr-${{github.event.number}} \ + # --os linux --arch amd64 + # docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}} \ + # ${{ env.REGISTRY }}/universal-developer-image:arm64-ubi10-pr-${{github.event.number}} \ + # --os linux --arch arm64 + # + # docker manifest push ${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}} + # - name: 'Comment PR' + # uses: actions/github-script@v7 + # with: + # script: | + # const { repo: { owner, repo } } = context; + # await github.rest.issues.createComment({ + # issue_number: ${{github.event.number}}, + # owner: context.repo.owner, + # repo: context.repo.repo, + # body: `Pull Request images published ✨\n\nUDI: [${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}}](https://${{ env.REGISTRY }}/universal-developer-image:ubi10-pr-${{github.event.number}})` + # }) \ No newline at end of file diff --git a/.github/workflows/pr-check.yaml b/.github/workflows/pr-check.yaml index cc2f22cf..485225ac 100644 --- a/.github/workflows/pr-check.yaml +++ b/.github/workflows/pr-check.yaml @@ -12,6 +12,10 @@ name: Pull Request Check # Trigger the workflow on pull request on: [pull_request] +env: + # Use repository variable if set, otherwise fallback to default registry + REGISTRY: ${{ vars.REGISTRY || 'quay.io/devfile' }} + jobs: build-base-image: name: Build base image @@ -86,10 +90,10 @@ jobs: run: docker images - name: Update UDI Dockerfile run: sed "s|quay.io/devfile/base-developer-image:ubi9-latest|base-developer-image-${{env.arch}}|" -i "universal/ubi9/Dockerfile" - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: Build udi @@ -98,33 +102,33 @@ jobs: --platform linux/${{env.arch}} \ --progress=plain \ --push \ - -t quay.io/devfile/universal-developer-image:${{env.arch}}-pr-${{github.event.number}} . + -t ${{ env.REGISTRY }}/universal-developer-image:${{env.arch}}-pr-${{github.event.number}} . 
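The `${{ vars.REGISTRY || 'quay.io/devfile' }}` fallback used by these workflows reads a repository-level Actions variable. A minimal sketch of configuring it for a fork from the GitHub CLI (assuming a `gh` version that provides `gh variable`, authenticated against the fork; the registry value below is illustrative):

```bash
# Point the workflows at a fork-owned registry instead of quay.io/devfile.
# Run from a clone of the fork.
gh variable set REGISTRY --body "ghcr.io/youruser"

# Confirm what vars.REGISTRY will resolve to in the workflows.
gh variable list
```

The `QUAY_USERNAME`/`QUAY_PASSWORD` secrets passed to `docker/login-action` still need to hold credentials that are valid for whichever registry `REGISTRY` points at.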
publish-udi: name: Publish udi runs-on: ubuntu-22.04 needs: build-udi steps: - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: publish run: | - docker manifest create quay.io/devfile/universal-developer-image:pr-${{github.event.number}} \ - --amend quay.io/devfile/universal-developer-image:amd64-pr-${{github.event.number}} \ - --amend quay.io/devfile/universal-developer-image:arm64-pr-${{github.event.number}} + docker manifest create ${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}} \ + --amend ${{ env.REGISTRY }}/universal-developer-image:amd64-pr-${{github.event.number}} \ + --amend ${{ env.REGISTRY }}/universal-developer-image:arm64-pr-${{github.event.number}} - docker manifest annotate quay.io/devfile/universal-developer-image:pr-${{github.event.number}} \ - quay.io/devfile/universal-developer-image:amd64-pr-${{github.event.number}} \ + docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}} \ + ${{ env.REGISTRY }}/universal-developer-image:amd64-pr-${{github.event.number}} \ --os linux --arch amd64 - docker manifest annotate quay.io/devfile/universal-developer-image:pr-${{github.event.number}} \ - quay.io/devfile/universal-developer-image:arm64-pr-${{github.event.number}} \ + docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}} \ + ${{ env.REGISTRY }}/universal-developer-image:arm64-pr-${{github.event.number}} \ --os linux --arch arm64 - docker manifest push quay.io/devfile/universal-developer-image:pr-${{github.event.number}} + docker manifest push ${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}} - name: 'Comment PR' uses: actions/github-script@v7 with: @@ -134,5 +138,5 @@ jobs: issue_number: ${{github.event.number}}, owner: context.repo.owner, repo: context.repo.repo, - body: `Pull Request images published ✨\n\nUDI: [quay.io/devfile/universal-developer-image:pr-${{github.event.number}}](https://quay.io/devfile/universal-developer-image:pr-${{github.event.number}})` + body: `Pull Request images published ✨\n\nUDI: [${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}}](https://${{ env.REGISTRY }}/universal-developer-image:pr-${{github.event.number}})` }) diff --git a/.github/workflows/ubi10-build.yaml b/.github/workflows/ubi10-build.yaml new file mode 100644 index 00000000..014ffd64 --- /dev/null +++ b/.github/workflows/ubi10-build.yaml @@ -0,0 +1,172 @@ +name: Build of UBI 10 based Developer Images + +on: + push: + branches: [ main ] + workflow_dispatch: + + workflow_call: + # Map the workflow outputs to job outputs + secrets: + QUAY_USERNAME: + required: true + QUAY_PASSWORD: + required: true + # outputs: + # uniq_tag: + # description: "The first output string" + # value: ${{ jobs.build_universal_ubi10_image.outputs.output1 }} + +env: + # Use repository variable if set, otherwise fallback to default registry + REGISTRY: ${{ vars.REGISTRY || 'quay.io/devfile' }} + +jobs: + build-base-image: + name: Build base image + strategy: + fail-fast: false + matrix: + runners: ['ubuntu-22.04', 'ubuntu-22.04-arm'] + runs-on: ${{matrix.runners}} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set arch environment variable + run: | + if [[ ${{matrix.runners}} == 'ubuntu-22.04' ]]; then + echo arch="amd64" >> $GITHUB_ENV + else + echo arch="arm64" >> 
$GITHUB_ENV + fi + - name: Set short_sha environment variable + run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV + - name: Free runner space + run: sudo rm -rf /usr/local/lib/android + - name: Cleanup docker images + run: docker system prune -af + - name: Login to Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} + - name: Build base image + run: | + cd base/ubi10 && docker buildx build \ + --platform linux/${{env.arch}} \ + --progress=plain \ + --push \ + -t ${{ env.REGISTRY }}/base-developer-image:${{env.arch}}-ubi10-${{env.short_sha}} . + + publish-base-image: + name: Publish base image + runs-on: ubuntu-22.04 + needs: build-base-image + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set short_sha environment variable + run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV + - name: Login to Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_PASSWORD }} + - name: publish + run: | + for tag in ubi10-latest ubi10-${{env.short_sha}}; + do + docker manifest create ${{ env.REGISTRY }}/base-developer-image:${tag} \ + --amend ${{ env.REGISTRY }}/base-developer-image:amd64-ubi10-${{env.short_sha}} \ + --amend ${{ env.REGISTRY }}/base-developer-image:arm64-ubi10-${{env.short_sha}} + + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:${tag} \ + ${{ env.REGISTRY }}/base-developer-image:amd64-ubi10-${{env.short_sha}} \ + --os linux --arch amd64 + + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:${tag} \ + ${{ env.REGISTRY }}/base-developer-image:arm64-ubi10-${{env.short_sha}} \ + --os linux --arch arm64 + + docker manifest push ${{ env.REGISTRY }}/base-developer-image:${tag} + done + + # build-udi: + # name: Build udi + # strategy: + # fail-fast: false + # matrix: + # runners: ['ubuntu-22.04', 'ubuntu-22.04-arm'] + # runs-on: ${{matrix.runners}} + # needs: publish-base-image + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + # - name: Set arch environment variable + # run: | + # if [[ ${{matrix.runners}} == 'ubuntu-22.04' ]]; then + # echo arch="amd64" >> $GITHUB_ENV + # else + # echo arch="arm64" >> $GITHUB_ENV + # fi + # - name: Set short_sha environment variable + # run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV + # - name: Free runner space + # run: sudo rm -rf /usr/local/lib/android + # - name: Cleanup docker images + # run: docker system prune -af + # - name: Login to Registry + # uses: docker/login-action@v3 + # with: + # registry: ${{ env.REGISTRY }} + # username: ${{ secrets.QUAY_USERNAME }} + # password: ${{ secrets.QUAY_PASSWORD }} + # - name: Build udi + # run: | + # cd universal/ubi10 && docker buildx build \ + # --platform linux/${{env.arch}} \ + # --progress=plain \ + # --push \ + # -t ${{ env.REGISTRY }}/universal-developer-image:${{env.arch}}-ubi10-${{env.short_sha}} . 
+ + # publish-udi: + # name: Publish udi + # runs-on: ubuntu-22.04 + # needs: build-udi + # steps: + # - name: Checkout + # uses: actions/checkout@v4 + # - name: Set short_sha environment variable + # run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV + # - name: Login to Registry + # uses: docker/login-action@v3 + # with: + # registry: ${{ env.REGISTRY }} + # username: ${{ secrets.QUAY_USERNAME }} + # password: ${{ secrets.QUAY_PASSWORD }} + # - name: publish + # run: | + # for tag in ubi10-latest ubi10-${{env.short_sha}}; + # do + # docker manifest create ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + # --amend ${{ env.REGISTRY }}/universal-developer-image:amd64-ubi10-${{env.short_sha}} \ + # --amend ${{ env.REGISTRY }}/universal-developer-image:arm64-ubi10-${{env.short_sha}} + # + # docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + # ${{ env.REGISTRY }}/universal-developer-image:amd64-ubi10-${{env.short_sha}} \ + # --os linux --arch amd64 + # + # docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + # ${{ env.REGISTRY }}/universal-developer-image:arm64-ubi10-${{env.short_sha}} \ + # --os linux --arch arm64 + # + # docker manifest push ${{ env.REGISTRY }}/universal-developer-image:${tag} + # done + # - name: Get tag with uniq prefix + # id: setTagName + # # set the image with uniq tag prefix (for example: quay.io/..../base-developer-image:ubi10-7ad6cab) to env. var + # # and define it for output. This output with tag image will be used in caller job + # run: | + # echo "uniq_tag=${{ env.REGISTRY }}/universal-developer-image:ubi10-${{env.short_sha}}" >> $GITHUB_OUTPUT diff --git a/.github/workflows/ubi9-build.yaml b/.github/workflows/ubi9-build.yaml index 32a9c114..fdf4a695 100644 --- a/.github/workflows/ubi9-build.yaml +++ b/.github/workflows/ubi9-build.yaml @@ -18,6 +18,10 @@ on: description: "The first output string" value: ${{ jobs.build_universal_ubi9_image.outputs.output1 }} +env: + # Use repository variable if set, otherwise fallback to default registry + REGISTRY: ${{ vars.REGISTRY || 'quay.io/devfile' }} + jobs: build-base-image: name: Build base image @@ -42,10 +46,10 @@ jobs: run: sudo rm -rf /usr/local/lib/android - name: Cleanup docker images run: docker system prune -af - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: Build base image @@ -54,7 +58,7 @@ jobs: --platform linux/${{env.arch}} \ --progress=plain \ --push \ - -t quay.io/devfile/base-developer-image:${{env.arch}}-${{env.short_sha}} . + -t ${{ env.REGISTRY }}/base-developer-image:${{env.arch}}-${{env.short_sha}} . 
publish-base-image: name: Publish base image @@ -65,29 +69,29 @@ jobs: uses: actions/checkout@v4 - name: Set short_sha environment variable run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: publish run: | for tag in latest ubi9-latest ubi9-${{env.short_sha}}; do - docker manifest create quay.io/devfile/base-developer-image:${tag} \ - --amend quay.io/devfile/base-developer-image:amd64-${{env.short_sha}} \ - --amend quay.io/devfile/base-developer-image:arm64-${{env.short_sha}} + docker manifest create ${{ env.REGISTRY }}/base-developer-image:${tag} \ + --amend ${{ env.REGISTRY }}/base-developer-image:amd64-${{env.short_sha}} \ + --amend ${{ env.REGISTRY }}/base-developer-image:arm64-${{env.short_sha}} - docker manifest annotate quay.io/devfile/base-developer-image:${tag} \ - quay.io/devfile/base-developer-image:amd64-${{env.short_sha}} \ + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:${tag} \ + ${{ env.REGISTRY }}/base-developer-image:amd64-${{env.short_sha}} \ --os linux --arch amd64 - docker manifest annotate quay.io/devfile/base-developer-image:${tag} \ - quay.io/devfile/base-developer-image:arm64-${{env.short_sha}} \ + docker manifest annotate ${{ env.REGISTRY }}/base-developer-image:${tag} \ + ${{ env.REGISTRY }}/base-developer-image:arm64-${{env.short_sha}} \ --os linux --arch arm64 - docker manifest push quay.io/devfile/base-developer-image:${tag} + docker manifest push ${{ env.REGISTRY }}/base-developer-image:${tag} done build-udi: @@ -114,10 +118,10 @@ jobs: run: sudo rm -rf /usr/local/lib/android - name: Cleanup docker images run: docker system prune -af - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: Build udi @@ -126,7 +130,7 @@ jobs: --platform linux/${{env.arch}} \ --progress=plain \ --push \ - -t quay.io/devfile/universal-developer-image:${{env.arch}}-${{env.short_sha}} . + -t ${{ env.REGISTRY }}/universal-developer-image:${{env.arch}}-${{env.short_sha}} . 
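Once `publish-base-image` has pushed the per-arch tags and the combined manifest list, the result can be sanity-checked from any machine with a Docker CLI. A small sketch, assuming the default registry (substitute your own `REGISTRY` value if overridden):

```bash
# Inspect the multi-arch manifest list created by the publish step above.
# It should list two manifests: linux/amd64 and linux/arm64.
docker manifest inspect quay.io/devfile/base-developer-image:ubi9-latest

# Equivalent check using buildx:
docker buildx imagetools inspect quay.io/devfile/base-developer-image:ubi9-latest
```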
publish-udi: name: Publish udi @@ -137,33 +141,33 @@ jobs: uses: actions/checkout@v4 - name: Set short_sha environment variable run: echo short_sha="$(git rev-parse --short=7 HEAD)" >> $GITHUB_ENV - - name: Login to Quay.io + - name: Login to Registry uses: docker/login-action@v3 with: - registry: quay.io + registry: ${{ env.REGISTRY }} username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_PASSWORD }} - name: publish run: | for tag in latest ubi9-latest ubi9-${{env.short_sha}}; do - docker manifest create quay.io/devfile/universal-developer-image:${tag} \ - --amend quay.io/devfile/universal-developer-image:amd64-${{env.short_sha}} \ - --amend quay.io/devfile/universal-developer-image:arm64-${{env.short_sha}} + docker manifest create ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + --amend ${{ env.REGISTRY }}/universal-developer-image:amd64-${{env.short_sha}} \ + --amend ${{ env.REGISTRY }}/universal-developer-image:arm64-${{env.short_sha}} - docker manifest annotate quay.io/devfile/universal-developer-image:${tag} \ - quay.io/devfile/universal-developer-image:amd64-${{env.short_sha}} \ + docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + ${{ env.REGISTRY }}/universal-developer-image:amd64-${{env.short_sha}} \ --os linux --arch amd64 - docker manifest annotate quay.io/devfile/universal-developer-image:${tag} \ - quay.io/devfile/universal-developer-image:arm64-${{env.short_sha}} \ + docker manifest annotate ${{ env.REGISTRY }}/universal-developer-image:${tag} \ + ${{ env.REGISTRY }}/universal-developer-image:arm64-${{env.short_sha}} \ --os linux --arch arm64 - docker manifest push quay.io/devfile/universal-developer-image:${tag} + docker manifest push ${{ env.REGISTRY }}/universal-developer-image:${tag} done - name: Get tag with uniq prefix id: setTagName # set the image with uniq tag prefix (for example: quay.io/..../base-developer-image:ubi9-7ad6cab) to env. var # and define it for output. This output with tag image will be used in caller job run: | - echo "uniq_tag=quay.io/devfile/universal-developer-image:ubi9-${{env.short_sha}}" >> $GITHUB_OUTPUT + echo "uniq_tag=${{ env.REGISTRY }}/universal-developer-image:ubi9-${{env.short_sha}}" >> $GITHUB_OUTPUT diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index acdb4bcd..a94f3d23 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,15 +3,26 @@ Contribute to Developer Images ## Developer Base Image -### Red Hat Universal Base Image ([UBI](https://developers.redhat.com/articles/ubi-faq#)) based image ([quay.io/devfile/base-developer-image:ubi9-latest](https://quay.io/repository/devfile/base-developer-image/)) +### Red Hat Universal Base Image ([UBI](https://developers.redhat.com/articles/ubi-faq#)) based images + +Available versions: +- **UBI 9**: [quay.io/devfile/base-developer-image:ubi9-latest](https://quay.io/repository/devfile/base-developer-image/) +- **UBI 10**: [quay.io/devfile/base-developer-image:ubi10-latest](https://quay.io/repository/devfile/base-developer-image/) Build with Docker buildkit: +**UBI 9:** ```bash $ cd base/ubi9 $ DOCKER_BUILDKIT=1 docker image build --progress=plain -t quay.io/devfile/base-developer-image:ubi9-latest . ``` +**UBI 10:** +```bash +$ cd base/ubi10 +$ DOCKER_BUILDKIT=1 docker image build --progress=plain -t quay.io/devfile/base-developer-image:ubi10-latest . 
+``` + ## Developer Universal Image ### Red Hat Universal Base Image ([UBI](https://developers.redhat.com/articles/ubi-faq#)) based image ([quay.io/devfile/universal-developer-image:ubi9-latest](https://quay.io/repository/devfile/universal-developer-image/)) @@ -25,6 +36,7 @@ $ DOCKER_BUILDKIT=1 docker image build --progress=plain -t quay.io/devfile/unive To build for a specific architecture: +**UBI 9:** ```bash # amd64 $ DOCKER_BUILDKIT=1 docker image build --platform linux/amd64 --progress=plain -t quay.io/devfile/universal-developer-image:ubi9-latest . @@ -32,3 +44,28 @@ $ DOCKER_BUILDKIT=1 docker image build --platform linux/amd64 --progress=plain - # arm64 $ DOCKER_BUILDKIT=1 docker image build --platform linux/arm64 --progress=plain -t quay.io/devfile/universal-developer-image:ubi9-latest . ``` + +**UBI 10 Base Image:** +```bash +# amd64 +$ DOCKER_BUILDKIT=1 docker image build --platform linux/amd64 --progress=plain -t quay.io/devfile/base-developer-image:ubi10-latest . + +# arm64 +$ DOCKER_BUILDKIT=1 docker image build --platform linux/arm64 --progress=plain -t quay.io/devfile/base-developer-image:ubi10-latest . +``` + +## Registry Configuration + +### Using Custom Registry + +The build workflows support custom container registries through repository variables. This is useful for forks that want to publish to their own registry. + +**To configure:** +1. In your fork, go to **Settings** → **Secrets and Variables** → **Actions** → **Variables** +2. Add a repository variable: `REGISTRY` = `your-registry.com/your-namespace` +3. All workflows will automatically use your custom registry + +**Example:** Setting `REGISTRY` to `ghcr.io/youruser` will publish images to: +- `ghcr.io/youruser/base-developer-image:ubi9-latest` +- `ghcr.io/youruser/base-developer-image:ubi10-latest` +- `ghcr.io/youruser/universal-developer-image:ubi9-latest` diff --git a/README.md b/README.md index 13e2d540..f9e9dc7a 100644 --- a/README.md +++ b/README.md @@ -4,69 +4,82 @@ # Developer Images [![Build of UBI 9 based Developer Images](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml/badge.svg)](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml) +[![Build of UBI 10 based Developer Images](https://github.com/devfile/developer-images/actions/workflows/ubi10-build.yaml/badge.svg)](https://github.com/devfile/developer-images/actions/workflows/ubi10-build.yaml) Containers images with tools for developers 👨‍💻👩‍💻 ## Developer Base Image -### Red Hat Universal Base Image ([UBI](https://developers.redhat.com/articles/ubi-faq#)) based image ([quay.io/devfile/base-developer-image:ubi9-latest](https://quay.io/repository/devfile/base-developer-image)) +### Red Hat Universal Base Image ([UBI](https://developers.redhat.com/articles/ubi-faq#)) based images -Run the following command to test it with Docker: +Available versions: +- **UBI 9**: [quay.io/devfile/base-developer-image:ubi9-latest](https://quay.io/repository/devfile/base-developer-image) +- **UBI 10**: [quay.io/devfile/base-developer-image:ubi10-latest](https://quay.io/repository/devfile/base-developer-image) +Run the following commands to test with Docker: + +**UBI 9:** ```bash $ docker run -ti --rm \ quay.io/devfile/base-developer-image:ubi9-latest \ bash ``` + +**UBI 10:** +```bash +$ docker run -ti --rm \ + quay.io/devfile/base-developer-image:ubi10-latest \ + bash +``` ### Included Development Tools -| Tool | ubi9 based image | -|---------------------|-------------------------------------| -| `bash` |`bash` | -| 
`bat` |`` | -| `buildah` |`buildah` | -| `curl` |`curl` | -| `ps` |`ps` | -| `diff` |`diffutils` | -| `emacs` |`NOT AVAILABLE (fedora only)` | -| `fish` |`NOT AVAILABLE (fedora only)` | -| `gh` |`` | -| `git` |`git` | -| `git-lfs` |`git-lfs` | -| `ip` |`iproute` | -| `jq` |`jq` | -| `htop` |`NOT AVAILABLE (fedora only)` | -| `kubedock` |`` | -| `less` |`less` | -| `lsof` |`lsof` | -| `man` |`man` | -| `nano` |`nano` | -| `netcat` |`NOT AVAILABLE` | -| `netstat` |`net-tools` | -| `openssh-client` |`openssh-clients` | -| `podman` |`podman` | -| `7z` |`p7zip-plugins` | -| `ripgrep` |`` | -| `rsync` |`rsync` | -| `scp` |`openssh-clients` | -| `screen` |`NOT AVAILABLE` | -| `sed` |`sed` | -| `shasum` |`perl-Digest-SHA` | -| `socat` |`socat` | -| `sudo` |`sudo` | -| `ss` |`NOT AVAILABLE` | -| `ssl-cert` |`NOT AVAILABLE` | -| `stow` |`stow` | -| `tail` |`` | -| `tar` |`tar` | -| `time` |`time` | -| `tldr` |`NOT AVAILABLE (fedora only)` | -| `tmux` |`NOT AVAILABLE (fedora only)` | -| `vim` |`vim` | -| `wget` |`wget` | -| `zip` |`zip` | -| `zsh` |`NOT AVAILABLE (fedora only)` | -| **TOTAL SIZE** | **903MB** (341MB compressed) | +| Tool | ubi9 based image | ubi10 based image | +|---------------------|-------------------------------------|-------------------------------------| +| `bash` |`bash` |`bash` | +| `bat` |`` |`` | +| `buildah` |`buildah` |`buildah` | +| `curl` |`curl` |`curl` | +| `ps` |`ps` |`ps` | +| `diff` |`diffutils` |`diffutils` | +| `emacs` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| `fish` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| `gh` |`` |`` | +| `git` |`git` |`git` | +| `git-lfs` |`git-lfs` |`git-lfs` | +| `ip` |`iproute` |`iproute` | +| `jq` |`jq` |`jq` | +| `htop` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| `kubedock` |`` |`` | +| `less` |`less` |`less` | +| `lsof` |`lsof` |`lsof` | +| `man` |`man` |`man` | +| `nano` |`nano` |`nano` | +| `netcat` |`NOT AVAILABLE` |`NOT AVAILABLE` | +| `netstat` |`net-tools` |`net-tools` | +| `openssh-client` |`openssh-clients` |`openssh-clients` | +| `podman` |`podman` |`podman` | +| `7z` |`p7zip-plugins` |`p7zip-plugins` | +| `ripgrep` |`` |`` | +| `rsync` |`rsync` |`rsync` | +| `scp` |`openssh-clients` |`openssh-clients` | +| `screen` |`NOT AVAILABLE` |`NOT AVAILABLE` | +| `sed` |`sed` |`sed` | +| `shasum` |`perl-Digest-SHA` |`perl-Digest-SHA` | +| `socat` |`socat` |`socat` | +| `sudo` |`sudo` |`sudo` | +| `ss` |`NOT AVAILABLE` |`NOT AVAILABLE` | +| `ssl-cert` |`NOT AVAILABLE` |`NOT AVAILABLE` | +| `stow` |`stow` |`stow` | +| `tail` |`` |`` | +| `tar` |`tar` |`tar` | +| `time` |`time` |`time` | +| `tldr` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| `tmux` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| `vim` |`vim` |`vim` | +| `wget` |`wget` |`wget` | +| `zip` |`zip` |`zip` | +| `zsh` |`NOT AVAILABLE (fedora only)` |`NOT AVAILABLE (fedora only)` | +| **TOTAL SIZE** | **800MB** (255MB compressed) | **789MB** (256MB compressed) | ### Extending the base image When extending the base image, `source kubedock_setup` should be called in the new image's entrypoint to set up kubedock support. 
This sets up a wrapper for podman to use kubedock for the following podman commands if the `KUBEDOCK_ENABLED` env variable is set to `true`: @@ -171,10 +184,30 @@ docker run -ti --rm \ #### Java JAVA_HOME_8, JAVA_HOME_11, JAVA_HOME_17, JAVA_HOME_21 +## Configuration + +### Registry Override + +The workflows support using custom container registries through the `REGISTRY` environment variable. This is useful for forks that want to publish to their own registry. + +**Default behavior:** Images are published to `quay.io/devfile` + +**To override in a fork:** +1. Go to your repository **Settings** → **Secrets and Variables** → **Actions** → **Variables** +2. Add a repository variable: `REGISTRY` = `your-registry.com/your-namespace` +3. All workflows will automatically use your custom registry + +**Example registry formats:** +- `quay.io/youruser` +- `ghcr.io/youruser` +- `docker.io/youruser` +- `your-private-registry.com/namespace` + # Builds This repo contains [actions](https://github.com/devfile/developer-images/actions), including: -* [![release latest stable](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml/badge.svg)](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml) +* [![release latest stable UBI 9](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml/badge.svg)](https://github.com/devfile/developer-images/actions/workflows/ubi9-build.yaml) +* [![release latest stable UBI 10](https://github.com/devfile/developer-images/actions/workflows/ubi10-build.yaml/badge.svg)](https://github.com/devfile/developer-images/actions/workflows/ubi10-build.yaml) Downstream builds can be found at the link below, which is _internal to Red Hat_. Stable builds can be found by replacing the 3.x with a specific version like 3.2. diff --git a/base/ubi10/.copy-files b/base/ubi10/.copy-files new file mode 100644 index 00000000..f59b35e5 --- /dev/null +++ b/base/ubi10/.copy-files @@ -0,0 +1,20 @@ +# This file contains a list directories or files to copy over from /home/tooling to /home/user in entrypoint.sh. +# +# For example, the following will copy /home/tooling/testfile to /home/user/testfile: +# ./testfile +# +# The goal of this file is to copy over files that cannot be used as symbolic links created by stow. +# For example, Vim does not permit .viminfo to be a symbolic link for security reasons, therefore it is copied +# over to /home/user manually without stow. +# +# When copying over directories or files from /home/tooling to /home/user using this file, remember to add the +# directory or file to .stow-local-ignore so that a symbolic link is not created. + + +# Vim does not permit .viminfo to be a symbolic link for security reasons, so manually copy it +.viminfo + +# We have to restore bash-related files back onto /home/user/ (since they will have been overwritten by the PVC) +# but we don't want them to be symbolic links (so that they persist on the PVC) +.bashrc +.bash_profile diff --git a/base/ubi10/.stow-local-ignore b/base/ubi10/.stow-local-ignore new file mode 100644 index 00000000..8cd6265b --- /dev/null +++ b/base/ubi10/.stow-local-ignore @@ -0,0 +1,17 @@ +# .viminfo cannot be a symlink for security reasons +\.viminfo + +# We store bash related files in /home/tooling/ so they aren't overriden if persistUserHome is enabled +# but we don't want them to be symbolic links (or to cause stow conflicts). They will be copied to /home/user/ manually. 
+\.bashrc +\.bash_profile + +# Ignore absolute symbolic links, as they are not supported by stow +\.krew +\.sdkman +\.local/bin/podman + +# Ignore files under .config directory +\.config + +\.copy-files diff --git a/base/ubi10/Dockerfile b/base/ubi10/Dockerfile new file mode 100644 index 00000000..1a74500a --- /dev/null +++ b/base/ubi10/Dockerfile @@ -0,0 +1,200 @@ +# syntax=docker/dockerfile:1.3-labs + +# https://catalog.redhat.com/en/software/containers/ubi10/ubi/66f2b46b122803e4937d11ae +FROM --platform=$BUILDPLATFORM registry.access.redhat.com/ubi10/ubi:10.0-1758186945 + +ARG TARGETARCH +LABEL maintainer="Red Hat, Inc." + +LABEL com.redhat.component="devfile-base-container" +LABEL name="devfile/base-developer-image" +LABEL version="ubi10" + +#label for EULA +LABEL com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" + +#labels for container catalog +LABEL summary="devfile base developer image" +LABEL description="Image with base developers tools. Languages SDK and runtimes excluded." +LABEL io.k8s.display-name="devfile-developer-base" +LABEL io.openshift.expose-services="" + +USER 0 + +ENV HOME=/home/tooling +RUN mkdir -p /home/tooling/ + +## install packages from epel 10 +RUN \ + # Install the primary EPEL 10 repository + dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-10.noarch.rpm && \ + \ + # Install the main list of packages + dnf install -y diffutils git git-lfs iproute jq less lsof man nano procps p7zip p7zip-plugins \ + perl-Digest-SHA net-tools openssh-clients openssl ripgrep rsync socat sudo time vim wget zip && \ + # Final update and cleanup + dnf update -y && \ + dnf clean all + +# install stow from EPEL 9 as it is not yet available in EPEL 10 +RUN \ + # Import EPEL 9 key and create the disabled repo file + rpm --import https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-9 && \ + printf '[epel9]\nname=Extra Packages for Enterprise Linux 9 - $basearch\nmetalink=https://mirrors.fedoraproject.org/metalink?repo=epel-9&arch=$basearch\nenabled=0\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-9\n' > /etc/yum.repos.d/epel-9.repo && \ + # Install stow using the temporarily enabled repo + dnf install -y --enablerepo=epel9 stow && \ + dnf clean all && \ + rm /etc/yum.repos.d/epel-9.repo + +## podman buildah skopeo +RUN dnf -y reinstall shadow-utils && \ + dnf -y install podman buildah skopeo fuse-overlayfs && \ + dnf clean all + +# Download and install gh-cli depending on the architecture. +# See release page for details https://github.com/cli/cli/releases/tag/v2.78.0 +RUN \ + TEMP_DIR="$(mktemp -d)"; \ + cd "${TEMP_DIR}"; \ + GH_VERSION="2.78.0"; \ + GH_ARCH="linux_$TARGETARCH"; \ + GH_TGZ="gh_${GH_VERSION}_${GH_ARCH}.tar.gz"; \ + GH_TGZ_URL="https://github.com/cli/cli/releases/download/v${GH_VERSION}/${GH_TGZ}"; \ + GH_CHEKSUMS_URL="https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_checksums.txt"; \ + curl -sSLO "${GH_TGZ_URL}"; \ + curl -sSLO "${GH_CHEKSUMS_URL}"; \ + sha256sum --ignore-missing -c "gh_${GH_VERSION}_checksums.txt" 2>&1 | grep OK; \ + tar -zxv --no-same-owner -f "${GH_TGZ}"; \ + mv "gh_${GH_VERSION}_${GH_ARCH}"/bin/gh /usr/local/bin/; \ + mv "gh_${GH_VERSION}_${GH_ARCH}"/share/man/man1/* /usr/local/share/man/man1; \ + cd -; \ + rm -rf "${TEMP_DIR}" + +# Download and install ripgrep depending on the architecture. 
+# See release page for details https://github.com/BurntSushi/ripgrep/releases/tag/13.0.0 +RUN \ + TEMP_DIR="$(mktemp -d)"; \ + cd "${TEMP_DIR}"; \ + RG_VERSION="13.0.0"; \ + if [ "$TARGETARCH" = "arm64" ]; then \ + RG_ARCH="arm-unknown-linux-gnueabihf"; \ + else \ + RG_ARCH="x86_64-unknown-linux-musl"; \ + fi; \ + RG_TGZ="ripgrep-${RG_VERSION}-${RG_ARCH}.tar.gz"; \ + RG_TGZ_URL="https://github.com/BurntSushi/ripgrep/releases/download/${RG_VERSION}/${RG_TGZ}"; \ + curl -sSLO "${RG_TGZ_URL}"; \ + tar -zxv --no-same-owner -f "${RG_TGZ}"; \ + mv "ripgrep-${RG_VERSION}-${RG_ARCH}"/rg /usr/local/bin/; \ + mv "ripgrep-${RG_VERSION}-${RG_ARCH}"/doc/rg.1 /usr/local/share/man/man1; \ + cd -; \ + rm -rf "${TEMP_DIR}" + +# Download and install bat depending on the architecture. +# See release page for details https://github.com/sharkdp/bat/releases/tag/v0.18.3 +RUN \ + TEMP_DIR="$(mktemp -d)"; \ + cd "${TEMP_DIR}"; \ + BAT_VERSION="0.18.3"; \ + if [ "$TARGETARCH" = "arm64" ]; then \ + BAT_ARCH="aarch64-unknown-linux-gnu"; \ + else \ + BAT_ARCH="x86_64-unknown-linux-musl"; \ + fi; \ + BAT_TGZ="bat-v${BAT_VERSION}-${BAT_ARCH}.tar.gz"; \ + BAT_TGZ_URL="https://github.com/sharkdp/bat/releases/download/v${BAT_VERSION}/${BAT_TGZ}"; \ + curl -sSLO "${BAT_TGZ_URL}"; \ + tar -zxv --no-same-owner -f "${BAT_TGZ}"; \ + mv "bat-v${BAT_VERSION}-${BAT_ARCH}"/bat /usr/local/bin/; \ + mv "bat-v${BAT_VERSION}-${BAT_ARCH}"/bat.1 /usr/local/share/man/man1; \ + cd -; \ + rm -rf "${TEMP_DIR}" + +# Download and install fd depending on the architecture. +# See release page for details https://github.com/sharkdp/fd/releases/tag/v8.7.0 +RUN \ + TEMP_DIR="$(mktemp -d)" && \ + cd "${TEMP_DIR}" && \ + FD_VERSION="8.7.0" && \ + if [ "$TARGETARCH" = "arm64" ]; then \ + FD_ARCH="aarch64-unknown-linux-gnu"; \ + else \ + FD_ARCH="x86_64-unknown-linux-musl"; \ + fi && \ + FD_TGZ="fd-v${FD_VERSION}-${FD_ARCH}.tar.gz" && \ + FD_TGZ_URL="https://github.com/sharkdp/fd/releases/download/v${FD_VERSION}/${FD_TGZ}" && \ + curl -sSLO "${FD_TGZ_URL}" && \ + tar -xv --no-same-owner -f "${FD_TGZ}" && \ + mv "fd-v${FD_VERSION}-${FD_ARCH}"/fd /usr/local/bin && \ + mv "fd-v${FD_VERSION}-${FD_ARCH}"/fd.1 /usr/local/share/man/man1 && \ + cd - && \ + rm -rf "${TEMP_DIR}" + +# Define user directory for binaries +ENV PATH="/home/user/.local/bin:$PATH" + +# Set up environment variables to note that this is +# not starting with usernamespace and default to +# isolate the filesystem with chroot. +ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot + +# Tweaks to make rootless buildah work +RUN touch /etc/subgid /etc/subuid && \ + chmod g=u /etc/subgid /etc/subuid /etc/passwd && \ + echo user:10000:65536 > /etc/subuid && \ + echo user:10000:65536 > /etc/subgid + +# Adjust storage.conf to enable Fuse storage. 
+RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /usr/share/containers/storage.conf +RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; \ + touch /var/lib/shared/overlay-images/images.lock; \ + touch /var/lib/shared/overlay-layers/layers.lock + +# Add kubedock +# See release page for details https://github.com/joyrex2001/kubedock/releases/tag/0.18.2 +ENV KUBEDOCK_VERSION 0.18.2 +ENV KUBECONFIG=/home/user/.kube/config +RUN KUBEDOCK_ARCH="linux_amd64" && \ + curl -L https://github.com/joyrex2001/kubedock/releases/download/${KUBEDOCK_VERSION}/kubedock_${KUBEDOCK_VERSION}_${KUBEDOCK_ARCH}.tar.gz | tar -C /usr/local/bin -xz --no-same-owner \ + && chmod +x /usr/local/bin/kubedock +COPY --chown=0:0 kubedock_setup.sh /usr/local/bin/kubedock_setup + +# Configure Podman wrapper +ENV PODMAN_WRAPPER_PATH=/usr/bin/podman.wrapper +ENV ORIGINAL_PODMAN_PATH=/usr/bin/podman.orig +COPY --chown=0:0 podman-wrapper.sh "${PODMAN_WRAPPER_PATH}" +RUN mv /usr/bin/podman "${ORIGINAL_PODMAN_PATH}" + +COPY --chown=0:0 entrypoint.sh / +COPY --chown=0:0 .stow-local-ignore /home/tooling/ +COPY --chown=0:0 .copy-files /home/tooling/ +RUN \ + # add user and configure it + useradd -u 10001 -G wheel,root -d /home/user --shell /bin/bash -m user && \ + # Setup $PS1 for a consistent and reasonable prompt + touch /etc/profile.d/udi_prompt.sh && \ + chown 10001 /etc/profile.d/udi_prompt.sh && \ + echo "export PS1='\W \`git branch --show-current 2>/dev/null | sed -r -e \"s@^(.+)@\(\1\) @\"\`$ '" >> /etc/profile.d/udi_prompt.sh && \ + # Copy the global git configuration to user config as global /etc/gitconfig + # file may be overwritten by a mounted file at runtime + cp /etc/gitconfig ${HOME}/.gitconfig && \ + chown 10001 ${HOME}/ ${HOME}/.viminfo ${HOME}/.gitconfig ${HOME}/.stow-local-ignore ${HOME}/.copy-files && \ + # Set permissions on /etc/passwd and /home to allow arbitrary users to write + chgrp -R 0 /home && \ + chmod -R g=u /etc/passwd /etc/group /home && \ + chmod +x /entrypoint.sh && \ + # Create symbolic links from /home/tooling/ -> /home/user/ + stow . -t /home/user/ -d /home/tooling/ && \ + # .viminfo cannot be a symbolic link for security reasons, so copy it to /home/user/ + cp /home/tooling/.viminfo /home/user/.viminfo && \ + # Bash-related files are backed up to /home/tooling/ incase they are deleted when persistUserHome is enabled. + cp /home/user/.bashrc /home/tooling/.bashrc && \ + cp /home/user/.bash_profile /home/tooling/.bash_profile && \ + chown 10001 /home/tooling/.bashrc /home/tooling/.bash_profile + +USER 10001 +ENV HOME=/home/user +WORKDIR /projects +# /usr/libexec/podman/catatonit is used to reap zombie processes +ENTRYPOINT ["/usr/libexec/podman/catatonit","--","/entrypoint.sh"] +CMD ["tail", "-f", "/dev/null"] diff --git a/base/ubi10/entrypoint.sh b/base/ubi10/entrypoint.sh new file mode 100644 index 00000000..7e7322a8 --- /dev/null +++ b/base/ubi10/entrypoint.sh @@ -0,0 +1,222 @@ + +#!/bin/bash + +# Replace /home/tooling/* path to /home/user/* path +replace_user_home() { + echo "$1" | sed "s|^/home/tooling|$HOME|" +} + +# Ensure $HOME exists when starting +if [ ! -d "${HOME}" ]; then + mkdir -p "${HOME}" +fi + +# Configure container builds to use vfs or fuse-overlayfs +if [ ! 
-d "${HOME}/.config/containers" ]; then + mkdir -p ${HOME}/.config/containers + if [ -c "/dev/fuse" ] && [ -f "/usr/bin/fuse-overlayfs" ]; then + (echo '[storage]';echo 'driver = "overlay"';echo '[storage.options.overlay]';echo 'mount_program = "/usr/bin/fuse-overlayfs"') > ${HOME}/.config/containers/storage.conf + else + (echo '[storage]';echo 'driver = "vfs"') > "${HOME}"/.config/containers/storage.conf + fi +fi + +# Setup $PS1 for a consistent and reasonable prompt +if [ -w "${HOME}" ] && [ ! -f "${HOME}"/.bashrc ]; then + echo "PS1='[\u@\h \W]\$ '" > "${HOME}"/.bashrc +fi + +# Add current (arbitrary) user to /etc/passwd and /etc/group +if ! whoami &> /dev/null; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-user}:x:$(id -u):0:${USER_NAME:-user} user:${HOME}:/bin/bash" >> /etc/passwd + echo "${USER_NAME:-user}:x:$(id -u):" >> /etc/group + fi +fi + +# The user namespace is created when `UserNamespacesSupport` feature is enabled and `hostUsers` is set to false in Pod spec. +# See for more details: +# - https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/#introduction +# Assume, that HOST_USERS environment variable is provided and is set to false in that case. +# If so, update /etc/sub*id files to reflect the valid UID/GID range. +if [ "${HOST_USERS}" == "false" ]; then + echo "Running in a user namespace" + if [ -f /proc/self/uid_map ]; then + # Typical output of `/proc/self/uid_map`: + # 1. When NOT running in a user namespace: + # 0 0 4294967295 + # 2. When running in a user namespace: + # 0 1481179136 65536 + # 3. When container is run in a user namespace: + # 0 1000 1 + # 1 1001 64535 + # We can use the content of /proc/self/uid_map to detect if we are running in a user namespace. + # However, to keep things simple, we will rely on HOST_USERS environment variable only. + # This way, we avoid breaking anything. + echo "/proc/self/uid_map content: $(cat /proc/self/uid_map)" + fi + + # Why do we need to update /etc/sub*id files? + # We are already in the user namespace, so we know there are at least 65536 UIDs/GIDs available. + # For more details, see: + # - https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/#id-count-for-each-of-pods + # Podman needs to create a new user namespace for any container being launched and map the container's user + # and group IDs (UID/GID) to the corresponding user on the current namespace. + # For the mapping, podman refers to the /etc/sub*id files. + # For more details, see: + # - https://man7.org/linux/man-pages/man5/subuid.5.html + # So if the user ID exceeds 65535, it cannot be mapped if only UIDs/GIDs from 0-65535 are available. + # If that's the case, podman commands would fail. + + # Even though the range can be extended using configuration, we can rely on the fact that there are at least 65536 user IDs available in the user namespace. + # See for more details: + # - https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-UserNamespaces + + # To ensure the provided user ID stays within this range, `runAsUser` in the Pod spec should be set to a value below 65536. + # In OpenShift, the Container Security Context Constraint (SCC) should be created accordingly. 
+ if whoami &> /dev/null; then + echo "User information: $(id)" + + USER_ID=$(id -u) + if [ ${USER_ID} -lt 65536 ]; then + USER_NAME=$(whoami) + START_ID=$(( ${USER_ID} + 1 )) + COUNT=$(( 65536 - ${START_ID} )) + IDS_RANGE="${USER_NAME}:${START_ID}:${COUNT}" + + if [ -w /etc/subuid ]; then + echo "${IDS_RANGE}" > /etc/subuid + echo "/etc/subuid updated" + fi + if [ -w /etc/subgid ]; then + echo "${IDS_RANGE}" > /etc/subgid + echo "/etc/subgid updated" + fi + fi + fi +fi + +source kubedock_setup + + +# Stow +## Required for https://github.com/eclipse/che/issues/22412 + +# /home/user/ will be mounted to by a PVC if persistUserHome is enabled +mountpoint -q /home/user/; HOME_USER_MOUNTED=$? + +# This file will be created after stowing, to guard from executing stow everytime the container is started +STOW_COMPLETE=/home/user/.stow_completed + +if [ $HOME_USER_MOUNTED -eq 0 ] && [ ! -f $STOW_COMPLETE ]; then + # There may be regular, non-symlink files in /home/user that match the + # pathing of files in /home/tooling. Stow will error out when it tries to + # stow on top of that. Instead, we can append to the current + # /home/tooling/.stow-local-ignore file to ignore pre-existing, + # non-symlinked files in /home/user that match those in /home/tooling before + # we run stow. + # + # Create two text files containing a sorted path-based list of files in + # /home/tooling and /home/user. Cut off "/home/user" and "/home/tooling" and + # only get the sub-paths so we can do a proper comparison + # + # In the case of /home/user, we want regular file types and not symbolic + # links. + find /home/user -type f -xtype f -print | sort | sed 's|/home/user||g' > /tmp/user.txt + find /home/tooling -print | sort | sed 's|/home/tooling||g' > /tmp/tooling.txt + # We compare the two files, trying to find files that exist in /home/user + # and /home/tooling. Being that the files that get flagged here are not + # already synlinks, we will want to ignore them. + IGNORE_FILES="$(comm -12 /tmp/user.txt /tmp/tooling.txt)" + # We no longer require the file lists, so remove them + rm /tmp/user.txt /tmp/tooling.txt + # For each file we need to ignore, append them to + # /home/tooling/.stow-local-ignore. + for f in $IGNORE_FILES; do echo "${f}" >> /home/tooling/.stow-local-ignore;done + # We are now ready to run stow + # + # Create symbolic links from /home/tooling/ -> /home/user/ + stow . -t /home/user/ -d /home/tooling/ --no-folding -v 2 > /tmp/stow.log 2>&1 + touch $STOW_COMPLETE +fi + +# Read .copy-files and copy files from /home/tooling to /home/user +if [ -f "/home/tooling/.copy-files" ]; then + echo "Processing .copy-files..." + while IFS= read -r line || [[ -n "$line" ]]; do + # Skip empty and commented lines + [[ -z "$line" || "$line" == \#* ]] && continue + + if [ -e "/home/tooling/$line" ]; then + tooling_path=$(realpath "/home/tooling/$line") + + # Determine target path based on whether source is a directory + if [ -d "$tooling_path" ]; then + # For directories: copy to parent directory (e.g., dir1/dir2 -> /home/user/dir1/) + target_parent=$(dirname "${HOME}/${line}") + target_full="$target_parent/$(basename "$tooling_path")" + + # Skip if target directory already exists + if [ -d "$target_full" ]; then + echo "Directory $target_full already exists, skipping..." 
+ continue + fi + + echo "Copying directory $tooling_path to $target_parent/" + mkdir -p "$target_parent" + cp --no-clobber -r "$tooling_path" "$target_parent/" + else + # For files: copy to exact target path + target_full="${HOME}/${line}" + target_parent=$(dirname "$target_full") + + # Skip if target file already exists + if [ -e "$target_full" ]; then + echo "File $target_full already exists, skipping..." + continue + fi + + echo "Copying file $tooling_path to $target_full" + mkdir -p "$target_parent" + cp --no-clobber -r "$tooling_path" "$target_full" + fi + else + echo "Warning: /home/tooling/$line does not exist, skipping..." + fi + done < /home/tooling/.copy-files + echo "Finished processing .copy-files." +else + echo "No .copy-files found, skipping copy operation." +fi + +# Create symlinks from /home/tooling/.config to /home/user/.config +# Only create symlinks for files that don't already exist in destination. +# This is done because stow ignores the .config directory. +if [ -d /home/tooling/.config ]; then + echo "Creating .config symlinks for files that don't already exist..." + + # Find all files recursively in /home/tooling/.config + find /home/tooling/.config -type f | while read -r file; do + # Get the relative path from /home/tooling/.config + relative_path="${file#/home/tooling/.config/}" + + # Determine target path in /home/user/.config + target_file="${HOME}/.config/${relative_path}" + target_dir=$(dirname "$target_file") + + # Only create symlink if target file doesn't exist + if [ ! -e "$target_file" ]; then + # Create target directory if it doesn't exist + mkdir -p "$target_dir" + # Create symbolic link + ln -s "$file" "$target_file" + echo "Created symlink: $target_file -> $file" + else + echo "File $target_file already exists, skipping..." + fi + done + + echo "Finished creating .config symlinks." +fi + +exec "$@" \ No newline at end of file diff --git a/base/ubi10/kubedock_setup.sh b/base/ubi10/kubedock_setup.sh new file mode 100755 index 00000000..2c8400f8 --- /dev/null +++ b/base/ubi10/kubedock_setup.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Kubedock setup script meant to be run from the entrypoint script. + +LOCAL_BIN=/home/user/.local/bin +ORIGINAL_PODMAN_PATH=${ORIGINAL_PODMAN_PATH:-"/usr/bin/podman.orig"} +PODMAN_WRAPPER_PATH=${PODMAN_WRAPPER_PATH:-"/usr/bin/podman.wrapper"} + +mkdir -p "${LOCAL_BIN}" + +if [ "${KUBEDOCK_ENABLED:-false}" = "true" ]; then + echo + echo "Kubedock is enabled (env variable KUBEDOCK_ENABLED is set to true)." + + SECONDS=0 + KUBEDOCK_TIMEOUT=${KUBEDOCK_TIMEOUT:-10} + until [ -f $KUBECONFIG ]; do + if ((SECONDS > KUBEDOCK_TIMEOUT)); then + break + fi + echo "Kubeconfig doesn't exist yet. Waiting..." + sleep 1 + done + + if [ -f $KUBECONFIG ]; then + echo "Kubeconfig found." + + KUBEDOCK_PARAMS=${KUBEDOCK_PARAMS:-"--reverse-proxy --kubeconfig $KUBECONFIG"} + + echo "Starting kubedock with params \"${KUBEDOCK_PARAMS}\"..." + + kubedock server ${KUBEDOCK_PARAMS} >/tmp/kubedock.log 2>&1 & + + echo "Done." + + echo "Replacing podman with podman-wrapper..." + + ln -f -s "${PODMAN_WRAPPER_PATH}" "${LOCAL_BIN}/podman" + + export TESTCONTAINERS_RYUK_DISABLED="true" + export TESTCONTAINERS_CHECKS_DISABLE="true" + + echo "Done." + echo + else + echo "Could not find Kubeconfig at $KUBECONFIG" + echo "Giving up..." + fi +else + echo + echo "Kubedock is disabled. It can be enabled with the env variable \"KUBEDOCK_ENABLED=true\"" + echo "set in the workspace Devfile or in a Kubernetes ConfigMap in the developer namespace." 
+ echo + ln -f -s "${ORIGINAL_PODMAN_PATH}" "${LOCAL_BIN}/podman" +fi diff --git a/base/ubi10/podman-wrapper.sh b/base/ubi10/podman-wrapper.sh new file mode 100755 index 00000000..b7f7fbc9 --- /dev/null +++ b/base/ubi10/podman-wrapper.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -euo pipefail + +PODMAN_ORIGINAL_PATH=${PODMAN_ORIGINAL_PATH:-"/usr/bin/podman.orig"} +KUBEDOCK_SUPPORTED_COMMANDS=${KUBEDOCK_SUPPORTED_COMMANDS:-"run ps exec cp logs inspect kill rm wait stop start"} + +PODMAN_ARGS=( "$@" ) + +TRUE=0 +FALSE=1 + +exec_original_podman() { + exec ${PODMAN_ORIGINAL_PATH} "${PODMAN_ARGS[@]}" +} + +exec_kubedock_podman() { + exec env CONTAINER_HOST=tcp://127.0.0.1:2475 "${PODMAN_ORIGINAL_PATH}" "${PODMAN_ARGS[@]}" +} + +podman_command() { + echo "${PODMAN_ARGS[0]}" +} + +command_is_supported_by_kubedock() { + CMD=$(podman_command) + for SUPPORTED_CMD in $KUBEDOCK_SUPPORTED_COMMANDS; do + if [ "$SUPPORTED_CMD" = "$CMD" ]; then + return $TRUE + fi + done + return ${FALSE} +} + +if command_is_supported_by_kubedock; then + exec_kubedock_podman +else + exec_original_podman +fi
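A usage sketch of the wrapper's dispatch logic (assuming the container was started with `KUBEDOCK_ENABLED=true`, e.g. via the workspace Devfile, so `kubedock_setup` linked `~/.local/bin/podman` to `podman.wrapper`; image names are illustrative):

```bash
# "run" is in KUBEDOCK_SUPPORTED_COMMANDS, so the wrapper re-execs the original
# podman binary with CONTAINER_HOST=tcp://127.0.0.1:2475 and the request is
# served by the kubedock server started from the entrypoint.
podman run --rm registry.access.redhat.com/ubi9/ubi echo "hello from kubedock"

# "build" is not in the supported list, so the wrapper falls through to
# /usr/bin/podman.orig and the build runs with local rootless podman/buildah.
podman build -t localhost/example:test .
```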