.github/workflows/build.yml (37 additions, 62 deletions)
@@ -1302,8 +1302,8 @@ jobs:
         run: |
           GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-nvidia-v100-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-cuda:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
 
     steps:
       - name: Clone
@@ -1316,8 +1316,8 @@ jobs:
           nvidia-smi
           GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-nvidia-v100-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, V100]
+  ggml-ci-x64-nvidia-vulkan-cm:
+    runs-on: [self-hosted, Linux, X64, NVIDIA]
 
     steps:
       - name: Clone
@@ -1327,25 +1327,11 @@ jobs:
       - name: Test
         id: ggml-ci
         run: |
-          vulkaninfo
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-nvidia-t4-cuda:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          nvidia-smi
-          GG_BUILD_CUDA=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-nvidia-t4-vulkan:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
+  ggml-ci-x64-nvidia-vulkan-cm2:
+    runs-on: [self-hosted, Linux, X64, NVIDIA, COOPMAT2]
 
     steps:
       - name: Clone
@@ -1355,23 +1341,9 @@ jobs:
       - name: Test
         id: ggml-ci
         run: |
-          vulkaninfo
+          vulkaninfo --summary
           GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-nvidia-t4-vulkan-coopmat1:
-    runs-on: [self-hosted, Linux, X64, NVIDIA, T4]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo
-          GG_BUILD_VULKAN=1 GGML_VK_DISABLE_COOPMAT2=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
   ggml-ci-x64-cpu-amx:
     runs-on: [self-hosted, Linux, X64, CPU, AMX]
 
@@ -1385,31 +1357,33 @@ jobs:
         run: |
           bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-amd-v710-vulkan:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-amd-v710-rocm:
-    runs-on: [self-hosted, Linux, X64, AMD, V710]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  # ggml-ci-x64-amd-vulkan:
+  #   runs-on: [self-hosted, Linux, X64, AMD]
+  #
+  #   steps:
+  #     - name: Clone
+  #       id: checkout
+  #       uses: actions/checkout@v4
+  #
+  #     - name: Test
+  #       id: ggml-ci
+  #       run: |
+  #         vulkaninfo --summary
+  #         GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  #
+  # ggml-ci-x64-amd-rocm:
+  #   runs-on: [self-hosted, Linux, X64, AMD]
+  #
+  #   steps:
+  #     - name: Clone
+  #       id: checkout
+  #       uses: actions/checkout@v4
+  #
+  #     - name: Test
+  #       id: ggml-ci
+  #       run: |
+  #         amd-smi static
+  #         GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
@@ -1435,4 +1409,5 @@ jobs:
       - name: Test
         id: ggml-ci
         run: |
+          vulkaninfo --summary
           GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp