Commit bee378e

ci: run the x64 and arm ci on the github machines instead (#16183)

* run the x64 ci on regular machines
* set up the same thing for arm
* fix test-quantize-perf just like #12306
* try to disable sve
* add another sve run

1 parent 5fb5576 commit bee378e

3 files changed: +98 −29 lines changed

.github/workflows/build.yml

Lines changed: 81 additions & 8 deletions
```diff
@@ -1251,56 +1251,129 @@ jobs:
   # TODO: simplify the following workflows using a matrix
   # TODO: run lighter CI on PRs and the full CI only on master (if needed)
   ggml-ci-x64-cpu-low-perf:
-    runs-on: [self-hosted, Linux, X64, CPU, low-perf]
+    runs-on: ubuntu-22.04

     steps:
       - name: Clone
         id: checkout
         uses: actions/checkout@v4

+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ggml-ci-x64-cpu-low-perf
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential libcurl4-openssl-dev
+
       - name: Test
         id: ggml-ci
         run: |
-          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt

   ggml-ci-arm64-cpu-low-perf:
-    runs-on: [self-hosted, Linux, ARM64, CPU, low-perf]
+    runs-on: ubuntu-22.04-arm

     steps:
       - name: Clone
         id: checkout
         uses: actions/checkout@v4

+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ggml-ci-arm64-cpu-low-perf
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential libcurl4-openssl-dev
+
       - name: Test
         id: ggml-ci
         run: |
-          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt

   ggml-ci-x64-cpu-high-perf:
-    runs-on: [self-hosted, Linux, X64, CPU, high-perf]
+    runs-on: ubuntu-22.04

     steps:
       - name: Clone
         id: checkout
         uses: actions/checkout@v4

+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ggml-ci-x64-cpu-high-perf
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential libcurl4-openssl-dev
+
       - name: Test
         id: ggml-ci
         run: |
-          bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          LLAMA_ARG_THREADS=$(nproc) bash ./ci/run.sh ./tmp/results ./tmp/mnt

   ggml-ci-arm64-cpu-high-perf:
-    runs-on: [self-hosted, Linux, ARM64, CPU, high-perf]
+    runs-on: ubuntu-22.04-arm

     steps:
       - name: Clone
         id: checkout
         uses: actions/checkout@v4

+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ggml-ci-arm64-cpu-high-perf
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential libcurl4-openssl-dev
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_SVE=1 GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
+
+  ggml-ci-arm64-cpu-high-perf-sve:
+    runs-on: ubuntu-22.04-arm
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: ccache
+        uses: ggml-org/ccache-action@v1.2.16
+        with:
+          key: ggml-ci-arm64-cpu-high-perf-sve
+          evict-old-files: 1d
+
+      - name: Dependencies
+        id: depends
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential libcurl4-openssl-dev
+
       - name: Test
         id: ggml-ci
         run: |
-          GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+          LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt

   ggml-ci-x64-nvidia-cuda:
     runs-on: [self-hosted, Linux, X64, NVIDIA]
```
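The migrated jobs are deliberate copy-paste duplicates, which is what the in-file TODO about a matrix refers to. As a hypothetical sketch only (the job name, matrix keys, and included rows are illustrative and not part of this commit; the ccache and dependency steps are elided), the repeated CPU jobs could later be folded into one matrix job:

```yaml
# Hypothetical sketch, not part of this commit: one way to realize the
# "simplify the following workflows using a matrix" TODO.
ggml-ci-cpu:
  strategy:
    matrix:
      include:
        - { runner: ubuntu-22.04,     key: ggml-ci-x64-cpu-low-perf,   extra: "GG_BUILD_LOW_PERF=1" }
        - { runner: ubuntu-22.04-arm, key: ggml-ci-arm64-cpu-low-perf, extra: "GG_BUILD_LOW_PERF=1" }
        - { runner: ubuntu-22.04,     key: ggml-ci-x64-cpu-high-perf,  extra: "" }
  runs-on: ${{ matrix.runner }}
  steps:
    - uses: actions/checkout@v4
    - name: Test
      run: |
        # `env` passes the per-row variables (if any) through to run.sh
        LLAMA_ARG_THREADS=$(nproc) env ${{ matrix.extra }} bash ./ci/run.sh ./tmp/results ./tmp/mnt
```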

ci/run.sh

Lines changed: 16 additions & 11 deletions
```diff
@@ -109,6 +109,11 @@ if [ ! -z ${GG_BUILD_MUSA} ]; then
     MUSA_ARCH=${MUSA_ARCH:-21}
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_MUSA=ON -DMUSA_ARCHITECTURES=${MUSA_ARCH}"
 fi
+
+if [ ! -z ${GG_BUILD_NO_SVE} ]; then
+    # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
+    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
+fi
 ## helpers

 # download a file if it does not exist or if it is outdated
```
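For context: `-DGGML_NATIVE=OFF` stops CMake from probing the build host's CPU features, and `-DGGML_CPU_ARM_ARCH` pins an explicit target, so SVE stays off even on SVE-capable runners. A minimal standalone sketch of how these flags reach CMake, assuming (as in run.sh) that `CMAKE_EXTRA` is appended to the configure step; the build directory is illustrative:

```bash
#!/bin/bash
# Minimal sketch: GG_BUILD_NO_SVE pins the arm target instead of letting
# native feature detection enable SVE. Not the verbatim run.sh flow.
CMAKE_EXTRA=""
if [ ! -z ${GG_BUILD_NO_SVE} ]; then
    # pin an explicit arm arch rather than auto-detecting host features
    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
fi

# CMAKE_EXTRA is intentionally unquoted so the flags word-split
cmake -S . -B build-ci ${CMAKE_EXTRA}
cmake --build build-ci -j "$(nproc)"
```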
```diff
@@ -345,16 +350,16 @@ function gg_run_qwen3_0_6b {

     wiki_test="${path_wiki}/wiki.test.raw"

-    ./bin/llama-quantize ${model_bf16} ${model_q8_0} q8_0
-    ./bin/llama-quantize ${model_bf16} ${model_q4_0} q4_0
-    ./bin/llama-quantize ${model_bf16} ${model_q4_1} q4_1
-    ./bin/llama-quantize ${model_bf16} ${model_q5_0} q5_0
-    ./bin/llama-quantize ${model_bf16} ${model_q5_1} q5_1
-    ./bin/llama-quantize ${model_bf16} ${model_q2_k} q2_k
-    ./bin/llama-quantize ${model_bf16} ${model_q3_k} q3_k
-    ./bin/llama-quantize ${model_bf16} ${model_q4_k} q4_k
-    ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k
-    ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k
+    ./bin/llama-quantize ${model_bf16} ${model_q8_0} q8_0 $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q4_0} q4_0 $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q4_1} q4_1 $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q5_0} q5_0 $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q5_1} q5_1 $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q2_k} q2_k $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q3_k} q3_k $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q4_k} q4_k $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k $(nproc)
+    ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k $(nproc)

     (time ./bin/llama-cli -no-cnv --model ${model_f16}  -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
     (time ./bin/llama-cli -no-cnv --model ${model_bf16} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log
```
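The trailing `$(nproc)` uses llama-quantize's optional final positional argument, the worker thread count (its usage line ends in `model-f32.gguf [model-quant.gguf] type [nthreads]`); this mirrors the fix referenced from #12306. A short usage sketch with illustrative paths:

```bash
# Illustrative paths; the last argument is the optional thread count,
# here set to all available cores.
./bin/llama-quantize ./models/qwen3-0.6b/ggml-model-bf16.gguf \
                     ./models/qwen3-0.6b/ggml-model-q4_0.gguf q4_0 $(nproc)
```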
````diff
@@ -427,7 +432,7 @@ function gg_run_qwen3_0_6b {
 function gg_sum_qwen3_0_6b {
     gg_printf '### %s\n\n' "${ci}"

-    gg_printf 'Pythia 2.8B:\n'
+    gg_printf 'Qwen3 0.6B:\n'
     gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
     gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
     gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
````

tests/test-quantize-perf.cpp

Lines changed: 1 addition & 10 deletions
```diff
@@ -260,14 +260,7 @@ int main(int argc, char * argv[]) {

     int64_t iterations = params.iterations;

-
-    // Initialize GGML, ensures float conversion tables are initialized
-    struct ggml_init_params ggml_params = {
-        /* .mem_size   = */ 1*1024,
-        /* .mem_buffer = */ NULL,
-        /* .no_alloc   = */ true,
-    };
-    struct ggml_context * ctx = ggml_init(ggml_params);
+    ggml_cpu_init();

     for (int i = 0; i < GGML_TYPE_COUNT; i++) {
         ggml_type type = (ggml_type) i;
@@ -359,7 +352,5 @@ int main(int argc, char * argv[]) {
         }
     }

-    ggml_free(ctx);
-
     return 0;
 }
```
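The benchmark only needs ggml's float conversion tables, which `ggml_cpu_init()` sets up directly, so allocating and freeing a dummy `ggml_context` is no longer necessary. A minimal sketch of the new pattern (headers per ggml's public API; the benchmark body is elided):

```cpp
#include "ggml.h"
#include "ggml-cpu.h"   // declares ggml_cpu_init()

int main() {
    // Initializes CPU backend state, including the fp16<->fp32 conversion
    // tables the (de)quantization routines rely on; no context required.
    ggml_cpu_init();

    // ... run the quantization benchmarks ...

    return 0;   // nothing to free: no ggml_context was created
}
```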
