release: undo debug info and attempt release

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
This commit is contained in:
Aaron Teo 2025-11-29 23:04:02 +08:00
parent a00ecf21eb
commit bd119c7471
No known key found for this signature in database
1 changed file with 66 additions and 83 deletions

View File

@ -22,7 +22,6 @@ env:
jobs: jobs:
macOS-arm64: macOS-arm64:
if: false
runs-on: macos-14 runs-on: macos-14
steps: steps:
@ -83,7 +82,6 @@ jobs:
name: llama-bin-macos-arm64.tar.gz name: llama-bin-macos-arm64.tar.gz
macOS-x64: macOS-x64:
if: false
runs-on: macos-15-intel runs-on: macos-15-intel
steps: steps:
@ -214,7 +212,6 @@ jobs:
name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz name: llama-bin-ubuntu-${{ matrix.build }}.tar.gz
ubuntu-22-vulkan: ubuntu-22-vulkan:
if: false
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04
steps: steps:
@ -275,7 +272,6 @@ jobs:
name: llama-bin-ubuntu-vulkan-x64.tar.gz name: llama-bin-ubuntu-vulkan-x64.tar.gz
windows-cpu: windows-cpu:
if: false
runs-on: windows-2025 runs-on: windows-2025
strategy: strategy:
@ -339,7 +335,6 @@ jobs:
name: llama-bin-win-cpu-${{ matrix.arch }}.zip name: llama-bin-win-cpu-${{ matrix.arch }}.zip
windows: windows:
if: false
runs-on: windows-2025 runs-on: windows-2025
env: env:
@ -422,7 +417,6 @@ jobs:
name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip name: llama-bin-win-${{ matrix.backend }}-${{ matrix.arch }}.zip
windows-cuda: windows-cuda:
if: false
runs-on: windows-2022 runs-on: windows-2022
strategy: strategy:
@ -491,7 +485,6 @@ jobs:
name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip name: cudart-llama-bin-win-cuda-${{ matrix.cuda }}-x64.zip
windows-sycl: windows-sycl:
if: false
runs-on: windows-2022 runs-on: windows-2022
defaults: defaults:
@ -570,7 +563,6 @@ jobs:
name: llama-bin-win-sycl-x64.zip name: llama-bin-win-sycl-x64.zip
windows-hip: windows-hip:
if: false
runs-on: windows-2022 runs-on: windows-2022
env: env:
@ -677,7 +669,6 @@ jobs:
name: llama-bin-win-hip-${{ matrix.name }}-x64.zip name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
ios-xcode-build: ios-xcode-build:
if: false
runs-on: macos-15 runs-on: macos-15
steps: steps:
@ -738,7 +729,6 @@ jobs:
name: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz name: llama-${{ steps.tag.outputs.name }}-xcframework.tar.gz
openEuler-cann: openEuler-cann:
if: false
strategy: strategy:
matrix: matrix:
arch: [x86, aarch64] arch: [x86, aarch64]
@ -801,17 +791,17 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: needs:
# - windows - windows
# - windows-cpu - windows-cpu
# - windows-cuda - windows-cuda
# - windows-sycl - windows-sycl
# - windows-hip - windows-hip
- ubuntu-22-cpu - ubuntu-22-cpu
# - ubuntu-22-vulkan - ubuntu-22-vulkan
# - macOS-arm64 - macOS-arm64
# - macOS-x64 - macOS-x64
# - ios-xcode-build - ios-xcode-build
# - openEuler-cann - openEuler-cann
steps: steps:
- name: Clone - name: Clone
@ -836,13 +826,6 @@ jobs:
run: | run: |
mkdir -p release mkdir -p release
echo "MARKER MARKER MARKER"
ls -laR artifact/
for f in artifact/*; do
file "$f"
done
echo "MARKER MARKER MARKER"
echo "Adding CPU backend files to existing zips..." echo "Adding CPU backend files to existing zips..."
for arch in x64 arm64; do for arch in x64 arm64; do
cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip" cpu_zip="artifact/llama-bin-win-cpu-${arch}.zip"
@ -875,69 +858,69 @@ jobs:
mv -v artifact/*.zip release mv -v artifact/*.zip release
mv -v artifact/*.tar.gz release mv -v artifact/*.tar.gz release
# - name: Create release - name: Create release
# id: create_release id: create_release
# uses: ggml-org/action-create-release@v1 uses: ggml-org/action-create-release@v1
# env: env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# with: with:
# tag_name: ${{ steps.tag.outputs.name }} tag_name: ${{ steps.tag.outputs.name }}
# body: | body: |
# ## ${{ steps.tag.outputs.name }} ## ${{ steps.tag.outputs.name }}
# > [!WARNING] > [!WARNING]
# > **Release Format Update**: Linux releases will soon use .tar.gz archives instead of .zip. Please make the necessary changes to your deployment scripts. > **Release Format Update**: Linux releases will soon use .tar.gz archives instead of .zip. Please make the necessary changes to your deployment scripts.
# ### Download Binary Releases ### Download Binary Releases
# **macOS/iOS:** **macOS/iOS:**
# - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz) - [macOS Apple Silicon (arm64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-arm64.tar.gz)
# - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz) - [macOS Intel (x64)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-macos-x64.tar.gz)
# - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ios-xcframework.zip) - [iOS XCFramework](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ios-xcframework.zip)
# **Linux:** **Linux:**
# - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz) - [Ubuntu x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-x64.tar.gz)
# - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz) - [Ubuntu x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-vulkan-x64.tar.gz)
# - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz) - [Ubuntu s390x (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-ubuntu-s390x.tar.gz)
# **Windows:** **Windows:**
# - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip) - [Windows x64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-x64.zip)
# - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip) - [Windows arm64 (CPU)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cpu-arm64.zip)
# - [Windows x64 (CUDA)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip) - [Windows x64 (CUDA)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-cuda-12.4-x64.zip)
# - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip) - [Windows x64 (Vulkan)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-vulkan-x64.zip)
# - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip) - [Windows x64 (SYCL)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip)
# - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64.zip) - [Windows x64 (HIP)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-win-hip-x64.zip)
# **openEuler:** **openEuler:**
# - [openEuler x86 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-x86.tar.gz) - [openEuler x86 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-x86.tar.gz)
# - [openEuler x86 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-x86.tar.gz) - [openEuler x86 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-x86.tar.gz)
# - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz) - [openEuler aarch64 (310p)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-310p-openEuler-aarch64.tar.gz)
# - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz) - [openEuler aarch64 (910b)](https://github.com/ggml-org/llama.cpp/releases/download/${{ steps.tag.outputs.name }}/llama-${{ steps.tag.outputs.name }}-bin-910b-openEuler-aarch64.tar.gz)
# <details> <details>
# ${{ github.event.head_commit.message }} ${{ github.event.head_commit.message }}
# </details> </details>
# - name: Upload release - name: Upload release
# id: upload_release id: upload_release
# uses: actions/github-script@v3 uses: actions/github-script@v3
# with: with:
# github-token: ${{secrets.GITHUB_TOKEN}} github-token: ${{secrets.GITHUB_TOKEN}}
# script: | script: |
# const path = require('path'); const path = require('path');
# const fs = require('fs'); const fs = require('fs');
# const release_id = '${{ steps.create_release.outputs.id }}'; const release_id = '${{ steps.create_release.outputs.id }}';
# for (let file of await fs.readdirSync('./release')) { for (let file of await fs.readdirSync('./release')) {
# if (path.extname(file) === '.zip' || path.extname(file) === '.tar.gz') { if (path.extname(file) === '.zip' || path.extname(file) === '.tar.gz') {
# console.log('uploadReleaseAsset', file); console.log('uploadReleaseAsset', file);
# await github.repos.uploadReleaseAsset({ await github.repos.uploadReleaseAsset({
# owner: context.repo.owner, owner: context.repo.owner,
# repo: context.repo.repo, repo: context.repo.repo,
# release_id: release_id, release_id: release_id,
# name: file, name: file,
# data: await fs.readFileSync(`./release/${file}`) data: await fs.readFileSync(`./release/${file}`)
# }); });
# } }
# } }