@@ -99,21 +99,63 @@ jobs:
9999 MAMBA_NO_LOW_SPEED_LIMIT : "1"
100100 run : |
101101 $cudaVersion = $env:CUDAVER
102- mamba install -y 'cuda' -c nvidia/label/cuda-$cudaVersion
102+ $cudaChannel = "nvidia/label/cuda-$cudaVersion"
103+ if ($IsLinux) {
104+ # Keep nvcc, cudart, and headers on the same NVIDIA label so the
105+ # detected toolkit version matches the published wheel tag.
106+ mamba install -y --channel-priority flexible --override-channels -c $cudaChannel "$cudaChannel::cuda-toolkit=$cudaVersion" "$cudaChannel::cuda-nvcc_linux-64=$cudaVersion" "$cudaChannel::cuda-cudart" "$cudaChannel::cuda-cudart-dev"
107+ } else {
108+ mamba install -y --channel-priority flexible --override-channels -c $cudaChannel "$cudaChannel::cuda-toolkit=$cudaVersion"
109+ }
110+ if ($LASTEXITCODE -ne 0) {
111+ exit $LASTEXITCODE
112+ }
103113 python -m pip install build wheel
104114
105115 - name : Build Wheel
106116 run : |
107- $cudaVersion = $env:CUDAVER.Remove($env:CUDAVER.LastIndexOf('.')).Replace('.','')
108117 $env:CUDA_PATH = $env:CONDA_PREFIX
109118 $env:CUDA_HOME = $env:CONDA_PREFIX
110119 $env:CUDA_TOOLKIT_ROOT_DIR = $env:CONDA_PREFIX
120+ $cudaHostCompilerArg = ''
121+ $env:CMAKE_ARGS = ''
111122 if ($IsLinux) {
112- $env:LD_LIBRARY_PATH = $env:CONDA_PREFIX + '/lib:' + $env:LD_LIBRARY_PATH
123+ if (Test-Path '/usr/bin/g++-12') {
124+ $env:CC = '/usr/bin/gcc-12'
125+ $env:CXX = '/usr/bin/g++-12'
126+ $env:CUDAHOSTCXX = '/usr/bin/g++-12'
127+ $cudaHostCompilerArg = " -DCMAKE_CUDA_HOST_COMPILER=$env:CUDAHOSTCXX"
128+ }
129+ if (Test-Path (Join-Path $env:CONDA_PREFIX 'include/cuda_runtime.h')) {
130+ $env:CUDAToolkit_ROOT = $env:CONDA_PREFIX
131+ $env:CUDA_TOOLKIT_ROOT_DIR = $env:CONDA_PREFIX
132+ $env:CMAKE_ARGS = "-DCUDAToolkit_ROOT=$env:CONDA_PREFIX -DCUDA_TOOLKIT_ROOT_DIR=$env:CONDA_PREFIX$cudaHostCompilerArg"
133+ $env:CPATH = "$env:CONDA_PREFIX/include:$env:CPATH"
134+ $env:CPLUS_INCLUDE_PATH = "$env:CONDA_PREFIX/include:$env:CPLUS_INCLUDE_PATH"
135+ $env:LIBRARY_PATH = "$env:CONDA_PREFIX/lib:$env:LIBRARY_PATH"
136+ $env:LD_LIBRARY_PATH = "$env:CONDA_PREFIX/lib:$env:LD_LIBRARY_PATH"
137+ } else {
138+ $env:CMAKE_ARGS = $cudaHostCompilerArg.Trim()
139+ }
140+ }
141+ $nvccPath = Join-Path $env:CONDA_PREFIX 'bin/nvcc'
142+ if (-not (Test-Path $nvccPath)) {
143+ $nvccPath = Join-Path $env:CONDA_PREFIX 'targets/x86_64-linux/bin/nvcc'
144+ }
145+ if (-not (Test-Path $nvccPath)) {
146+ throw 'Failed to find nvcc in the conda environment'
147+ }
148+ $env:CUDACXX = $nvccPath
149+ $env:PATH = "$(Split-Path $nvccPath):$env:PATH"
150+ $nvccVersion = ((& $nvccPath --version) | Select-String 'release ([0-9]+\.[0-9]+)').Matches[0].Groups[1].Value
151+ if (-not $nvccVersion) {
152+ throw 'Failed to detect the installed CUDA toolkit version'
113153 }
154+ $cudaTagVersion = $nvccVersion.Replace('.','')
114155 $env:VERBOSE = '1'
115- $env:CMAKE_ARGS = '-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=all'
116- $env:CMAKE_ARGS = "-DGGML_CUDA_FORCE_MMQ=ON $env:CMAKE_ARGS"
156+ # Keep a portable SM set, including sm_70, instead of CMake's `all`,
157+ # which now pulls in future targets the hosted-runner toolchains cannot assemble.
158+ $env:CMAKE_ARGS = "-DGGML_CUDA_FORCE_MMQ=ON -DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=70;75;80;86;89;90 -DCMAKE_CUDA_FLAGS=--allow-unsupported-compiler $env:CMAKE_ARGS"
117159 # if ($env:AVXVER -eq 'AVX') {
118160 $env:CMAKE_ARGS = $env:CMAKE_ARGS + ' -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off'
119161 # }
@@ -124,10 +166,11 @@ jobs:
124166 # $env:CMAKE_ARGS = $env:CMAKE_ARGS + ' -DGGML_AVX=off -DGGML_AVX2=off -DGGML_FMA=off -DGGML_F16C=off'
125167 # }
126168 python -m build --wheel
127- # write the build tag to the output
128- Write-Output "CUDA_VERSION=$cudaVersion " >> $env:GITHUB_ENV
169+ # Publish tags that reflect the actual installed toolkit version.
170+ Write-Output "CUDA_VERSION=$cudaTagVersion " >> $env:GITHUB_ENV
129171
130172 - uses : softprops/action-gh-release@v2
173+ if : startsWith(github.ref, 'refs/tags/')
131174 with :
132175 files : dist/*
133176 # Set tag_name to <tag>-cu<cuda_version>
0 commit comments