* Upgrade presets for CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5
saudet committed Oct 24, 2023
1 parent 15fdfd6 commit 931bff1
Showing 504 changed files with 68,359 additions and 13,649 deletions.
22 changes: 11 additions & 11 deletions .github/actions/deploy-ubuntu/action.yml
@@ -31,27 +31,27 @@ runs:
export ARCH=arm64
export ARCH_CUDA=sbsa
export PREFIX=aarch64-linux-gnu
- export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.aarch64.rpm
- export CUDNN=8.9.1.23-1.cuda12.1.aarch64
- export NCCL=2.18.1-1+cuda12.1.aarch64
+ export CUDA=cuda-repo-rhel8-12-3-local-12.3.0_545.23.06-1.aarch64.rpm
+ export CUDNN=8.9.5.29-1.cuda12.2.aarch64
+ export NCCL=2.18.5-1+cuda12.2.aarch64
export USERLAND_BUILDME="buildme --aarch64"
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-ppc64le" ]]; then
export ARCH=ppc64el
export ARCH_CUDA=ppc64le
export PREFIX=powerpc64le-linux-gnu
- export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.ppc64le.rpm
- export CUDNN=8.9.1.23-1.cuda12.1.ppc64le
- export NCCL=2.18.1-1+cuda12.1.ppc64le
+ export CUDA=cuda-repo-rhel8-12-3-local-12.3.0_545.23.06-1.ppc64le.rpm
+ export CUDNN=8.9.5.29-1.cuda12.2.ppc64le
+ export NCCL=2.18.5-1+cuda12.2.ppc64le
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-x86" ]]; then
export ARCH=i386
export PREFIX=i686-linux-gnu
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-x86_64" ]]; then
export ARCH=amd64
export ARCH_CUDA=x86_64
export PREFIX=x86_64-linux-gnu
- export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.x86_64.rpm
- export CUDNN=8.9.1.23-1.cuda12.1.x86_64
- export NCCL=2.18.1-1+cuda12.1.x86_64
+ export CUDA=cuda-repo-rhel8-12-3-local-12.3.0_545.23.06-1.x86_64.rpm
+ export CUDNN=8.9.5.29-1.cuda12.2.x86_64
+ export NCCL=2.18.5-1+cuda12.2.x86_64
fi
echo "ARCH=$ARCH" >> $GITHUB_ENV
echo "PREFIX=$PREFIX" >> $GITHUB_ENV
@@ -138,15 +138,15 @@ runs:
if [[ -n ${ARCH_CUDA:-} ]] && [[ -n ${CI_DEPLOY_NEED_CUDA:-} ]]; then
echo Installing CUDA, cuDNN, etc
- curl -LO https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/$CUDA
+ curl -LO https://developer.download.nvidia.com/compute/cuda/12.3.0/local_installers/$CUDA
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH_CUDA/libcudnn8-$CUDNN.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH_CUDA/libcudnn8-devel-$CUDNN.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH_CUDA/libnccl-$NCCL.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel8/$ARCH_CUDA/libnccl-devel-$NCCL.rpm
$SUDO rpm -i --force --ignorearch --nodeps $CUDA libcudnn*.rpm libnccl*.rpm
rm -f *.rpm *.tgz *.txz *.tar.*
- pushd /var/cuda-repo-rhel8-12-1-local/; $SUDO rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; $SUDO rm *.rpm; popd
+ pushd /var/cuda-repo-rhel8-12-3-local/; $SUDO rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; $SUDO rm *.rpm; popd
$SUDO ln -sf /usr/local/cuda/lib64/ /usr/local/cuda/lib
$SUDO ln -sf /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/libcuda.so
$SUDO ln -sf /usr/local/cuda/lib64/stubs/libnvidia-ml.so /usr/local/cuda/lib64/libnvidia-ml.so
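As a hedged aside (not part of this commit): once the presets are rebuilt against these packages, the bundled versions can be sanity-checked from Java. The sketch below is hypothetical — it assumes the cuda-platform-redist artifact on the classpath of a Linux machine with a CUDA-capable GPU, the class and method names follow the presets' mapping of the CUDA, cuDNN, and NCCL C APIs, and the expected integer encodings (12030, 8905, 21805) are assumptions based on the usual version macros.

import org.bytedeco.javacpp.Loader;
import static org.bytedeco.cuda.global.cudart.*;
import static org.bytedeco.cuda.global.cudnn.*;
import static org.bytedeco.cuda.global.nccl.*;

public class CheckBundledVersions {
    public static void main(String[] args) {
        // Extract and load the bundled native libraries before calling into them.
        Loader.load(org.bytedeco.cuda.global.cudart.class);

        int[] cuda = new int[1];
        cudaRuntimeGetVersion(cuda);      // 12030 would correspond to CUDA 12.3
        long cudnn = cudnnGetVersion();   // 8905 would correspond to cuDNN 8.9.5
        int[] nccl = new int[1];
        ncclGetVersion(nccl);             // 21805 would correspond to NCCL 2.18.5

        System.out.printf("CUDA %d, cuDNN %d, NCCL %d%n", cuda[0], cudnn, nccl[0]);
    }
}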
40 changes: 20 additions & 20 deletions .github/actions/deploy-windows/action.yml
@@ -92,35 +92,35 @@ runs:
if "%CI_DEPLOY_PLATFORM%"=="windows-x86_64" if not "%CI_DEPLOY_NEED_CUDA%"=="" (
echo Installing CUDA, cuDNN, etc
curl -LO https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_522.06_windows.exe
- curl -LO https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_531.14_windows.exe
+ curl -LO https://developer.download.nvidia.com/compute/cuda/12.3.0/local_installers/cuda_12.3.0_545.84_windows.exe
rem curl -LO https://developer.download.nvidia.com/compute/redist/cudnn/v8.8.0/local_installers/12.0/cudnn_8.8.0.121_windows.exe
python -m gdown.cli https://drive.google.com/uc?id=11XXD0D91vd-SnvdlNOMP5C9S_dWyT8Cv
python -m gdown.cli https://drive.google.com/uc?id=1-5QHvwDZC_1rhn5W6fRHNWicXRPtqt31
curl -LO http://www.winimage.com/zLibDll/zlib123dllx64.zip
cuda_11.8.0_522.06_windows.exe -s
bash -c "rm -Rf 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.8'"
bash -c "mv 'C:/Program Files/NVIDIA Corporation/NvToolsExt' 'C:/Program Files/NVIDIA Corporation/NvToolsExt_old'"
- cuda_12.1.1_531.14_windows.exe -s
+ cuda_12.3.0_545.84_windows.exe -s
bash -c "mv 'C:/Program Files/NVIDIA Corporation/NvToolsExt_old' 'C:/Program Files/NVIDIA Corporation/NvToolsExt'"
bash -c "ls 'C:/Program Files/NVIDIA Corporation/NvToolsExt'"
rem cudnn_8.8.0.121_windows.exe -s
- unzip cudnn-windows-x86_64-8.9.1.23_cuda12-archive.zip
+ unzip cudnn-windows-x86_64-8.9.5.29_cuda12-archive.zip
unzip zlib123dllx64.zip
- rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\bin\*.dll" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin"
- rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\include\*.h" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\include"
- rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\lib\x64\*.lib" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\lib\x64"
- move cudnn-windows-x86_64-8.9.1.23_cuda12-archive\bin\*.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin"
- move cudnn-windows-x86_64-8.9.1.23_cuda12-archive\include\*.h "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\include"
- move cudnn-windows-x86_64-8.9.1.23_cuda12-archive\lib\x64\*.lib "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\lib\x64"
- move dll_x64\zlibwapi.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin"
+ rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\bin\*.dll" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\bin"
+ rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\include\*.h" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\include"
+ rem move "%ProgramFiles%\NVIDIA\CUDNN\v8.8\lib\x64\*.lib" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\lib\x64"
+ move cudnn-windows-x86_64-8.9.5.29_cuda12-archive\bin\*.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\bin"
+ move cudnn-windows-x86_64-8.9.5.29_cuda12-archive\include\*.h "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\include"
+ move cudnn-windows-x86_64-8.9.5.29_cuda12-archive\lib\x64\*.lib "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\lib\x64"
+ move dll_x64\zlibwapi.dll "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\bin"
rem echo Applying hotfix to Visual Studio 2019 for CUDA
rem curl -LO https://github.com/raw/microsoft/STL/main/stl/inc/cmath
rem bash -c "find 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/' -name cmath -exec cp -v cmath {} \;"
bash -c "sed -i 's/cublas_v2.h/cublas_api.h/g' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cublasXt.h' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cusolverDn.h' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cusolverSp.h'"
bash -c "sed -i '/block_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cub/cub.cuh'"
bash -c "sed -i '/device_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cub/cub.cuh'"
bash -c "sed -i '/device_segmented_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cub/cub.cuh'"
bash -c "sed -i '/warp_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.1/include/cub/cub.cuh'"
bash -c "sed -i 's/cublas_v2.h/cublas_api.h/g' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cublasXt.h' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cusolverDn.h' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cusolverSp.h'"
bash -c "sed -i '/block_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cub/cub.cuh'"
bash -c "sed -i '/device_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cub/cub.cuh'"
bash -c "sed -i '/device_segmented_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cub/cub.cuh'"
bash -c "sed -i '/warp_merge_sort.cuh/d' 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.3/include/cub/cub.cuh'"
)
if "%CI_DEPLOY_MODULE%"=="nvcodec" (
@@ -217,10 +217,10 @@ runs:
C:/msys64/usr/bin/bazel.exe version
)
if exist "%ProgramFiles%\NVIDIA GPU Computing Toolkit" (
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1"
set "CUDA_PATH_V12_1=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1"
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\libnvvp;%PATH%"
echo CUDA Version 12.1.0>"%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\version.txt"
set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3"
set "CUDA_PATH_V12_3=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3"
set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\libnvvp;%PATH%"
echo CUDA Version 12.3.0>"%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.3\version.txt"
)
set "CCACHE_DIR=%USERPROFILE%\ccache"
set "PATH=C:\hostedtoolcache\windows\Python\3.8.10\x64;C:\msys64\%MSYSTEM%\bin;C:\msys64\usr\bin;%ProgramFiles%\apache-maven-3.6.3\bin;%PATH%"
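A small, hypothetical cross-check (not in this commit) of the Windows step above: the action now writes "CUDA Version 12.3.0" into version.txt under the new CUDA_PATH, so a later build step could confirm which toolkit it picked up. The class name is made up for illustration and Files.readString requires Java 11+.

import java.nio.file.Files;
import java.nio.file.Paths;

public class CheckCudaPath {
    public static void main(String[] args) throws Exception {
        String cudaPath = System.getenv("CUDA_PATH");  // set above to ...\CUDA\v12.3
        String version = Files.readString(Paths.get(cudaPath, "version.txt")).trim();
        System.out.println(version);  // expected: CUDA Version 12.3.0
        if (!version.contains("12.3")) {
            throw new IllegalStateException("Unexpected CUDA toolkit: " + version);
        }
    }
}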
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -3,7 +3,7 @@
* Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360))
* Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388))
* Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361))
- * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies
+ * Upgrade presets for OpenCV 4.8.1, DNNL 3.3, OpenBLAS 0.3.24, CPython 3.12.0, NumPy 1.26.1, SciPy 1.11.3, LLVM 17.0.1, Leptonica 1.83.1, Tesseract 5.3.3, CUDA 12.3.0, cuDNN 8.9.5, NCCL 2.18.5, TensorFlow Lite 2.14.0, Triton Inference Server 2.38.0, ONNX 1.14.1, ONNX Runtime 1.16.1, TVM 0.13.0, and their dependencies

### June 6, 2023 version 1.5.9
* Virtualize `nvinfer1::IGpuAllocator` from TensorRT to allow customization ([pull #1367](https://github.com/bytedeco/javacpp-presets/pull/1367))
2 changes: 1 addition & 1 deletion README.md
@@ -216,7 +216,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
* Tesseract 5.3.x https://github.com/tesseract-ocr/tesseract
* Caffe 1.0 https://github.com/BVLC/caffe
* OpenPose 1.7.0 https://github.com/CMU-Perceptual-Computing-Lab/openpose
- * CUDA 12.1.x https://developer.nvidia.com/cuda-downloads
+ * CUDA 12.3.x https://developer.nvidia.com/cuda-downloads
* cuDNN 8.9.x https://developer.nvidia.com/cudnn
* NCCL 2.18.x https://developer.nvidia.com/nccl
* NVIDIA Video Codec SDK 12.1.x https://developer.nvidia.com/nvidia-video-codec-sdk
12 changes: 6 additions & 6 deletions cuda/README.md
@@ -23,9 +23,9 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:

- * CUDA 12.1.1 https://developer.nvidia.com/cuda-zone
- * cuDNN 8.9.1 https://developer.nvidia.com/cudnn
- * NCCL 2.18.1 https://developer.nvidia.com/nccl
+ * CUDA 12.3.0 https://developer.nvidia.com/cuda-zone
+ * cuDNN 8.9.5 https://developer.nvidia.com/cudnn
+ * NCCL 2.18.5 https://developer.nvidia.com/nccl

Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.

@@ -56,22 +56,22 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<modelVersion>4.0.0</modelVersion>
<groupId>org.bytedeco.cuda</groupId>
<artifactId>mnistcudnn</artifactId>
- <version>1.5.9</version>
+ <version>1.5.10-SNAPSHOT</version>
<properties>
<exec.mainClass>MNISTCUDNN</exec.mainClass>
</properties>
<dependencies>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
- <version>12.1-8.9-1.5.9</version>
+ <version>12.3-8.9-1.5.10-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
- <version>12.1-8.9-1.5.9</version>
+ <version>12.3-8.9-1.5.10-SNAPSHOT</version>
</dependency>

</dependencies>
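Independent of the MNISTCUDNN sample that exec.mainClass points at, here is a minimal, hypothetical sketch of what the two dependencies above enable: with cuda-platform-redist on the classpath, JavaCPP extracts and loads the bundled CUDA libraries at runtime, so no system-wide toolkit install is needed just to enumerate devices. The class name is illustrative only.

import org.bytedeco.javacpp.Loader;
import static org.bytedeco.cuda.global.cudart.*;

public class ListCudaDevices {
    public static void main(String[] args) {
        // Loads (and, with the redist artifact, first extracts) the CUDA runtime.
        Loader.load(org.bytedeco.cuda.global.cudart.class);

        int[] count = new int[1];
        int err = cudaGetDeviceCount(count);
        if (err != cudaSuccess) {
            System.err.println("cudaGetDeviceCount failed with error " + err);
            return;
        }
        System.out.println("CUDA devices visible: " + count[0]);
    }
}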
2 changes: 1 addition & 1 deletion cuda/platform/pom.xml
@@ -12,7 +12,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
- <version>12.1-8.9-${project.parent.version}</version>
+ <version>12.3-8.9-${project.parent.version}</version>
<name>JavaCPP Presets Platform for CUDA</name>

<properties>
2 changes: 1 addition & 1 deletion cuda/platform/redist/pom.xml
@@ -12,7 +12,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
- <version>12.1-8.9-${project.parent.version}</version>
+ <version>12.3-8.9-${project.parent.version}</version>
<name>JavaCPP Presets Platform Redist for CUDA</name>

<properties>
2 changes: 1 addition & 1 deletion cuda/pom.xml
@@ -11,7 +11,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>cuda</artifactId>
- <version>12.1-8.9-${project.parent.version}</version>
+ <version>12.3-8.9-${project.parent.version}</version>
<name>JavaCPP Presets for CUDA</name>

<dependencies>
28 changes: 14 additions & 14 deletions cuda/samples/SampleJpegDecoder.java
@@ -27,36 +27,36 @@
import static org.bytedeco.cuda.global.cudart.*;
import static org.bytedeco.cuda.global.nvjpeg.*;

- public class SampleJpeg {
- static class dev_malloc extends tDevMalloc {
- final static dev_malloc instance = new dev_malloc().retainReference();
+ public class SampleJpegDecoder {
+ static class devMalloc extends tDevMalloc {
+ final static devMalloc instance = new devMalloc().retainReference();

@Override
public int call(PointerPointer pointerPointer, long l) {
return cudaMalloc(pointerPointer, l);
}
}

- static class dev_free extends tDevFree {
- final static dev_free instance = new dev_free().retainReference();
+ static class devFree extends tDevFree {
+ final static devFree instance = new devFree().retainReference();

@Override
public int call(Pointer pointer) {
return cudaFree(pointer);
}
}

- static class host_malloc extends tPinnedMalloc {
- final static host_malloc instance = new host_malloc().retainReference();
+ static class hostMalloc extends tPinnedMalloc {
+ final static hostMalloc instance = new hostMalloc().retainReference();

@Override
public int call(PointerPointer pointerPointer, long l, int i) {
return cudaHostAlloc(pointerPointer, l, i);
}
}

- static class host_free extends tPinnedFree {
- final static host_free instance = new host_free().retainReference();
+ static class hostFree extends tPinnedFree {
+ final static hostFree instance = new hostFree().retainReference();

@Override
public int call(Pointer pointer) {
@@ -70,14 +70,14 @@ public static void CHECK_NVJPEG(String functionName, int result) {
}
}

- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
nvjpegDevAllocator_t devAllocator = new nvjpegDevAllocator_t();
- devAllocator.dev_malloc(dev_malloc.instance);
- devAllocator.dev_free(dev_free.instance);
+ devAllocator.dev_malloc(devMalloc.instance);
+ devAllocator.dev_free(devFree.instance);

nvjpegPinnedAllocator_t pinnedAllocator = new nvjpegPinnedAllocator_t();
- pinnedAllocator.pinned_malloc(host_malloc.instance);
- pinnedAllocator.pinned_free(host_free.instance);
+ pinnedAllocator.pinned_malloc(hostMalloc.instance);
+ pinnedAllocator.pinned_free(hostFree.instance);

nvjpegHandle handle = new nvjpegHandle();
nvjpegJpegState state = new nvjpegJpegState();
22 changes: 11 additions & 11 deletions cuda/samples/SampleJpegEncoder.java
@@ -34,36 +34,36 @@
import java.io.IOException;
import java.nio.file.Files;

- public class SampleJpeg {
- static class dev_malloc extends tDevMalloc {
- final static dev_malloc instance = new dev_malloc().retainReference();
+ public class SampleJpegEncoder {
+ static class devMalloc extends tDevMalloc {
+ final static devMalloc instance = new devMalloc().retainReference();

@Override
public int call(PointerPointer pointerPointer, long l) {
return cudaMalloc(pointerPointer, l);
}
}

- static class dev_free extends tDevFree {
- final static dev_free instance = new dev_free().retainReference();
+ static class devFree extends tDevFree {
+ final static devFree instance = new devFree().retainReference();

@Override
public int call(Pointer pointer) {
return cudaFree(pointer);
}
}

- static class host_malloc extends tPinnedMalloc {
- final static host_malloc instance = new host_malloc().retainReference();
+ static class hostMalloc extends tPinnedMalloc {
+ final static hostMalloc instance = new hostMalloc().retainReference();

@Override
public int call(PointerPointer pointerPointer, long l, int i) {
return cudaHostAlloc(pointerPointer, l, i);
}
}

- static class host_free extends tPinnedFree {
- final static host_free instance = new host_free().retainReference();
+ static class hostFree extends tPinnedFree {
+ final static hostFree instance = new hostFree().retainReference();

@Override
public int call(Pointer pointer) {
@@ -83,13 +83,13 @@ public static void CHECK_NVJPEG(String functionName, int result) {
}
}

- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
int imageWidth = 1280;
int imageHeight = 720;

nvjpegDevAllocator_t devAllocator = new nvjpegDevAllocator_t();
devAllocator.dev_malloc(devMalloc.instance);
- devAllocator.dev_free(dev_free.instance);
+ devAllocator.dev_free(devFree.instance);

nvjpegPinnedAllocator_t pinnedAllocator = new nvjpegPinnedAllocator_t();
pinnedAllocator.pinned_malloc(hostMalloc.instance);
4 changes: 2 additions & 2 deletions cuda/samples/pom.xml
@@ -12,14 +12,14 @@
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform</artifactId>
- <version>12.1-8.9-1.5.10-SNAPSHOT</version>
+ <version>12.3-8.9-1.5.10-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
- <version>12.1-8.9-1.5.10-SNAPSHOT</version>
+ <version>12.3-8.9-1.5.10-SNAPSHOT</version>
</dependency>

</dependencies>
@@ -1,4 +1,4 @@
- // Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+ // Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.cuda.cublas;

@@ -1,4 +1,4 @@
- // Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+ // Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.cuda.cublas;

@@ -1,4 +1,4 @@
- // Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+ // Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.cuda.cublas;
