
Commit

* Upgrade presets for NumPy 1.24.3, LibRaw 0.21.1, cuDNN 8.9.1, NCCL 2.18.1, PyTorch 2.0.1, TensorRT 8.6.1.6, Triton Inference Server 2.33.0
saudet committed May 10, 2023
1 parent 9e60966 commit f59c3ad
Showing 84 changed files with 1,503 additions and 594 deletions.
17 changes: 10 additions & 7 deletions .github/actions/deploy-centos/action.yml
@@ -14,20 +14,23 @@ runs:
echo "sloppiness = file_macro,include_file_ctime,include_file_mtime,pch_defines,time_macros" >> .ccache/ccache.conf
SCL_ENABLE="devtoolset-9 rh-python38"
SCL_INSTALL="rh-python38-python-urllib3"
CENTOS_VERSION=$(rpm --eval '%{centos_ver}')
if [[ "$CENTOS_VERSION" == "6" ]]; then
find /etc/yum.repos.d/ -name *.repo | xargs -i sed -i 's/mirror\.centos\.org\/centos/vault.centos.org/g;s/$releasever/6.10/g;s/mirrorlist/#mirrorlist/g;s/#baseurl/baseurl/g' {}
SCL_ENABLE="devtoolset-9 rh-python36 python27"
SCL_INSTALL=""
fi
echo "SCL_ENABLE=$SCL_ENABLE" >> $GITHUB_ENV
echo "SCL_INSTALL=$SCL_INSTALL" >> $GITHUB_ENV
yum -y update
yum -y install centos-release-scl-rh epel-release
if [[ "$CENTOS_VERSION" == "6" ]]; then
sed -i 's/mirror\.centos\.org\/centos/vault.centos.org/g;s/6\/sclo/6.10\/sclo/g;s/mirrorlist/#mirrorlist/g;s/#baseurl/baseurl/g' /etc/yum.repos.d/CentOS-SCLo-scl-rh.repo
fi
curl -L "https://negativo17.org/repos/epel-multimedia.repo" | sed -e '/^\[[a-z\-]*\]/a priority=99' > /etc/yum.repos.d/epel-multimedia.repo
yum -y install $SCL_ENABLE rh-java-common-ant boost-devel ccache clang gcc-c++ gcc-gfortran java-1.8.0-openjdk-devel ant python python3-devel python3-pip swig file which wget unzip tar bzip2 gzip xz patch autoconf-archive automake make libtool bison flex perl-core nasm alsa-lib-devel freeglut-devel gtk2-devel libusb-devel libusb1-devel curl-devel expat-devel gettext-devel openssl-devel bzip2-devel zlib-devel SDL2-devel libva-devel libxkbcommon-devel libxkbcommon-x11-devel xcb-util* fontconfig-devel libffi-devel ragel ocl-icd-devel GeoIP-devel pcre-devel ssdeep-devel yajl-devel
yum -y install $SCL_ENABLE $SCL_INSTALL rh-java-common-ant boost-devel ccache clang gcc-c++ gcc-gfortran java-1.8.0-openjdk-devel ant python python3-devel python3-pip swig file which wget unzip tar bzip2 gzip xz patch autoconf-archive automake make libtool bison flex perl-core nasm alsa-lib-devel freeglut-devel gtk2-devel libusb-devel libusb1-devel curl-devel expat-devel gettext-devel openssl-devel bzip2-devel zlib-devel SDL2-devel libva-devel libxkbcommon-devel libxkbcommon-x11-devel xcb-util* fontconfig-devel libffi-devel ragel ocl-icd-devel GeoIP-devel pcre-devel ssdeep-devel yajl-devel
# https://gcc.gnu.org/legacy-ml/gcc-patches/2018-01/msg01962.html
sed -i 's/_mm512_abs_pd (__m512 __A)/_mm512_abs_pd (__m512d __A)/g' /opt/rh/devtoolset-9/root/usr/lib/gcc/x86_64-redhat-linux/9/include/avx512fintrin.h
source scl_source enable $SCL_ENABLE || true
@@ -70,10 +73,10 @@ runs:
if [[ "$CI_DEPLOY_PLATFORM" == "linux-x86_64" ]] && [[ -n ${CI_DEPLOY_NEED_CUDA:-} ]]; then
echo Installing CUDA, cuDNN, etc
curl -LO https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda-repo-rhel7-12-1-local-12.1.1_530.30.02-1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libcudnn8-8.9.0.131-1.cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libcudnn8-devel-8.9.0.131-1.cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-2.17.1-1+cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-devel-2.17.1-1+cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libcudnn8-8.9.1.23-1.cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libcudnn8-devel-8.9.1.23-1.cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-2.18.1-1+cuda12.1.x86_64.rpm
curl -LO https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/libnccl-devel-2.18.1-1+cuda12.1.x86_64.rpm
rpm -i --force --ignorearch --nodeps cuda-repo-rhel7-* libcudnn*.rpm libnccl*.rpm
pushd /var/cuda-repo-rhel7-12-1-local/; rpm -i --force --ignorearch --nodeps cuda*.rpm libc*.rpm libn*.rpm; rm *.rpm; popd
@@ -121,8 +124,8 @@ runs:
if [[ "$CI_DEPLOY_MODULE" == "tensorrt" ]]; then
echo Installing TensorRT
python3 -m gdown.cli https://drive.google.com/uc?id=10H_HJUHy2c6w8AlrFbxvySFaNwhrmmuj
tar -hxvf TensorRT-8.6.0.12.Linux.x86_64-gnu.cuda-12.0.tar.gz -C /usr/local/
python3 -m gdown.cli https://drive.google.com/uc?id=1dVhD-DEYY42QbZe1GXl-vxe3k6KqWGsL
tar -hxvf TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz -C /usr/local/
ln -sf /usr/local/TensorRT* /usr/local/tensorrt
fi
16 changes: 8 additions & 8 deletions .github/actions/deploy-ubuntu/action.yml
@@ -22,16 +22,16 @@ runs:
export ARCH2=sbsa
export PREFIX=aarch64-linux-gnu
export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.aarch64.rpm
export CUDNN=8.9.0.131-1.cuda12.1.aarch64
export NCCL=2.17.1-1+cuda12.1.aarch64
export CUDNN=8.9.1.23-1.cuda12.1.aarch64
export NCCL=2.18.1-1+cuda12.1.aarch64
export USERLAND_BUILDME="buildme --aarch64"
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-ppc64le" ]]; then
export ARCH=ppc64el
export ARCH2=ppc64le
export PREFIX=powerpc64le-linux-gnu
export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.ppc64le.rpm
export CUDNN=8.9.0.131-1.cuda12.1.ppc64le
export NCCL=2.17.1-1+cuda12.1.ppc64le
export CUDNN=8.9.1.23-1.cuda12.1.ppc64le
export NCCL=2.18.1-1+cuda12.1.ppc64le
elif [[ "$CI_DEPLOY_PLATFORM" == "linux-x86" ]]; then
export ARCH=i386
export PREFIX=i686-linux-gnu
@@ -40,8 +40,8 @@ runs:
export ARCH2=x86_64
export PREFIX=x86_64-linux-gnu
export CUDA=cuda-repo-rhel8-12-1-local-12.1.1_530.30.02-1.x86_64.rpm
export CUDNN=8.9.0.131-1.cuda12.1.x86_64
export NCCL=2.17.1-1+cuda12.1.x86_64
export CUDNN=8.9.1.23-1.cuda12.1.x86_64
export NCCL=2.18.1-1+cuda12.1.x86_64
fi
echo "ARCH=$ARCH" >> $GITHUB_ENV
echo "ARCH2=$ARCH2" >> $GITHUB_ENV
@@ -153,8 +153,8 @@ runs:
if [[ "$CI_DEPLOY_MODULE" == "tensorrt" ]]; then
echo Installing TensorRT
python3 -m gdown.cli https://drive.google.com/uc?id=1aWz5V9ZlRWj-vaSIKMRahORPMqILQzKe
tar -hxvf TensorRT-8.6.0.12.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz -C /usr/local/
python3 -m gdown.cli https://drive.google.com/uc?id=1LZRCv4ZAGiDQAu4pvADJIGntq4cGl5tU
tar -hxvf TensorRT-8.6.1.6.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz -C /usr/local/
ln -sf /usr/local/TensorRT* /usr/local/tensorrt
fi
12 changes: 6 additions & 6 deletions .github/actions/deploy-windows/action.yml
@@ -27,8 +27,8 @@ runs:
rm "C:/msys64/mingw32/bin/clang-cl.exe" "C:/msys64/mingw64/bin/clang-cl.exe" "C:/msys64/mingw32/bin/cmake.exe" "C:/msys64/mingw64/bin/cmake.exe"
rm "C:/Strawberry/c/lib/libz.a" "C:/Strawberry/c/lib/libzlib.a" "C:/Strawberry/c/lib/libzdll.a"
choco uninstall maven
choco install maven --version 3.6.3 --force
curl -LO https://downloads.apache.org/maven/maven-3/3.6.3/binaries/apache-maven-3.6.3-bin.tar.gz
bash -c "tar -xzf apache-maven-3.6.3-bin.tar.gz -C 'C:/Program Files/'"
python -m pip install gdown || python -m pip install gdown
@@ -127,9 +127,9 @@ runs:
if "%CI_DEPLOY_MODULE%"=="tensorrt" (
echo Installing TensorRT
python -m gdown.cli https://drive.google.com/uc?id=1MTNEhrOC2rTT1itn_Z-SUK_Ck_XRxLbR
unzip TensorRT-8.6.0.12.Windows10.x86_64.cuda-12.0.zip
move TensorRT-8.6.0.12 "%ProgramFiles%\NVIDIA GPU Computing Toolkit\TensorRT"
python -m gdown.cli https://drive.google.com/uc?id=1GfmJ1BKbacLpUU-0i_mGu0sjrAS0Xzzi
unzip TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0.zip
move TensorRT-8.6.1.6 "%ProgramFiles%\NVIDIA GPU Computing Toolkit\TensorRT"
)
if "%CI_DEPLOY_MODULE%"=="mkl" (
@@ -218,7 +218,7 @@ runs:
echo CUDA Version 12.1.0>"%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v12.1\version.txt"
)
set "CCACHE_DIR=%USERPROFILE%\ccache"
set "PATH=C:\hostedtoolcache\windows\Python\3.8.10\x64;C:\msys64\%MSYSTEM%\bin;C:\msys64\usr\bin;C:\ProgramData\chocolatey\lib\maven\apache-maven-3.6.3\bin;%PATH%"
set "PATH=C:\hostedtoolcache\windows\Python\3.8.10\x64;C:\msys64\%MSYSTEM%\bin;C:\msys64\usr\bin;%ProgramFiles%\apache-maven-3.6.3\bin;%PATH%"
where bash
where curl
2 changes: 1 addition & 1 deletion .github/workflows/tritonserver.yml
@@ -19,6 +19,6 @@ env:
jobs:
linux-x86_64:
runs-on: ubuntu-20.04
container: nvcr.io/nvidia/tritonserver:23.03-py3
container: nvcr.io/nvidia/tritonserver:23.04-py3
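# The tritonserver:23.04-py3 container corresponds to Triton Inference Server 2.33.0
# (an assumption based on NVIDIA's container release numbering, not stated in this file).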
steps:
- uses: bytedeco/javacpp-presets/.github/actions/deploy-ubuntu@actions
4 changes: 2 additions & 2 deletions CHANGELOG.md
@@ -20,8 +20,8 @@
* Add missing predefined `AVChannelLayout` in presets for FFmpeg ([issue #1286](https://github.com/bytedeco/javacpp-presets/issues/1286))
* Map `c10::impl::GenericDict` as returned by `c10::IValue::toGenericDict()` in presets for PyTorch
* Introduce `linux-armhf` and `linux-x86` builds to presets for TensorFlow Lite ([pull #1268](https://github.com/bytedeco/javacpp-presets/pull/1268))
* Add presets for LibRaw 0.20.2 ([pull #1211](https://github.com/bytedeco/javacpp-presets/pull/1211))
* Upgrade presets for OpenCV 4.7.0, FFmpeg 6.0 ([issue bytedeco/javacv#1693](https://github.com/bytedeco/javacv/issues/1693)), HDF5 1.14.0, Hyperscan 5.4.2 ([issue #1308](https://github.com/bytedeco/javacpp-presets/issues/1308)), Spinnaker 3.0.0.118 ([pull #1313](https://github.com/bytedeco/javacpp-presets/pull/1313)), librealsense2 2.53.1 ([pull #1305](https://github.com/bytedeco/javacpp-presets/pull/1305)), MKL 2023.1, DNNL 3.1, OpenBLAS 0.3.23, ARPACK-NG 3.9.0, CPython 3.11.3, NumPy 1.24.2, SciPy 1.10.1, LLVM 16.0.3, Leptonica 1.83.0, Tesseract 5.3.1, CUDA 12.1.1, cuDNN 8.9.0, NCCL 2.17.1, OpenCL 3.0.14, NVIDIA Video Codec SDK 12.0.16, PyTorch 2.0.0, TensorFlow Lite 2.12.0, TensorRT 8.6.0.12, Triton Inference Server 2.32.0, DepthAI 2.21.2, ONNX 1.14.0, ONNX Runtime 1.15.0, TVM 0.12.0, Bullet Physics SDK 3.25, and their dependencies
* Add presets for LibRaw 0.21.1 ([pull #1211](https://github.com/bytedeco/javacpp-presets/pull/1211))
* Upgrade presets for OpenCV 4.7.0, FFmpeg 6.0 ([issue bytedeco/javacv#1693](https://github.com/bytedeco/javacv/issues/1693)), HDF5 1.14.0, Hyperscan 5.4.2 ([issue #1308](https://github.com/bytedeco/javacpp-presets/issues/1308)), Spinnaker 3.0.0.118 ([pull #1313](https://github.com/bytedeco/javacpp-presets/pull/1313)), librealsense2 2.53.1 ([pull #1305](https://github.com/bytedeco/javacpp-presets/pull/1305)), MKL 2023.1, DNNL 3.1, OpenBLAS 0.3.23, ARPACK-NG 3.9.0, CPython 3.11.3, NumPy 1.24.3, SciPy 1.10.1, LLVM 16.0.3, Leptonica 1.83.0, Tesseract 5.3.1, CUDA 12.1.1, cuDNN 8.9.1, NCCL 2.18.1, OpenCL 3.0.14, NVIDIA Video Codec SDK 12.0.16, PyTorch 2.0.1, TensorFlow Lite 2.12.0, TensorRT 8.6.1.6, Triton Inference Server 2.33.0, DepthAI 2.21.2, ONNX 1.14.0, ONNX Runtime 1.15.0, TVM 0.12.0, Bullet Physics SDK 3.25, and their dependencies

### November 2, 2022 version 1.5.8
* Fix mapping of `torch::ExpandingArrayWithOptionalElem` in presets for PyTorch ([issue #1250](https://github.com/bytedeco/javacpp-presets/issues/1250))
8 changes: 4 additions & 4 deletions README.md
@@ -210,22 +210,22 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
* LLVM 16.0.x http://llvm.org/releases/download.html
* libffi 3.4.x https://github.com/libffi/libffi
* libpostal 1.1 https://github.com/openvenues/libpostal
* LibRaw 0.20.2 https://www.libraw.org/download
* LibRaw 0.21.x https://www.libraw.org/download
* Leptonica 1.83.x http://www.leptonica.org/download.html
* Tesseract 5.3.x https://github.com/tesseract-ocr/tesseract
* Caffe 1.0 https://github.com/BVLC/caffe
* OpenPose 1.7.0 https://github.com/CMU-Perceptual-Computing-Lab/openpose
* CUDA 12.1.x https://developer.nvidia.com/cuda-downloads
* cuDNN 8.9.x https://developer.nvidia.com/cudnn
* NCCL 2.17.x https://developer.nvidia.com/nccl
* NCCL 2.18.x https://developer.nvidia.com/nccl
* NVIDIA Video Codec SDK 12.0.x https://developer.nvidia.com/nvidia-video-codec-sdk
* OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader
* MXNet 1.9.x https://github.com/apache/incubator-mxnet
* PyTorch 2.0.x https://github.com/pytorch/pytorch
* TensorFlow 1.15.x https://github.com/tensorflow/tensorflow
* TensorFlow Lite 2.12.x https://github.com/tensorflow/tensorflow
* TensorRT 8.x https://developer.nvidia.com/tensorrt
* Triton Inference Server 2.32.x https://developer.nvidia.com/nvidia-triton-inference-server
* TensorRT 8.6.x https://developer.nvidia.com/tensorrt
* Triton Inference Server 2.33.x https://developer.nvidia.com/nvidia-triton-inference-server
* The Arcade Learning Environment 0.8.x https://github.com/mgbellemare/Arcade-Learning-Environment
* DepthAI 2.21.x https://github.com/luxonis/depthai-core
* ONNX 1.14.x https://github.com/onnx/onnx
4 changes: 2 additions & 2 deletions cuda/README.md
@@ -24,8 +24,8 @@ Introduction
This directory contains the JavaCPP Presets module for:

* CUDA 12.1.1 https://developer.nvidia.com/cuda-zone
* cuDNN 8.9.0 https://developer.nvidia.com/cudnn
* NCCL 2.17.1 https://developer.nvidia.com/nccl
* cuDNN 8.9.1 https://developer.nvidia.com/cudnn
* NCCL 2.18.1 https://developer.nvidia.com/nccl

Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.

20 changes: 17 additions & 3 deletions cuda/src/gen/java/org/bytedeco/cuda/global/nccl.java
@@ -33,11 +33,11 @@ public class nccl extends org.bytedeco.cuda.presets.nccl {
// #endif

public static final int NCCL_MAJOR = 2;
public static final int NCCL_MINOR = 17;
public static final int NCCL_MINOR = 18;
public static final int NCCL_PATCH = 1;
public static final String NCCL_SUFFIX = "";

public static final int NCCL_VERSION_CODE = 21701;
public static final int NCCL_VERSION_CODE = 21801;
// #define NCCL_VERSION(X,Y,Z) (((X) <= 2 && (Y) <= 8) ? (X) * 1000 + (Y) * 100 + (Z) : (X) * 10000 + (Y) * 100 + (Z))
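// Worked example (a sketch applying the macro above, not present in the original header):
// for this release X = 2, Y = 18, Z = 1, so Y > 8 selects the second branch and
// NCCL_VERSION(2, 18, 1) = 2 * 10000 + 18 * 100 + 1 = 21801, matching NCCL_VERSION_CODE.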

// #ifdef __cplusplus
@@ -68,6 +68,7 @@ public class nccl extends org.bytedeco.cuda.presets.nccl {

// #define NCCL_CONFIG_UNDEF_INT INT_MIN
// #define NCCL_CONFIG_UNDEF_PTR NULL
public static final int NCCL_SPLIT_NOCOLOR = -1;
// Targeting ../nccl/ncclConfig_t.java


@@ -82,7 +83,8 @@
// NCCL_CONFIG_UNDEF_INT, /* cgaClusterSize */
// NCCL_CONFIG_UNDEF_INT, /* minCTAs */
// NCCL_CONFIG_UNDEF_INT, /* maxCTAs */
// NCCL_CONFIG_UNDEF_PTR /* netName */
// NCCL_CONFIG_UNDEF_PTR, /* netName */
// NCCL_CONFIG_UNDEF_INT /* splitShare */
// }

/* Return the NCCL_VERSION_CODE of the NCCL library in the supplied integer.
@@ -156,6 +158,18 @@ public class nccl extends org.bytedeco.cuda.presets.nccl {
public static native @Cast("ncclResult_t") int ncclCommAbort(ncclComm comm);
public static native @Cast("ncclResult_t") int pncclCommAbort(ncclComm comm);

/* Creates one or more communicators from an existing one.
* Ranks with the same color will end up in the same communicator.
* Within the new communicator, key will be used to order ranks.
* NCCL_SPLIT_NOCOLOR as color will indicate the rank will not be part of any group
* and will therefore return a NULL communicator.
* If config is NULL, the new communicator will inherit the original communicator's
* configuration*/
public static native @Cast("ncclResult_t") int ncclCommSplit(ncclComm comm, int color, int key, @ByPtrPtr ncclComm newcomm, ncclConfig_t config);
public static native @Cast("ncclResult_t") int ncclCommSplit(ncclComm comm, int color, int key, @Cast("ncclComm**") PointerPointer newcomm, ncclConfig_t config);
public static native @Cast("ncclResult_t") int pncclCommSplit(ncclComm comm, int color, int key, @ByPtrPtr ncclComm newcomm, ncclConfig_t config);
public static native @Cast("ncclResult_t") int pncclCommSplit(ncclComm comm, int color, int key, @Cast("ncclComm**") PointerPointer newcomm, ncclConfig_t config);
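// Minimal usage sketch (illustration only, not part of the generated bindings; assumes
// "comm" and "rank" come from an earlier ncclCommInitRank call): split an existing
// communicator into two groups by rank parity, inheriting the parent's configuration
// by passing a null config:
//
//   ncclComm newcomm = new ncclComm();
//   int color = rank % 2;   // ranks with the same color land in the same new communicator
//   int key = rank;         // orders ranks within the new communicator
//   ncclCommSplit(comm, color, key, newcomm, (ncclConfig_t) null);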

/* Returns a string for each error code. */
public static native @Cast("const char*") BytePointer ncclGetErrorString(@Cast("ncclResult_t") int result);
public static native @Cast("const char*") BytePointer pncclGetErrorString(@Cast("ncclResult_t") int result);
1 change: 1 addition & 0 deletions cuda/src/gen/java/org/bytedeco/cuda/nccl/ncclConfig_t.java
@@ -43,4 +43,5 @@ public class ncclConfig_t extends Pointer {
public native int minCTAs(); public native ncclConfig_t minCTAs(int setter);
public native int maxCTAs(); public native ncclConfig_t maxCTAs(int setter);
public native @Cast("const char*") BytePointer netName(); public native ncclConfig_t netName(BytePointer setter);
public native int splitShare(); public native ncclConfig_t splitShare(int setter);
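// splitShare presumably toggles whether communicators produced by ncclCommSplit share
// resources with their parent (description inferred from the NCCL 2.18 documentation,
// not from the generated sources).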
}
4 changes: 2 additions & 2 deletions cuda/src/main/java/org/bytedeco/cuda/presets/cusparse.java
@@ -34,8 +34,8 @@
* @author Samuel Audet
*/
@Properties(inherit = cudart.class, value = {
@Platform(include = "<cusparse.h>", link = "cusparse@.12"),
@Platform(value = "windows-x86_64", preload = "cusparse64_12")},
@Platform(include = "<cusparse.h>", link = "cusparse@.12", preload = "nvJitLink@.12"),
@Platform(value = "windows-x86_64", preload = {"cusparse64_12", "nvJitLink_120_0"})},
target = "org.bytedeco.cuda.cusparse", global = "org.bytedeco.cuda.global.cusparse")
@NoException
public class cusparse implements InfoMapper {
4 changes: 2 additions & 2 deletions libraw/README.md
@@ -9,7 +9,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:

* LibRaw 0.20.2 https://www.libraw.org/
* LibRaw 0.21.1 https://www.libraw.org/

Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.

@@ -46,7 +46,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>libraw-platform</artifactId>
<version>0.20.2-1.5.9-SNAPSHOT</version>
<version>0.21.1-1.5.9-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
2 changes: 1 addition & 1 deletion libraw/cppbuild.sh
@@ -9,7 +9,7 @@ fi

# Compilation instructions at https://www.libraw.org/docs/Install-LibRaw.html

LIBRAW_VERSION=0.20.2
LIBRAW_VERSION=0.21.1
download https://github.com/LibRaw/LibRaw/archive/refs/tags/$LIBRAW_VERSION.zip LibRaw-$LIBRAW_VERSION.zip
unzip -o LibRaw-$LIBRAW_VERSION.zip

2 changes: 1 addition & 1 deletion libraw/platform/pom.xml
@@ -12,7 +12,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>libraw-platform</artifactId>
<version>0.20.2-${project.parent.version}</version>
<version>0.21.1-${project.parent.version}</version>
<name>JavaCPP Presets Platform for LibRaw</name>

<properties>
2 changes: 1 addition & 1 deletion libraw/pom.xml
@@ -11,7 +11,7 @@

<groupId>org.bytedeco</groupId>
<artifactId>libraw</artifactId>
<version>0.20.2-${project.parent.version}</version>
<version>0.21.1-${project.parent.version}</version>
<name>JavaCPP Presets for LibRaw</name>

<dependencies>
2 changes: 1 addition & 1 deletion libraw/samples/pom.xml
@@ -12,7 +12,7 @@
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>libraw-platform</artifactId>
<version>0.20.2-1.5.9-SNAPSHOT</version>
<version>0.21.1-1.5.9-SNAPSHOT</version>
</dependency>
</dependencies>
<build>