* Add support for Windows to presets for TensorRT (pull #860)
StaticDefaultTester2 authored Apr 2, 2020
1 parent bebed42 commit 1ffcbe7
Showing 10 changed files with 176 additions and 22 deletions.
2 changes: 2 additions & 0 deletions .appveyor.yml
@@ -70,6 +70,8 @@ environment:
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
- PROJ: "tensorflow"
OS: windows-x86_64
- PROJ: "tensorrt"
OS: windows-x86_64
- PROJ: "ale"
OS: windows-x86_64
- PROJ: "onnxruntime"
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -1,4 +1,5 @@

* Add support for Windows to presets for TensorRT ([pull #860](https://github.com/bytedeco/javacpp-presets/pull/860))
* Add dependency on presets for `jnijavacpp` and `javacpp-platform` artifact to fix issues at load time ([issue bytedeco/javacv#1305](https://github.com/bytedeco/javacv/issues/1305))
* Bundle the official Java API of ONNX Runtime via the `jnionnxruntime` library
* Add CUDA-enabled build for ONNX Runtime via `-gpu` extension
10 changes: 9 additions & 1 deletion ci/install-windows.sh
@@ -115,7 +115,7 @@ if [ "$PROJ" == "mkl" ]; then
echo Finished mkl
fi

if [ "$PROJ" == "cuda" ] || [ "$EXT" == "-gpu" ]; then
if [ "$PROJ" == "cuda" ] || [ "$PROJ" == "tensorrt" ] || [ "$EXT" == "-gpu" ]; then
echo Installing cuda
curl -L -o cuda_10.2.89_441.22_windows.exe "http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_441.22_windows.exe"
curl -L -o cudnn-10.2-windows7-x64-v7.6.5.32.zip "https://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.2-windows7-x64-v7.6.5.32.zip"
@@ -128,6 +128,14 @@ if [ "$PROJ" == "cuda" ] || [ "$EXT" == "-gpu" ]; then
echo Finished cuda install
fi

if [ "$PROJ" == "tensorrt" ] || [ "$EXT" == "-gpu" ]; then
echo Installing tensorrt
/c/python27/python $APPVEYOR_BUILD_FOLDER/ci/gDownload.py 1MqoSNUEnbZPn4HNdJX3uic-Ej5ZejCaV /c/Downloads/tensorrt.zip
unzip -o /c/Downloads/tensorrt.zip -d /c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/
ln -sf /c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/TensorRT* /c/Program\ Files/NVIDIA\ GPU\ Computing\ Toolkit/TensorRT
echo Finished tensorrt install
fi

if [ "$PROJ" == "tensorflow" ]; then
curl -L http://downloads.sourceforge.net/project/swig/swigwin/swigwin-3.0.12/swigwin-3.0.12.zip -o swigwin-3.0.12.zip
unzip -o swigwin-3.0.12.zip -d /c/
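The CI step above unpacks TensorRT under C:/Program Files/NVIDIA GPU Computing Toolkit/ and symlinks the versioned directory to a fixed TensorRT path. As an illustration only (not part of this commit; the class name is hypothetical), a small check can confirm the layout matches what cppbuild.sh and the preset below expect:

```java
// Hypothetical sanity check, not part of this commit: verify that the headers
// and libraries ended up at the fixed path used by cppbuild.sh and nvinfer.java.
import java.nio.file.Files;
import java.nio.file.Paths;

public class CheckTensorRTInstall {
    public static void main(String[] args) {
        String root = "C:/Program Files/NVIDIA GPU Computing Toolkit/TensorRT/";
        for (String p : new String[]{root + "include/NvInfer.h", root + "lib"}) {
            System.out.println(p + (Files.exists(Paths.get(p)) ? " -> found" : " -> missing"));
        }
    }
}
```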
1 change: 1 addition & 0 deletions pom.xml
@@ -1537,6 +1537,7 @@
<module>cuda</module>
<module>mxnet</module>
<module>tensorflow</module>
<module>tensorrt</module>
<module>ale</module>
<module>onnxruntime</module>
<module>liquidfun</module>
6 changes: 2 additions & 4 deletions tensorrt/README.md
@@ -56,15 +56,13 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<!-- Additional dependencies to use bundled CUDA, cuDNN, NCCL, and TensorRT -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda</artifactId>
<artifactId>cuda-platform-redist</artifactId>
<version>10.2-7.6-1.5.3-SNAPSHOT</version>
<classifier>linux-x86_64-redist</classifier>
</dependency>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>tensorrt</artifactId>
<artifactId>tensorrt-platform-redist</artifactId>
<version>7.0-1.5.3-SNAPSHOT</version>
<classifier>linux-x86_64-redist</classifier>
</dependency>

</dependencies>
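With the `-platform-redist` artifacts above on the classpath, TensorRT can be used without a local install. A minimal usage sketch, not part of this commit (the class name is illustrative, and `getInferLibVersion()` is assumed to be mapped by the presets from the TensorRT headers listed in nvinfer.java):

```java
// Minimal usage sketch, assuming the dependencies shown above are on the classpath.
import org.bytedeco.javacpp.Loader;
import org.bytedeco.tensorrt.global.nvinfer;

public class TensorRTVersionCheck {
    public static void main(String[] args) {
        // Extracts and loads nvinfer plus the bundled CUDA libraries.
        Loader.load(nvinfer.class);
        // getInferLibVersion() is assumed to be mapped from NvInferRuntimeCommon.h.
        System.out.println("TensorRT library version: " + nvinfer.getInferLibVersion());
    }
}
```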
6 changes: 6 additions & 0 deletions tensorrt/cppbuild.sh
@@ -14,6 +14,12 @@ case $PLATFORM in
exit 1
fi
;;
windows-x86_64)
if [[ ! -f "C:/Program Files/NVIDIA GPU Computing Toolkit/TensorRT/include/NvInfer.h" ]]; then
echo "Please install TensorRT in C:/Program Files/NVIDIA GPU Computing Toolkit/TensorRT/"
exit 1
fi
;;
*)
echo "Error: Platform \"$PLATFORM\" is not supported"
;;
9 changes: 8 additions & 1 deletion tensorrt/platform/pom.xml
@@ -36,6 +36,12 @@
<version>${project.version}</version>
<classifier>${javacpp.platform.linux-x86_64}</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>${javacpp.moduleId}</artifactId>
<version>${project.version}</version>
<classifier>${javacpp.platform.windows-x86_64}</classifier>
</dependency>
</dependencies>

<build>
@@ -48,7 +54,7 @@
<configuration>
<archive>
<manifestEntries>
<Class-Path>${javacpp.moduleId}.jar ${javacpp.moduleId}-linux-x86_64.jar</Class-Path>
<Class-Path>${javacpp.moduleId}.jar ${javacpp.moduleId}-linux-x86_64.jar ${javacpp.moduleId}-windows-x86_64.jar</Class-Path>
</manifestEntries>
</archive>
</configuration>
@@ -94,6 +100,7 @@
<moduleInfoSource>
module org.bytedeco.${javacpp.moduleId}.platform {
requires org.bytedeco.${javacpp.moduleId}.linux.x86_64;
requires org.bytedeco.${javacpp.moduleId}.windows.x86_64;
}
</moduleInfoSource>
</module>
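The extra `requires` line means the platform module now declares the Windows natives as well as the Linux ones. As a hypothetical illustration (module name is not from this commit), a modular application only needs to require the platform module:

```java
// Hypothetical module-info.java for an application using the presets under JPMS;
// the module name com.example.trt is illustrative.
module com.example.trt {
    // The platform module itself requires org.bytedeco.tensorrt.linux.x86_64
    // and, after this change, org.bytedeco.tensorrt.windows.x86_64.
    requires org.bytedeco.tensorrt.platform;
}
```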
111 changes: 111 additions & 0 deletions tensorrt/platform/redist/pom.xml
@@ -0,0 +1,111 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>

<parent>
<groupId>org.bytedeco</groupId>
<artifactId>javacpp-presets</artifactId>
<version>1.5.3-SNAPSHOT</version>
<relativePath>../../../</relativePath>
</parent>

<groupId>org.bytedeco</groupId>
<artifactId>tensorrt-platform-redist</artifactId>
<version>7.0-${project.parent.version}</version>
<name>JavaCPP Presets Platform Redist for TensorRT</name>

<properties>
<javacpp.moduleId>tensorrt</javacpp.moduleId>
<javacpp.platform.extension>-redist</javacpp.platform.extension>
</properties>

<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>${javacpp.moduleId}-platform</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>${javacpp.moduleId}</artifactId>
<version>${project.version}</version>
<classifier>${javacpp.platform.linux-x86_64}</classifier>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
<artifactId>${javacpp.moduleId}</artifactId>
<version>${project.version}</version>
<classifier>${javacpp.platform.windows-x86_64}</classifier>
</dependency>
</dependencies>

<build>
<plugins>
<plugin>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<id>default-jar</id>
<configuration>
<archive>
<manifestEntries>
<Class-Path>${javacpp.moduleId}.jar ${javacpp.moduleId}-linux-x86_64-redist.jar ${javacpp.moduleId}-windows-x86_64-redist.jar</Class-Path>
</manifestEntries>
</archive>
</configuration>
</execution>
<execution>
<id>empty-javadoc-jar</id>
<goals>
<goal>jar</goal>
</goals>
<configuration>
<classifier>javadoc</classifier>
</configuration>
</execution>
<execution>
<id>empty-sources-jar</id>
<goals>
<goal>jar</goal>
</goals>
<configuration>
<classifier>sources</classifier>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.moditect</groupId>
<artifactId>moditect-maven-plugin</artifactId>
<executions>
<execution>
<id>add-module-infos</id>
<phase>none</phase>
</execution>
<execution>
<id>add-platform-module-info</id>
<phase>package</phase>
<goals>
<goal>add-module-info</goal>
</goals>
<configuration>
<modules>
<module>
<file>${project.build.directory}/${project.artifactId}.jar</file>
<moduleInfoSource>
module org.bytedeco.${javacpp.moduleId}.platform {
requires org.bytedeco.${javacpp.moduleId}.linux.x86_64.redist;
requires org.bytedeco.${javacpp.moduleId}.windows.x86_64.redist;
}
</moduleInfoSource>
</module>
</modules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>

</project>
6 changes: 2 additions & 4 deletions tensorrt/samples/pom.xml
@@ -16,15 +16,13 @@
<!-- Additional dependencies to use bundled CUDA, cuDNN, NCCL, and TensorRT -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda</artifactId>
<artifactId>cuda-platform-redist</artifactId>
<version>10.2-7.6-1.5.3-SNAPSHOT</version>
<classifier>linux-x86_64-redist</classifier>
</dependency>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>tensorrt</artifactId>
<artifactId>tensorrt-platform-redist</artifactId>
<version>7.0-1.5.3-SNAPSHOT</version>
<classifier>linux-x86_64-redist</classifier>
</dependency>

</dependencies>
46 changes: 34 additions & 12 deletions tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java
@@ -43,16 +43,29 @@
*/
@Properties(
inherit = {cublas.class, cudnn.class, nvrtc.class},
-value = @Platform(
-    value = "linux-x86_64",
-    compiler = "cpp11",
-    include = {"NvInferVersion.h", "NvInferRuntimeCommon.h", "NvInferRuntime.h", "NvInfer.h", "NvUtils.h"},
-    includepath = {"/usr/include/x86_64-linux-gnu/", "/usr/local/tensorrt/include/"},
-    link = "nvinfer@.7",
-    preload = "myelin@.1",
-    linkpath = {"/usr/lib/x86_64-linux-gnu/", "/usr/local/tensorrt/lib/"}),
+value = {
+    @Platform(
+        value = {"linux-x86_64", "windows-x86_64"},
+        compiler = "cpp11",
+        include = {"NvInferVersion.h", "NvInferRuntimeCommon.h", "NvInferRuntime.h", "NvInfer.h", "NvUtils.h"},
+        link = "nvinfer@.7"
+    ),
+    @Platform(
+        value = "linux-x86_64",
+        includepath = {"/usr/include/x86_64-linux-gnu/", "/usr/local/tensorrt/include/"},
+        linkpath = {"/usr/lib/x86_64-linux-gnu/", "/usr/local/tensorrt/lib/"},
+        preload = "myelin@.1"
+    ),
+    @Platform(
+        value = "windows-x86_64",
+        includepath = "C:/Program Files/NVIDIA GPU Computing Toolkit/TensorRT/include",
+        linkpath = "C:/Program Files/NVIDIA GPU Computing Toolkit/TensorRT/lib/",
+        preload = "myelin64_1"
+    )
+},
target = "org.bytedeco.tensorrt.nvinfer",
-global = "org.bytedeco.tensorrt.global.nvinfer")
+global = "org.bytedeco.tensorrt.global.nvinfer"
+)
public class nvinfer implements LoadEnabled, InfoMapper {
static { Loader.checkVersion("org.bytedeco", "tensorrt"); }

@@ -62,13 +75,22 @@ public class nvinfer implements LoadEnabled, InfoMapper {
List<String> resources = properties.get("platform.preloadresource");

// Only apply this at load time since we don't want to copy the CUDA libraries here
-if (!Loader.isLoadLibraries() || !platform.equals("linux-x86_64")) {
+if (!Loader.isLoadLibraries()) {
return;
}
int i = 0;
-String[] libs = {"cudart", "cublasLt", "cublas", "cudnn"};
+String[] libs = {"cudart", "cublasLt", "cublas", "cudnn", "nvrtc"};
for (String lib : libs) {
-    lib += lib.equals("cudnn") ? "@.7" : lib.equals("cudart") ? "@.10.2" : "@.10";
+    switch (platform) {
+        case "linux-x86_64":
+            lib += lib.equals("cudnn") ? "@.7" : lib.equals("cudart") ? "@.10.2" : lib.equals("nvrtc") ? "@.10.2" : "@.10";
+            break;
+        case "windows-x86_64":
+            lib += lib.equals("cudnn") ? "64_7" : lib.equals("cudart") ? "64_102" : lib.equals("nvrtc") ? "64_102_0" : "64_10";
+            break;
+        default:
+            continue; // no CUDA
+    }
if (!preloads.contains(lib)) {
preloads.add(i++, lib);
}
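The init() hook now decorates the CUDA preload names per platform instead of assuming Linux. As an illustration only, the standalone snippet below (not part of the presets) reproduces that decoration so the per-platform names are visible at a glance:

```java
// Standalone illustration, not part of the presets: reproduces the name
// decoration from init() above and prints the resulting preload names.
public class PreloadNames {
    public static void main(String[] args) {
        String[] libs = {"cudart", "cublasLt", "cublas", "cudnn", "nvrtc"};
        for (String platform : new String[]{"linux-x86_64", "windows-x86_64"}) {
            for (String lib : libs) {
                String name = platform.equals("linux-x86_64")
                        ? lib + (lib.equals("cudnn") ? "@.7" : lib.equals("cudart") ? "@.10.2"
                                : lib.equals("nvrtc") ? "@.10.2" : "@.10")
                        : lib + (lib.equals("cudnn") ? "64_7" : lib.equals("cudart") ? "64_102"
                                : lib.equals("nvrtc") ? "64_102_0" : "64_10");
                // e.g. linux-x86_64: cudart@.10.2 ... windows-x86_64: cudnn64_7
                System.out.println(platform + ": " + name);
            }
        }
    }
}
```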
