diff --git a/.github/actions/deploy-ubuntu/action.yml b/.github/actions/deploy-ubuntu/action.yml index 99403f48cc7..108bea62600 100644 --- a/.github/actions/deploy-ubuntu/action.yml +++ b/.github/actions/deploy-ubuntu/action.yml @@ -212,16 +212,16 @@ runs: if [[ "$CI_DEPLOY_PLATFORM" == "linux-arm64" ]] && [[ "$CI_DEPLOY_MODULE" == "tensorrt" ]]; then echo Installing TensorRT # python3 -m gdown.cli https://drive.google.com/uc?id=1LZRCv4ZAGiDQAu4pvADJIGntq4cGl5tU - curl -LO https://github.com/bytedeco/binaries/releases/download/1.5.10/TensorRT-8.6.1.6.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz - $SUDO tar -hxvf TensorRT-8.6.1.6.Ubuntu-20.04.aarch64-gnu.cuda-12.0.tar.gz -C /usr/local/ + curl -LO https://developer.download.nvidia.com/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Ubuntu-22.04.aarch64-gnu.cuda-12.4.tar.gz + $SUDO tar -hxvf TensorRT-10.0.1.6.Ubuntu-22.04.aarch64-gnu.cuda-12.4.tar.gz -C /usr/local/ $SUDO ln -sf /usr/local/TensorRT* /usr/local/tensorrt fi if [[ "$CI_DEPLOY_PLATFORM" == "linux-x86_64" ]] && [[ "$CI_DEPLOY_MODULE" == "tensorrt" ]]; then echo Installing TensorRT # python3 -m gdown.cli https://drive.google.com/uc?id=1dVhD-DEYY42QbZe1GXl-vxe3k6KqWGsL - curl -LO https://github.com/bytedeco/binaries/releases/download/1.5.10/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz - $SUDO tar -hxvf TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz -C /usr/local/ + curl -LO https://developer.download.nvidia.com/compute/machine-learning/tensorrt/10.0.1/tars/TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz + $SUDO tar -hxvf TensorRT-10.0.1.6.Linux.x86_64-gnu.cuda-12.4.tar.gz -C /usr/local/ $SUDO ln -sf /usr/local/TensorRT* /usr/local/tensorrt fi diff --git a/.github/actions/deploy-windows/action.yml b/.github/actions/deploy-windows/action.yml index e23f86bfec7..8fab07bd268 100644 --- a/.github/actions/deploy-windows/action.yml +++ b/.github/actions/deploy-windows/action.yml @@ -150,9 +150,9 @@ runs: if 
"%CI_DEPLOY_MODULE%"=="tensorrt" ( echo Installing TensorRT rem python -m gdown.cli https://drive.google.com/uc?id=1GfmJ1BKbacLpUU-0i_mGu0sjrAS0Xzzi - curl -LO https://github.com/bytedeco/binaries/releases/download/1.5.10/TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0.zip - unzip TensorRT-8.6.1.6.Windows10.x86_64.cuda-12.0.zip - move TensorRT-8.6.1.6 "%ProgramFiles%\NVIDIA GPU Computing Toolkit\TensorRT" + curl -LO https://developer.download.nvidia.com/compute/machine-learning/tensorrt/10.0.1/zip/TensorRT-10.0.1.6.Windows10.win10.cuda-12.4.zip + unzip TensorRT-10.0.1.6.Windows10.win10.cuda-12.4.zip + move TensorRT-10.0.1.6 "%ProgramFiles%\NVIDIA GPU Computing Toolkit\TensorRT" ) if "%CI_DEPLOY_MODULE%"=="mkl" ( diff --git a/.github/workflows/tritonserver.yml b/.github/workflows/tritonserver.yml index 2d755f7cc03..5f489211a74 100644 --- a/.github/workflows/tritonserver.yml +++ b/.github/workflows/tritonserver.yml @@ -19,6 +19,6 @@ env: jobs: linux-x86_64: runs-on: ubuntu-20.04 - container: nvcr.io/nvidia/tritonserver:23.12-py3 + container: nvcr.io/nvidia/tritonserver:24.03-py3 steps: - uses: bytedeco/javacpp-presets/.github/actions/deploy-ubuntu@actions diff --git a/CHANGELOG.md b/CHANGELOG.md index 718dcf91de1..abc874f1e94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ * Build FFmpeg with zimg to enable zscale filter ([pull #1481](https://github.com/bytedeco/javacpp-presets/pull/1481)) * Enable PulseAudio support for FFmpeg on Linux ([pull #1472](https://github.com/bytedeco/javacpp-presets/pull/1472)) * Virtualize `btCollisionWorld`, `btOverlapFilterCallback`, `btOverlapCallback` from Bullet Physics SDK ([pull #1475](https://github.com/bytedeco/javacpp-presets/pull/1475)) - * Upgrade presets for FFmpeg 7.0, DNNL 3.4.1, PyTorch 2.2.2 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), ONNX 1.16.0, ONNX Runtime 1.17.3, TVM 0.15.0, and their dependencies + * Upgrade presets for FFmpeg 7.0, DNNL 3.4.1, PyTorch 2.2.2 
([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), SentencePiece 0.2.0, TensorFlow Lite 2.16.1, TensorRT 10.0.1.6, Triton Inference Server 2.44.0, ONNX 1.16.0, ONNX Runtime 1.17.3, TVM 0.15.0, and their dependencies ### January 29, 2024 version 1.5.10 * Introduce `macosx-arm64` builds for PyTorch ([pull #1463](https://github.com/bytedeco/javacpp-presets/pull/1463)) diff --git a/README.md b/README.md index 66b1cf55cf4..c3eb305c466 100644 --- a/README.md +++ b/README.md @@ -180,7 +180,7 @@ The JavaCPP Presets depend on Maven, a powerful build system for Java, so before Each child module in turn relies by default on the included [`cppbuild.sh` scripts](#the-cppbuildsh-scripts), explained below, to install its corresponding native libraries in the `cppbuild` subdirectory. To use native libraries already installed somewhere else on the system, other installation directories than `cppbuild` can also be specified either in the `pom.xml` files or in the `.java` configuration files. 
The following versions are supported: * OpenCV 4.9.x https://opencv.org/releases/ - * FFmpeg 6.1.x http://ffmpeg.org/download.html + * FFmpeg 7.0.x http://ffmpeg.org/download.html * FlyCapture 2.13.x https://www.flir.com/products/flycapture-sdk * Spinnaker 3.0.x https://www.flir.com/products/spinnaker-sdk * libdc1394 2.2.6 http://sourceforge.net/projects/libdc1394/files/ @@ -198,7 +198,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip * LZ4 1.9.x https://github.com/lz4/lz4 * MKL 2024.x https://software.intel.com/mkl * MKL-DNN 0.21.x https://github.com/oneapi-src/oneDNN - * DNNL 3.3.x https://github.com/oneapi-src/oneDNN + * DNNL 3.4.x https://github.com/oneapi-src/oneDNN * OpenBLAS 0.3.26 http://www.openblas.net/ * ARPACK-NG 3.9.x https://github.com/opencollab/arpack-ng * CMINPACK 1.3.8 https://github.com/devernay/cminpack @@ -223,18 +223,18 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip * NVIDIA Video Codec SDK 12.1.x https://developer.nvidia.com/nvidia-video-codec-sdk * OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader * MXNet 1.9.x https://github.com/apache/incubator-mxnet - * PyTorch 2.1.x https://github.com/pytorch/pytorch - * SentencePiece 0.1.99 https://github.com/google/sentencepiece + * PyTorch 2.2.x https://github.com/pytorch/pytorch + * SentencePiece 0.2.0 https://github.com/google/sentencepiece * TensorFlow 1.15.x https://github.com/tensorflow/tensorflow - * TensorFlow Lite 2.15.x https://github.com/tensorflow/tensorflow - * TensorRT 8.6.x https://developer.nvidia.com/tensorrt - * Triton Inference Server 2.41.x https://developer.nvidia.com/nvidia-triton-inference-server + * TensorFlow Lite 2.16.x https://github.com/tensorflow/tensorflow + * TensorRT 10.0.x https://developer.nvidia.com/tensorrt + * Triton Inference Server 2.44.x https://developer.nvidia.com/nvidia-triton-inference-server * The Arcade Learning 
Environment 0.8.x https://github.com/mgbellemare/Arcade-Learning-Environment * DepthAI 2.24.x https://github.com/luxonis/depthai-core - * ONNX 1.15.x https://github.com/onnx/onnx + * ONNX 1.16.x https://github.com/onnx/onnx * nGraph 0.26.0 https://github.com/NervanaSystems/ngraph - * ONNX Runtime 1.16.x https://github.com/microsoft/onnxruntime - * TVM 0.14.x https://github.com/apache/tvm + * ONNX Runtime 1.17.x https://github.com/microsoft/onnxruntime + * TVM 0.15.x https://github.com/apache/tvm * Bullet Physics SDK 3.25 https://pybullet.org * LiquidFun http://google.github.io/liquidfun/ * Qt 5.15.x https://download.qt.io/archive/qt/ diff --git a/platform/pom.xml b/platform/pom.xml index 996624de576..297a0693da9 100644 --- a/platform/pom.xml +++ b/platform/pom.xml @@ -297,7 +297,7 @@ org.bytedeco sentencepiece-platform - 0.1.99-${project.version} + 0.2.0-${project.version} @@ -307,17 +307,17 @@ org.bytedeco tensorflow-lite-platform - 2.15.0-${project.version} + 2.16.1-${project.version} org.bytedeco tensorrt-platform - 8.6-${project.version} + 10.0-${project.version} org.bytedeco tritonserver-platform - 2.41-${project.version} + 2.44-${project.version} diff --git a/sentencepiece/README.md b/sentencepiece/README.md index 3bc7e407826..eab096ddc26 100644 --- a/sentencepiece/README.md +++ b/sentencepiece/README.md @@ -9,7 +9,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * SentencePiece 0.1.99 https://github.com/google/sentencepiece + * SentencePiece 0.2.0 https://github.com/google/sentencepiece Please refer to the parent README.md file for more detailed information about the JavaCPP Presets. 
@@ -25,7 +25,7 @@ Sample Usage ------------ Here is a simple example of SentencePiece ported to Java from this C++ example: - * https://github.com/google/sentencepiece/blob/v0.1.99/doc/api.md + * https://github.com/google/sentencepiece/blob/v0.2.0/doc/api.md We can use [Maven 3](http://maven.apache.org/) to download and install automatically all the class files as well as the native binaries. To run this sample code, after creating the `pom.xml` and `SentencePieceExample.java` source files below, simply execute on the command line: ```bash @@ -39,7 +39,7 @@ $ mvn compile exec:java exec.args="en.wiki.bpe.vs10000.model" 4.0.0 org.bytedeco.sentencepiece sentencepiece-example - 1.5.10 + 1.5.11-SNAPSHOT SentencePieceExample 1.8 @@ -49,7 +49,7 @@ $ mvn compile exec:java exec.args="en.wiki.bpe.vs10000.model" org.bytedeco sentencepiece-platform - 0.1.99-1.5.10 + 0.2.0-1.5.11-SNAPSHOT diff --git a/sentencepiece/cppbuild.sh b/sentencepiece/cppbuild.sh index 30963c9455e..2a28ff880f8 100755 --- a/sentencepiece/cppbuild.sh +++ b/sentencepiece/cppbuild.sh @@ -7,7 +7,7 @@ if [[ -z "$PLATFORM" ]]; then exit fi -SENTENCEPIECE_VERSION=0.1.99 +SENTENCEPIECE_VERSION=0.2.0 download https://github.com/google/sentencepiece/archive/refs/tags/v$SENTENCEPIECE_VERSION.zip sentencepiece-$SENTENCEPIECE_VERSION.zip mkdir -p $PLATFORM diff --git a/sentencepiece/platform/pom.xml b/sentencepiece/platform/pom.xml index 407be48adc6..25a3ab36460 100644 --- a/sentencepiece/platform/pom.xml +++ b/sentencepiece/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco sentencepiece-platform - 0.1.99-${project.parent.version} + 0.2.0-${project.parent.version} JavaCPP Presets Platform for SentencePiece diff --git a/sentencepiece/pom.xml b/sentencepiece/pom.xml index 793abb0074a..990960af9f2 100644 --- a/sentencepiece/pom.xml +++ b/sentencepiece/pom.xml @@ -11,7 +11,7 @@ org.bytedeco sentencepiece - 0.1.99-${project.parent.version} + 0.2.0-${project.parent.version} JavaCPP Presets for SentencePiece diff 
--git a/sentencepiece/samples/pom.xml b/sentencepiece/samples/pom.xml index b5682cda693..ecb6058783c 100644 --- a/sentencepiece/samples/pom.xml +++ b/sentencepiece/samples/pom.xml @@ -12,7 +12,7 @@ org.bytedeco sentencepiece-platform - 0.1.99-1.5.11-SNAPSHOT + 0.2.0-1.5.11-SNAPSHOT diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableNBestSentencePieceText.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableNBestSentencePieceText.java index fde469d8b48..029f39c1367 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableNBestSentencePieceText.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableNBestSentencePieceText.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText.java index 462003540cf..a95e8d733c4 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText_ImmutableSentencePiece.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText_ImmutableSentencePiece.java index b075b4b2bf9..4a98cfa5f55 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText_ImmutableSentencePiece.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ImmutableSentencePieceText_ImmutableSentencePiece.java @@ -1,4 
+1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVector.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVector.java index b41ce08f17d..c4b75ceaa68 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVector.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVectorFloatPairVector.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVectorFloatPairVector.java index 81af82c97b9..7fc84cb0359 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVectorFloatPairVector.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/IntVectorFloatPairVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ModelProto.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ModelProto.java index b77aaeb12eb..20122c3767c 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ModelProto.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/ModelProto.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NBestSentencePieceText.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NBestSentencePieceText.java 
index 4dac4ae1fed..8b220234241 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NBestSentencePieceText.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NBestSentencePieceText.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NormalizerSpec.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NormalizerSpec.java index 4e14c2fedd6..51a500dcce6 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NormalizerSpec.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/NormalizerSpec.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/PretokenizerForTrainingInterface.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/PretokenizerForTrainingInterface.java index 2de8ce8b74b..bc52e38ce54 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/PretokenizerForTrainingInterface.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/PretokenizerForTrainingInterface.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentenceIterator.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentenceIterator.java index 99f2cb5bcc7..e5024e33762 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentenceIterator.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentenceIterator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT 
THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; @@ -7,7 +7,7 @@ import org.bytedeco.javacpp.annotation.*; import static org.bytedeco.sentencepiece.global.sentencepiece.*; - // namespace pretokenizer + // namespace normalizer // Iterator over the training sentences. // Training sentences are loaded sequentially as follows: diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceNormalizer.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceNormalizer.java new file mode 100644 index 00000000000..754dcee130a --- /dev/null +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceNormalizer.java @@ -0,0 +1,52 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.sentencepiece; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.sentencepiece.global.sentencepiece.*; + + +@Namespace("sentencepiece") @NoOffset @Properties(inherit = org.bytedeco.sentencepiece.presets.sentencepiece.class) +public class SentencePieceNormalizer extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SentencePieceNormalizer(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public SentencePieceNormalizer(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public SentencePieceNormalizer position(long position) { + return (SentencePieceNormalizer)super.position(position); + } + @Override public SentencePieceNormalizer getPointer(long i) { + return new SentencePieceNormalizer((Pointer)this).offsetAddress(i); + } + + public SentencePieceNormalizer() { super((Pointer)null); allocate(); } + private native void allocate(); + + + + + + public native @ByVal Status LoadFromSerializedProto(@StdString String serialized); + + public native @ByVal Status LoadFromRuleTSV(@StdString String filename); + + public native @ByVal Status LoadFromRuleName(@StdString String name); + + public native @ByVal Status Normalize(@StdString String input, + @StdString @Cast({"char*", "std::string*"}) BytePointer normalized); + + public native @ByVal Status Normalize(@StdString String input, + @StdString @Cast({"char*", "std::string*"}) BytePointer normalized, + @Cast("size_t*") @StdVector SizeTPointer norm_to_orig); + + public native @StdString String Normalize(@StdString String input); + + public native NormalizerSpec mutable_normalizer_spec(); + + public native @StdString String serialized_model_proto(); +} diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceProcessor.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceProcessor.java index e59744e80cd..fc899f7f646 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceProcessor.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceProcessor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; @@ -318,6 +318,21 @@ public class SentencePieceProcessor extends Pointer { // #undef DEFINE_SPP_SERIALIZED_PROTO_IMPL // 
#undef DEFINE_SPP_IMMUTABLE_PROTO_IMPL + ////////////////////////////////////////////////////////////// + // Normalization methods. + + // Normalize `input`. + public native @ByVal Status Normalize(@StdString String input, + @StdString @Cast({"char*", "std::string*"}) BytePointer normalized); + + // Normalize `input`. Stores the utf8-byte offset from + // the normalized string to the original input. + public native @ByVal Status Normalize(@StdString String input, + @StdString @Cast({"char*", "std::string*"}) BytePointer normalized, + @Cast("size_t*") @StdVector SizeTPointer norm_to_orig); + + public native @StdString String Normalize(@StdString String input); + ////////////////////////////////////////////////////////////// // Vocabulary management methods. // @@ -378,4 +393,9 @@ public class SentencePieceProcessor extends Pointer { // returns immutable model proto as std::string. // Useful to save the state of this instance via Python's pickle object. public native @StdString String serialized_model_proto(); + + // Returns mutable normalizer_spec. + // Updating the intenral normalization during the encoding/decoding are not + // recommended and may result in unexpected behavior. Use at your own risk. 
+ public native NormalizerSpec mutable_normalizer_spec(); } diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText.java index c0ec9040a3a..39ad6487450 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText_SentencePiece.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText_SentencePiece.java index 8de0dfe388b..c3769d7d783 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText_SentencePiece.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceText_SentencePiece.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceTrainer.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceTrainer.java index 8496fa818ae..4fe9a18809b 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceTrainer.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/SentencePieceTrainer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; @@ -62,6 +62,22 @@ public class SentencePieceTrainer extends Pointer { public static native @ByVal Status Train( @Const @ByRef StringStringMap kwargs); + // The same as above, but passes the list 
of sentences. + public static native @ByVal Status Train(@StdString String args, + @Const @ByRef StringVector sentences, + @StdString @Cast({"char*", "std::string*"}) BytePointer serialized_model_proto/*=nullptr*/); + public static native @ByVal Status Train(@StdString String args, + @Const @ByRef StringVector sentences); + + // The same as above, but passes the list of sentences. + public static native @ByVal Status Train( + @Const @ByRef StringStringMap kwargs, + @Const @ByRef StringVector sentences, + @StdString @Cast({"char*", "std::string*"}) BytePointer serialized_model_proto/*=nullptr*/); + public static native @ByVal Status Train( + @Const @ByRef StringStringMap kwargs, + @Const @ByRef StringVector sentences); + // Handy function to make a normalizer spec from the pre-compiled // normalization name. Do not use this method in production as it crashes // When `name` is invalid. Useful for unittesting. diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/Status.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/Status.java index 4f692bf06dc..7a6bb49592a 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/Status.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/Status.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringStringMap.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringStringMap.java index 9b45d5aade4..52d6b4d54bc 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringStringMap.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringStringMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git 
a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVector.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVector.java index 19a1d3ee404..7c24169b174 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVector.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVectorFloatPairVector.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVectorFloatPairVector.java index 996481faa86..2d7b2d4a6e4 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVectorFloatPairVector.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/StringVectorFloatPairVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/TrainerSpec.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/TrainerSpec.java index b2c00856295..8d30318f038 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/TrainerSpec.java +++ b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/TrainerSpec.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece; diff --git a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/global/sentencepiece.java b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/global/sentencepiece.java index 85931a3bdb0..68ad4206399 100644 --- a/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/global/sentencepiece.java +++ 
b/sentencepiece/src/gen/java/org/bytedeco/sentencepiece/global/sentencepiece.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.sentencepiece.global; @@ -88,6 +88,9 @@ public class sentencepiece extends org.bytedeco.sentencepiece.presets.sentencepi // Targeting ../ModelProto.java +// Targeting ../NormalizerSpec.java + + // namespace normalizer // #ifndef SWIGGO @@ -118,6 +121,10 @@ public class sentencepiece extends org.bytedeco.sentencepiece.presets.sentencepi // std::random_device. @Namespace("sentencepiece") public static native void SetRandomGeneratorSeed(@Cast("unsigned int") int seed); +// Set the global log level. The default loglevel is 0. +// The log is emitted only when min_log_level >= output_log_level. +@Namespace("sentencepiece") public static native void SetMinLogLevel(int v); + // IO related functions to absorb model formats. // Loads `model_proto` from `filename`. // We can instantiate SentencePieceProcessor as follows: @@ -156,17 +163,16 @@ public class sentencepiece extends org.bytedeco.sentencepiece.presets.sentencepi // #include // #include +// #include // #include "sentencepiece_processor.h" // Targeting ../TrainerSpec.java -// Targeting ../NormalizerSpec.java - - // Targeting ../PretokenizerForTrainingInterface.java + // namespace pretokenizer // Targeting ../SentenceIterator.java @@ -174,6 +180,13 @@ public class sentencepiece extends org.bytedeco.sentencepiece.presets.sentencepi // Targeting ../SentencePieceTrainer.java +// Targeting ../SentencePieceNormalizer.java + + + +// Converts the utf8 byte spans into Unicode char span. 
+@Namespace("sentencepiece") public static native void ConvertToUnicodeAlignment(@StdString String orig, @StdString String norm, + @Cast("size_t*") @StdVector SizeTPointer norm_to_orig); // namespace sentencepiece diff --git a/sentencepiece/src/main/java/org/bytedeco/sentencepiece/presets/sentencepiece.java b/sentencepiece/src/main/java/org/bytedeco/sentencepiece/presets/sentencepiece.java index 1bb5a75dd45..32de9c5956c 100644 --- a/sentencepiece/src/main/java/org/bytedeco/sentencepiece/presets/sentencepiece.java +++ b/sentencepiece/src/main/java/org/bytedeco/sentencepiece/presets/sentencepiece.java @@ -60,6 +60,7 @@ public void map(InfoMap infoMap) { .put(new Info( "sentencepiece::ModelInterface", "sentencepiece::normalizer::Normalizer", + "sentencepiece::SentencePieceNormalizer::Load", "sentencepiece::SentencePieceTrainer::GetNormalizerSpec", "sentencepiece::SentencePieceProcessor::SetVocabulary" ).skip()); diff --git a/tensorflow-lite/README.md b/tensorflow-lite/README.md index 75e6acc4be0..0190bb412f0 100644 --- a/tensorflow-lite/README.md +++ b/tensorflow-lite/README.md @@ -9,7 +9,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * TensorFlow Lite 2.15.0 https://www.tensorflow.org/lite + * TensorFlow Lite 2.16.1 https://www.tensorflow.org/lite Please refer to the parent README.md file for more detailed information about the JavaCPP Presets. @@ -25,7 +25,7 @@ Sample Usage ------------ Here is a simple example of TensorFlow ported to Java from this C++ source file: - * https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/lite/examples/minimal/minimal.cc + * https://github.com/tensorflow/tensorflow/blob/v2.16.1/tensorflow/lite/examples/minimal/minimal.cc We can use [Maven 3](http://maven.apache.org/) to download and install automatically all the class files as well as the native binaries. 
To run this sample code, after creating the `pom.xml` and `Minimal.java` source files below, simply execute on the command line: ```bash @@ -38,7 +38,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic 4.0.0 org.bytedeco.tensorflow-lite examples - 1.5.10 + 1.5.11-SNAPSHOT Minimal @@ -46,7 +46,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic org.bytedeco tensorflow-lite-platform - 2.15.0-1.5.10 + 2.16.1-1.5.11-SNAPSHOT diff --git a/tensorflow-lite/cppbuild.sh b/tensorflow-lite/cppbuild.sh index b31c1a4eaca..b03054b526f 100755 --- a/tensorflow-lite/cppbuild.sh +++ b/tensorflow-lite/cppbuild.sh @@ -12,7 +12,7 @@ if [[ "$EXTENSION" == *gpu ]]; then export CMAKE_FLAGS="-DTFLITE_ENABLE_GPU=ON" fi -TENSORFLOW_VERSION=2.15.0 +TENSORFLOW_VERSION=2.16.1 download https://github.com/tensorflow/tensorflow/archive/v$TENSORFLOW_VERSION.tar.gz tensorflow-$TENSORFLOW_VERSION.tar.gz mkdir -p "$PLATFORM$EXTENSION" diff --git a/tensorflow-lite/platform/pom.xml b/tensorflow-lite/platform/pom.xml index a93668eaf6b..285fc58b9f7 100644 --- a/tensorflow-lite/platform/pom.xml +++ b/tensorflow-lite/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tensorflow-lite-platform - 2.15.0-${project.parent.version} + 2.16.1-${project.parent.version} JavaCPP Presets Platform for TensorFlow Lite diff --git a/tensorflow-lite/pom.xml b/tensorflow-lite/pom.xml index 392aa850583..55d0e78bbde 100644 --- a/tensorflow-lite/pom.xml +++ b/tensorflow-lite/pom.xml @@ -16,7 +16,7 @@ tensorflowlite - 2.15.0 + 2.16.1 ${basedir}/cppbuild/${javacpp.platform}${javacpp.platform.extension}/tensorflow-${tensorflow.version}/ diff --git a/tensorflow-lite/samples/pom.xml b/tensorflow-lite/samples/pom.xml index e06512cdaa2..f103b421598 100644 --- a/tensorflow-lite/samples/pom.xml +++ b/tensorflow-lite/samples/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tensorflow-lite-platform - 2.15.0-1.5.11-SNAPSHOT + 2.16.1-1.5.11-SNAPSHOT diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Allocation.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Allocation.java index 0cbf8dd1a5b..58ce34c1051 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Allocation.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Allocation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/AsyncSubgraph.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/AsyncSubgraph.java index ae1b6aab6cf..060e0f84044 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/AsyncSubgraph.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/AsyncSubgraph.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolver.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolver.java index 2f97bafdc0a..4fd6125db6d 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolver.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolver.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithXNNPACK.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithXNNPACK.java index f8441a45eb5..05eaa29ec02 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithXNNPACK.java +++ 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithXNNPACK.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithoutDefaultDelegates.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithoutDefaultDelegates.java index cdb9334b310..b76e1e780f2 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithoutDefaultDelegates.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/BuiltinOpResolverWithoutDefaultDelegates.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/CommonOpaqueConversionUtil.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/CommonOpaqueConversionUtil.java index 12a74b7d8ce..372d301dadb 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/CommonOpaqueConversionUtil.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/CommonOpaqueConversionUtil.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ErrorReporter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ErrorReporter.java index 51590ed9693..6311a43a111 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ErrorReporter.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ErrorReporter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// 
Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ExternalCpuBackendContext.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ExternalCpuBackendContext.java index 5257eaa617d..9391bb1ee2b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ExternalCpuBackendContext.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ExternalCpuBackendContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FileCopyAllocation.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FileCopyAllocation.java index 48c74190a25..00d50b91f09 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FileCopyAllocation.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FileCopyAllocation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_Pointer_int_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_Pointer_int_int.java index 77c1c5969fa..1274a147540 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_Pointer_int_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_Pointer_int_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_external_Pointer_int_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_external_Pointer_int_int.java index 61c0f0fa3dc..10e6c38b21a 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_external_Pointer_int_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_external_Pointer_int_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v1_Pointer_int_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v1_Pointer_int_int.java index f0ba775f86f..3785fb4abbd 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v1_Pointer_int_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v1_Pointer_int_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v2_Pointer_int_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v2_Pointer_int_int.java index 11e5d769e20..8c45958aeb5 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v2_Pointer_int_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v2_Pointer_int_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v3_Pointer_int_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v3_Pointer_int_int.java index 2bb3cd99c9b..79bb0ae95fb 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v3_Pointer_int_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_builtin_op_v3_Pointer_int_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_BytePointer_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_BytePointer_int.java index daaa88551ed..62a203ecb0c 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_BytePointer_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_BytePointer_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_String_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_String_int.java index c5186801552..9820f35e3b4 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_String_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_Pointer_String_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_external_Pointer_String_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_external_Pointer_String_int.java index 25d1ed61e77..f4e6ac21313 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_external_Pointer_String_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_external_Pointer_String_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_BytePointer_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_BytePointer_int.java index 22b1c5e235b..afeb4b931f6 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_BytePointer_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_BytePointer_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_String_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_String_int.java index 98e49ea0ed4..3e54c055dbd 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_String_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v1_Pointer_String_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_BytePointer_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_BytePointer_int.java index 2f6dc1a75e9..d49b9179f6e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_BytePointer_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_BytePointer_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_String_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_String_int.java index a13fcf38f58..a68d3df4071 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_String_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v2_Pointer_String_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_BytePointer_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_BytePointer_int.java index e87c1f5922b..935bcc653d7 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_BytePointer_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_BytePointer_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_String_int.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_String_int.java index f3f65d4d750..5913e25a910 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_String_int.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Find_custom_op_v3_Pointer_String_int.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FlatBufferModel.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FlatBufferModel.java index a87b55b9846..90defb16e0a 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FlatBufferModel.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/FlatBufferModel.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Free_TfLiteOpaqueContext_Pointer.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Free_TfLiteOpaqueContext_Pointer.java index 6bec969d74e..f1de56df64d 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Free_TfLiteOpaqueContext_Pointer.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Free_TfLiteOpaqueContext_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/GraphInfo.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/GraphInfo.java index 
a22be46719d..d4e45d58471 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/GraphInfo.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/GraphInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_BytePointer_long.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_BytePointer_long.java index 70b77eefbce..7b56a6da691 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_BytePointer_long.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_BytePointer_long.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_String_long.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_String_long.java index 57e0570a353..8451f3cefd1 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_String_long.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Init_TfLiteOpaqueContext_String_long.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InitializationStatus.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InitializationStatus.java index f4e23752e58..ccccba7d248 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InitializationStatus.java +++ 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InitializationStatus.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPair.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPair.java index 571dec3ca8f..854748fb1a9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPair.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPairVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPairVector.java index d75bd613cc0..0e94fc2767c 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPairVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntIntPairVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntResourceBaseMap.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntResourceBaseMap.java index 127c5debec6..6bab0e1a799 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntResourceBaseMap.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/IntResourceBaseMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java index 78fef3186d8..2ad8fe467e1 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -758,6 +758,7 @@ public static class Check_cancelled_func_Pointer extends FunctionPointer { * 5. kTfLiteError: Unexpected/runtime failure. \n * \warning This is an experimental API and subject to change. \n */ public native @Cast("TfLiteStatus") int ModifyGraphWithDelegate(TfLiteDelegate delegate); + public native @Cast("TfLiteStatus") int ModifyGraphWithDelegate(TfLiteOpaqueDelegateStruct delegate); // Owning handle to a TfLiteDelegate instance. @@ -771,9 +772,12 @@ public static class Check_cancelled_func_Pointer extends FunctionPointer { /** \warning This is an experimental API and subject to change. \n - * \brief Ensure the data in {@code tensor.data} is readable. In case delegate is - * used, it might require to copy the data from delegate buffer to raw - * memory. */ + * \brief Ensure the data in {@code tensor.data} is readable. If a + * delegate has been used, and {@code SetAllowBufferHandleOutput(true)} has been + * called, tensor outputs may be stored as delegate buffer handles whose data + * is not directly readable until this method has been called. + * In such cases, this method will copy the data from the delegate buffer + * handle to CPU memory. */ public native @Cast("TfLiteStatus") int EnsureTensorDataIsReadable(int tensor_index); /** \warning This is an experimental API and subject to change. 
\n diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterBuilder.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterBuilder.java index f1ce7677f6e..ebda8da18c7 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterBuilder.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -17,8 +17,8 @@ public class InterpreterBuilder extends Pointer { /** For this constructor, the ErrorReporter will be extracted from the * FlatBufferModel. - * {@code options} object is copied during construction. So caller can release it */ - // after calling the constructor. + * {@code options} object is copied during construction. So caller can release it + * after calling the constructor. */ public InterpreterBuilder(@Const @ByRef FlatBufferModel model, @Const @ByRef OpResolver op_resolver, @Const InterpreterOptions options_experimental/*=nullptr*/) { super((Pointer)null); allocate(model, op_resolver, options_experimental); } @@ -32,8 +32,8 @@ private native void allocate(@Const @ByRef FlatBufferModel model, /** Builds an interpreter given only the raw flatbuffer Model object (instead * of a FlatBufferModel). Mostly used for testing. * If {@code error_reporter} is null, then DefaultErrorReporter() is used. - * {@code options} object is copied during construction. So caller can release it */ - // after calling the constructor. + * {@code options} object is copied during construction. So caller can release it + * after calling the constructor. 
*/ public InterpreterBuilder(@Cast("const tflite::Model*") Pointer model, @Const @ByRef OpResolver op_resolver, ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/, diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterOptions.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterOptions.java index a01d539a266..f8d451227ea 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterOptions.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -11,13 +11,16 @@ /** Options class for {@code Interpreter}. * WARNING: This is an experimental API and subject to change. */ -@Namespace("tflite") @NoOffset @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) +@Namespace("tflite") @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class InterpreterOptions extends Pointer { static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public InterpreterOptions(Pointer p) { super(p); } + /** Default native constructor. */ + public InterpreterOptions() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ public InterpreterOptions(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public InterpreterOptions(Pointer p) { super(p); } + private native void allocate(); private native void allocateArray(long size); @Override public InterpreterOptions position(long position) { return (InterpreterOptions)super.position(position); @@ -26,9 +29,6 @@ public class InterpreterOptions extends Pointer { return new InterpreterOptions((Pointer)this).offsetAddress(i); } - public InterpreterOptions() { super((Pointer)null); allocate(); } - private native void allocate(); - /** Preserving all intermediates tensors for debugging. * WARNING: This is an experimental API and subject to change. */ public native void SetPreserveAllTensors(@Cast("bool") boolean value/*=true*/); @@ -77,4 +77,16 @@ public class InterpreterOptions extends Pointer { // WARNING: This is an experimental API and subject to change. public native void SetDisableDelegateClustering(@Cast("bool") boolean value/*=true*/); public native void SetDisableDelegateClustering(); + + // If set to `true`, the CAST op will cache its output when its input is a + // constant tensor. + // + // WARNING: This is an experimental API and subject to change. + public native void SetCacheConstantCastOp(@Cast("bool") boolean value); + + // If `true`, the CAST op will cache its output when its input is a constant + // tensor. + // + // WARNING: This is an experimental API and subject to change. 
+ public native @Cast("bool") boolean GetCacheConstantCastOp(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterTest.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterTest.java index 5f5cdd2faf0..efb5997a4bf 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterTest.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterTest.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterUtils.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterUtils.java index 4c9c2dcde30..3ea7b4aac09 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterUtils.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterUtils.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterWrapper.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterWrapper.java index 9c925a9429d..7b95f4ac7c9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterWrapper.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/InterpreterWrapper.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Invoke_TfLiteOpaqueContext_TfLiteOpaqueNode.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Invoke_TfLiteOpaqueContext_TfLiteOpaqueNode.java 
index f6b56045390..081609e1ffe 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Invoke_TfLiteOpaqueContext_TfLiteOpaqueNode.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Invoke_TfLiteOpaqueContext_TfLiteOpaqueNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryAllocation.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryAllocation.java index 716e82b99ae..baffdbb2c2b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryAllocation.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryAllocation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryPlanner.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryPlanner.java index 4ec550a34fb..b22f5cf27a8 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryPlanner.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MemoryPlanner.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MutableOpResolver.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MutableOpResolver.java index fe5659ea61d..f454f33329d 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MutableOpResolver.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/MutableOpResolver.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP 
version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubset.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubset.java index 4fd8d8a1ab3..70cd5ffad2b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubset.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubsetVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubsetVector.java index 734359e5b0c..f45701262a9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubsetVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/NodeSubsetVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolver.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolver.java index b45bf0464c3..93362311f0b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolver.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolver.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolverInternal.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolverInternal.java index 
8f0d2ef3f5b..1af1bc51b70 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolverInternal.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/OpResolverInternal.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Prepare_TfLiteOpaqueContext_TfLiteOpaqueNode.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Prepare_TfLiteOpaqueContext_TfLiteOpaqueNode.java index 978df0ba478..151b76e414e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Prepare_TfLiteOpaqueContext_TfLiteOpaqueNode.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Prepare_TfLiteOpaqueContext_TfLiteOpaqueNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Profiler.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Profiler.java index 8a0da9776a7..1011f29b639 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Profiler.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Profiler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationExternalsCache.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationExternalsCache.java index 53d3bbb65a8..79eabfe4a7a 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationExternalsCache.java +++ 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationExternalsCache.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePair.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePair.java index 37b2cbf6aa7..2c7e8036e00 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePair.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePair.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePairVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePairVector.java index e7b803f25c9..e6fffa9e34e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePairVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RegistrationNodePairVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_BytePointer_Pointer.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_BytePointer_Pointer.java index 9f7b9dc76a6..cd046f4197f 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_BytePointer_Pointer.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_BytePointer_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT 
THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_String_Pointer.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_String_Pointer.java index e44437942ad..d868468a6f2 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_String_Pointer.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Reporter_Pointer_String_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ResourceBase.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ResourceBase.java index 37882055bf3..ddb3c37ab83 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ResourceBase.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ResourceBase.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RootProfiler.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RootProfiler.java index 40494a0e598..6e23554460b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RootProfiler.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/RootProfiler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateOperatorProfile.java 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateOperatorProfile.java index 67531cb672e..732bb8ee899 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateOperatorProfile.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateOperatorProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateProfiledOperatorProfile.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateProfiledOperatorProfile.java index 299ba304d50..6717e68b4bf 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateProfiledOperatorProfile.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedDelegateProfiledOperatorProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedOperatorProfile.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedOperatorProfile.java index 7b6d89bd41f..b520cc03495 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedOperatorProfile.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedOperatorProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedProfile.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedProfile.java index dd6affdd447..4b5f688c6d6 100644 --- 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedProfile.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedRuntimeInstrumentationProfile.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedRuntimeInstrumentationProfile.java index 2fdaf184d89..10dd3045ab5 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedRuntimeInstrumentationProfile.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/ScopedRuntimeInstrumentationProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunner.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunner.java index 6e3b2e66c79..62b8f2be1f7 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunner.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunner.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerHelper.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerHelper.java index 8ec57441e24..f9daabb39e2 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerHelper.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerHelper.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP 
version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerJNIHelper.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerJNIHelper.java index 6c06f49e938..d9edb3913e3 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerJNIHelper.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SignatureRunnerJNIHelper.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SingleOpModel.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SingleOpModel.java index f68c4306f85..edbd11ba9e8 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SingleOpModel.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SingleOpModel.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StderrReporter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StderrReporter.java index f274556d1a9..8716329fadf 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StderrReporter.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StderrReporter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringIntMap.java 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringIntMap.java index f3cb75b85b7..efd6a8ecd33 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringIntMap.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringIntMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringStringMap.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringStringMap.java index ca6a497a911..38ab4eb18a9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringStringMap.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringStringMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringVector.java index 14a4ec258d1..a9a163f80b5 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/StringVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java index 8b30555de1d..4c916208d5b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS 
FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -434,6 +434,12 @@ public Subgraph(ErrorReporter error_reporter, // Return read-only vector of node indices in the order of execution. + // Return read-only vector of node indices in the order of execution before + // any delegate was applied. + // + // Note: if no delegate is applied, this vector will be empty. + public native @StdVector IntPointer pre_delegation_execution_plan(); + public native @StdMove RegistrationNodePairVector nodes_and_registration(); // Get a pointer to an operation and registration data structure if in bounds. @@ -647,6 +653,9 @@ public static class SubgraphAllocInfo extends Pointer { // Set the given `InterpreterOptions` object. public native void SetOptions(InterpreterOptions options); + // WARNING: This is an experimental API and subject to change. + public native @Const InterpreterOptions GetOptions(); + // WARNING: This is an experimental API and subject to change. // True if all intermediates tensors should be preserved for debugging. 
public native @Cast("bool") boolean ShouldPreserveAllTensors(); diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SubgraphVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SubgraphVector.java index dc1ad8aa16f..f603941f062 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SubgraphVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/SubgraphVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryProfiler.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryProfiler.java index f9112aae470..55a4da3989c 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryProfiler.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryProfiler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryStatusCode.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryStatusCode.java index 03ce72cf936..d7cfe48baa1 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryStatusCode.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TelemetryStatusCode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TensorHandle.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TensorHandle.java index e32820c10ef..71bd6e364a4 100644 --- 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TensorHandle.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TensorHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegate.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegate.java index 474cc94f994..33edaba4e6e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegate.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegate.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegation.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegation.java index 1dc2bfed269..eda0580364e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegation.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TestDelegation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteAffineQuantization.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteAffineQuantization.java index b7fbd1cc904..5c38f5f1b0b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteAffineQuantization.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteAffineQuantization.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO 
NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,13 +9,13 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Parameters for asymmetric quantization across a dimension (i.e per output -// channel quantization). -// quantized_dimension specifies which dimension the scales and zero_points -// correspond to. -// For a particular value in quantized_dimension, quantized values can be -// converted back to float using: -// real_value = scale * (quantized_value - zero_point) +/** Parameters for asymmetric quantization across a dimension (i.e per output + * channel quantization). + * quantized_dimension specifies which dimension the scales and zero_points + * correspond to. + * For a particular value in quantized_dimension, quantized values can be + * converted back to float using: + * {@code real_value = scale * (quantized_value - zero_point)} */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteAffineQuantization extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayDeleter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayDeleter.java deleted file mode 100644 index 0106a4ac1e7..00000000000 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayDeleter.java +++ /dev/null @@ -1,34 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorflowlite; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; - - -// Function object used as a deleter for unique_ptr holding TFLite*Array -// objects. -@Namespace("tflite::array_internal") @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) -public class TfLiteArrayDeleter extends Pointer { - static { Loader.load(); } - /** Default native constructor. 
*/ - public TfLiteArrayDeleter() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public TfLiteArrayDeleter(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TfLiteArrayDeleter(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public TfLiteArrayDeleter position(long position) { - return (TfLiteArrayDeleter)super.position(position); - } - @Override public TfLiteArrayDeleter getPointer(long i) { - return new TfLiteArrayDeleter((Pointer)this).offsetAddress(i); - } - - public native @Name("operator ()") void apply(TfLiteIntArray a); - public native @Name("operator ()") void apply(TfLiteFloatArray a); -} diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayInfo.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayInfo.java deleted file mode 100644 index 1cda7150b0a..00000000000 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteArrayInfo.java +++ /dev/null @@ -1,32 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorflowlite; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; - - -// Maps T to the corresponding TfLiteArray type. - -@Name("tflite::array_internal::TfLiteArrayInfo") @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) -public class TfLiteArrayInfo extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public TfLiteArrayInfo() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ - public TfLiteArrayInfo(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public TfLiteArrayInfo(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public TfLiteArrayInfo position(long position) { - return (TfLiteArrayInfo)super.position(position); - } - @Override public TfLiteArrayInfo getPointer(long i) { - return new TfLiteArrayInfo((Pointer)this).offsetAddress(i); - } - -} diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex128.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex128.java index 3cdfa092308..97209523cdc 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex128.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex128.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Double-precision complex data type compatible with the C99 definition. +/** Double-precision complex data type compatible with the C99 definition. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteComplex128 extends Pointer { static { Loader.load(); } @@ -29,5 +29,5 @@ public class TfLiteComplex128 extends Pointer { } public native double re(); public native TfLiteComplex128 re(double setter); - public native double im(); public native TfLiteComplex128 im(double setter); // real and imaginary parts, respectively. + public native double im(); public native TfLiteComplex128 im(double setter); /** real and imaginary parts, respectively. 
*/ } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex64.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex64.java index 3aff387396f..01f0b9886a7 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex64.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteComplex64.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Single-precision complex data type compatible with the C99 definition. +/** Single-precision complex data type compatible with the C99 definition. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteComplex64 extends Pointer { static { Loader.load(); } @@ -29,5 +29,5 @@ public class TfLiteComplex64 extends Pointer { } public native float re(); public native TfLiteComplex64 re(float setter); - public native float im(); public native TfLiteComplex64 im(float setter); // real and imaginary parts, respectively. + public native float im(); public native TfLiteComplex64 im(float setter); /** real and imaginary parts, respectively. 
*/ } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteContext.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteContext.java index 5ce79c37f19..388a2d90d0a 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteContext.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,6 +9,19 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; +/** {@code TfLiteContext} allows an op to access the tensors. + * + * {@code TfLiteContext} is a struct that is created by the TF Lite runtime + * and passed to the "methods" (C function pointers) in the + * {@code TfLiteRegistration} struct that are used to define custom ops and custom + * delegate kernels. It contains information and methods (C function pointers) + * that can be called by the code implementing a custom op or a custom delegate + * kernel. These methods provide access to the context in which that custom op + * or custom delegate kernel occurs, such as access to the input and output + * tensors for that op, as well as methods for allocating memory buffers + * and intermediate tensors, etc. + * + * See also {@code TfLiteOpaqueContext}, which is an more ABI-stable equivalent. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteContext extends Pointer { static { Loader.load(); } @@ -27,47 +40,60 @@ public class TfLiteContext extends Pointer { return new TfLiteContext((Pointer)this).offsetAddress(i); } - // Number of tensors in the context. + /** Number of tensors in the context. 
*/ + + /// + /// + /// + /// + /// + /// public native @Cast("size_t") long tensors_size(); public native TfLiteContext tensors_size(long setter); - // The execution plan contains a list of the node indices in execution - // order. execution_plan->size is the current number of nodes. And, - // execution_plan->data[0] is the first node that needs to be run. - // TfLiteDelegates can traverse the current execution plan by iterating - // through each member of this array and using GetNodeAndRegistration() to - // access details about a node. i.e. - // - // TfLiteIntArray* execution_plan; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); - // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { - // int node_index = execution_plan->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, ®); - // } - // Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime. - // Future calls to GetExecutionPlan invalidates earlier outputs. The following - // code snippet shows the issue of such an invocation pattern. After calling - // CheckNode, subsequent access to `plan_1st` is undefined. - // - // void CheckNode(const TfLiteNode* node) { - // ... - // TfLiteIntArray* plan_2nd; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd)); - // ... - // } - // - // TfLiteIntArray* plan_1st; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st)); - // for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) { - // int node_index = plan_1st->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, ®); - // CheckNode(node); - // } - // - // WARNING: This is an experimental interface that is subject to change. + /** The execution plan contains a list of the node indices in execution + * order. 
execution_plan->size is the current number of nodes. And, + * execution_plan->data[0] is the first node that needs to be run. + * TfLiteDelegates can traverse the current execution plan by iterating + * through each member of this array and using GetNodeAndRegistration() to + * access details about a node. i.e. + * + * + * TfLiteIntArray* execution_plan; + * TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, + * &execution_plan)); + * for (int exec_index = 0; exec_index < execution_plan->size; + * exec_index++) { + * int node_index = execution_plan->data[exec_index]; + * TfLiteNode* node; + * TfLiteRegistration* reg; + * context->GetNodeAndRegistration(context, node_index, &node, ®); + * } + * + * Note: the memory pointed by '{@code *execution_plan} is OWNED by TfLite runtime. + * Future calls to GetExecutionPlan invalidates earlier outputs. The + * following code snippet shows the issue of such an invocation pattern. + * After calling CheckNode, subsequent access to {@code plan_1st} is undefined. + * + * void CheckNode(const TfLiteNode* node) { + * ... + * TfLiteIntArray* plan_2nd; + * TF_LITE_ENSURE_STATUS( + * context->GetExecutionPlan(context, &plan_2nd) + * ); + * ... + * } + * + * TfLiteIntArray* plan_1st; + * TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st)); + * for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) { + * int node_index = plan_1st->data[exec_index]; + * TfLiteNode* node; + * TfLiteRegistration* reg; + * context->GetNodeAndRegistration(context, node_index, &node, ®); + * CheckNode(node); + * } + * + * WARNING: This is an experimental interface that is subject to change. */ public static class GetExecutionPlan_TfLiteContext_PointerPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -79,14 +105,14 @@ public static class GetExecutionPlan_TfLiteContext_PointerPointer extends Functi } public native GetExecutionPlan_TfLiteContext_PointerPointer GetExecutionPlan(); public native TfLiteContext GetExecutionPlan(GetExecutionPlan_TfLiteContext_PointerPointer setter); - // An array of tensors in the interpreter context (of length `tensors_size`) + /** An array of tensors in the interpreter context (of length {@code tensors_size}) */ public native TfLiteTensor tensors(); public native TfLiteContext tensors(TfLiteTensor setter); - // opaque full context ptr (an opaque c++ data structure) + /** opaque full context ptr (an opaque c++ data structure) */ public native Pointer impl_(); public native TfLiteContext impl_(Pointer setter); - // Request memory pointer be resized. Updates dimensions on the tensor. - // NOTE: ResizeTensor takes ownership of newSize. + /** Request memory pointer be resized. Updates dimensions on the tensor. + * NOTE: ResizeTensor takes ownership of newSize. */ public static class ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -97,12 +123,12 @@ public static class ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray exten TfLiteIntArray new_size); } public native ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray ResizeTensor(); public native TfLiteContext ResizeTensor(ResizeTensor_TfLiteContext_TfLiteTensor_TfLiteIntArray setter); - // Request that an error be reported with format string msg. + /** Request that an error be reported with format string msg. */ - // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If - // non-null, the value pointed to by `first_new_tensor_index` will be set to - // the index of the first new tensor. + /** Add {@code tensors_to_add} tensors, preserving pre-existing Tensor entries. 
If + * non-null, the value pointed to by {@code first_new_tensor_index} will be set to + * the index of the first new tensor. */ public static class AddTensors_TfLiteContext_int_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -112,10 +138,13 @@ public static class AddTensors_TfLiteContext_int_IntPointer extends FunctionPoin public native @Cast("TfLiteStatus") int call(TfLiteContext arg0, int tensors_to_add, IntPointer first_new_tensor_index); } + + /// public native AddTensors_TfLiteContext_int_IntPointer AddTensors(); public native TfLiteContext AddTensors(AddTensors_TfLiteContext_int_IntPointer setter); - // Get a Tensor node by node_index. - // WARNING: This is an experimental interface that is subject to change. + /** Get a Tensor node by node_index. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -128,8 +157,8 @@ public static class GetNodeAndRegistration_TfLiteContext_int_PointerPointer_Poin } public native GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer GetNodeAndRegistration(); public native TfLiteContext GetNodeAndRegistration(GetNodeAndRegistration_TfLiteContext_int_PointerPointer_PointerPointer setter); - // Replace ops with one or more stub delegate operations. This function - // does not take ownership of `nodes_to_replace`. + /** Replace ops with one or more stub delegate operations. This function + * does not take ownership of {@code nodes_to_replace}. */ public static class ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -142,12 +171,15 @@ public static class ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRe } public native ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate ReplaceNodeSubsetsWithDelegateKernels(); public native TfLiteContext ReplaceNodeSubsetsWithDelegateKernels(ReplaceNodeSubsetsWithDelegateKernels_TfLiteContext_TfLiteRegistration_TfLiteIntArray_TfLiteDelegate setter); - // Number of threads that are recommended to subsystems like gemmlowp and - // eigen. + /** Number of threads that are recommended to subsystems like gemmlowp and + * eigen. */ + + /// public native int recommended_num_threads(); public native TfLiteContext recommended_num_threads(int setter); - // Access external contexts by type. - // WARNING: This is an experimental interface that is subject to change. + /** Access external contexts by type. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class GetExternalContext_TfLiteContext_int extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -157,10 +189,13 @@ public static class GetExternalContext_TfLiteContext_int extends FunctionPointer public native TfLiteExternalContext call(TfLiteContext arg0, @Cast("TfLiteExternalContextType") int arg1); } + + /// public native GetExternalContext_TfLiteContext_int GetExternalContext(); public native TfLiteContext GetExternalContext(GetExternalContext_TfLiteContext_int setter); - // Set the value of a external context. Does not take ownership of the - // pointer. - // WARNING: This is an experimental interface that is subject to change. + /** Set the value of a external context. Does not take ownership of the + * pointer. + * + * WARNING: This is an experimental interface that is subject to change. 
*/ public static class SetExternalContext_TfLiteContext_int_TfLiteExternalContext extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -170,21 +205,28 @@ public static class SetExternalContext_TfLiteContext_int_TfLiteExternalContext e public native void call(TfLiteContext arg0, @Cast("TfLiteExternalContextType") int arg1, TfLiteExternalContext arg2); } + + /// public native SetExternalContext_TfLiteContext_int_TfLiteExternalContext SetExternalContext(); public native TfLiteContext SetExternalContext(SetExternalContext_TfLiteContext_int_TfLiteExternalContext setter); - // Flag for allowing float16 precision for FP32 calculation. - // default: false. - // WARNING: This is an experimental API and subject to change. + /** Flag for allowing float16 precision for FP32 calculation. + * default: false. + * + * WARNING: This is an experimental API and subject to change. */ public native @Cast("bool") boolean allow_fp32_relax_to_fp16(); public native TfLiteContext allow_fp32_relax_to_fp16(boolean setter); - // Pointer to the op-level profiler, if set; nullptr otherwise. + /** Pointer to the op-level profiler, if set; nullptr otherwise. */ + + /// public native Pointer profiler(); public native TfLiteContext profiler(Pointer setter); - // Allocate persistent buffer which has the same life time as the interpreter. - // Returns nullptr on failure. - // The memory is allocated from heap for TFL, and from tail in TFLM. - // This method is only available in Init or Prepare stage. - // WARNING: This is an experimental interface that is subject to change. + /** Allocate persistent buffer which has the same life time as the + * interpreter. Returns {@code nullptr} on failure. The memory is allocated from + * heap for TFL, and from tail in TFLM. This method is only available in + * {@code Init} or {@code Prepare} stage. + * + * WARNING: This is an experimental interface that is subject + * to change. 
*/ public static class AllocatePersistentBuffer_TfLiteContext_long extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -193,14 +235,19 @@ public static class AllocatePersistentBuffer_TfLiteContext_long extends Function private native void allocate(); public native Pointer call(TfLiteContext ctx, @Cast("size_t") long bytes); } + + /// + /// public native AllocatePersistentBuffer_TfLiteContext_long AllocatePersistentBuffer(); public native TfLiteContext AllocatePersistentBuffer(AllocatePersistentBuffer_TfLiteContext_long setter); - // Allocate a buffer which will be deallocated right after invoke phase. - // The memory is allocated from heap in TFL, and from volatile arena in TFLM. - // This method is only available in invoke stage. - // NOTE: If possible use RequestScratchBufferInArena method to avoid memory - // allocation during inference time. - // WARNING: This is an experimental interface that is subject to change. + /** Allocate a buffer which will be deallocated right after invoke phase. + * The memory is allocated from heap in TFL, and from volatile arena in TFLM. + * This method is only available in invoke stage. + * + * NOTE: If possible use {@code RequestScratchBufferInArena} method to avoid memory + * allocation during inference time. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class AllocateBufferForEval_TfLiteContext_long_PointerPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -210,13 +257,16 @@ public static class AllocateBufferForEval_TfLiteContext_long_PointerPointer exte public native @Cast("TfLiteStatus") int call(TfLiteContext ctx, @Cast("size_t") long bytes, @Cast("void**") PointerPointer ptr); } + + /// public native AllocateBufferForEval_TfLiteContext_long_PointerPointer AllocateBufferForEval(); public native TfLiteContext AllocateBufferForEval(AllocateBufferForEval_TfLiteContext_long_PointerPointer setter); - // Request a scratch buffer in the arena through static memory planning. - // This method is only available in Prepare stage and the buffer is allocated - // by the interpreter between Prepare and Eval stage. In Eval stage, - // GetScratchBuffer API can be used to fetch the address. - // WARNING: This is an experimental interface that is subject to change. + /** Request a scratch buffer in the arena through static memory planning. + * This method is only available in {@code Prepare} stage and the buffer is + * allocated by the interpreter between Prepare and Eval stage. In {@code Eval} + * stage, {@code GetScratchBuffer} API can be used to fetch the address. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class RequestScratchBufferInArena_TfLiteContext_long_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -226,11 +276,14 @@ public static class RequestScratchBufferInArena_TfLiteContext_long_IntPointer ex public native @Cast("TfLiteStatus") int call(TfLiteContext ctx, @Cast("size_t") long bytes, IntPointer buffer_idx); } + + /// public native RequestScratchBufferInArena_TfLiteContext_long_IntPointer RequestScratchBufferInArena(); public native TfLiteContext RequestScratchBufferInArena(RequestScratchBufferInArena_TfLiteContext_long_IntPointer setter); - // Get the scratch buffer pointer. - // This method is only available in Eval stage. 
- // WARNING: This is an experimental interface that is subject to change. + /** Get the scratch buffer pointer. + * This method is only available in Eval stage. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class GetScratchBuffer_TfLiteContext_int extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -239,12 +292,15 @@ public static class GetScratchBuffer_TfLiteContext_int extends FunctionPointer { private native void allocate(); public native Pointer call(TfLiteContext ctx, int buffer_idx); } + + /// public native GetScratchBuffer_TfLiteContext_int GetScratchBuffer(); public native TfLiteContext GetScratchBuffer(GetScratchBuffer_TfLiteContext_int setter); - // Resize the memory pointer of the `tensor`. This method behaves the same as - // `ResizeTensor`, except that it makes a copy of the shape array internally - // so the shape array could be deallocated right afterwards. - // WARNING: This is an experimental interface that is subject to change. + /** Resize the memory pointer of the {@code tensor}. This method behaves the same as + * {@code ResizeTensor}, except that it makes a copy of the shape array internally + * so the shape array could be deallocated right afterwards. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -255,28 +311,32 @@ public static class ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPoint TfLiteTensor tensor, int dims, @Const IntPointer shape); } + + /// + /// + /// public native ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer ResizeTensorExplicit(); public native TfLiteContext ResizeTensorExplicit(ResizeTensorExplicit_TfLiteContext_TfLiteTensor_int_IntPointer setter); - // This method provides a preview of post-delegation partitioning. Each - // TfLiteDelegateParams in the referenced array corresponds to one instance of - // the delegate kernel. - // Example usage: - // - // TfLiteIntArray* nodes_to_replace = ...; - // TfLiteDelegateParams* params_array; - // int num_partitions = 0; - // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( - // context, delegate, nodes_to_replace, ¶ms_array, &num_partitions)); - // for (int idx = 0; idx < num_partitions; idx++) { - // const auto& partition_params = params_array[idx]; - // ... - // } - // - // NOTE: The context owns the memory referenced by partition_params_array. It - // will be cleared with another call to PreviewDelegatePartitioning, or after - // TfLiteDelegateParams::Prepare returns. - // - // WARNING: This is an experimental interface that is subject to change. + /** This method provides a preview of post-delegation partitioning. Each + * TfLiteDelegateParams in the referenced array corresponds to one instance + * of the delegate kernel. Example usage: + * + * TfLiteIntArray* nodes_to_replace = ...; + * TfLiteDelegateParams* params_array; + * int num_partitions = 0; + * TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( + * context, delegate, nodes_to_replace, ¶ms_array, + * &num_partitions)); + * for (int idx = 0; idx < num_partitions; idx++) { + * const auto& partition_params = params_array[idx]; + * ... + * } + * + * NOTE: The context owns the memory referenced by partition_params_array. 
It + * will be cleared with another call to PreviewDelegatePartitioning, or after + * TfLiteDelegateParams::Prepare returns. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -287,11 +347,16 @@ public static class PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_Poi TfLiteContext context, @Const TfLiteIntArray nodes_to_replace, @Cast("TfLiteDelegateParams**") PointerPointer partition_params_array, IntPointer num_partitions); } + + /// + /// public native PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer PreviewDelegatePartitioning(); public native TfLiteContext PreviewDelegatePartitioning(PreviewDelegatePartitioning_TfLiteContext_TfLiteIntArray_PointerPointer_IntPointer setter); - // Returns a TfLiteTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. + /** Returns a TfLiteTensor struct for a given index. + * + * WARNING: This is an experimental interface that is subject to change. + * + * WARNING: This method may not be available on all platforms. */ public static class GetTensor_TfLiteContext_int extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -301,11 +366,16 @@ public static class GetTensor_TfLiteContext_int extends FunctionPointer { public native TfLiteTensor call(@Const TfLiteContext context, int tensor_idx); } + + /// + /// public native GetTensor_TfLiteContext_int GetTensor(); public native TfLiteContext GetTensor(GetTensor_TfLiteContext_int setter); - // Returns a TfLiteEvalTensor struct for a given index. 
- // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. + /** Returns a TfLiteEvalTensor struct for a given index. + * + * WARNING: This is an experimental interface that is subject to change. + * + * WARNING: This method may not be available on all platforms. */ public static class GetEvalTensor_TfLiteContext_int extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -315,16 +385,18 @@ public static class GetEvalTensor_TfLiteContext_int extends FunctionPointer { public native TfLiteEvalTensor call(@Const TfLiteContext context, int tensor_idx); } + + /// public native GetEvalTensor_TfLiteContext_int GetEvalTensor(); public native TfLiteContext GetEvalTensor(GetEvalTensor_TfLiteContext_int setter); - // Retrieves named metadata buffer from the TFLite model. - // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer - // Model: that is, there exists a `metadata` entry with given `name` string. - // (see TFLite's schema.fbs). - // The corresponding `buffer` information is populated in `ptr` & `bytes`. - // The data from `ptr` is valid for the lifetime of the Interpreter. - // - // WARNING: This is an experimental interface that is subject to change. + /** Retrieves named metadata buffer from the TFLite model. + * Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer + * Model: that is, there exists a {@code metadata} entry with given {@code name} string. + * (see TFLite's schema.fbs). + * The corresponding {@code buffer} information is populated in {@code ptr} & {@code bytes}. + * The data from {@code ptr} is valid for the lifetime of the Interpreter. + * + * WARNING: This is an experimental interface that is subject to change. 
*/ public static class GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -335,16 +407,20 @@ public static class GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_Si @Cast("const char*") BytePointer name, @Cast("const char**") PointerPointer ptr, @Cast("size_t*") SizeTPointer bytes); } + + /// + /// public native GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer GetModelMetadata(); public native TfLiteContext GetModelMetadata(GetModelMetadata_TfLiteContext_BytePointer_PointerPointer_SizeTPointer setter); - // Retrieves the corresponding TfLiteContext of a subgraph that the given - // subgraph_index points to and switches to the delegate context for that - // subgraph. If an invalid subgraph index is given, returns kTfLiteError. - // NOTE: This function is expected to be paired with ReleaseSubgraphContext() - // once the delegate preparation is done and/or the delegate context functions - // are no longer needed. - // - // WARNING: This is an experimental interface that is subject to change. + /** Retrieves the corresponding TfLiteContext of a subgraph that the given + * subgraph_index points to and switches to the delegate context for that + * subgraph. If an invalid subgraph index is given, returns kTfLiteError. + * + * NOTE: This function is expected to be paired with ReleaseSubgraphContext() + * once the delegate preparation is done and/or the delegate context + * functions are no longer needed. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class AcquireSubgraphContext_TfLiteContext_int_PointerPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -355,14 +431,18 @@ public static class AcquireSubgraphContext_TfLiteContext_int_PointerPointer exte TfLiteContext context, int subgraph_index, @Cast("TfLiteContext**") PointerPointer acquired_context); } + + /// + /// public native AcquireSubgraphContext_TfLiteContext_int_PointerPointer AcquireSubgraphContext(); public native TfLiteContext AcquireSubgraphContext(AcquireSubgraphContext_TfLiteContext_int_PointerPointer setter); - // Releases the subgraph context by switching back to the TFLite kernel - // context for the subgraph that the given subgraph_index points to. - // NOTE: This function is expected to be used after AcquireSubgraphContext() - // once the delegate preparation is done and/or the delegate context functions - // are no longer needed. - // - // WARNING: This is an experimental interface that is subject to change. + /** Releases the subgraph context by switching back to the TFLite kernel + * context for the subgraph that the given subgraph_index points to. + * + * NOTE: This function is expected to be used after AcquireSubgraphContext() + * once the delegate preparation is done and/or the delegate context + * functions are no longer needed. + * + * WARNING: This is an experimental interface that is subject to change. */ public static class ReleaseSubgraphContext_TfLiteContext_int extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteCustomAllocation.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteCustomAllocation.java index 666f38704f9..1af21cf5f3e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteCustomAllocation.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteCustomAllocation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,10 +9,10 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Defines a custom memory allocation not owned by the runtime. -// `data` should be aligned to kDefaultTensorAlignment defined in -// lite/util.h. (Currently 64 bytes) -// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. +/** Defines a custom memory allocation not owned by the runtime. + * {@code data} should be aligned to kDefaultTensorAlignment defined in + * lite/util.h. (Currently 64 bytes) + * NOTE: See {@code Interpreter::SetCustomAllocationForTensor} for details on usage. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteCustomAllocation extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegate.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegate.java index 14d7a25d705..b086dc615f8 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegate.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegate.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// WARNING: This is an experimental interface that is subject to change. +/** WARNING: This is an experimental interface that is subject to change. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteDelegate extends Pointer { static { Loader.load(); } @@ -28,16 +28,17 @@ public class TfLiteDelegate extends Pointer { return new TfLiteDelegate((Pointer)this).offsetAddress(i); } - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for deallocating this when it is destroyed. + /** Data that delegate needs to identify itself. This data is owned by the + * delegate. The delegate is owned in the user code, so the delegate is + * responsible for deallocating this when it is destroyed. */ public native Pointer data_(); public native TfLiteDelegate data_(Pointer setter); - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. 
It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. + /** Invoked by {@code ModifyGraphWithDelegate}. This prepare is called, giving the + * delegate a view of the current graph through {@code TfLiteContext*}. It + * typically will look at the nodes and call + * {@code ReplaceNodeSubsetsWithDelegateKernels()} to ask the TensorFlow lite + * runtime to create macro-nodes to represent delegated subgraphs of the + * original graph. */ public static class Prepare_TfLiteContext_TfLiteDelegate extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -49,10 +50,10 @@ public static class Prepare_TfLiteContext_TfLiteDelegate extends FunctionPointer } public native Prepare_TfLiteContext_TfLiteDelegate Prepare(); public native TfLiteDelegate Prepare(Prepare_TfLiteContext_TfLiteDelegate setter); - // Copy the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. + /** Copy the data from delegate buffer handle into raw memory of the given + * {@code tensor}. Note that the delegate is allowed to allocate the raw bytes as + * long as it follows the rules for {@code kTfLiteDynamic} tensors, in which case + * this cannot be null. */ public static class CopyFromBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -66,8 +67,8 @@ public static class CopyFromBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLite } public native CopyFromBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor CopyFromBufferHandle(); public native TfLiteDelegate CopyFromBufferHandle(CopyFromBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor setter); - // Copy the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. + /** Copy the data from raw memory of the given {@code tensor} to delegate buffer + * handle. This can be null if the delegate doesn't use its own buffer. */ public static class CopyToBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -81,10 +82,10 @@ public static class CopyToBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTe } public native CopyToBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor CopyToBufferHandle(); public native TfLiteDelegate CopyToBufferHandle(CopyToBufferHandle_TfLiteContext_TfLiteDelegate_int_TfLiteTensor setter); - // Free the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. + /** Free the Delegate Buffer Handle. Note: This only frees the handle, but + * this doesn't release the underlying resource (e.g. textures). The + * resources are either owned by application layer or the delegate. + * This can be null if the delegate doesn't use its own buffer. */ public static class FreeBufferHandle_TfLiteContext_TfLiteDelegate_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ @@ -97,16 +98,18 @@ public native void call(TfLiteContext context, } public native FreeBufferHandle_TfLiteContext_TfLiteDelegate_IntPointer FreeBufferHandle(); public native TfLiteDelegate FreeBufferHandle(FreeBufferHandle_TfLiteContext_TfLiteDelegate_IntPointer setter); - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. + /** Bitmask flags. See the comments in {@code TfLiteDelegateFlags}. */ + + /// public native @Cast("int64_t") long flags(); public native TfLiteDelegate flags(long setter); - // The opaque delegate builder associated with this object. If set then the - // TF Lite runtime will give precedence to this field. E.g. instead of - // invoking 'Prepare' via the function pointer inside the 'TfLiteDelegate' - // object, the runtime will first check if the corresponding function - // pointer inside 'opaque_delegate_builder' is set and if so invoke that. - // - // If this field is non-null, then the 'Prepare' field (of the - // 'TfLiteDelegate') should be null. + /** The opaque delegate builder associated with this object. If set then the + * TF Lite runtime will give precedence to this field. E.g. instead of + * invoking {@code Prepare} via the function pointer inside the {@code TfLiteDelegate} + * object, the runtime will first check if the corresponding function + * pointer inside {@code opaque_delegate_builder} is set and if so invoke that. + * + * If this field is non-null, then the {@code Prepare} field (of the + * {@code TfLiteDelegate}) should be null. 
*/ public native TfLiteOpaqueDelegateBuilder opaque_delegate_builder(); public native TfLiteDelegate opaque_delegate_builder(TfLiteOpaqueDelegateBuilder setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegateParams.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegateParams.java index ee21ec403f6..3b55893ddf5 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegateParams.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegateParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -10,13 +10,13 @@ // #endif // TF_LITE_STATIC_MEMORY -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteDelegateParams has to be allocated in a way that it's -// trivially destructable. It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateDelegateParams` function in `interpreter.cc` details. +/** WARNING: This is an experimental interface that is subject to change. + * + * Currently, TfLiteDelegateParams has to be allocated in a way that it's + * trivially destructable. It will be stored as {@code builtin_data} field in + * {@code TfLiteNode} of the delegate node. + * + * See also the {@code CreateDelegateParams} function in {@code interpreter.cc} details. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteDelegateParams extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegatePtrVector.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegatePtrVector.java index 53ef68dab2d..8f8247a1857 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegatePtrVector.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDelegatePtrVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDimensionMetadata.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDimensionMetadata.java index 8e8bd83fcce..6661a95f69e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDimensionMetadata.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteDimensionMetadata.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Metadata to encode each dimension in a sparse tensor. +/** Metadata to encode each dimension in a sparse tensor. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteDimensionMetadata extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteEvalTensor.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteEvalTensor.java index 204872259a4..35cde2bd4cb 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteEvalTensor.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteEvalTensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -10,8 +10,8 @@ // #endif // TF_LITE_STATIC_MEMORY -// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount -// of information required for a kernel to run during TfLiteRegistration::Eval. +/** Light-weight tensor struct for TF Micro runtime. Provides the minimal amount + * of information required for a kernel to run during TfLiteRegistration::Eval. */ // TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM // builds with this flag by default internally. @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) @@ -32,15 +32,15 @@ public class TfLiteEvalTensor extends Pointer { return new TfLiteEvalTensor((Pointer)this).offsetAddress(i); } - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. + /** A union of data pointers. The appropriate type should be used for a typed + * tensor based on {@code type}. */ public native @ByRef TfLitePtrUnion data(); public native TfLiteEvalTensor data(TfLitePtrUnion setter); - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. + /** A pointer to a structure representing the dimensionality interpretation + * that the buffer should have. 
*/ public native TfLiteIntArray dims(); public native TfLiteEvalTensor dims(TfLiteIntArray setter); - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. + /** The data type specification for data stored in {@code data}. This affects + * what member of {@code data} union should be used. */ public native @Cast("TfLiteType") int type(); public native TfLiteEvalTensor type(int setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteExternalContext.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteExternalContext.java index 8eb849f3310..ae78a24bb7b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteExternalContext.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteExternalContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,11 +9,11 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// An external context is a collection of information unrelated to the TF Lite -// framework, but useful to a subset of the ops. TF Lite knows very little -// about the actual contexts, but it keeps a list of them, and is able to -// refresh them if configurations like the number of recommended threads -// change. +/** An external context is a collection of information unrelated to the TF Lite + * framework, but useful to a subset of the ops. TF Lite knows very little + * about the actual contexts, but it keeps a list of them, and is able to + * refresh them if configurations like the number of recommended threads + * change. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteExternalContext extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloat16.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloat16.java index 20a66baef6e..4b2c89e2453 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloat16.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloat16.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Half precision data type compatible with the C99 definition. +/** Half precision data type compatible with the C99 definition. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteFloat16 extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloatArray.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloatArray.java index eb9f58d4bad..cff94587097 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloatArray.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteFloatArray.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -10,7 +10,7 @@ // #endif // TF_LITE_STATIC_MEMORY -// Fixed size list of floats. Used for per-channel quantization. +/** Fixed size list of floats. Used for per-channel quantization. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteFloatArray extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteIntArray.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteIntArray.java index 74528e7e085..6b2be18b7d9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteIntArray.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteIntArray.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,8 +9,8 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Fixed size list of integers. Used for dimensions and inputs/outputs tensor -// indices +/** Fixed size list of integers. Used for dimensions and inputs/outputs tensor + * indices */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteIntArray extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInternalBackendContext.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInternalBackendContext.java index bc4efa19a0f..db4ad9faf8b 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInternalBackendContext.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInternalBackendContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreter.java index 7100bd96dbb..ac881b9d37b 100644 --- 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreter.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreterOptions.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreterOptions.java index 1c9586a61bd..f1e0a723466 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreterOptions.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteInterpreterOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteModel.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteModel.java index 1141cad206c..a6177870f2e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteModel.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteModel.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -10,9 +10,13 @@ // #endif // __cplusplus -/** \addtogroup c_api tensorflow/lite/c/c_api.h +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup c_api lite/c/c_api.h * \{ */ +// NOLINTEND(whitespace/line_length) +// clang-format on // This header should be valid in both C (e.g. C99) and C++, // so 'void' in parameters is not redundant. 
diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteNode.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteNode.java index b2931c6a97c..a9c28efb3c0 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteNode.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,9 +9,9 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// A structure representing an instance of a node. -// This structure only exhibits the inputs, outputs, user defined data and some -// node properties (like statefulness), not other features like the type. +/** A structure representing an instance of a node. + * This structure only exhibits the inputs, outputs, user defined data and some + * node properties (like statefulness), not other features like the type. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteNode extends Pointer { static { Loader.load(); } @@ -30,38 +30,44 @@ public class TfLiteNode extends Pointer { return new TfLiteNode((Pointer)this).offsetAddress(i); } - // Inputs to this node expressed as indices into the simulator's tensors. + /** Inputs to this node expressed as indices into the simulator's tensors. */ public native TfLiteIntArray inputs(); public native TfLiteNode inputs(TfLiteIntArray setter); - // Outputs to this node expressed as indices into the simulator's tensors. + /** Outputs to this node expressed as indices into the simulator's tensors. */ public native TfLiteIntArray outputs(); public native TfLiteNode outputs(TfLiteIntArray setter); - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. 
+ /** intermediate tensors to this node expressed as indices into the + * simulator's tensors. */ public native TfLiteIntArray intermediates(); public native TfLiteNode intermediates(TfLiteIntArray setter); - // Temporary tensors uses during the computations. This usually contains no - // tensors, but ops are allowed to change that if they need scratch space of - // any sort. + /** Temporary tensors uses during the computations. This usually contains no + * tensors, but ops are allowed to change that if they need scratch space of + * any sort. */ public native TfLiteIntArray temporaries(); public native TfLiteNode temporaries(TfLiteIntArray setter); - // Opaque data provided by the node implementer through `Registration.init`. + /** Opaque data provided by the node implementer through {@code Registration.init}. */ public native Pointer user_data(); public native TfLiteNode user_data(Pointer setter); - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h + /** Opaque data provided to the node if the node is a builtin. This is usually + * a structure defined in builtin_op_data.h */ + + /// public native Pointer builtin_data(); public native TfLiteNode builtin_data(Pointer setter); - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. + /** Custom initial data. This is the opaque data provided in the flatbuffer. + * + * WARNING: This is an experimental interface that is subject to change. */ public native @Const Pointer custom_initial_data(); public native TfLiteNode custom_initial_data(Pointer setter); + + /// public native int custom_initial_data_size(); public native TfLiteNode custom_initial_data_size(int setter); - // The pointer to the delegate. This is non-null only when the node is - // created by calling `interpreter.ModifyGraphWithDelegate`. 
- // WARNING: This is an experimental interface that is subject to change. + /** The pointer to the delegate. This is non-null only when the node is + * created by calling {@code interpreter.ModifyGraphWithDelegate}. + * + * WARNING: This is an experimental interface that is subject to change. */ public native TfLiteDelegate delegate(); public native TfLiteNode delegate(TfLiteDelegate setter); - // Whether this op might have side effect (e.g. stateful op). + /** Whether this op might have side effect (e.g. stateful op). */ public native @Cast("bool") boolean might_have_side_effect(); public native TfLiteNode might_have_side_effect(boolean setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueContext.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueContext.java index 75993931b49..bd4a9fbe9b5 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueContext.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateBuilder.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateBuilder.java index fd079b4b78d..2a541428e0f 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateBuilder.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,16 +9,20 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// `TfLiteOpaqueDelegateBuilder` is used for constructing -// 
`TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` below. Note: -// This struct is not ABI stable. -// -// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should -// be brace-initialized, so that all fields (including any that might be added -// in the future) get zero-initialized. The purpose of each field is exactly -// the same as with `TfLiteDelegate`. -// -// WARNING: This is an experimental interface that is subject to change. +/** {@code TfLiteOpaqueDelegateBuilder} is used for constructing + * {@code TfLiteOpaqueDelegate}, see {@code TfLiteOpaqueDelegateCreate} in c_api_opaque.h. + * NOTE: This struct is not ABI stable. + * + * For forward source compatibility {@code TfLiteOpaqueDelegateBuilder} objects + * should be brace-initialized, so that all fields (including any that might be + * added in the future) get zero-initialized. The purpose of each field is + * exactly the same as with {@code TfLiteDelegate}. + * + * NOTE: This type is part of the TensorFlow Lite Extension APIs. + * We reserve the right to make changes to this API in future releases, + * potentially including non-backwards-compatible changes, on a different + * schedule than for the other TensorFlow Lite APIs. See + * https://www.tensorflow.org/guide/versions#separate_version_number_for_tensorflow_lite_extension_apis. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteOpaqueDelegateBuilder extends Pointer { static { Loader.load(); } @@ -37,15 +41,16 @@ public class TfLiteOpaqueDelegateBuilder extends Pointer { return new TfLiteOpaqueDelegateBuilder((Pointer)this).offsetAddress(i); } - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for deallocating this when it is destroyed. + /** Data that delegate needs to identify itself. This data is owned by the + * delegate. 
The delegate is owned in the user code, so the delegate is + * responsible for deallocating this when it is destroyed. */ public native Pointer data(); public native TfLiteOpaqueDelegateBuilder data(Pointer setter); - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. + /** Invoked by ModifyGraphWithDelegate. This prepare is called, giving the + * delegate a view of the current graph through {@code TfLiteContext*}. It + * typically will look at the nodes and call + * {@code ReplaceNodeSubsetsWithDelegateKernels()} to ask the TensorFlow lite + * runtime to create macro-nodes to represent delegated subgraphs of the + * original graph. */ public static class Prepare_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -56,10 +61,10 @@ public static class Prepare_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Point @Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate, Pointer data); } public native Prepare_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer Prepare(); public native TfLiteOpaqueDelegateBuilder Prepare(Prepare_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer setter); - // Copies the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. + /** Copies the data from delegate buffer handle into raw memory of the given + * {@code tensor}. 
Note that the delegate is allowed to allocate the raw bytes as + * long as it follows the rules for kTfLiteDynamic tensors, in which case + * this cannot be null. */ public static class CopyFromBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -71,8 +76,8 @@ public static class CopyFromBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegat @Cast("TfLiteBufferHandle") int buffer_handle, TfLiteOpaqueTensor tensor); } public native CopyFromBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor CopyFromBufferHandle(); public native TfLiteOpaqueDelegateBuilder CopyFromBufferHandle(CopyFromBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor setter); - // Copies the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. + /** Copies the data from raw memory of the given {@code tensor} to delegate buffer + * handle. This can be null if the delegate doesn't use its own buffer. */ public static class CopyToBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -84,10 +89,10 @@ public static class CopyToBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateS @Cast("TfLiteBufferHandle") int buffer_handle, TfLiteOpaqueTensor tensor); } public native CopyToBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor CopyToBufferHandle(); public native TfLiteOpaqueDelegateBuilder CopyToBufferHandle(CopyToBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_int_TfLiteOpaqueTensor setter); - // Frees the Delegate Buffer Handle. 
Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. + /** Frees the Delegate Buffer Handle. Note: This only frees the handle, but + * this doesn't release the underlying resource (e.g. textures). The + * resources are either owned by application layer or the delegate. + * This can be null if the delegate doesn't use its own buffer. */ public static class FreeBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_IntPointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -99,6 +104,6 @@ public native void call(TfLiteOpaqueContext context, @Cast("TfLiteBufferHandle*") IntPointer handle); } public native FreeBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_IntPointer FreeBufferHandle(); public native TfLiteOpaqueDelegateBuilder FreeBufferHandle(FreeBufferHandle_TfLiteOpaqueContext_TfLiteOpaqueDelegateStruct_Pointer_IntPointer setter); - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. + /** Bitmask flags. See the comments in {@code TfLiteDelegateFlags}. 
*/ public native @Cast("int64_t") long flags(); public native TfLiteOpaqueDelegateBuilder flags(long setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateParams.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateParams.java index ce73cc04559..9576afa4b30 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateParams.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,14 +9,14 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's -// trivially destructable. It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` -// details. +/** WARNING: This is an experimental interface that is subject to change. + * + * Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's + * trivially destructable. It will be stored as {@code builtin_data} field in + * {@code TfLiteNode} of the delegate node. + * + * See also the {@code CreateOpaqueDelegateParams} function in {@code subgraph.cc} + * details. 
*/ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteOpaqueDelegateParams extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateStruct.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateStruct.java index 3db1ca500e4..a0cbc1b0e0d 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateStruct.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueDelegateStruct.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -15,7 +15,10 @@ * This is an abstract type that is intended to have the same * role as TfLiteDelegate, but without exposing the implementation * details of how delegates are implemented. + * * WARNING: This is an experimental type and subject to change. */ + +/// @Opaque @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteOpaqueDelegateStruct extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueNode.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueNode.java index 702aa782205..660d8b37ffc 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueNode.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueNode.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueTensor.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueTensor.java index c4d605a63ef..2a113ff297f 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueTensor.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteOpaqueTensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLitePtrUnion.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLitePtrUnion.java index 8daef6af780..3c094252c4c 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLitePtrUnion.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLitePtrUnion.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,11 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -/* A union of pointers that points to memory for a given tensor. */ +/** A union of pointers that points to memory for a given tensor. 
+ * + * Do not access these members directly, if possible, use + * {@code GetTensorData(tensor)} instead, otherwise only access {@code .data}, as + * other members are deprecated. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLitePtrUnion extends Pointer { static { Loader.load(); } @@ -28,9 +32,6 @@ public class TfLitePtrUnion extends Pointer { return new TfLitePtrUnion((Pointer)this).offsetAddress(i); } - /* Do not access these members directly, if possible, use - * GetTensorData(tensor) instead, otherwise only access .data, as other - * members are deprecated. */ public native IntPointer i32(); public native TfLitePtrUnion i32(IntPointer setter); public native @Cast("uint32_t*") IntPointer u32(); public native TfLitePtrUnion u32(IntPointer setter); public native @Cast("int64_t*") LongPointer i64(); public native TfLitePtrUnion i64(LongPointer setter); @@ -47,6 +48,6 @@ public class TfLitePtrUnion extends Pointer { public native TfLiteComplex64 c64(); public native TfLitePtrUnion c64(TfLiteComplex64 setter); public native TfLiteComplex128 c128(); public native TfLitePtrUnion c128(TfLiteComplex128 setter); public native BytePointer int8(); public native TfLitePtrUnion int8(BytePointer setter); - /* Only use this member. */ + /** Only use this member. 
*/ public native Pointer data(); public native TfLitePtrUnion data(Pointer setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantization.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantization.java index 037a773659b..cae23b4bf5a 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantization.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantization.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,7 +9,7 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Structure specifying the quantization used by the tensor, if-any. +/** Structure specifying the quantization used by the tensor, if-any. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteQuantization extends Pointer { static { Loader.load(); } @@ -28,10 +28,10 @@ public class TfLiteQuantization extends Pointer { return new TfLiteQuantization((Pointer)this).offsetAddress(i); } - // The type of quantization held by params. + /** The type of quantization held by params. */ public native @Cast("TfLiteQuantizationType") int type(); public native TfLiteQuantization type(int setter); - // Holds an optional reference to a quantization param structure. The actual - // type depends on the value of the `type` field (see the comment there for - // the values and corresponding types). + /** Holds an optional reference to a quantization param structure. The actual + * type depends on the value of the {@code type} field (see the comment there for + * the values and corresponding types). 
*/ public native Pointer params(); public native TfLiteQuantization params(Pointer setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantizationParams.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantizationParams.java index 4bb7900ff8c..888ac37a20e 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantizationParams.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteQuantizationParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,12 +9,11 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -/** Legacy. Will be deprecated in favor of TfLiteAffineQuantization. +/** Legacy. Will be deprecated in favor of {@code TfLiteAffineQuantization}. * If per-layer quantization is specified this field will still be populated in - * addition to TfLiteAffineQuantization. + * addition to {@code TfLiteAffineQuantization}. * Parameters for asymmetric quantization. 
Quantized values can be converted - * back to float using: - * real_value = scale * (quantized_value - zero_point) */ + * back to float using: {@code real_value = scale * (quantized_value - zero_point)} */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteQuantizationParams extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration.java index 18f26693cf5..a7e795220a3 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,6 +9,13 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; +/** {@code TfLiteRegistration} defines the implementation of an operation + * (a built-in op, custom op, or custom delegate kernel). + * + * It is a struct containing "methods" (C function pointers) that will be + * invoked by the TF Lite runtime to evaluate instances of the operation. + * + * See also {@code TfLiteRegistrationExternal} which is a more ABI-stable equivalent. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteRegistration extends Pointer { static { Loader.load(); } @@ -27,24 +34,26 @@ public class TfLiteRegistration extends Pointer { return new TfLiteRegistration((Pointer)this).offsetAddress(i); } - // Initializes the op from serialized data. - // Called only *once* for the lifetime of the op, so any one-time allocations - // should be made here (unless they depend on tensor sizes). - // - // If a built-in op: - // `buffer` is the op's params data (TfLiteLSTMParams*). - // `length` is zero. 
- // If custom op: - // `buffer` is the op's `custom_options`. - // `length` is the size of the buffer. - // - // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer - // or an instance of a struct). - // - // The returned pointer will be stored with the node in the `user_data` field, - // accessible within prepare and invoke functions below. - // NOTE: if the data is already in the desired format, simply implement this - // function to return `nullptr` and implement the free function to be a no-op. + /** Initializes the op from serialized data. + * Called only *once* for the lifetime of the op, so any one-time allocations + * should be made here (unless they depend on tensor sizes). + * + * * If a built-in op: + * * {@code buffer} is the op's params data (TfLiteLSTMParams*). + * * {@code length} is zero. + * * If custom op: + * * {@code buffer} is the op's {@code custom_options}. + * * {@code length} is the size of the buffer. + * + * Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer + * or an instance of a struct). + * + * The returned pointer will be stored with the node in the {@code user_data} + * field, accessible within prepare and invoke functions below. + * + * NOTE: if the data is already in the desired format, simply implement this + * function to return {@code nullptr} and implement the free function to be a + * no-op. */ public static class Init_TfLiteContext_BytePointer_long extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -55,7 +64,8 @@ public static class Init_TfLiteContext_BytePointer_long extends FunctionPointer } public native Init_TfLiteContext_BytePointer_long init(); public native TfLiteRegistration init(Init_TfLiteContext_BytePointer_long setter); - // The pointer `buffer` is the data previously returned by an init invocation. + /** The pointer {@code buffer} is the data previously returned by an init + * invocation. 
*/ public static class Free_TfLiteContext_Pointer extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -64,14 +74,16 @@ public static class Free_TfLiteContext_Pointer extends FunctionPointer { private native void allocate(); public native void call(TfLiteContext context, Pointer buffer); } + + /// public native @Name("free") Free_TfLiteContext_Pointer _free(); public native TfLiteRegistration _free(Free_TfLiteContext_Pointer setter); - // prepare is called when the inputs this node depends on have been resized. - // context->ResizeTensor() can be called to request output tensors to be - // resized. - // Can be called multiple times for the lifetime of the op. - // - // Returns kTfLiteOk on success. + /** prepare is called when the inputs this node depends on have been resized. + * {@code context->ResizeTensor()} can be called to request output tensors to be + * resized. + * Can be called multiple times for the lifetime of the op. + * + * Returns {@code kTfLiteOk} on success. */ public static class Prepare_TfLiteContext_TfLiteNode extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -80,10 +92,14 @@ public static class Prepare_TfLiteContext_TfLiteNode extends FunctionPointer { private native void allocate(); public native @Cast("TfLiteStatus") int call(TfLiteContext context, TfLiteNode node); } + + /// public native Prepare_TfLiteContext_TfLiteNode prepare(); public native TfLiteRegistration prepare(Prepare_TfLiteContext_TfLiteNode setter); - // Execute the node (should read node->inputs and output to node->outputs). - // Returns kTfLiteOk on success. + /** Execute the node (should read {@code node->inputs} and output to + * {@code node->outputs}). + * + * Returns {@code kTfLiteOk} on success. 
*/ public static class Invoke_TfLiteContext_TfLiteNode extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -94,11 +110,11 @@ public static class Invoke_TfLiteContext_TfLiteNode extends FunctionPointer { } public native Invoke_TfLiteContext_TfLiteNode invoke(); public native TfLiteRegistration invoke(Invoke_TfLiteContext_TfLiteNode setter); - // profiling_string is called during summarization of profiling information - // in order to group executions together. Providing a value here will cause a - // given op to appear multiple times is the profiling report. This is - // particularly useful for custom ops that can perform significantly - // different calculations depending on their `user-data`. + /** {@code profiling_string} is called during summarization of profiling information + * in order to group executions together. Providing a value here will cause a + * given op to appear multiple times is the profiling report. This is + * particularly useful for custom ops that can perform significantly + * different calculations depending on their {@code user-data}. */ public static class Profiling_string_TfLiteContext_TfLiteNode extends FunctionPointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ @@ -108,44 +124,54 @@ public static class Profiling_string_TfLiteContext_TfLiteNode extends FunctionPo public native @Cast("const char*") BytePointer call(@Const TfLiteContext context, @Const TfLiteNode node); } + + /// public native Profiling_string_TfLiteContext_TfLiteNode profiling_string(); public native TfLiteRegistration profiling_string(Profiling_string_TfLiteContext_TfLiteNode setter); - // Builtin codes. If this kernel refers to a builtin this is the code - // of the builtin. This is so we can do marshaling to other frameworks like - // NN API. - // Note: It is the responsibility of the registration binder to set this - // properly. 
+ /** Builtin codes. If this kernel refers to a builtin this is the code + * of the builtin. This is so we can do marshaling to other frameworks like + * NN API. + * + * Note: It is the responsibility of the registration binder to set this + * properly. */ + + /// + /// public native int builtin_code(); public native TfLiteRegistration builtin_code(int setter); - // Custom op name. If the op is a builtin, this will be null. - // Note: It is the responsibility of the registration binder to set this - // properly. - // WARNING: This is an experimental interface that is subject to change. + /** Custom op name. If the op is a builtin, this will be {@code null}. + * + * Note: It is the responsibility of the registration binder to set this + * properly. + * + * WARNING: This is an experimental interface that is subject to change. */ public native @Cast("const char*") BytePointer custom_name(); public native TfLiteRegistration custom_name(BytePointer setter); - // The version of the op. - // Note: It is the responsibility of the registration binder to set this - // properly. + /** The version of the op. + * Note: It is the responsibility of the registration binder to set this + * properly. */ public native int version(); public native TfLiteRegistration version(int setter); - // The external version of `TfLiteRegistration`. Since we can't use internal - // types (such as `TfLiteContext`) for C API to maintain ABI stability. - // C API user will provide `TfLiteRegistrationExternal` to implement custom - // ops. We keep it inside of `TfLiteRegistration` and use it to route - // callbacks properly. + /** The external version of {@code TfLiteRegistration}. Since we can't use internal + * types (such as {@code TfLiteContext}) for C API to maintain ABI stability. + * C API user will provide {@code TfLiteRegistrationExternal} to implement custom + * ops. We keep it inside of {@code TfLiteRegistration} and use it to route + * callbacks properly. 
*/ + + /// public native TfLiteRegistrationExternal registration_external(); public native TfLiteRegistration registration_external(TfLiteRegistrationExternal setter); - // Retrieves asynchronous kernel. - // - // If the `async_kernel` field is nullptr, it means the operation described by - // this TfLiteRegistration object does not support asynchronous execution. - // Otherwise, the function that the field points to should only be called for - // delegate kernel nodes, i.e. `node` should be a delegate kernel node created - // by applying a delegate. - // If the function returns nullptr, that means that the underlying delegate - // does not support asynchronous execution for this `node`. - - // Indicates if an operator's output may safely overwrite its inputs. - // See the comments in `TfLiteInPlaceOp`. + /** Retrieves asynchronous kernel. + * + * If the {@code async_kernel} field is nullptr, it means the operation described + * by this TfLiteRegistration object does not support asynchronous execution. + * Otherwise, the function that the field points to should only be called for + * delegate kernel nodes, i.e. {@code node} should be a delegate kernel node + * created by applying a delegate. If the function returns nullptr, that + * means that the underlying delegate does not support asynchronous execution + * for this {@code node}. */ + + /** Indicates if an operator's output may safely overwrite its inputs. + * See the comments in {@code TfLiteInPlaceOp}. 
*/ public native @Cast("uint64_t") long inplace_operator(); public native TfLiteRegistration inplace_operator(long setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistrationExternal.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistrationExternal.java index e3ff59e4879..c10ccde80f6 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistrationExternal.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistrationExternal.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V1.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V1.java index 6878ec64d85..f0fe9723eab 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V1.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V1.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,16 +9,17 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -/** \private */ -// Old version of `TfLiteRegistration` to maintain binary backward -// compatibility. -// The legacy registration type must be a POD struct type whose field types must -// be a prefix of the field types in TfLiteRegistration, and offset of the first -// field in TfLiteRegistration that is not present in the legacy registration -// type must be greater than or equal to the size of the legacy registration -// type. -// WARNING: This structure is deprecated / not an official part of the -// API. It should be only used for binary backward compatibility. 
+/** \private + * Old version of {@code TfLiteRegistration} to maintain binary backward + * compatibility. + * The legacy registration type must be a POD struct type whose field types + * must be a prefix of the field types in TfLiteRegistration, and offset of the + * first field in TfLiteRegistration that is not present in the legacy + * registration type must be greater than or equal to the size of the legacy + * registration type. + * + * WARNING: This structure is deprecated / not an official part of the + * API. It should be only used for binary backward compatibility. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteRegistration_V1 extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V2.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V2.java index 6c560541ad1..13ae73df288 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V2.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,16 +9,17 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -/** \private */ -// Old version of `TfLiteRegistration` to maintain binary backward -// compatibility. -// The legacy registration type must be a POD struct type whose field types must -// be a prefix of the field types in TfLiteRegistration, and offset of the first -// field in TfLiteRegistration that is not present in the legacy registration -// type must be greater than or equal to the size of the legacy registration -// type. -// WARNING: This structure is deprecated / not an official part of the -// API. It should be only used for binary backward compatibility. 
+/** \private + * Old version of {@code TfLiteRegistration} to maintain binary backward + * compatibility. + * The legacy registration type must be a POD struct type whose field types + * must be a prefix of the field types in TfLiteRegistration, and offset of the + * first field in TfLiteRegistration that is not present in the legacy + * registration type must be greater than or equal to the size of the legacy + * registration type. + * + * WARNING: This structure is deprecated / not an official part of the + * API. It should be only used for binary backward compatibility. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteRegistration_V2 extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V3.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V3.java index ac0d5feb680..fb05950bfb8 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V3.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteRegistration_V3.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,16 +9,17 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -/** \private */ -// Old version of `TfLiteRegistration` to maintain binary backward -// compatibility. -// The legacy registration type must be a POD struct type whose field types must -// be a prefix of the field types in TfLiteRegistration, and offset of the first -// field in TfLiteRegistration that is not present in the legacy registration -// type must be greater than or equal to the size of the legacy registration -// type. -// WARNING: This structure is deprecated / not an official part of the -// API. It should be only used for binary backward compatibility. 
+/** \private + * Old version of {@code TfLiteRegistration} to maintain binary backward + * compatibility. + * The legacy registration type must be a POD struct type whose field types + * must be a prefix of the field types in TfLiteRegistration, and offset of the + * first field in TfLiteRegistration that is not present in the legacy + * registration type must be greater than or equal to the size of the legacy + * registration type. + * + * WARNING: This structure is deprecated / not an official part of the + * API. It should be only used for binary backward compatibility. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteRegistration_V3 extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSignatureRunner.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSignatureRunner.java index f8a09b13a26..c8422e434b9 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSignatureRunner.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSignatureRunner.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSparsity.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSparsity.java index 11a0c87a34a..6b4053da236 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSparsity.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteSparsity.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,8 +9,8 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// Parameters used to 
encode a sparse tensor. For detailed explanation of each -// field please refer to lite/schema/schema.fbs. +/** Parameters used to encode a sparse tensor. For detailed explanation of each + * field please refer to lite/schema/schema.fbs. */ @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteSparsity extends Pointer { static { Loader.load(); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryConversionMetadata.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryConversionMetadata.java index f5a05d5896d..61bfddcfcb8 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryConversionMetadata.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryConversionMetadata.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryGpuDelegateSettings.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryGpuDelegateSettings.java index 5b0ad94f4d8..44cc272a692 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryGpuDelegateSettings.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryGpuDelegateSettings.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryInterpreterSettings.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryInterpreterSettings.java index feb57a8eccc..e3b092ee966 100644 --- 
a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryInterpreterSettings.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryInterpreterSettings.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryProfilerStruct.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryProfilerStruct.java index b7c89dab8c0..e4bdfe9a2e7 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryProfilerStruct.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetryProfilerStruct.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySettings.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySettings.java index 7bd4b2505eb..2325e188db0 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySettings.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySettings.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySubgraphInfo.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySubgraphInfo.java index dcbd161ef8a..71bc3989263 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySubgraphInfo.java +++ 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTelemetrySubgraphInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensor.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensor.java index b9007eaf24e..6751b5c8276 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensor.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; @@ -9,8 +9,8 @@ import static org.bytedeco.tensorflowlite.global.tensorflowlite.*; -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). +/** A tensor in the interpreter system which is a wrapper around a buffer of + * data including a dimensionality (or NULL if not currently defined). */ // #ifndef TF_LITE_STATIC_MEMORY @Properties(inherit = org.bytedeco.tensorflowlite.presets.tensorflowlite.class) public class TfLiteTensor extends Pointer { @@ -30,68 +30,80 @@ public class TfLiteTensor extends Pointer { return new TfLiteTensor((Pointer)this).offsetAddress(i); } - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. + /** The data type specification for data stored in {@code data}. This affects + * what member of {@code data} union should be used. */ public native @Cast("TfLiteType") int type(); public native TfLiteTensor type(int setter); - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. + /** A union of data pointers. 
The appropriate type should be used for a typed + * tensor based on {@code type}. */ public native @ByRef TfLitePtrUnion data(); public native TfLiteTensor data(TfLitePtrUnion setter); - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. + /** A pointer to a structure representing the dimensionality interpretation + * that the buffer should have. NOTE: the product of elements of {@code dims} + * and the element datatype size should be equal to {@code bytes} below. */ public native TfLiteIntArray dims(); public native TfLiteTensor dims(TfLiteIntArray setter); - // Quantization information. + /** Quantization information. */ public native @ByRef TfLiteQuantizationParams params(); public native TfLiteTensor params(TfLiteQuantizationParams setter); - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). + /** How memory is mapped + * kTfLiteMmapRo: Memory mapped read only. + * i.e. weights + * kTfLiteArenaRw: Arena allocated read write memory + * (i.e. temporaries, outputs). */ public native @Cast("TfLiteAllocationType") int allocation_type(); public native TfLiteTensor allocation_type(int setter); - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + /** The number of bytes required to store the data of this Tensor. I.e. + * (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + * type is kTfLiteFloat32 and dims = {3, 2} then + * bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. 
*/ public native @Cast("size_t") long bytes(); public native TfLiteTensor bytes(long setter); - // An opaque pointer to a tflite::MMapAllocation + /** An opaque pointer to a tflite::MMapAllocation */ public native @Const Pointer allocation(); public native TfLiteTensor allocation(Pointer setter); - // Null-terminated name of this tensor. + /** Null-terminated name of this tensor. */ + + /// public native @Cast("const char*") BytePointer name(); public native TfLiteTensor name(BytePointer setter); - // The delegate which knows how to handle `buffer_handle`. - // WARNING: This is an experimental interface that is subject to change. + /** The delegate which knows how to handle {@code buffer_handle}. + * + * WARNING: This is an experimental interface that is subject to change. */ + + /// public native TfLiteDelegate delegate(); public native TfLiteTensor delegate(TfLiteDelegate setter); - // An integer buffer handle that can be handled by `delegate`. - // The value is valid only when delegate is not null. - // WARNING: This is an experimental interface that is subject to change. + /** An integer buffer handle that can be handled by {@code delegate}. + * The value is valid only when delegate is not null. + * + * WARNING: This is an experimental interface that is subject to change. */ + + /// public native @Cast("TfLiteBufferHandle") int buffer_handle(); public native TfLiteTensor buffer_handle(int setter); - // If the delegate uses its own buffer (e.g. GPU memory), the delegate is - // responsible to set data_is_stale to true. - // `delegate->CopyFromBufferHandle` can be called to copy the data from - // delegate buffer. - // WARNING: This is an // experimental interface that is subject to change. + /** If the delegate uses its own buffer (e.g. GPU memory), the delegate is + * responsible to set data_is_stale to true. + * {@code delegate->CopyFromBufferHandle} can be called to copy the data from + * delegate buffer. 
+ * + * WARNING: This is an experimental interface that is subject to change. */ public native @Cast("bool") boolean data_is_stale(); public native TfLiteTensor data_is_stale(boolean setter); - // True if the tensor is a variable. + /** True if the tensor is a variable. */ public native @Cast("bool") boolean is_variable(); public native TfLiteTensor is_variable(boolean setter); - // Quantization information. Replaces params field above. + /** Quantization information. Replaces params field above. */ + + /// public native @ByRef TfLiteQuantization quantization(); public native TfLiteTensor quantization(TfLiteQuantization setter); - // Parameters used to encode a sparse tensor. - // This is optional. The field is NULL if a tensor is dense. - // WARNING: This is an experimental interface that is subject to change. + /** Parameters used to encode a sparse tensor. + * This is optional. The field is NULL if a tensor is dense. + * + * WARNING: This is an experimental interface that is subject to change. */ public native TfLiteSparsity sparsity(); public native TfLiteTensor sparsity(TfLiteSparsity setter); - // Optional. Encodes shapes with unknown dimensions with -1. This field is - // only populated when unknown dimensions exist in a read-write tensor (i.e. - // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). If no unknown dimensions exist - // then `dims_signature` is either null, or set to an empty array. Note that - // this field only exists when TF_LITE_STATIC_MEMORY is not defined. + /** Optional. Encodes shapes with unknown dimensions with -1. This field is + * only populated when unknown dimensions exist in a read-write tensor (i.e. + * an input or output tensor). (e.g. {@code dims} contains [1, 1, 1, 3] and + * {@code dims_signature} contains [1, -1, -1, 3]). If no unknown dimensions exist + * then {@code dims_signature} is either null, or set to an empty array. 
Note that + * this field only exists when TF_LITE_STATIC_MEMORY is not defined. */ public native @Const TfLiteIntArray dims_signature(); public native TfLiteTensor dims_signature(TfLiteIntArray setter); } diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensorDeleter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensorDeleter.java index efd87a4af6f..a31d1b16e34 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensorDeleter.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteTensorDeleter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteVerifier.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteVerifier.java index 2e5704dd35c..ae9674b1871 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteVerifier.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/TfLiteVerifier.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/VariantData.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/VariantData.java index 6e3096ab101..b404fec4113 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/VariantData.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/VariantData.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite; diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/global/tensorflowlite.java 
b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/global/tensorflowlite.java index 6e580ad93d8..c7205d005fc 100644 --- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/global/tensorflowlite.java +++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/global/tensorflowlite.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorflowlite.global; @@ -336,16 +336,24 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/c_api_types.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. /** This file declares types used by the pure C inference API defined in /** c_api.h, some of which are also used in the C++ and C kernel and interpreter -/** APIs. */ - -// WARNING: Users of TensorFlow Lite should not include this file directly, -// but should instead include -// "third_party/tensorflow/lite/c/c_api_types.h". -// Only the TensorFlow Lite implementation itself should include this -// file directly. +/** APIs. +/** */ +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \note Users of TensorFlow Lite should use +/**
{@code
+/** #include "tensorflow/lite/c/c_api_types.h"
+/** }
+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on // IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h" @@ -357,9 +365,13 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl // #ifdef __cplusplus // #endif -/** \addtogroup c_api_types tensorflow/lite/c/c_api_types.h +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup c_api_types lite/c/c_api_types.h * \{ */ +// NOLINTEND(whitespace/line_length) +// clang-format on // Define TFL_CAPI_EXPORT macro to export a function properly with a shared // library. @@ -468,6 +480,7 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl * TfLiteDelegate; allows delegation of nodes to alternative backends. * For TF Lite in Play Services, this is an opaque type, * but for regular TF Lite, this is just a typedef for TfLiteDelegate. + * * WARNING: This is an experimental type and subject to change. */ // #if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE // #else @@ -529,11 +542,10 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// \warning Note: Users of TensorFlow Lite should not include this file -// directly, but should instead include -// "third_party/tensorflow/lite/c/c_api.h". Only the TensorFlow Lite -// implementation itself should include this -// file directly. +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/c_api.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. 
// #ifndef TENSORFLOW_LITE_CORE_C_C_API_H_ // #define TENSORFLOW_LITE_CORE_C_C_API_H_ @@ -555,9 +567,10 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl /// /// /// +/// // #include "tensorflow/lite/core/c/registration_external.h" // IWYU pragma: export -/** C API for TensorFlow Lite: +/** C API for TensorFlow Lite. * * The API leans towards simplicity and uniformity instead of convenience, as * most usage will be by language-specific wrappers. It provides largely the @@ -601,7 +614,17 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl * TfLiteInterpreterDelete(interpreter); * TfLiteInterpreterOptionsDelete(options); * TfLiteModelDelete(model); - * */ + * + * */ +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \note Users of TensorFlow Lite should use +/**
{@code
+/** #include "tensorflow/lite/c/c_api.h"
+/** }
+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on // #ifdef __cplusplus // Targeting ../TfLiteModel.java @@ -798,7 +821,6 @@ public static native void TfLiteInterpreterOptionsSetErrorReporter( * interpreter's lifetime. * \warning This is an experimental API and subject to change. */ -/// /// public static native void TfLiteInterpreterOptionsAddRegistrationExternal( TfLiteInterpreterOptions options, @@ -808,9 +830,7 @@ public static native void TfLiteInterpreterOptionsAddRegistrationExternal( * {@code TfLiteInterpreterCancel}. * * By default it is disabled and calling to {@code TfLiteInterpreterCancel} will - * return kTfLiteError. See {@code TfLiteInterpreterCancel}. - * - * \warning This is an experimental API and subject to change. */ + * return kTfLiteError. See {@code TfLiteInterpreterCancel}. */ /// /// @@ -1001,7 +1021,6 @@ public static native int TfLiteInterpreterGetOutputTensorCount( * The ownership of the tensor remains with the TFLite runtime, meaning the * caller should not deallocate the pointer. */ -/// /// /// public static native TfLiteTensor TfLiteInterpreterGetTensor(@Const TfLiteInterpreter interpreter, @@ -1016,9 +1035,7 @@ public static native TfLiteTensor TfLiteInterpreterGetTensor(@Const TfLiteInterp * Non-blocking and thread safe. * * Returns kTfLiteError if cancellation is not enabled via - * {@code TfLiteInterpreterOptionsEnableCancellation}. - * - * \warning This is an experimental API and subject to change. */ + * {@code TfLiteInterpreterOptionsEnableCancellation}. */ /// /// @@ -1398,9 +1415,9 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( // #endif // TENSORFLOW_LITE_CORE_C_REGISTRATION_EXTERNAL_H_ -// Parsed from tensorflow/lite/c/c_api_experimental.h +// Parsed from tensorflow/lite/c/common.h -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1414,17 +1431,30 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// #ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// #define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// #include "tensorflow/lite/core/c/c_api_experimental.h" +/** \file +/** +/** This file defines common C types and APIs for implementing operations, +/** delegates and other constructs in TensorFlow Lite. The actual operations and +/** delegates can be defined using C++, but the interface between the +/** interpreter and the operations are C. +/** +/** For documentation, see tensorflow/lite/core/c/common.h. +/** +/** See also c_api_opaque.h which has more ABI-stable variants of some of these +/** APIs. */ -// #endif // TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ +// #ifndef TENSORFLOW_LITE_C_COMMON_H_ +// #define TENSORFLOW_LITE_C_COMMON_H_ +// #include "tensorflow/lite/core/c/common.h" -// Parsed from tensorflow/lite/core/c/c_api_experimental.h +// #endif // TENSORFLOW_LITE_C_COMMON_H_ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +// Parsed from tensorflow/lite/core/c/common.h + +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1438,1101 +1468,1189 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -/** WARNING: Users of TensorFlow Lite should not include this file directly, -/** but should instead include -/** "third_party/tensorflow/lite/c/c_api_experimental.h". -/** Only the TensorFlow Lite implementation itself should include this -/** file directly. */ -// #ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ -// #define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ - -// #include "tensorflow/lite/builtin_ops.h" -// #include "tensorflow/lite/core/c/c_api.h" -// #include "tensorflow/lite/core/c/common.h" - -// #ifdef __cplusplus -// #endif // __cplusplus - -// -------------------------------------------------------------------------- -/** Resets all variable tensors to zero. - * - * WARNING: This is an experimental API and subject to change. */ - -/// -/// -/// -/// -public static native @Cast("TfLiteStatus") int TfLiteInterpreterResetVariableTensors( - TfLiteInterpreter interpreter); +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/common.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. -/** Adds an op registration for a builtin operator. - * - * Op registrations are used to map ops referenced in the flatbuffer model - * to executable function pointers ({@code TfLiteRegistration}s). - * - * NOTE: The interpreter will make a shallow copy of {@code registration} internally, - * so the caller should ensure that its contents (function pointers, etc...) - * remain valid for the duration of the interpreter's lifetime. A common - * practice is making the provided {@code TfLiteRegistration} instance static. - * - * Code that uses this function should NOT call - * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same - * options object. - * - * WARNING: This is an experimental API and subject to change. 
*/ +/** This file defines common C types and APIs for implementing operations, +/** delegates and other constructs in TensorFlow Lite. The actual operations and +/** delegates can be defined using C++, but the interface between the +/** interpreter and the operations are C. +/** +/** Summary of abstractions: +/** * {@code TF_LITE_ENSURE} - self-sufficient error checking +/** * {@code TfLiteStatus} - status reporting +/** * {@code TfLiteIntArray} - stores tensor shapes (dims), +/** * {@code TfLiteContext} - allows an op to access the tensors +/** * {@code TfLiteTensor} - tensor (a multidimensional array) +/** * {@code TfLiteNode} - a single node or operation +/** * {@code TfLiteRegistration} - the implementation of a conceptual operation. +/** * {@code TfLiteDelegate} - allows delegation of nodes to alternative backends. +/** +/** Some abstractions in this file are created and managed by Interpreter. +/** +/** NOTE: The order of values in these structs are "semi-ABI stable". New values +/** should be added only to the end of structs and never reordered. +/** */ +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \note Users of TensorFlow Lite should use +/**
{@code
+/** #include "tensorflow/lite/c/common.h"
+/** }
+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on -/// -/// -/// -/// -/// -public static native void TfLiteInterpreterOptionsAddBuiltinOp( - TfLiteInterpreterOptions options, @Cast("TfLiteBuiltinOperator") int op, - @Const TfLiteRegistration registration, int min_version, - int max_version); +// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h" -/** Adds an op registration for a custom operator. - * - * Op registrations are used to map ops referenced in the flatbuffer model - * to executable function pointers ({@code TfLiteRegistration}s). - * - * NOTE: The interpreter will make a shallow copy of {@code registration} internally, - * so the caller should ensure that its contents (function pointers, etc...) - * remain valid for the duration of any created interpreter's lifetime. A - * common practice is making the provided {@code TfLiteRegistration} instance static. - * - * The lifetime of the string pointed to by {@code name} must be at least as long - * as the lifetime of the {@code TfLiteInterpreterOptions}. - * - * Code that uses this function should NOT call - * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same - * options object. - * - * WARNING: This is an experimental API and subject to change. 
*/ +// #ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ +// #define TENSORFLOW_LITE_CORE_C_COMMON_H_ -/// -/// -/// -/// -/// -public static native void TfLiteInterpreterOptionsAddCustomOp( - TfLiteInterpreterOptions options, @Cast("const char*") BytePointer name, - @Const TfLiteRegistration registration, int min_version, - int max_version); -public static native void TfLiteInterpreterOptionsAddCustomOp( - TfLiteInterpreterOptions options, String name, - @Const TfLiteRegistration registration, int min_version, - int max_version); -// Targeting ../Find_builtin_op_external_Pointer_int_int.java +// #include +// #include +// #include +// #include +// #include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export -// Targeting ../Find_custom_op_external_Pointer_String_int.java +// #ifdef __cplusplus +// #endif // __cplusplus +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup common lite/c/common.h + * \{ + */ +// NOLINTEND(whitespace/line_length) +// clang-format on +/** The list of external context types known to TF Lite. This list exists solely +/** to avoid conflicts and to ensure ops can share the external contexts they +/** need. Access to the external contexts is controlled by one of the +/** corresponding support files. */ +/** enum TfLiteExternalContextType */ +public static final int + kTfLiteEigenContext = 0, /** include eigen_support.h to use. */ + kTfLiteGemmLowpContext = 1, /** include gemm_support.h to use. */ + kTfLiteEdgeTpuContext = 2, /** Placeholder for Edge TPU support. */ + kTfLiteCpuBackendContext = 3, /** include cpu_backend_context.h to use. 
*/ + kTfLiteMaxExternalContexts = 4; -/// -/// -/// -public static native void TfLiteInterpreterOptionsSetOpResolverExternal( - TfLiteInterpreterOptions options, - Find_builtin_op_external_Pointer_int_int find_builtin_op, - Find_custom_op_external_Pointer_String_int find_custom_op, - Pointer op_resolver_user_data); -// Targeting ../Find_builtin_op_Pointer_int_int.java +// Forward declare so dependent structs and methods can reference these types +// prior to the struct definitions. +// Targeting ../TfLiteExternalContext.java -// Targeting ../Find_custom_op_Pointer_BytePointer_int.java +public static final int kTfLiteOptionalTensor = (-1); +// Targeting ../TfLiteIntArray.java -/// -/// -/// -/// -/// -public static native void TfLiteInterpreterOptionsSetOpResolverExternalWithFallback( - TfLiteInterpreterOptions options, - Find_builtin_op_external_Pointer_int_int find_builtin_op_external, - Find_custom_op_external_Pointer_String_int find_custom_op_external, - Find_builtin_op_Pointer_int_int find_builtin_op, - Find_custom_op_Pointer_BytePointer_int find_custom_op, - Pointer op_resolver_user_data); -// Targeting ../Find_custom_op_Pointer_String_int.java +/** Given the size (number of elements) in a TfLiteIntArray, calculate its size + * in bytes. */ +public static native @Cast("size_t") long TfLiteIntArrayGetSizeInBytes(int size); -public static native void TfLiteInterpreterOptionsSetOpResolverExternalWithFallback( - TfLiteInterpreterOptions options, - Find_builtin_op_external_Pointer_int_int find_builtin_op_external, - Find_custom_op_external_Pointer_String_int find_custom_op_external, - Find_builtin_op_Pointer_int_int find_builtin_op, - Find_custom_op_Pointer_String_int find_custom_op, - Pointer op_resolver_user_data); +// #ifndef TF_LITE_STATIC_MEMORY +/** Create a array of a given {@code size} (uninitialized entries). + * This returns a pointer, that you must free using TfLiteIntArrayFree(). 
*/ +public static native TfLiteIntArray TfLiteIntArrayCreate(int size); +// #endif -/** Registers callbacks for resolving builtin or custom operators. - * - * The {@code TfLiteInterpreterOptionsSetOpResolver} function provides an alternative - * method for registering builtin ops and/or custom ops, by providing operator - * resolver callbacks. Unlike using {@code TfLiteInterpreterOptionsAddBuiltinOp} - * and/or {@code TfLiteInterpreterOptionsAddAddCustomOp}, these let you register all - * the operators in a single call. - * - * Code that uses this function should NOT call - * {@code TfLiteInterpreterOptionsAddBuiltin} or - * {@code TfLiteInterpreterOptionsAddCustomOp} on the same options object. - * - * If {@code op_resolver_user_data} is non-null, its lifetime must be at least as - * long as the lifetime of the {@code TfLiteInterpreterOptions}. - * - * WARNING: This is an experimental API and subject to change. - * - * DEPRECATED: use TfLiteInterpreterOptionsSetOpResolverExternal instead. */ +/** Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. */ +public static native int TfLiteIntArrayEqual(@Const TfLiteIntArray a, @Const TfLiteIntArray b); -/// -public static native void TfLiteInterpreterOptionsSetOpResolver( - TfLiteInterpreterOptions options, - Find_builtin_op_Pointer_int_int find_builtin_op, - Find_custom_op_Pointer_BytePointer_int find_custom_op, - Pointer op_resolver_user_data); -public static native void TfLiteInterpreterOptionsSetOpResolver( - TfLiteInterpreterOptions options, - Find_builtin_op_Pointer_int_int find_builtin_op, - Find_custom_op_Pointer_String_int find_custom_op, - Pointer op_resolver_user_data); -// Targeting ../Find_builtin_op_v3_Pointer_int_int.java +/** Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. 
*/ +public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, + @Const IntPointer b_data); +public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, + @Const IntBuffer b_data); +public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, + @Const int[] b_data); +// #ifndef TF_LITE_STATIC_MEMORY +/** Create a copy of an array passed as {@code src}. + * You are expected to free memory with TfLiteIntArrayFree */ +public static native TfLiteIntArray TfLiteIntArrayCopy(@Const TfLiteIntArray src); -// Targeting ../Find_custom_op_v3_Pointer_BytePointer_int.java +/** Free memory of array {@code a}. */ +public static native void TfLiteIntArrayFree(TfLiteIntArray a); +// Targeting ../TfLiteFloatArray.java -/// -public static native void TfLiteInterpreterOptionsSetOpResolverV3( - TfLiteInterpreterOptions options, - Find_builtin_op_v3_Pointer_int_int find_builtin_op_v3, - Find_custom_op_v3_Pointer_BytePointer_int find_custom_op_v3, - Pointer op_resolver_user_data); -// Targeting ../Find_custom_op_v3_Pointer_String_int.java +/** Given the size (number of elements) in a TfLiteFloatArray, calculate its + * size in bytes. */ +public static native int TfLiteFloatArrayGetSizeInBytes(int size); +// #ifndef TF_LITE_STATIC_MEMORY +/** Create a array of a given {@code size} (uninitialized entries). + * This returns a pointer, that you must free using TfLiteFloatArrayFree(). */ +public static native TfLiteFloatArray TfLiteFloatArrayCreate(int size); -public static native void TfLiteInterpreterOptionsSetOpResolverV3( - TfLiteInterpreterOptions options, - Find_builtin_op_v3_Pointer_int_int find_builtin_op_v3, - Find_custom_op_v3_Pointer_String_int find_custom_op_v3, - Pointer op_resolver_user_data); -// Targeting ../Find_builtin_op_v2_Pointer_int_int.java +/** Create a copy of an array passed as {@code src}. + * You are expected to free memory with TfLiteFloatArrayFree. 
*/ +public static native TfLiteFloatArray TfLiteFloatArrayCopy(@Const TfLiteFloatArray src); +/** Free memory of array {@code a}. */ +public static native void TfLiteFloatArrayFree(TfLiteFloatArray a); +// #endif // TF_LITE_STATIC_MEMORY -// Targeting ../Find_custom_op_v2_Pointer_BytePointer_int.java +// Since we must not depend on any libraries, define a minimal subset of +// error macros while avoiding names that have pre-conceived meanings like +// assert and check. +// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than +// calling the context->ReportError function directly, so that message strings +// can be stripped out if the binary size needs to be severely optimized. +// #ifndef TF_LITE_STRIP_ERROR_STRINGS +// #define TF_LITE_KERNEL_LOG(context, ...) +// do { +// (context)->ReportError((context), __VA_ARGS__); +// } while (false) + +// #define TF_LITE_MAYBE_KERNEL_LOG(context, ...) +// do { +// if ((context) != nullptr) { +// (context)->ReportError((context), __VA_ARGS__); +// } +// } while (false) +// #else // TF_LITE_STRIP_ERROR_STRINGS +// #define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) +// #define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +// #define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +// #endif // TF_LITE_STRIP_ERROR_STRINGS + +/** Check whether value is true, and if not return kTfLiteError from + * the current function (and report the error string msg). */ +// #define TF_LITE_ENSURE_MSG(context, value, ...) +// do { +// if (!(value)) { +// TF_LITE_KERNEL_LOG((context), __FILE__ " " __VA_ARGS__); +// return kTfLiteError; +// } +// } while (0) + +/** Check whether the value {@code a} is true, and if not return kTfLiteError from + * the current function, while also reporting the location of the error. 
*/ +// #define TF_LITE_ENSURE(context, a) +// do { +// if (!(a)) { +// TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, +// __LINE__, #a); +// return kTfLiteError; +// } +// } while (0) /// -public static native void TfLiteInterpreterOptionsSetOpResolverV2( - TfLiteInterpreterOptions options, - Find_builtin_op_v2_Pointer_int_int find_builtin_op_v2, - Find_custom_op_v2_Pointer_BytePointer_int find_custom_op_v2, - Pointer op_resolver_user_data); -// Targeting ../Find_custom_op_v2_Pointer_String_int.java +// #define TF_LITE_ENSURE_STATUS(a) +// do { +// const TfLiteStatus s = (a); +// if (s != kTfLiteOk) { +// return s; +// } +// } while (0) +/** Check whether the value {@code a == b} is true, and if not return kTfLiteError + * from the current function, while also reporting the location of the error. + * {@code a} and {@code b} may be evaluated more than once, so no side effects or + * extremely expensive computations should be done. + * + * NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. */ +// #define TF_LITE_ENSURE_EQ(context, a, b) +// do { +// if ((a) != (b)) { +// TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, +// __LINE__, #a, #b, (a), (b)); +// return kTfLiteError; +// } +// } while (0) -public static native void TfLiteInterpreterOptionsSetOpResolverV2( - TfLiteInterpreterOptions options, - Find_builtin_op_v2_Pointer_int_int find_builtin_op_v2, - Find_custom_op_v2_Pointer_String_int find_custom_op_v2, - Pointer op_resolver_user_data); -// Targeting ../Find_builtin_op_v1_Pointer_int_int.java +// #define TF_LITE_ENSURE_TYPES_EQ(context, a, b) +// do { +// if ((a) != (b)) { +// TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, +// __LINE__, #a, #b, TfLiteTypeGetName(a), +// TfLiteTypeGetName(b)); +// return kTfLiteError; +// } +// } while (0) +// #define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) +// do { +// auto delta = ((a) > (b)) ? 
((a) - (b)) : ((b) - (a)); +// if (delta > epsilon) { +// TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", +// __FILE__, __LINE__, #a, #b, static_cast(a), +// static_cast(b)); +// return kTfLiteError; +// } +// } while (0) -// Targeting ../Find_custom_op_v1_Pointer_BytePointer_int.java +// #define TF_LITE_ENSURE_OK(context, status) +// do { +// const TfLiteStatus s = (status); +// if ((s) != kTfLiteOk) { +// return s; +// } +// } while (0) +// Targeting ../TfLiteComplex64.java +// Targeting ../TfLiteComplex128.java -/// -/// -/// -public static native void TfLiteInterpreterOptionsSetOpResolverV1( - TfLiteInterpreterOptions options, - Find_builtin_op_v1_Pointer_int_int find_builtin_op_v1, - Find_custom_op_v1_Pointer_BytePointer_int find_custom_op_v1, - Pointer op_resolver_user_data); -// Targeting ../Find_custom_op_v1_Pointer_String_int.java +// Targeting ../TfLiteFloat16.java -public static native void TfLiteInterpreterOptionsSetOpResolverV1( - TfLiteInterpreterOptions options, - Find_builtin_op_v1_Pointer_int_int find_builtin_op_v1, - Find_custom_op_v1_Pointer_String_int find_custom_op_v1, - Pointer op_resolver_user_data); -/** Returns a new interpreter using the provided model and options, or null on - * failure, where the model uses only the operators explicitly added to the - * options. This is the same as {@code TFLiteInterpreterCreate} from {@code c_api.h}, - * except that the only operators that are supported are the ones registered - * in {@code options} via calls to {@code TfLiteInterpreterOptionsSetOpResolver}, - * {@code TfLiteInterpreterOptionsAddBuiltinOp}, and/or - * {@code TfLiteInterpreterOptionsAddCustomOp}. - * - * * {@code model} must be a valid model instance. The caller retains ownership of - * the object, and can destroy it immediately after creating the interpreter; - * the interpreter will maintain its own reference to the underlying model - * data. - * * {@code options} should not be null. 
The caller retains ownership of the object, - * and can safely destroy it immediately after creating the interpreter. - * - * NOTE: The client *must* explicitly allocate tensors before attempting to - * access input tensor data or invoke the interpreter. - * - * WARNING: This is an experimental API and subject to change. */ -/// -public static native TfLiteInterpreter TfLiteInterpreterCreateWithSelectedOps(@Const TfLiteModel model, - @Const TfLiteInterpreterOptions options); +/** Return the name of a given type, for error reporting purposes. */ +public static native @Cast("const char*") BytePointer TfLiteTypeGetName(@Cast("TfLiteType") int type); -/** Enable or disable the NN API delegate for the interpreter (true to enable). - * - * WARNING: This is an experimental API and subject to change. */ +/** SupportedQuantizationTypes. */ +/** enum TfLiteQuantizationType */ +public static final int + /** No quantization. */ + kTfLiteNoQuantization = 0, + /** Affine quantization (with support for per-channel quantization). + * Corresponds to TfLiteAffineQuantization. */ + kTfLiteAffineQuantization = 1; +// Targeting ../TfLiteQuantization.java -/// -/// -/// -public static native void TfLiteInterpreterOptionsSetUseNNAPI( - TfLiteInterpreterOptions options, @Cast("bool") boolean enable); -/** Enable or disable CPU fallback for the interpreter (true to enable). - * If enabled, TfLiteInterpreterInvoke will do automatic fallback from - * executing with delegate(s) to regular execution without delegates - * (i.e. on CPU). - * - * Allowing the fallback is suitable only if both of the following hold: - * - The caller is known not to cache pointers to tensor data across - * TfLiteInterpreterInvoke calls. - * - The model is not stateful (no variables, no LSTMs) or the state isn't - * needed between batches. 
- * - * When delegate fallback is enabled, TfLiteInterpreterInvoke will - * behave as follows: - * If one or more delegates were set in the interpreter options - * (see TfLiteInterpreterOptionsAddDelegate), - * AND inference fails, - * then the interpreter will fall back to not using any delegates. - * In that case, the previously applied delegate(s) will be automatically - * undone, and an attempt will be made to return the interpreter to an - * invokable state, which may invalidate previous tensor addresses, - * and the inference will be attempted again, using input tensors with - * the same value as previously set. - * - * WARNING: This is an experimental API and subject to change. */ -public static native void TfLiteInterpreterOptionsSetEnableDelegateFallback( - TfLiteInterpreterOptions options, @Cast("bool") boolean enable); +// Targeting ../TfLiteAffineQuantization.java -// Set if buffer handle output is allowed. -// -/** When using hardware delegation, Interpreter will make the data of output - * tensors available in {@code tensor->data} by default. If the application can - * consume the buffer handle directly (e.g. reading output from OpenGL - * texture), it can set this flag to false, so Interpreter won't copy the - * data from buffer handle to CPU memory. WARNING: This is an experimental - * API and subject to change. */ -public static native void TfLiteSetAllowBufferHandleOutput( - @Const TfLiteInterpreter interpreter, @Cast("bool") boolean allow_buffer_handle_output); -/** Allow a delegate to look at the graph and modify the graph to handle - * parts of the graph themselves. After this is called, the graph may - * contain new nodes that replace 1 more nodes. - * 'delegate' must outlive the interpreter. - * Use {@code TfLiteInterpreterOptionsAddDelegate} instead of this unless - * absolutely required. - * Returns one of the following three status codes: - * 1. kTfLiteOk: Success. - * 2. 
kTfLiteDelegateError: Delegation failed due to an error in the - * delegate. The Interpreter has been restored to its pre-delegation state. - * NOTE: This undoes all delegates previously applied to the Interpreter. - * 3. kTfLiteError: Unexpected/runtime failure. - * WARNING: This is an experimental API and subject to change. */ +// Targeting ../TfLitePtrUnion.java -/// -public static native @Cast("TfLiteStatus") int TfLiteInterpreterModifyGraphWithDelegate( - @Const TfLiteInterpreter interpreter, TfLiteDelegate delegate); -/** Returns the tensor index corresponding to the input tensor - * - * WARNING: This is an experimental API and subject to change. */ -/// -public static native int TfLiteInterpreterGetInputTensorIndex( - @Const TfLiteInterpreter interpreter, int input_index); +/** Memory allocation strategies. + * * {@code kTfLiteMmapRo}: Read-only memory-mapped data, or data externally + * allocated. + * * {@code kTfLiteArenaRw}: Arena allocated with no guarantees about persistence, + * and available during eval. + * * {@code kTfLiteArenaRwPersistent}: Arena allocated but persistent across eval, + * and only available during eval. + * * {@code kTfLiteDynamic}: Allocated during eval, or for string tensors. + * * {@code kTfLitePersistentRo}: Allocated and populated during prepare. This is + * useful for tensors that can be computed during prepare and treated + * as constant inputs for downstream ops (also in prepare). + * * {@code kTfLiteCustom}: Custom memory allocation provided by the user. See + * TfLiteCustomAllocation below. + * * {@code kTfLiteVariantObject}: Allocation is an arbitrary type-erased C++ + * object. + * Allocation and deallocation are done through {@code new} and {@code delete}. 
*/ +/** enum TfLiteAllocationType */ +public static final int + kTfLiteMemNone = 0, + kTfLiteMmapRo = 1, + kTfLiteArenaRw = 2, + kTfLiteArenaRwPersistent = 3, + kTfLiteDynamic = 4, + kTfLitePersistentRo = 5, + kTfLiteCustom = 6, + kTfLiteVariantObject = 7; -/** Returns the tensor index corresponding to the output tensor +/** Memory allocation strategies. * - * WARNING: This is an experimental API and subject to change. */ + * TfLiteAllocationType values have been overloaded to mean more than their + * original intent. This enum should only be used to document the allocation + * strategy used by a tensor for it data. */ +/** enum TfLiteAllocationStrategy */ +public static final int + kTfLiteAllocationStrategyUnknown = 0, + kTfLiteAllocationStrategyNone = 1, /** No data is allocated. */ + kTfLiteAllocationStrategyMMap = 2, /** Data is mmaped. */ + kTfLiteAllocationStrategyArena = 3, /** Handled by the arena. */ + kTfLiteAllocationStrategyMalloc = 4, /** Uses {@code malloc}/{@code free}. */ + kTfLiteAllocationStrategyNew = 5; /** Uses {@code new[]}/{@code delete[]}. */ + +/** Describes how stable a tensor attribute is with regards to an interpreter + * runs. */ +/** enum TfLiteRunStability */ +public static final int + kTfLiteRunStabilityUnknown = 0, + kTfLiteRunStabilityUnstable = 1, /** May change at any time. */ + kTfLiteRunStabilitySingleRun = 2, /** Will stay the same for one run. */ + kTfLiteRunStabilityAcrossRuns = 3; /** Will stay the same across all runs. */ -/// -/// -public static native int TfLiteInterpreterGetOutputTensorIndex( - @Const TfLiteInterpreter interpreter, int output_index); +/** Describes the steps of a TFLite operation life cycle. */ +/** enum TfLiteRunStep */ +public static final int + kTfLiteRunStepUnknown = 0, + kTfLiteRunStepInit = 1, + kTfLiteRunStepPrepare = 2, + kTfLiteRunStepEval = 3; -/** Assigns (or reassigns) a custom memory allocation for the given - * tensor. {@code flags} is a bitmask, see TfLiteCustomAllocationFlags. 
- * The runtime does NOT take ownership of the underlying memory. - * - * NOTE: User needs to call TfLiteInterpreterAllocateTensors() after this. - * Invalid/insufficient buffers will cause an error during - * TfLiteInterpreterAllocateTensors or TfLiteInterpreterInvoke (in case of - * dynamic shapes in the graph). - * - * Parameters should satisfy the following conditions: - * 1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent - * In general, this is true for I/O tensors & variable tensors. - * 2. allocation->data has the appropriate permissions for runtime access - * (Read-only for inputs, Read-Write for others), and outlives - * TfLiteInterpreter. - * 3. allocation->bytes >= tensor->bytes. - * This condition is checked again if any tensors are resized. - * 4. allocation->data should be aligned to kDefaultTensorAlignment - * defined in lite/util.h. (Currently 64 bytes) - * This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is - * set through {@code flags}. - * WARNING: This is an experimental API and subject to change. */ +/** The delegates should use zero or positive integers to represent handles. + * -1 is reserved from unallocated status. */ +/** enum */ +public static final int + kTfLiteNullBufferHandle = -1; -/// -public static native @Cast("TfLiteStatus") int TfLiteInterpreterSetCustomAllocationForTensor( - TfLiteInterpreter interpreter, int tensor_index, - @Const TfLiteCustomAllocation allocation, @Cast("int64_t") long flags); +/** Storage format of each dimension in a sparse tensor. */ +/** enum TfLiteDimensionType */ +public static final int + kTfLiteDimDense = 0, + kTfLiteDimSparseCSR = 1; +// Targeting ../TfLiteDimensionMetadata.java -/** -------------------------------------------------------------------------- - * SignatureRunner APIs -

- * Attempts to cancel in flight invocation if any. - * This will not affect calls to {@code Invoke} that happend after this. - * Non blocking and thread safe. - * Returns kTfLiteError if cancellation is not enabled, otherwise returns - * kTfLiteOk. - * NOTE: Calling this function will cancel in-flight invocations - * in all SignatureRunners built from the same interpreter. - * - * WARNING: This is an experimental API and subject to change. */ -public static native @Cast("TfLiteStatus") int TfLiteSignatureRunnerCancel( - TfLiteSignatureRunner signature_runner); -// Forward declaration, to avoid need for dependency on -// tensorflow/lite/profiling/telemetry/profiler.h. +// Targeting ../TfLiteSparsity.java -/** Registers the telemetry profiler to the interpreter. - * Note: The interpreter does not take the ownership of profiler, but callers - * must ensure profiler->data outlives the lifespan of the interpreter. - * - * WARNING: This is an experimental API and subject to change. */ -public static native void TfLiteInterpreterOptionsSetTelemetryProfiler( - TfLiteInterpreterOptions options, - TfLiteTelemetryProfilerStruct profiler); -// #ifdef __cplusplus // extern "C" -// #endif // __cplusplus +// Targeting ../TfLiteCustomAllocation.java -// #endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ -// Parsed from tensorflow/lite/c/common.h +/** The flags used in {@code Interpreter::SetCustomAllocationForTensor}. + * Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. */ +/** enum TfLiteCustomAllocationFlags */ +public static final int + kTfLiteCustomAllocationFlagsNone = 0, + /** Skips checking whether allocation.data points to an aligned buffer as + * expected by the TFLite runtime. + * NOTE: Setting this flag can cause crashes when calling Invoke(). + * Use with caution. */ + kTfLiteCustomAllocationFlagsSkipAlignCheck = 1; +// Targeting ../TfLiteTensor.java -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +// Targeting ../TfLiteNode.java + + +// #else // defined(TF_LITE_STATIC_MEMORY)? +// NOTE: This flag is opt-in only at compile time. +// +// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct +// contains only the minimum fields required to initialize and prepare a micro +// inference graph. The fields in this struct have been ordered from +// largest-to-smallest for optimal struct sizeof. +// +// This struct does not use: +// - allocation +// - buffer_handle +// - data_is_stale +// - delegate +// - dims_signature +// - name +// - sparsity + +// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains +// only the minimum fields required to represent a node. +// +// This struct does not use: +// - delegate +// - intermediates +// - temporaries +// Targeting ../TfLiteEvalTensor.java + + + +// #ifndef TF_LITE_STATIC_MEMORY +/** Free data memory of tensor {@code t}. */ +public static native void TfLiteTensorDataFree(TfLiteTensor t); + +/** Free quantization data. */ +public static native void TfLiteQuantizationFree(TfLiteQuantization quantization); + +/** Free sparsity parameters. */ +public static native void TfLiteSparsityFree(TfLiteSparsity sparsity); + +/** Free memory of tensor {@code t}. */ +public static native void TfLiteTensorFree(TfLiteTensor t); + +/** Set all of a tensor's fields (and free any previously allocated data). 
*/ +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, + @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, + @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, + @ByVal TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, + @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, + @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); +public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, + @ByVal 
TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer, + @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, + @Const Pointer allocation, @Cast("bool") boolean is_variable, + TfLiteTensor tensor); + +/** Copies the contents of {@code src} in {@code dst}. + * Function does nothing if either {@code src} or {@code dst} is passed as nullptr and + * return {@code kTfLiteOk}. + * Returns {@code kTfLiteError} if {@code src} and {@code dst} doesn't have matching data size. + * Note function copies contents, so it won't create new data pointer + * or change allocation type. + * All Tensor related properties will be copied from {@code src} to {@code dst} like + * quantization, sparsity, ... */ +public static native @Cast("TfLiteStatus") int TfLiteTensorCopy(@Const TfLiteTensor src, TfLiteTensor dst); + +/** Change the size of the memory block owned by {@code tensor} to {@code num_bytes}. + * Tensors with allocation types other than {@code kTfLiteDynamic} will be ignored + * and a {@code kTfLiteOk} will be returned. {@code tensor}'s internal data buffer will be + * assigned a pointer which can safely be passed to free or realloc if + * {@code num_bytes} is zero. If {@code preserve_data} is true, tensor data will be + * unchanged in the range from the start of the region up to the minimum of the + * old and new sizes. In the case of NULL tensor, or an error allocating new + * memory, returns {@code kTfLiteError}. */ +public static native @Cast("TfLiteStatus") int TfLiteTensorResizeMaybeCopy(@Cast("size_t") long num_bytes, TfLiteTensor tensor, + @Cast("bool") boolean preserve_data); + +/** Change the size of the memory block owned by {@code tensor} to {@code num_bytes}. + * Tensors with allocation types other than {@code kTfLiteDynamic} will be ignored + * and a {@code kTfLiteOk} will be returned. 
{@code tensor}'s internal data buffer will be + * assigned a pointer which can safely be passed to free or realloc if + * {@code num_bytes} is zero. Tensor data will be unchanged in the range from the + * start of the region up to the minimum of the old and new sizes. In the case + * of NULL tensor, or an error allocating new memory, returns {@code kTfLiteError}. */ + +/// +/// +public static native @Cast("TfLiteStatus") int TfLiteTensorRealloc(@Cast("size_t") long num_bytes, TfLiteTensor tensor); +// Targeting ../TfLiteDelegateParams.java - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ +// Targeting ../TfLiteOpaqueDelegateParams.java -/** \file -/** -/** This file defines common C types and APIs for implementing operations, -/** delegates and other constructs in TensorFlow Lite. The actual operations and -/** delegates can be defined using C++, but the interface between the -/** interpreter and the operations are C. -/** -/** For documentation, see tensorflow/lite/core/c/common.h. */ -// #ifndef TENSORFLOW_LITE_C_COMMON_H_ -// #define TENSORFLOW_LITE_C_COMMON_H_ +// Targeting ../TfLiteContext.java -// #include "tensorflow/lite/core/c/common.h" -// #endif // TENSORFLOW_LITE_C_COMMON_H_ +/** {@code TfLiteRegistrationExternal} is an external version of {@code TfLiteRegistration} + * for C API which doesn't use internal types (such as {@code TfLiteContext}) but + * only uses stable API types (such as {@code TfLiteOpaqueContext}). The purpose of + * each field is the exactly the same as with {@code TfLiteRegistration}. 
*/ -// Parsed from tensorflow/lite/core/c/common.h +/** The valid values of the {@code inplace_operator} field in {@code TfLiteRegistration}. + * This allow an op to signal to the runtime that the same data pointer + * may be passed as an input and output without impacting the result. + * This does not mean that the memory can safely be reused, it is up to the + * runtime to determine this, e.g. if another op consumes the same input or not + * or if an input tensor has sufficient memory allocated to store the output + * data. + * + * Setting these flags authorizes the runtime to set the data pointers of an + * input and output tensor to the same value. In such cases, the memory + * required by the output must be less than or equal to that required by the + * shared input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then + * the runtime can share the same input tensor with multiple operator's + * outputs, provided that kTfLiteInplaceOpDataUnmodified is set for all of + * them. Otherwise, if an input tensor is consumed by multiple operators, it + * may only be shared with the operator which is the last to consume it. + * + * Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. */ +/** enum TfLiteInPlaceOp */ +public static final int + /** The default value. This indicates that the same data pointer cannot safely + * be passed as an op's input and output. */ + kTfLiteInplaceOpNone = 0, + /** This indicates that an op's first output's data is identical to its first + * input's data, for example Reshape. */ + kTfLiteInplaceOpDataUnmodified = 1, + /** Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means + * that InputN may be shared with OutputN instead of with the first output. + * This flag requires one or more of kTfLiteInplaceOpInputNShared to be set. 
*/ + +/// + kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput = 2, + /** kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share + * InputN's data pointer with an output tensor. If + * kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then + * kTfLiteInplaceOpInputNShared indicates that InputN may be shared + * with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN + * may be shared with the first output. + * + * Indicates that an op's first input may be shared with the first output + * tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has + * no impact on the behavior allowed by this flag. */ + kTfLiteInplaceOpInput0Shared = 4, + /** Indicates that an op's second input may be shared with the first output + * if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set + * or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput + * is set. */ + kTfLiteInplaceOpInput1Shared = 8, + /** Indicates that an op's third input may be shared with the first output + * if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set + * or third output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput + * is + * set. */ + kTfLiteInplaceOpInput2Shared = 16; +public static native @MemberGetter int kTfLiteInplaceOpMaxValue(); +public static final int + /** Placeholder to ensure that enum can hold 64 bit values to accommodate + * future fields. */ + kTfLiteInplaceOpMaxValue = kTfLiteInplaceOpMaxValue(); -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/** The number of shareable inputs supported. */ -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at +/// +/// +@MemberGetter public static native int kTfLiteMaxSharableOpInputs(); +public static final int kTfLiteMaxSharableOpInputs = kTfLiteMaxSharableOpInputs(); +// Targeting ../TfLiteRegistration.java - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ +// Targeting ../TfLiteRegistration_V3.java -// This file defines common C types and APIs for implementing operations, -// delegates and other constructs in TensorFlow Lite. The actual operations and -// delegates can be defined using C++, but the interface between the interpreter -// and the operations are C. -// -// Summary of abstractions -// TF_LITE_ENSURE - Self-sufficient error checking -// TfLiteStatus - Status reporting -// TfLiteIntArray - stores tensor shapes (dims), -// TfLiteContext - allows an op to access the tensors -// TfLiteTensor - tensor (a multidimensional array) -// TfLiteNode - a single node or operation -// TfLiteRegistration - the implementation of a conceptual operation. -// TfLiteDelegate - allows delegation of nodes to alternative backends. -// -// Some abstractions in this file are created and managed by Interpreter. -// -// NOTE: The order of values in these structs are "semi-ABI stable". New values -// should be added only to the end of structs and never reordered. -/** WARNING: Users of TensorFlow Lite should not include this file directly, -/** but should instead include -/** "third_party/tensorflow/lite/c/common.h". -/** Only the TensorFlow Lite implementation itself should include this -/** file directly. 
*/ -// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h" +// Targeting ../TfLiteRegistration_V2.java -// #ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ -// #define TENSORFLOW_LITE_CORE_C_COMMON_H_ -// #include -// #include -// #include -// #include +// Targeting ../TfLiteRegistration_V1.java -// #include "tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export -// #ifdef __cplusplus -// #endif // __cplusplus -// The list of external context types known to TF Lite. This list exists solely -// to avoid conflicts and to ensure ops can share the external contexts they -// need. Access to the external contexts is controlled by one of the -// corresponding support files. -/** enum TfLiteExternalContextType */ +/** The flags used in {@code TfLiteDelegate}. Note that this is a bitmask, so the + * values should be 1, 2, 4, 8, ...etc. */ +/** enum TfLiteDelegateFlags */ public static final int - kTfLiteEigenContext = 0, // include eigen_support.h to use. - kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. - kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. - kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. - kTfLiteMaxExternalContexts = 4; + +/// + kTfLiteDelegateFlagsNone = 0, + /** The flag is set if the delegate can handle dynamic sized tensors. + * For example, the output shape of a {@code Resize} op with non-constant shape + * can only be inferred when the op is invoked. + * In this case, the Delegate is responsible for calling + * {@code SetTensorToDynamic} to mark the tensor as a dynamic tensor, and calling + * {@code ResizeTensor} when invoking the op. + * + * If the delegate isn't capable to handle dynamic tensors, this flag need + * to be set to false. */ + +/// +/// + kTfLiteDelegateFlagsAllowDynamicTensors = 1, -// Forward declare so dependent structs and methods can reference these types -// prior to the struct definitions. 
-// Targeting ../TfLiteExternalContext.java + /** This flag can be used by delegates (that allow dynamic tensors) to ensure + * applicable tensor shapes are automatically propagated in the case of + * tensor resizing. This means that non-dynamic (allocation_type != + * kTfLiteDynamic) I/O tensors of a delegate kernel will have correct shapes + * before its Prepare() method is called. The runtime leverages TFLite + * builtin ops in the original execution plan to propagate shapes. + * + * A few points to note: + * 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is + * false, this one is redundant since the delegate kernels are re-initialized + * every time tensors are resized. + * 2. Enabling this flag adds some overhead to AllocateTensors(), since extra + * work is required to prepare the original execution plan. + * 3. This flag requires that the original execution plan only have ops with + * valid registrations (and not 'dummy' custom ops like with Flex). + * + * WARNING: This feature is experimental and subject to change. */ + kTfLiteDelegateFlagsRequirePropagatedShapes = 2, + /** This flag can be used by delegates to request per-operator profiling. If a + * node is a delegate node, this flag will be checked before profiling. If + * set, then the node will not be profiled. The delegate will then add per + * operator information using {@code Profiler::EventType::OPERATOR_INVOKE_EVENT} + * and the results will appear in the operator-wise Profiling section and not + * in the Delegate internal section. */ + kTfLiteDelegateFlagsPerOperatorProfiling = 4; +// Targeting ../TfLiteDelegate.java -public static final int kTfLiteOptionalTensor = (-1); -// Targeting ../TfLiteIntArray.java +/** Build a {@code null} delegate, with all the fields properly set to their default + * values. 
*/ + +/// +/// +public static native @ByVal TfLiteDelegate TfLiteDelegateCreate(); +// Targeting ../TfLiteOpaqueDelegateBuilder.java -// Given the size (number of elements) in a TfLiteIntArray, calculate its size -// in bytes. -public static native @Cast("size_t") long TfLiteIntArrayGetSizeInBytes(int size); // #ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteIntArrayFree(). -public static native TfLiteIntArray TfLiteIntArrayCreate(int size); -// #endif +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. +// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +public static native @Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateCreate( + @Const TfLiteOpaqueDelegateBuilder opaque_delegate_builder); -// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. -public static native int TfLiteIntArrayEqual(@Const TfLiteIntArray a, @Const TfLiteIntArray b); +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. +// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +public static native void TfLiteOpaqueDelegateDelete(@Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate); +// #endif // TF_LITE_STATIC_MEMORY -// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. -public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, - @Const IntPointer b_data); -public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, - @Const IntBuffer b_data); -public static native int TfLiteIntArrayEqualsArray(@Const TfLiteIntArray a, int b_size, - @Const int[] b_data); +// See c_api_opaque.h. +// This declaration in common.h is only for backwards compatibility. 
+// NOTE: This function is part of the TensorFlow Lite Extension APIs, see above. +public static native Pointer TfLiteOpaqueDelegateGetData(@Cast("const TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate); -// #ifndef TF_LITE_STATIC_MEMORY -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteIntArrayFree -public static native TfLiteIntArray TfLiteIntArrayCopy(@Const TfLiteIntArray src); +/** Returns a tensor data allocation strategy. */ +public static native @Cast("TfLiteAllocationStrategy") int TfLiteTensorGetAllocationStrategy( + @Const TfLiteTensor t); -// Free memory of array `a`. -public static native void TfLiteIntArrayFree(TfLiteIntArray a); -// Targeting ../TfLiteFloatArray.java +/** Returns how stable a tensor data buffer address is across runs. */ +public static native @Cast("TfLiteRunStability") int TfLiteTensorGetBufferAddressStability(@Const TfLiteTensor t); +/** Returns how stable a tensor data values are across runs. */ +/// +public static native @Cast("TfLiteRunStability") int TfLiteTensorGetDataStability(@Const TfLiteTensor t); -// Given the size (number of elements) in a TfLiteFloatArray, calculate its size -// in bytes. -public static native int TfLiteFloatArrayGetSizeInBytes(int size); +/** Returns the operation step when the data of a tensor is populated. + * + * Some operations can precompute their results before the evaluation step. + * This makes the data available earlier for subsequent operations. */ -// #ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteFloatArrayFree(). -public static native TfLiteFloatArray TfLiteFloatArrayCreate(int size); +/// +public static native @Cast("TfLiteRunStep") int TfLiteTensorGetDataKnownStep(@Const TfLiteTensor t); -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteFloatArrayFree. 
-public static native TfLiteFloatArray TfLiteFloatArrayCopy(@Const TfLiteFloatArray src); +/** Returns the operation steop when the shape of a tensor is computed. + * + * Some operations can precompute the shape of their results before the + * evaluation step. This makes the shape available earlier for subsequent + * operations. */ +public static native @Cast("TfLiteRunStep") int TfLiteTensorGetShapeKnownStep(@Const TfLiteTensor t); -// Free memory of array `a`. -public static native void TfLiteFloatArrayFree(TfLiteFloatArray a); -// #endif // TF_LITE_STATIC_MEMORY +/** \} */ +// Ends `\addtogroup`, it's important for the doc generator that this doesn't +// include the CC code below. -// Since we must not depend on any libraries, define a minimal subset of -// error macros while avoiding names that have pre-conceived meanings like -// assert and check. +// #ifdef __cplusplus // extern "C" -// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than -// calling the context->ReportError function directly, so that message strings -// can be stripped out if the binary size needs to be severely optimized. -// #ifndef TF_LITE_STRIP_ERROR_STRINGS -// #define TF_LITE_KERNEL_LOG(context, ...) -// do { -// (context)->ReportError((context), __VA_ARGS__); -// } while (false) +// #include +// Targeting ../VariantData.java -// #define TF_LITE_MAYBE_KERNEL_LOG(context, ...) -// do { -// if ((context) != nullptr) { -// (context)->ReportError((context), __VA_ARGS__); -// } -// } while (false) -// #else // TF_LITE_STRIP_ERROR_STRINGS -// #define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) -// #define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) -// #define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) -// #endif // TF_LITE_STRIP_ERROR_STRINGS -// Check whether value is true, and if not return kTfLiteError from -// the current function (and report the error string msg). 
-// #define TF_LITE_ENSURE_MSG(context, value, msg) -// do { -// if (!(value)) { -// TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); -// return kTfLiteError; -// } -// } while (0) -// Check whether the value `a` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// #define TF_LITE_ENSURE(context, a) -// do { -// if (!(a)) { -// TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, -// __LINE__, #a); -// return kTfLiteError; -// } -// } while (0) +// Concrete implementations extend `AbstractVariantData` with CRPT. -// #define TF_LITE_ENSURE_STATUS(a) -// do { -// const TfLiteStatus s = (a); -// if (s != kTfLiteOk) { -// return s; -// } -// } while (0) +// Analogous to `TfLiteTensorRealloc` for allocation of tensors whose +// data member points to an arbitrary C++ object. `VariantType` refers +// to the erased type of said object and `VariantArgs` refers to +// a list of argument types with which to construct a new `VariantType`. +// `VariantArgs` must match a constructor of `VariantType`. -// Check whether the value `a == b` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// `a` and `b` may be evaluated more than once, so no side effects or -// extremely expensive computations should be done. -// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. 
-// #define TF_LITE_ENSURE_EQ(context, a, b) -// do { -// if ((a) != (b)) { -// TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, -// __LINE__, #a, #b, (a), (b)); -// return kTfLiteError; -// } -// } while (0) +// #endif // __cplusplus +// #endif // TENSORFLOW_LITE_CORE_C_COMMON_H_ -// #define TF_LITE_ENSURE_TYPES_EQ(context, a, b) -// do { -// if ((a) != (b)) { -// TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, -// __LINE__, #a, #b, TfLiteTypeGetName(a), -// TfLiteTypeGetName(b)); -// return kTfLiteError; -// } -// } while (0) -// #define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) -// do { -// auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); -// if (delta > epsilon) { -// TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", -// __FILE__, __LINE__, #a, #b, static_cast(a), -// static_cast(b)); -// return kTfLiteError; -// } -// } while (0) +// Parsed from tensorflow/lite/c/c_api_experimental.h -// #define TF_LITE_ENSURE_OK(context, status) -// do { -// const TfLiteStatus s = (status); -// if ((s) != kTfLiteOk) { -// return s; -// } -// } while (0) -// Targeting ../TfLiteComplex64.java +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -// Targeting ../TfLiteComplex128.java + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +// #ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ +// #define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// Targeting ../TfLiteFloat16.java +/** For documentation, see + * third_party/tensorflow/lite/core/c/c_api_experimental.h */ +// #include "tensorflow/lite/core/c/c_api_experimental.h" +// #endif // TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// Return the name of a given type, for error reporting purposes. -public static native @Cast("const char*") BytePointer TfLiteTypeGetName(@Cast("TfLiteType") int type); -// SupportedQuantizationTypes. -/** enum TfLiteQuantizationType */ -public static final int - // No quantization. - kTfLiteNoQuantization = 0, - // Affine quantization (with support for per-channel quantization). - // Corresponds to TfLiteAffineQuantization. - kTfLiteAffineQuantization = 1; -// Targeting ../TfLiteQuantization.java +// Parsed from tensorflow/lite/core/c/c_api_experimental.h +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. -// Targeting ../TfLiteAffineQuantization.java +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/** WARNING: Users of TensorFlow Lite should not include this file directly, +/** but should instead include +/** "third_party/tensorflow/lite/c/c_api_experimental.h". +/** Only the TensorFlow Lite implementation itself should include this +/** file directly. 
*/ +// #ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ +// #define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ -// Targeting ../TfLitePtrUnion.java +// #include +// #include "tensorflow/lite/builtin_ops.h" +// #include "tensorflow/lite/c/c_api_types.h" +// #include "tensorflow/lite/core/c/c_api.h" +// #include "tensorflow/lite/core/c/common.h" +// #ifdef __cplusplus +// #endif // __cplusplus -// Memory allocation strategies. -// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. -// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, -// and available during eval. -// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and -// only available during eval. -// * kTfLiteDynamic: Allocated during eval, or for string tensors. -// * kTfLitePersistentRo: Allocated and populated during prepare. This is -// useful for tensors that can be computed during prepare and treated -// as constant inputs for downstream ops (also in prepare). -// * kTfLiteCustom: Custom memory allocation provided by the user. See -// TfLiteCustomAllocation below. -// * kTfLiteVariantObject: Allocation is an arbitrary type-erased C++ object. -// Allocation and deallocation are done through `new` and `delete`. -/** enum TfLiteAllocationType */ -public static final int - kTfLiteMemNone = 0, - kTfLiteMmapRo = 1, - kTfLiteArenaRw = 2, - kTfLiteArenaRwPersistent = 3, - kTfLiteDynamic = 4, - kTfLitePersistentRo = 5, - kTfLiteCustom = 6, - kTfLiteVariantObject = 7; +// -------------------------------------------------------------------------- +/** Resets all variable tensors to zero. + * + * WARNING: This is an experimental API and subject to change. */ +public static native @Cast("TfLiteStatus") int TfLiteInterpreterResetVariableTensors( + TfLiteInterpreter interpreter); -// Memory allocation strategies. -// -// TfLiteAllocationType values have been overloaded to mean more than their -// original intent. 
This enum should only be used to document the allocation -// strategy used by a tensor for it data. -/** enum TfLiteAllocationStrategy */ -public static final int - kTfLiteAllocationStrategyUnknown = 0, - kTfLiteAllocationStrategyNone = 1, // No data is allocated. - kTfLiteAllocationStrategyMMap = 2, // Data is mmaped. - kTfLiteAllocationStrategyArena = 3, // Handled by the arena. - kTfLiteAllocationStrategyMalloc = 4, // Uses `malloc`/`free`. - kTfLiteAllocationStrategyNew = 5; // Uses `new[]`/`delete[]`. - -// Describes how stable a tensor attribute is with regards to an interpreter -// runs. -/** enum TfLiteRunStability */ -public static final int - kTfLiteRunStabilityUnknown = 0, - kTfLiteRunStabilityUnstable = 1, // May change at any time. - kTfLiteRunStabilitySingleRun = 2, // Will stay the same for one run. - kTfLiteRunStabilityAcrossRuns = 3; // Will stay the same across all runs. +// Returns the number of variable tensors associated with the model. +public static native int TfLiteInterpreterGetVariableTensorCount( + @Const TfLiteInterpreter interpreter); -// Describes the steps of a TFLite operation life cycle. -/** enum TfLiteRunStep */ -public static final int - kTfLiteRunStepUnknown = 0, - kTfLiteRunStepInit = 1, - kTfLiteRunStepPrepare = 2, - kTfLiteRunStepEval = 3; +// Returns the tensor associated with the variable tensor index. +// REQUIRES: 0 <= input_index < +// TfLiteInterpreterGetVariableTensorCount(interpreter) -// The delegates should use zero or positive integers to represent handles. -// -1 is reserved from unallocated status. -/** enum */ -public static final int - kTfLiteNullBufferHandle = -1; +/// +/// +/// +/// +public static native TfLiteTensor TfLiteInterpreterGetVariableTensor( + @Const TfLiteInterpreter interpreter, int variable_index); -// Storage format of each dimension in a sparse tensor. 
-/** enum TfLiteDimensionType */ -public static final int - kTfLiteDimDense = 0, - kTfLiteDimSparseCSR = 1; -// Targeting ../TfLiteDimensionMetadata.java +/** Adds an op registration for a builtin operator. + * + * Op registrations are used to map ops referenced in the flatbuffer model + * to executable function pointers ({@code TfLiteRegistration}s). + * + * NOTE: The interpreter will make a shallow copy of {@code registration} internally, + * so the caller should ensure that its contents (function pointers, etc...) + * remain valid for the duration of the interpreter's lifetime. A common + * practice is making the provided {@code TfLiteRegistration} instance static. + * + * Code that uses this function should NOT call + * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same + * options object. + * + * WARNING: This is an experimental API and subject to change. */ +/// +/// +/// +/// +/// +public static native void TfLiteInterpreterOptionsAddBuiltinOp( + TfLiteInterpreterOptions options, @Cast("TfLiteBuiltinOperator") int op, + @Const TfLiteRegistration registration, int min_version, + int max_version); -// Targeting ../TfLiteSparsity.java +/** Adds an op registration for a custom operator. + * + * Op registrations are used to map ops referenced in the flatbuffer model + * to executable function pointers ({@code TfLiteRegistration}s). + * + * NOTE: The interpreter will make a shallow copy of {@code registration} internally, + * so the caller should ensure that its contents (function pointers, etc...) + * remain valid for the duration of any created interpreter's lifetime. A + * common practice is making the provided {@code TfLiteRegistration} instance static. + * + * The lifetime of the string pointed to by {@code name} must be at least as long + * as the lifetime of the {@code TfLiteInterpreterOptions}. 
+ * + * Code that uses this function should NOT call + * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same + * options object. + * + * WARNING: This is an experimental API and subject to change. */ +/// +/// +/// +/// +/// +public static native void TfLiteInterpreterOptionsAddCustomOp( + TfLiteInterpreterOptions options, @Cast("const char*") BytePointer name, + @Const TfLiteRegistration registration, int min_version, + int max_version); +public static native void TfLiteInterpreterOptionsAddCustomOp( + TfLiteInterpreterOptions options, String name, + @Const TfLiteRegistration registration, int min_version, + int max_version); +// Targeting ../Find_builtin_op_external_Pointer_int_int.java -// Targeting ../TfLiteCustomAllocation.java +// Targeting ../Find_custom_op_external_Pointer_String_int.java -// The flags used in `Interpreter::SetCustomAllocationForTensor`. -// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. -/** enum TfLiteCustomAllocationFlags */ -public static final int - kTfLiteCustomAllocationFlagsNone = 0, - // Skips checking whether allocation.data points to an aligned buffer as - // expected by the TFLite runtime. - // NOTE: Setting this flag can cause crashes when calling Invoke(). - // Use with caution. - kTfLiteCustomAllocationFlagsSkipAlignCheck = 1; -// Targeting ../TfLiteTensor.java +/// +/// +/// +public static native void TfLiteInterpreterOptionsSetOpResolverExternal( + TfLiteInterpreterOptions options, + Find_builtin_op_external_Pointer_int_int find_builtin_op, + Find_custom_op_external_Pointer_String_int find_custom_op, + Pointer op_resolver_user_data); +// Targeting ../Find_builtin_op_Pointer_int_int.java -// Targeting ../TfLiteNode.java +// Targeting ../Find_custom_op_Pointer_BytePointer_int.java -// #else // defined(TF_LITE_STATIC_MEMORY)? -// NOTE: This flag is opt-in only at compile time. -// -// Specific reduced TfLiteTensor struct for TF Micro runtime. 
This struct -// contains only the minimum fields required to initialize and prepare a micro -// inference graph. The fields in this struct have been ordered from -// largest-to-smallest for optimal struct sizeof. -// -// This struct does not use: -// - allocation -// - buffer_handle -// - data_is_stale -// - delegate -// - dims_signature -// - name -// - sparsity -// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains -// only the minimum fields required to represent a node. -// -// This struct does not use: -// - delegate -// - intermediates -// - temporaries -// Targeting ../TfLiteEvalTensor.java +/// +/// +/// +/// +/// +public static native void TfLiteInterpreterOptionsSetOpResolverExternalWithFallback( + TfLiteInterpreterOptions options, + Find_builtin_op_external_Pointer_int_int find_builtin_op_external, + Find_custom_op_external_Pointer_String_int find_custom_op_external, + Find_builtin_op_Pointer_int_int find_builtin_op, + Find_custom_op_Pointer_BytePointer_int find_custom_op, + Pointer op_resolver_user_data); +// Targeting ../Find_custom_op_Pointer_String_int.java -// #ifndef TF_LITE_STATIC_MEMORY -// Free data memory of tensor `t`. -public static native void TfLiteTensorDataFree(TfLiteTensor t); +public static native void TfLiteInterpreterOptionsSetOpResolverExternalWithFallback( + TfLiteInterpreterOptions options, + Find_builtin_op_external_Pointer_int_int find_builtin_op_external, + Find_custom_op_external_Pointer_String_int find_custom_op_external, + Find_builtin_op_Pointer_int_int find_builtin_op, + Find_custom_op_Pointer_String_int find_custom_op, + Pointer op_resolver_user_data); -// Free quantization data. -public static native void TfLiteQuantizationFree(TfLiteQuantization quantization); +/** Registers callbacks for resolving builtin or custom operators. 
+ * + * The {@code TfLiteInterpreterOptionsSetOpResolver} function provides an alternative + * method for registering builtin ops and/or custom ops, by providing operator + * resolver callbacks. Unlike using {@code TfLiteInterpreterOptionsAddBuiltinOp} + * and/or {@code TfLiteInterpreterOptionsAddAddCustomOp}, these let you register all + * the operators in a single call. + * + * Code that uses this function should NOT call + * {@code TfLiteInterpreterOptionsAddBuiltin} or + * {@code TfLiteInterpreterOptionsAddCustomOp} on the same options object. + * + * If {@code op_resolver_user_data} is non-null, its lifetime must be at least as + * long as the lifetime of the {@code TfLiteInterpreterOptions}. + * + * WARNING: This is an experimental API and subject to change. + * + * DEPRECATED: use TfLiteInterpreterOptionsSetOpResolverExternal instead. */ -// Free sparsity parameters. -public static native void TfLiteSparsityFree(TfLiteSparsity sparsity); +/// +public static native void TfLiteInterpreterOptionsSetOpResolver( + TfLiteInterpreterOptions options, + Find_builtin_op_Pointer_int_int find_builtin_op, + Find_custom_op_Pointer_BytePointer_int find_custom_op, + Pointer op_resolver_user_data); +public static native void TfLiteInterpreterOptionsSetOpResolver( + TfLiteInterpreterOptions options, + Find_builtin_op_Pointer_int_int find_builtin_op, + Find_custom_op_Pointer_String_int find_custom_op, + Pointer op_resolver_user_data); +// Targeting ../Find_builtin_op_v3_Pointer_int_int.java -// Free memory of tensor `t`. -public static native void TfLiteTensorFree(TfLiteTensor t); -// Set all of a tensor's fields (and free any previously allocated data). 
-public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, - @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); -public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, - @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); -public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, - @ByVal TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); -public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, - @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); -public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims, - @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); -public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims, - @ByVal 
TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer, - @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type, - @Const Pointer allocation, @Cast("bool") boolean is_variable, - TfLiteTensor tensor); +// Targeting ../Find_custom_op_v3_Pointer_BytePointer_int.java -// Copies the contents of 'src' in 'dst'. -// Function does nothing if either 'src' or 'dst' is passed as nullptr and -// return kTfLiteOk. -// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size. -// Note function copies contents, so it won't create new data pointer -// or change allocation type. -// All Tensor related properties will be copied from 'src' to 'dst' like -// quantization, sparsity, ... -public static native @Cast("TfLiteStatus") int TfLiteTensorCopy(@Const TfLiteTensor src, TfLiteTensor dst); -// Change the size of the memory block owned by `tensor` to `num_bytes`. -// Tensors with allocation types other than `kTfLiteDynamic` will be ignored and -// a kTfLiteOk will be returned. -// `tensor`'s internal data buffer will be assigned a pointer -// which can safely be passed to free or realloc if `num_bytes` is zero. -// If `preserve_data` is true, tensor data will be unchanged in the range from -// the start of the region up to the minimum of the old and new sizes. In the -// case of NULL tensor, or an error allocating new memory, returns -// `kTfLiteError`. -public static native @Cast("TfLiteStatus") int TfLiteTensorResizeMaybeCopy(@Cast("size_t") long num_bytes, TfLiteTensor tensor, - @Cast("bool") boolean preserve_data); -// Change the size of the memory block owned by `tensor` to `num_bytes`. -// Tensors with allocation types other than kTfLiteDynamic will be ignored and -// a kTfLiteOk will be returned. -// `tensor`'s internal data buffer will be assigned a pointer -// which can safely be passed to free or realloc if `num_bytes` is zero. 
-// Tensor data will be unchanged in the range from the start of the region up to -// the minimum of the old and new sizes. In the case -// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`. -public static native @Cast("TfLiteStatus") int TfLiteTensorRealloc(@Cast("size_t") long num_bytes, TfLiteTensor tensor); -// Targeting ../TfLiteDelegateParams.java +/// +public static native void TfLiteInterpreterOptionsSetOpResolverV3( + TfLiteInterpreterOptions options, + Find_builtin_op_v3_Pointer_int_int find_builtin_op_v3, + Find_custom_op_v3_Pointer_BytePointer_int find_custom_op_v3, + Pointer op_resolver_user_data); +// Targeting ../Find_custom_op_v3_Pointer_String_int.java -// Targeting ../TfLiteOpaqueDelegateParams.java +public static native void TfLiteInterpreterOptionsSetOpResolverV3( + TfLiteInterpreterOptions options, + Find_builtin_op_v3_Pointer_int_int find_builtin_op_v3, + Find_custom_op_v3_Pointer_String_int find_custom_op_v3, + Pointer op_resolver_user_data); +// Targeting ../Find_builtin_op_v2_Pointer_int_int.java -// Targeting ../TfLiteContext.java +// Targeting ../Find_custom_op_v2_Pointer_BytePointer_int.java -// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration` -// for C API which doesn't use internal types (such as `TfLiteContext`) but only -// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each -// field is the exactly the same as with `TfLiteRegistration`. +/// +public static native void TfLiteInterpreterOptionsSetOpResolverV2( + TfLiteInterpreterOptions options, + Find_builtin_op_v2_Pointer_int_int find_builtin_op_v2, + Find_custom_op_v2_Pointer_BytePointer_int find_custom_op_v2, + Pointer op_resolver_user_data); +// Targeting ../Find_custom_op_v2_Pointer_String_int.java -// The valid values of the `inplace_operator` field in `TfLiteRegistration`. 
-// This allow an op to signal to the runtime that the same data pointer -// may be passed as an input and output without impacting the result. -// This does not mean that the memory can safely be reused, it is up to the -// runtime to determine this, e.g. if another op consumes the same input or not -// or if an input tensor has sufficient memory allocated to store the output -// data. -// -// Setting these flags authorizes the runtime to set the data pointers of an -// input and output tensor to the same value. In such cases, the memory required -// by the output must be less than or equal to that required by the shared -// input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then the -// runtime can share the same input tensor with multiple operator's outputs, -// provided that kTfLiteInplaceOpDataUnmodified is set for all of them. -// Otherwise, if an input tensor is consumed by multiple operators, it may only -// be shared with the operator which is the last to consume it. -// -// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. -/** enum TfLiteInPlaceOp */ -public static final int - // The default value. This indicates that the same data pointer cannot safely - // be passed as an op's input and output. - kTfLiteInplaceOpNone = 0, - // This indicates that an op's first output's data is identical to its first - // input's data, for example Reshape. - kTfLiteInplaceOpDataUnmodified = 1, - // Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means - // that InputN may be shared with OutputN instead of with the first output. - // This flag requires one or more of kTfLiteInplaceOpInputNShared to be set. - kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput = 2, - // kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share - // InputN's data pointer with an output tensor. 
If - // kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then - // kTfLiteInplaceOpInputNShared indicates that InputN may be shared - // with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN - // may be shared with the first output. - // - // Indicates that an op's first input may be shared with the first output - // tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has - // no impact on the behavior allowed by this flag. - kTfLiteInplaceOpInput0Shared = 4, - // Indicates that an op's second input may be shared with the first output - // if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set - // or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput - // is set. - kTfLiteInplaceOpInput1Shared = 8, - // Indicates that an op's third input may be shared with the first output - // if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set - // or third output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is - // set. - kTfLiteInplaceOpInput2Shared = 16; -public static native @MemberGetter int kTfLiteInplaceOpMaxValue(); -public static final int - // Placeholder to ensure that enum can hold 64 bit values to accommodate - // future fields. - kTfLiteInplaceOpMaxValue = kTfLiteInplaceOpMaxValue(); -// The number of shareable inputs supported. 
-@MemberGetter public static native int kTfLiteMaxSharableOpInputs(); -public static final int kTfLiteMaxSharableOpInputs = kTfLiteMaxSharableOpInputs(); -// Targeting ../TfLiteRegistration.java +public static native void TfLiteInterpreterOptionsSetOpResolverV2( + TfLiteInterpreterOptions options, + Find_builtin_op_v2_Pointer_int_int find_builtin_op_v2, + Find_custom_op_v2_Pointer_String_int find_custom_op_v2, + Pointer op_resolver_user_data); +// Targeting ../Find_builtin_op_v1_Pointer_int_int.java -// Targeting ../TfLiteRegistration_V3.java +// Targeting ../Find_custom_op_v1_Pointer_BytePointer_int.java -// Targeting ../TfLiteRegistration_V2.java +/// +/// +/// +public static native void TfLiteInterpreterOptionsSetOpResolverV1( + TfLiteInterpreterOptions options, + Find_builtin_op_v1_Pointer_int_int find_builtin_op_v1, + Find_custom_op_v1_Pointer_BytePointer_int find_custom_op_v1, + Pointer op_resolver_user_data); +// Targeting ../Find_custom_op_v1_Pointer_String_int.java -// Targeting ../TfLiteRegistration_V1.java +public static native void TfLiteInterpreterOptionsSetOpResolverV1( + TfLiteInterpreterOptions options, + Find_builtin_op_v1_Pointer_int_int find_builtin_op_v1, + Find_custom_op_v1_Pointer_String_int find_custom_op_v1, + Pointer op_resolver_user_data); +/** Returns a new interpreter using the provided model and options, or null on + * failure, where the model uses only the operators explicitly added to the + * options. This is the same as {@code TFLiteInterpreterCreate} from {@code c_api.h}, + * except that the only operators that are supported are the ones registered + * in {@code options} via calls to {@code TfLiteInterpreterOptionsSetOpResolver}, + * {@code TfLiteInterpreterOptionsAddBuiltinOp}, and/or + * {@code TfLiteInterpreterOptionsAddCustomOp}. + * + * * {@code model} must be a valid model instance. 
The caller retains ownership of + * the object, and can destroy it immediately after creating the interpreter; + * the interpreter will maintain its own reference to the underlying model + * data. + * * {@code options} should not be null. The caller retains ownership of the object, + * and can safely destroy it immediately after creating the interpreter. + * + * NOTE: The client *must* explicitly allocate tensors before attempting to + * access input tensor data or invoke the interpreter. + * + * WARNING: This is an experimental API and subject to change. */ -// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the -// values should be 1, 2, 4, 8, ...etc. -/** enum TfLiteDelegateFlags */ -public static final int - kTfLiteDelegateFlagsNone = 0, - // The flag is set if the delegate can handle dynamic sized tensors. - // For example, the output shape of a `Resize` op with non-constant shape - // can only be inferred when the op is invoked. - // In this case, the Delegate is responsible for calling - // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling - // `ResizeTensor` when invoking the op. - // - // If the delegate isn't capable to handle dynamic tensors, this flag need - // to be set to false. - kTfLiteDelegateFlagsAllowDynamicTensors = 1, +/// +public static native TfLiteInterpreter TfLiteInterpreterCreateWithSelectedOps(@Const TfLiteModel model, + @Const TfLiteInterpreterOptions options); - // This flag can be used by delegates (that allow dynamic tensors) to ensure - // applicable tensor shapes are automatically propagated in the case of tensor - // resizing. - // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors - // of a delegate kernel will have correct shapes before its Prepare() method - // is called. The runtime leverages TFLite builtin ops in the original - // execution plan to propagate shapes. - // - // A few points to note: - // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. 
If that flag is - // false, this one is redundant since the delegate kernels are re-initialized - // every time tensors are resized. - // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra - // work is required to prepare the original execution plan. - // 3. This flag requires that the original execution plan only have ops with - // valid registrations (and not 'dummy' custom ops like with Flex). - // WARNING: This feature is experimental and subject to change. - kTfLiteDelegateFlagsRequirePropagatedShapes = 2, +/** Enable or disable the NN API delegate for the interpreter (true to enable). + * + * WARNING: This is an experimental API and subject to change. */ - // This flag can be used by delegates to request per-operator profiling. If a - // node is a delegate node, this flag will be checked before profiling. If - // set, then the node will not be profiled. The delegate will then add per - // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and - // the results will appear in the operator-wise Profiling section and not in - // the Delegate internal section. - kTfLiteDelegateFlagsPerOperatorProfiling = 4; -// Targeting ../TfLiteDelegate.java +/// +/// +/// +public static native void TfLiteInterpreterOptionsSetUseNNAPI( + TfLiteInterpreterOptions options, @Cast("bool") boolean enable); +/** Enable or disable CPU fallback for the interpreter (true to enable). + * If enabled, TfLiteInterpreterInvoke will do automatic fallback from + * executing with delegate(s) to regular execution without delegates + * (i.e. on CPU). + * + * Allowing the fallback is suitable only if both of the following hold: + * - The caller is known not to cache pointers to tensor data across + * TfLiteInterpreterInvoke calls. + * - The model is not stateful (no variables, no LSTMs) or the state isn't + * needed between batches. 
+ * + * When delegate fallback is enabled, TfLiteInterpreterInvoke will + * behave as follows: + * If one or more delegates were set in the interpreter options + * (see TfLiteInterpreterOptionsAddDelegate), + * AND inference fails, + * then the interpreter will fall back to not using any delegates. + * In that case, the previously applied delegate(s) will be automatically + * undone, and an attempt will be made to return the interpreter to an + * invokable state, which may invalidate previous tensor addresses, + * and the inference will be attempted again, using input tensors with + * the same value as previously set. + * + * WARNING: This is an experimental API and subject to change. */ +public static native void TfLiteInterpreterOptionsSetEnableDelegateFallback( + TfLiteInterpreterOptions options, @Cast("bool") boolean enable); +/** Allow a delegate to look at the graph and modify the graph to handle + * parts of the graph themselves. After this is called, the graph may + * contain new nodes that replace 1 more nodes. + * 'delegate' must outlive the interpreter. + * Use {@code TfLiteInterpreterOptionsAddDelegate} instead of this unless + * absolutely required. + * Returns one of the following three status codes: + * 1. kTfLiteOk: Success. + * 2. kTfLiteDelegateError: Delegation failed due to an error in the + * delegate. The Interpreter has been restored to its pre-delegation state. + * NOTE: This undoes all delegates previously applied to the Interpreter. + * 3. kTfLiteError: Unexpected/runtime failure. + * WARNING: This is an experimental API and subject to change. */ -// Build a 'null' delegate, with all the fields properly set to their default -// values. 
-public static native @ByVal TfLiteDelegate TfLiteDelegateCreate(); -// Targeting ../TfLiteOpaqueDelegateBuilder.java +/// +public static native @Cast("TfLiteStatus") int TfLiteInterpreterModifyGraphWithDelegate( + @Const TfLiteInterpreter interpreter, TfLiteDelegate delegate); +/** Returns the tensor index corresponding to the input tensor + * + * WARNING: This is an experimental API and subject to change. */ +/// +public static native int TfLiteInterpreterGetInputTensorIndex( + @Const TfLiteInterpreter interpreter, int input_index); -// #ifndef TF_LITE_STATIC_MEMORY -// Creates an opaque delegate and returns its address. The opaque delegate will -// behave according to the provided 'opaque_delegate_builder'. The lifetime of -// the objects pointed to by any of the fields within the -// 'opaque_delegate_builder' must outlive the returned -// 'TfLiteOpaqueDelegate' and any 'TfLiteInterpreter', -// 'TfLiteInterpreterOptions', 'tflite::Interpreter', or -// 'tflite::InterpreterBuilder' that the delegate is added to. The returned -// address should be passed to 'TfLiteOpaqueDelegateDelete' for deletion. If -// 'opaque_delegate_builder' is a null pointer, then a null pointer will be -// returned. -public static native @Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateCreate( - @Const TfLiteOpaqueDelegateBuilder opaque_delegate_builder); +/** Returns the tensor index corresponding to the output tensor + * + * WARNING: This is an experimental API and subject to change. */ -// Deletes the provided opaque 'delegate'. This function has no effect if the -// 'delegate' is a null pointer. 
-public static native void TfLiteOpaqueDelegateDelete(@Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate); -// #endif // TF_LITE_STATIC_MEMORY +/// +/// +public static native int TfLiteInterpreterGetOutputTensorIndex( + @Const TfLiteInterpreter interpreter, int output_index); -// Returns a pointer to the data associated with the provided opaque 'delegate'. -// -// A null pointer will be returned when: -// - The 'delegate' is null. -// - The 'data' field of the 'TfLiteOpaqueDelegateBuilder' used to construct the -// 'delegate' was null. -// - Or in case of any other error. -// - The 'delegate' has been constructed via a 'TfLiteOpaqueDelegateBuilder', -// but the 'data' field of the 'TfLiteOpaqueDelegateBuilder' is null. -// -// The data_ field of 'delegate' will be returned if the -// 'opaque_delegate_builder' field is null. -public static native Pointer TfLiteOpaqueDelegateGetData(@Cast("const TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate); +/** Assigns (or reassigns) a custom memory allocation for the given + * tensor. {@code flags} is a bitmask, see TfLiteCustomAllocationFlags. + * The runtime does NOT take ownership of the underlying memory. + * + * NOTE: User needs to call TfLiteInterpreterAllocateTensors() after this. + * Invalid/insufficient buffers will cause an error during + * TfLiteInterpreterAllocateTensors or TfLiteInterpreterInvoke (in case of + * dynamic shapes in the graph). + * + * Parameters should satisfy the following conditions: + * 1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent + * In general, this is true for I/O tensors & variable tensors. + * 2. allocation->data has the appropriate permissions for runtime access + * (Read-only for inputs, Read-Write for others), and outlives + * TfLiteInterpreter. + * 3. allocation->bytes >= tensor->bytes. + * This condition is checked again if any tensors are resized. + * 4. 
allocation->data should be aligned to kDefaultTensorAlignment + * defined in lite/util.h. (Currently 64 bytes) + * This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is + * set through {@code flags}. + * WARNING: This is an experimental API and subject to change. */ -// Returns a tensor data allocation strategy. -public static native @Cast("TfLiteAllocationStrategy") int TfLiteTensorGetAllocationStrategy( - @Const TfLiteTensor t); +/// +/// +public static native @Cast("TfLiteStatus") int TfLiteInterpreterSetCustomAllocationForTensor( + TfLiteInterpreter interpreter, int tensor_index, + @Const TfLiteCustomAllocation allocation, @Cast("int64_t") long flags); -// Returns how stable a tensor data buffer address is across runs. -public static native @Cast("TfLiteRunStability") int TfLiteTensorGetBufferAddressStability(@Const TfLiteTensor t); +/** -------------------------------------------------------------------------- + * BufferHandle APIs +

+ * Sets the delegate buffer handle for the given tensor. + * + * This function sets the buffer handle for a tensor that is used by other + * computing hardware such as EdgeTpu. For example, EdgeTpu delegate imports a + * tensor's memory into EdgeTpu's virtual address and returns a buffer handle. + * Then EdgeTpu delegate calls this API to associate the tensor with the buffer + * handle. + * + * WARNING: This is an experimental API and subject to change. */ -// Returns how stable a tensor data values are across runs. -public static native @Cast("TfLiteRunStability") int TfLiteTensorGetDataStability(@Const TfLiteTensor t); +/// +public static native @Cast("TfLiteStatus") int TfLiteInterpreterSetBufferHandle( + TfLiteInterpreter interpreter, TfLiteTensor tensor, + @Cast("TfLiteBufferHandle") int buffer_handle, @Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate); -// Returns the operation step when the data of a tensor is populated. -// -// Some operations can precompute their results before the evaluation step. This -// makes the data available earlier for subsequent operations. -public static native @Cast("TfLiteRunStep") int TfLiteTensorGetDataKnownStep(@Const TfLiteTensor t); +/** Gets the delegate buffer handle, and the delegate which can process + * the buffer handle. + * + * WARNING: This is an experimental API and subject to change. */ -// Returns the operation steop when the shape of a tensor is computed. -// -// Some operations can precompute the shape of their results before the -// evaluation step. This makes the shape available earlier for subsequent -// operations. 
-public static native @Cast("TfLiteRunStep") int TfLiteTensorGetShapeKnownStep(@Const TfLiteTensor t); +/// +public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle( + TfLiteInterpreter interpreter, int tensor_index, + @Cast("TfLiteBufferHandle*") IntPointer buffer_handle, @Cast("TfLiteOpaqueDelegate**") PointerPointer delegate); +public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle( + TfLiteInterpreter interpreter, int tensor_index, + @Cast("TfLiteBufferHandle*") IntPointer buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate); +public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle( + TfLiteInterpreter interpreter, int tensor_index, + @Cast("TfLiteBufferHandle*") IntBuffer buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate); +public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle( + TfLiteInterpreter interpreter, int tensor_index, + @Cast("TfLiteBufferHandle*") int[] buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate); -// #ifdef __cplusplus // extern "C" +/** Sets whether buffer handle output is allowed. + * When using hardware delegation, Interpreter will make the data of output + * tensors available in {@code tensor->data} by default. If the application can + * consume the buffer handle directly (e.g. reading output from OpenGL + * texture), it can set this flag to false, so Interpreter won't copy the + * data from buffer handle to CPU memory. + * + * WARNING: This is an experimental API and subject to change. */ -// #include -// Targeting ../VariantData.java +/// +public static native void TfLiteSetAllowBufferHandleOutput( + @Const TfLiteInterpreter interpreter, @Cast("bool") boolean allow_buffer_handle_output); +/** -------------------------------------------------------------------------- + * SignatureRunner APIs +

+ * Attempts to cancel in flight invocation if any. + * This will not affect calls to {@code Invoke} that happend after this. + * Non blocking and thread safe. + * Returns kTfLiteError if cancellation is not enabled, otherwise returns + * kTfLiteOk. + * NOTE: Calling this function will cancel in-flight invocations + * in all SignatureRunners built from the same interpreter. + * + * WARNING: This is an experimental API and subject to change. */ +public static native @Cast("TfLiteStatus") int TfLiteSignatureRunnerCancel( + TfLiteSignatureRunner signature_runner); +// Forward declaration, to avoid need for dependency on +// tensorflow/lite/profiling/telemetry/profiler.h. -// Concrete implementations extend `AbstractVariantData` with CRPT. +/** Registers the telemetry profiler to the interpreter. + * Note: The interpreter does not take the ownership of profiler, but callers + * must ensure profiler->data outlives the lifespan of the interpreter. + * + * WARNING: This is an experimental API and subject to change. */ -// Analogous to `TfLiteTensorRealloc` for allocation of tensors whose -// data member points to an arbitrary C++ object. `VariantType` refers -// to the erased type of said object and `VariantArgs` refers to -// a list of argument types with which to construct a new `VariantType`. -// `VariantArgs` must match a constructor of `VariantType`. +/// +public static native void TfLiteInterpreterOptionsSetTelemetryProfiler( + TfLiteInterpreterOptions options, + TfLiteTelemetryProfilerStruct profiler); +/** Ensures the data of the tensor at the given index is readable. + * Note: If a delegate has been used, and {@code SetAllowBufferHandleOutput(true)} + * has been called, tensor outputs may be stored as delegate buffer handles + * whose data is not directly readable until this method has been called. In + * such cases, this method will copy the data from the delegate buffer handle + * to CPU memory. 
+ * + * WARNING: This is an experimental API and subject to change. */ +public static native @Cast("TfLiteStatus") int TfLiteInterpreterEnsureTensorDataIsReadable( + TfLiteInterpreter interpreter, int tensor_index); +// #ifdef __cplusplus // extern "C" // #endif // __cplusplus -// #endif // TENSORFLOW_LITE_CORE_C_COMMON_H_ + +// #endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ // Parsed from tensorflow/lite/core/api/error_reporter.h @@ -3440,9 +3558,6 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String // No string mapping is included here, since the TF Lite packed representation // doesn't correspond to a C++ type well. -// Targeting ../TfLiteTypeToType.java - - @@ -3686,9 +3801,9 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String // #include "tensorflow/lite/core/c/common.h" // #include "tensorflow/lite/schema/schema_generated.h" // #include "tensorflow/lite/util.h" -// Targeting ../ValueHasher.java - +// Some versions of gcc don't support partial specialization in class scope, +// so these are defined in a namescope. // Targeting ../MutableOpResolver.java @@ -3954,7 +4069,7 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String /** For documentation, see third_party/tensorflow/lite/core/interpreter_builder.h. 
*/ -// #include "tensorflow/lite/core/interpreter_builder.h" +// #include "tensorflow/lite/core/interpreter_builder.h" // IWYU pragma: export // namespace tflite // #endif // TENSORFLOW_LITE_INTERPRETER_BUILDER_H_ @@ -4086,7 +4201,9 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String // #ifndef TENSORFLOW_LITE_KERNELS_REGISTER_H_ // #define TENSORFLOW_LITE_KERNELS_REGISTER_H_ -// #include "tensorflow/lite/core/kernels/register.h" +/** For documentation, see third_party/tensorflow/lite/core/kernels/register.h */ + +// #include "tensorflow/lite/core/kernels/register.h" // IWYU pragma: export // namespace builtin // namespace ops diff --git a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java index e0d38f01f3d..2258881116d 100644 --- a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java +++ b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java @@ -53,10 +53,10 @@ "tensorflow/lite/c/c_api.h", "tensorflow/lite/core/c/c_api.h", "tensorflow/lite/core/c/registration_external.h", - "tensorflow/lite/c/c_api_experimental.h", - "tensorflow/lite/core/c/c_api_experimental.h", "tensorflow/lite/c/common.h", "tensorflow/lite/core/c/common.h", + "tensorflow/lite/c/c_api_experimental.h", + "tensorflow/lite/core/c/c_api_experimental.h", "tensorflow/lite/core/api/error_reporter.h", "tensorflow/lite/core/api/op_resolver.h", "tensorflow/lite/core/api/profiler.h", diff --git a/tensorrt/README.md b/tensorrt/README.md index 76f5b8dcf46..8a71be6966c 100644 --- a/tensorrt/README.md +++ b/tensorrt/README.md @@ -17,7 +17,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * TensorRT 8.6.1.6 https://developer.nvidia.com/tensorrt + * TensorRT 10.0.1.6 https://developer.nvidia.com/tensorrt Please refer to the parent README.md file for more 
detailed information about the JavaCPP Presets. @@ -46,7 +46,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic 4.0.0 org.bytedeco.tensorrt samplegooglenet - 1.5.10 + 1.5.11-SNAPSHOT SampleGoogleNet @@ -54,19 +54,19 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic org.bytedeco tensorrt-platform - 8.6-1.5.10 + 10.0-1.5.11-SNAPSHOT org.bytedeco cuda-platform-redist - 12.3-8.9-1.5.10 + 12.3-8.9-1.5.11-SNAPSHOT org.bytedeco tensorrt-platform-redist - 8.6-1.5.10 + 10.0-1.5.11-SNAPSHOT diff --git a/tensorrt/platform/pom.xml b/tensorrt/platform/pom.xml index 4384760e074..bbad10485d2 100644 --- a/tensorrt/platform/pom.xml +++ b/tensorrt/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tensorrt-platform - 8.6-${project.parent.version} + 10.0-${project.parent.version} JavaCPP Presets Platform for TensorRT diff --git a/tensorrt/platform/redist/pom.xml b/tensorrt/platform/redist/pom.xml index 028eae5d169..7971a83113c 100644 --- a/tensorrt/platform/redist/pom.xml +++ b/tensorrt/platform/redist/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tensorrt-platform-redist - 8.6-${project.parent.version} + 10.0-${project.parent.version} JavaCPP Presets Platform Redist for TensorRT diff --git a/tensorrt/pom.xml b/tensorrt/pom.xml index aa3601f8ded..37ae1e1ff11 100644 --- a/tensorrt/pom.xml +++ b/tensorrt/pom.xml @@ -11,7 +11,7 @@ org.bytedeco tensorrt - 8.6-${project.parent.version} + 10.0-${project.parent.version} JavaCPP Presets for TensorRT diff --git a/tensorrt/samples/pom.xml b/tensorrt/samples/pom.xml index 17fb19b82a8..372c4dc9667 100644 --- a/tensorrt/samples/pom.xml +++ b/tensorrt/samples/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tensorrt-platform - 8.6-1.5.11-SNAPSHOT + 10.0-1.5.11-SNAPSHOT @@ -24,7 +24,7 @@ org.bytedeco tensorrt-platform-redist - 8.6-1.5.11-SNAPSHOT + 10.0-1.5.11-SNAPSHOT diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer.java 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer.java index 0eafb080488..bede32826a0 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.global; @@ -24,15 +24,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferVersion.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ /** @@ -44,9 +49,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // #define NV_INFER_VERSION_H /** TensorRT major version. 
*/ -public static final int NV_TENSORRT_MAJOR = 8; +public static final int NV_TENSORRT_MAJOR = 10; /** TensorRT minor version. */ -public static final int NV_TENSORRT_MINOR = 6; +public static final int NV_TENSORRT_MINOR = 0; /** TensorRT patch version. */ public static final int NV_TENSORRT_PATCH = 1; /** TensorRT build number. */ @@ -59,16 +64,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** TensorRT LWS patch version. */ public static final int NV_TENSORRT_LWS_PATCH = 0; -// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_MAJOR. -/** Shared object library major version number. */ -public static final int NV_TENSORRT_SONAME_MAJOR = 8; -// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_MINOR. -/** Shared object library minor version number. */ -public static final int NV_TENSORRT_SONAME_MINOR = 6; -// This #define is deprecated in TensorRT 8.6 and will be removed in 10.0. Use NV_TENSORRT_PATCH. -/** Shared object library patch version number. */ -public static final int NV_TENSORRT_SONAME_PATCH = 1; - /** An early access release */ public static final int NV_TENSORRT_RELEASE_TYPE_EARLY_ACCESS = 0; /** A release candidate */ @@ -85,15 +80,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferRuntimeBase.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_RUNTIME_BASE_H @@ -150,11 +150,16 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * * This file contains common definitions, data structures and interfaces shared between the standard and safe runtime. * - * \warning Do not directly include this file. Instead include either NvInferRuntime.h (for the standard runtime) or - * NvInferSafeRuntime.h (for the safety runtime). + * \warning Do not directly include this file. Instead include one of: + * * NvInferRuntime.h (for the standard runtime) + * * NvInferSafeRuntime.h (for the safety runtime) + * * NvInferConsistency.h (for consistency checker) + * * NvInferPluginUtils.h (for plugin utilities) * */ +// #if !defined(NV_INFER_INTERNAL_INCLUDE_RUNTIME_BASE) && !defined(TRT_VCAST_SAFE) +// #endif -// forward declare some CUDA types to avoid an include dependency +/** Forward declare some CUDA types to avoid an include dependency. */ // Targeting ../nvinfer/cublasContext.java @@ -162,29 +167,32 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { +/** Construct a single integer denoting TensorRT version. + * Usable in preprocessor expressions. 
*/ +// #define NV_TENSORRT_VERSION_INT(major, minor, patch) ((major) *10000L + (minor) *100L + (patch) *1L) + +/** TensorRT version as a single integer. + * Usable in preprocessor expressions. */ + //! //! //! public static native @MemberGetter int NV_TENSORRT_VERSION(); public static final int NV_TENSORRT_VERSION = NV_TENSORRT_VERSION(); + /** * \namespace nvinfer1 * * \brief The TensorRT API version 1 namespace. * */ - -@Namespace("nvinfer1") @MemberGetter public static native int kNV_TENSORRT_VERSION_IMPL(); -public static final int kNV_TENSORRT_VERSION_IMPL = kNV_TENSORRT_VERSION_IMPL(); // major, minor, patch - /** char_t is the type used by TensorRT to represent all valid characters. */ /** AsciiChar is the type used by TensorRT to represent valid ASCII characters. - * This type is used by IPluginV2, PluginField, IPluginCreator, IPluginRegistry, and - * ILogger due to their use in automotive safety context. */ + * This type is widely used in automotive safety context. */ /** Forward declare IErrorRecorder for use in other interfaces. */ -/** Forward declare IGpuAllocator for use in other interfaces. */ + /** Declaration of EnumMaxImpl struct to store maximum number of elements in an enumeration type. */ // namespace impl @@ -199,7 +207,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** 32-bit floating point format. */ kFLOAT(0), - /** IEEE 16-bit floating-point format. */ + /** IEEE 16-bit floating-point format -- has a 5 bit exponent and 11 bit significand. */ kHALF(1), /** Signed 8-bit integer representing a quantized floating-point value. */ @@ -221,14 +229,22 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * to equivalent floating point values. * {kFLOAT, kHALF} to kUINT8 conversion will convert the floating point values * to integer values by truncating towards zero. This conversion has undefined behavior for - * floating point values outside the range [0.0f, 256.0f) after truncation. 
+ * floating point values outside the range [0.0F, 256.0F) after truncation. * kUINT8 conversions are not supported for {kINT8, kINT32, kBOOL}. */ kUINT8(5), /** Signed 8-bit floating point with - * 1 sign bit, 4 exponent bits, 3 mantissa bits, and exponent-bias 7. - * \warning kFP8 is not supported yet and will result in an error or undefined behavior. */ - kFP8(6); + * 1 sign bit, 4 exponent bits, 3 mantissa bits, and exponent-bias 7. */ + kFP8(6), + + /** Brain float -- has an 8 bit exponent and 8 bit significand. */ + kBF16(7), + + /** Signed 64-bit integer type. */ + kINT64(8), + + /** Signed 4-bit integer type. */ + kINT4(9); public final int value; private DataType(int v) { this.value = v; } @@ -236,18 +252,14 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { public DataType intern() { for (DataType e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } -// Targeting ../nvinfer/EnumMaxImpl.java - +/** Maximum number of elements in DataType enum. @see DataType */ - -// Targeting ../nvinfer/Dims32.java +// Targeting ../nvinfer/Dims64.java /** - * Alias for Dims32. - * - * \warning: This alias might change in the future. + * Alias for Dims64. * */ @@ -257,6 +269,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { //! //! //! +//! +//! +//! +//! /** * \enum TensorFormat @@ -267,101 +283,112 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * * @see IPluginV2::supportsFormat(), safe::ICudaEngine::getBindingFormat() * + * Many of the formats are **vector-major** or **vector-minor**. These formats specify + * a vector dimension and scalars per vector. + * For example, suppose that the tensor has has dimensions [M,N,C,H,W], + * the vector dimension is C and there are V scalars per vector. + * + * * A **vector-major** format splits the vectorized dimension into two axes in the + * memory layout. 
The vectorized dimension is replaced by an axis of length ceil(C/V) + * and a new dimension of length V is appended. For the example tensor, the memory layout + * is equivalent to an array with dimensions [M][N][ceil(C/V)][H][W][V]. + * Tensor coordinate (m,n,c,h,w) maps to array location [m][n][c/V][h][w][c\%V]. + * + * * A **vector-minor** format moves the vectorized dimension to become the last axis + * in the memory layout. For the example tensor, the memory layout is equivalent to an + * array with dimensions [M][N][H][W][ceil(C/V)*V]. Tensor coordinate (m,n,c,h,w) maps + * array location subscript [m][n][h][w][c]. + * + * In interfaces that refer to "components per element", that's the value of V above. + * * For more information about data formats, see the topic "Data Format Description" located in the - * TensorRT Developer Guide. + * TensorRT Developer Guide. https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#data-format-desc * */ @Namespace("nvinfer1") public enum TensorFormat { - /** Row major linear format. - * For a tensor with dimensions {N, C, H, W} or {numbers, channels, - * columns, rows}, the dimensional index corresponds to {3, 2, 1, 0} - * and thus the order is W minor. + /** Memory layout is similar to an array in C or C++. + * The stride of each dimension is the product of the dimensions after it. + * The last dimension has unit stride. * - * For DLA usage, the tensor sizes are limited to C,H,W in the range [1,8192]. - * */ + * For DLA usage, the tensor sizes are limited to C,H,W in the range [1,8192]. */ + +//! kLINEAR(0), - /** Two wide channel vectorized row major format. This format is bound to - * FP16. It is only available for dimensions >= 3. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to a C array with dimensions - * [N][(C+1)/2][H][W][2], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][c/2][h][w][c%2]. 
*/ + /** Vector-major format with two scalars per vector. + * Vector dimension is third to last. + * + * This format requires FP16 or BF16 and at least three dimensions. */ kCHW2(1), - /** Eight channel format where C is padded to a multiple of 8. This format - * is bound to FP16. It is only available for dimensions >= 3. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to the array with dimensions - * [N][H][W][(C+7)/8*8], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][h][w][c]. */ + /** Vector-minor format with eight scalars per vector. + * Vector dimension is third to last. + * This format requires FP16 or BF16 and at least three dimensions. */ +//! //! //! kHWC8(2), - /** Four wide channel vectorized row major format. This format is bound to - * INT8 or FP16. It is only available for dimensions >= 3. - * For INT8, the C dimension must be a build-time constant. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to a C array with dimensions - * [N][(C+3)/4][H][W][4], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][c/4][h][w][c%4]. + /** Vector-major format with four scalars per vector. + * Vector dimension is third to last. + * + * This format requires INT8 or FP16 and at least three dimensions. + * For INT8, the length of the vector dimension must be a build-time constant. * * Deprecated usage: * * If running on the DLA, this format can be used for acceleration - * with the caveat that C must be equal or lesser than 4. + * with the caveat that C must be less than or equal to 4. * If used as DLA input and the build option kGPU_FALLBACK is not specified, - * it needs to meet line stride requirement of DLA format. Column stride in bytes should - * be a multiple of 32 on Xavier and 64 on Orin. */ + * it needs to meet line stride requirement of DLA format. Column stride in + * bytes must be a multiple of 64 on Orin. */ //! //! 
kCHW4(3), - /** Sixteen wide channel vectorized row major format. This format is bound - * to FP16. It is only available for dimensions >= 3. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to a C array with dimensions - * [N][(C+15)/16][H][W][16], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][c/16][h][w][c%16]. + /** Vector-major format with 16 scalars per vector. + * Vector dimension is third to last. + * + * This format requires INT8 or FP16 and at least three dimensions. * * For DLA usage, this format maps to the native feature format for FP16, - * and the tensor sizes are limited to C,H,W in the range [1,8192]. - * */ + * and the tensor sizes are limited to C,H,W in the range [1,8192]. */ +//! //! kCHW16(4), - /** Thirty-two wide channel vectorized row major format. This format is - * only available for dimensions >= 3. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to a C array with dimensions - * [N][(C+31)/32][H][W][32], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][c/32][h][w][c%32]. + /** Vector-major format with 32 scalars per vector. + * Vector dimension is third to last. + * + * This format requires at least three dimensions. * * For DLA usage, this format maps to the native feature format for INT8, * and the tensor sizes are limited to C,H,W in the range [1,8192]. */ + +//! kCHW32(5), - /** Eight channel format where C is padded to a multiple of 8. This format - * is bound to FP16, and it is only available for dimensions >= 4. - * For a tensor with dimensions {N, C, D, H, W}, - * the memory layout is equivalent to an array with dimensions - * [N][D][H][W][(C+7)/8*8], with the tensor coordinates (n, c, d, h, w) - * mapping to array subscript [n][d][h][w][c]. */ + /** Vector-minor format with eight scalars per vector. + * Vector dimension is fourth to last. 
+ * + * This format requires FP16 or BF16 and at least four dimensions. */ + +//! kDHWC8(6), - /** Thirty-two wide channel vectorized row major format. This format is - * bound to FP16 and INT8 and is only available for dimensions >= 4. - * For a tensor with dimensions {N, C, D, H, W}, - * the memory layout is equivalent to a C array with dimensions - * [N][(C+31)/32][D][H][W][32], with the tensor coordinates (n, c, d, h, w) - * mapping to array subscript [n][c/32][d][h][w][c%32]. */ + /** Vector-major format with 32 scalars per vector. + * Vector dimension is fourth to last. + * + * This format requires FP16 or INT8 and at least four dimensions. */ + +//! kCDHW32(7), - /** Non-vectorized channel-last format. This format is bound to either FP32 or UINT8, - * and is only available for dimensions >= 3. */ + /** Vector-minor format where channel dimension is third to last and unpadded. + * + * This format requires either FP32 or UINT8 and at least three dimensions. */ //! kHWC(8), @@ -380,29 +407,33 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** DLA image format. For a tensor with dimension {N, C, H, W} the C axis * always has unit stride. The stride for stepping along the H axis is rounded up - * to 32 bytes on Xavier and 64 bytes on Orin. C can only be 1, 3 or 4. + * to 64 bytes on Orin. C can only be 1, 3 or 4. * If C == 1, it will map to grayscale format. * If C == 3 or C == 4, it will map to color image format. And if C == 3, * the stride for stepping along the W axis needs to be padded to 4 in elements. * * When C is {1, 3, 4}, then C' is {1, 4, 4} respectively, * the memory layout is equivalent to a C array with dimensions - * [N][H][roundUp(W, 32/C'/elementSize)][C'] on Xavier and [N][H][roundUp(W, 64/C'/elementSize)][C'] on Orin + * [N][H][roundUp(W, 64/C'/elementSize)][C'] on Orin * where elementSize is 2 for FP16 * and 1 for Int8. The tensor coordinates (n, c, h, w) mapping to array * subscript [n][h][w][c]. */ + +//! 
kDLA_HWC4(10), - /** Sixteen channel format where C is padded to a multiple of 16. This format - * is bound to FP16. It is only available for dimensions >= 3. - * For a tensor with dimensions {N, C, H, W}, - * the memory layout is equivalent to the array with dimensions - * [N][H][W][(C+15)/16*16], with the tensor coordinates (n, c, h, w) - * mapping to array subscript [n][h][w][c]. */ + /** Vector-minor format with 16 scalars per vector. + * Vector dimension is third to last. + * + * This requires FP16 and at least three dimensions. */ + +//! kHWC16(11), - /** Non-vectorized channel-last format. This format is bound to FP32. - * It is only available for dimensions >= 4. */ + /** Vector-minor format with one scalar per vector. + * Vector dimension is fourth to last. + * + * This format requires FP32 and at least four dimensions. */ kDHWC(12); public final int value; @@ -411,11 +442,47 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { public TensorFormat intern() { for (TensorFormat e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } + + + +//! +//! +//! +// Targeting ../nvinfer/InterfaceInfo.java + + + +/** + * \enum APILanguage + * + * \brief Programming language used in the implementation of a TRT interface + * */ +@Namespace("nvinfer1") public enum APILanguage { + kCPP(0), + kPYTHON(1); + + public final int value; + private APILanguage(int v) { this.value = v; } + private APILanguage(APILanguage e) { this.value = e.value; } + public APILanguage intern() { for (APILanguage e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +/** Maximum number of elements in APILanguage enum. @see APILanguage */ + +// Targeting ../nvinfer/IVersionedInterface.java + + /** Maximum number of elements in TensorFormat enum. 
@see TensorFormat */ // namespace impl + +/** + * \enum AllocatorFlag + * + * \brief Allowed type of memory allocation. + * */ @Namespace("nvinfer1") public enum AllocatorFlag { - /** TensorRT may call realloc() on this allocation */ + /** TensorRT may call realloc() on this allocation. */ kRESIZABLE(0); public final int value; @@ -426,15 +493,39 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { } /** Maximum number of elements in AllocatorFlag enum. @see AllocatorFlag */ // namespace impl +// Targeting ../nvinfer/IGpuAllocator.java + + // namespace v_1_0 + +/** + * \class IGpuAllocator + * + * \brief Application-implemented class for controlling allocation on the GPU. + * + * \warning The lifetime of an IGpuAllocator object must exceed that of all objects that use it. + * + * This class is intended as a base class for allocators that implement synchronous allocation. + * If you want the benefits of asynchronous allocation, you can do either of: + * + * * Derive your class from IGpuAllocator and override all four of its virtual methods + * for allocation/deallocation, including the two deprecated methods. + * + * * Derive your class from IGpuAsyncAllocator and override its two pure virtual + * methods for allocation/deallocation. + * + * The latter style is preferred because it does not tie code to deprecated methods. + * + * @see IGpuAsyncAllocator. + * */ + //! //! //! -// Targeting ../nvinfer/IGpuAllocator.java - - +//! +//! // Targeting ../nvinfer/ILogger.java @@ -520,8 +611,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { kFAILED_INITIALIZATION(6), /** - * An error occurred during execution that caused TensorRT to end prematurely, either an asynchronous error or - * other execution errors reported by CUDA/DLA. In a dynamic system, the + * An error occurred during execution that caused TensorRT to end prematurely, either an asynchronous error, + * user cancellation, or other execution errors reported by CUDA/DLA. 
In a dynamic system, the * data can be thrown away and the next frame can be processed or execution can be retried. * This is either an execution error or a memory error. * */ @@ -565,7 +656,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** * An error occurred due to the network not being supported on the device due to constraints of the hardware or - * system. An example is running a unsafe layer in a safety certified context, or a resource requirement for the + * system. An example is running an unsafe layer in a safety certified context, or a resource requirement for the * current network is greater than the capabilities of the target device. The network is otherwise correct, but * the network and hardware combination is problematic. This can be recoverable. * Examples: @@ -586,6 +677,40 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IErrorRecorder.java // class IErrorRecorder + // namespace v_1_0 + +/** + * \class IErrorRecorder + * + * \brief Reference counted application-implemented error reporting interface for TensorRT objects. + * + * The error reporting mechanism is a user-defined object that interacts with the internal state of the object + * that it is assigned to in order to determine information about abnormalities in execution. The error recorder + * gets both an error enum that is more descriptive than pass/fail and also a string description that gives more + * detail on the exact failure modes. In the safety context, the error strings are all limited to 128 bytes + * or less in length, including the NULL terminator. + * + * The ErrorRecorder gets passed along to any class that is created from another class that has an ErrorRecorder + * assigned to it. For example, assigning an ErrorRecorder to an IBuilder allows all INetwork's, ILayer's, and + * ITensor's to use the same error recorder. For functions that have their own ErrorRecorder accessor functions. 
+ * This allows registering a different error recorder or de-registering of the error recorder for that specific + * object. + * + * ErrorRecorder objects that are used in the safety runtime must define an implementation-dependent upper limit + * of errors whose information can be stored, and drop errors above this upper limit. The limit must fit in int32_t. + * The IErrorRecorder::hasOverflowed() method is used to signal that one or more errors have been dropped. + * + * The ErrorRecorder object implementation must be thread safe. All locking and synchronization is pushed to the + * interface implementation and TensorRT does not hold any synchronization primitives when calling the interface + * functions. + * + * The lifetime of the ErrorRecorder object must exceed the lifetime of all TensorRT objects that use it. + * */ + + +//! +//! +//! /** * \enum TensorIOMode @@ -608,6 +733,32 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { public TensorIOMode intern() { for (TensorIOMode e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } +// Targeting ../nvinfer/IStreamReader.java + + + // namespace v_1_0 + +/** + * \class IStreamReader + * + * \brief Application-implemented class for reading data in a stream-based manner. + * + * \note To ensure compatibility of source code with future versions of TensorRT, use IStreamReader, not + * v_1_0::IStreamReader + * */ +// Targeting ../nvinfer/IPluginResource.java + + // class IPluginResource + // namespace v_1_0 + +/** + * \class IPluginResource + * + * \brief Interface for plugins to define custom resources that could be shared through the plugin registry + * + * @see IPluginRegistry::acquirePluginResource + * @see IPluginRegistry::releasePluginResource + * */ /** Maximum number of elements in TensorIOMode enum. 
@see TensorIOMode */ // namespace impl // namespace nvinfer1 @@ -615,7 +766,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** * \brief Return the library version number. * - * The format is as for TENSORRT_VERSION: (TENSORRT_MAJOR * 1000) + (TENSORRT_MINOR * 100) + TENSOR_PATCH. + * The format is as for TENSORRT_VERSION: (MAJOR * 100 + MINOR) * 100 + PATCH * */ public static native @NoException(true) int getInferLibVersion(); @@ -625,20 +776,27 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferRuntimePlugin.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ // #ifndef NV_INFER_RUNTIME_PLUGIN_H // #define NV_INFER_RUNTIME_PLUGIN_H +public static final int NV_INFER_INTERNAL_INCLUDE_RUNTIME_BASE = 1; +// #include "NvInferRuntimeBase.h" //! @@ -649,7 +807,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { //! //! //! -// #include "NvInferRuntimeBase.h" +// #undef NV_INFER_INTERNAL_INCLUDE_RUNTIME_BASE /** * \file NvInferRuntimePlugin.h @@ -673,15 +831,28 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * @see IPluginV2::supportsFormat() * */ + +//! +//! + +/** + * \brief Bit at the plugin version to identify that it is a plugin. + * */ + + //! //! //! //! +//! +@Namespace("nvinfer1") @MemberGetter public static native int kPLUGIN_VERSION_PYTHON_BIT(); +public static final int kPLUGIN_VERSION_PYTHON_BIT = kPLUGIN_VERSION_PYTHON_BIT(); // Targeting ../nvinfer/PluginTensorDesc.java -/** \struct PluginVersion +/** + * \struct PluginVersion * * \brief Definition of plugin versions. * @@ -695,7 +866,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** IPluginV2IOExt */ kV2_IOEXT((byte)(2)), /** IPluginV2DynamicExt */ - kV2_DYNAMICEXT((byte)(3)); + kV2_DYNAMICEXT((byte)(3)), + /** IPluginV2DynamicExt-based Python plugins */ + kV2_DYNAMICEXT_PYTHON((byte)(kPLUGIN_VERSION_PYTHON_BIT | 3)); public final byte value; private PluginVersion(byte v) { this.value = v; } @@ -703,6 +876,24 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { public PluginVersion intern() { for (PluginVersion e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } + +/** + * \enum PluginCreatorVersion + * + * \brief Enum to identify version of the plugin creator. 
+ * */ +@Namespace("nvinfer1") public enum PluginCreatorVersion { + /** IPluginCreator */ + kV1(0), + /** IPluginCreator-based Python plugin creators */ + kV1_PYTHON(kPLUGIN_VERSION_PYTHON_BIT); + + public final int value; + private PluginCreatorVersion(int v) { this.value = v; } + private PluginCreatorVersion(PluginCreatorVersion e) { this.value = e.value; } + public PluginCreatorVersion intern() { for (PluginCreatorVersion e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} // Targeting ../nvinfer/IPluginV2.java @@ -715,9 +906,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** * \enum PluginFieldType + * * \brief The possible field types for custom layer. * */ - @Namespace("nvinfer1") public enum PluginFieldType { /** FP16 field type. */ kFLOAT16(0), @@ -736,7 +927,13 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** nvinfer1::Dims field type. */ kDIMS(7), /** Unknown field type. */ - kUNKNOWN(8); + kUNKNOWN(8), + /** BF16 field type. */ + kBF16(9), + /** INT64 field type. */ + kINT64(10), + /** FP8 field type. */ + kFP8(11); public final int value; private PluginFieldType(int v) { this.value = v; } @@ -750,9 +947,77 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/PluginFieldCollection.java + +/** + * \enum PluginCapabilityType + * + * \brief Enumerates the different capability types a IPluginV3 object may have + * */ +@Namespace("nvinfer1") public enum PluginCapabilityType { + /** Core capability. Every IPluginV3 object must have this. */ + kCORE(0), + /** Build capability. IPluginV3 objects provided to TensorRT build phase must have this. */ + kBUILD(1), + /** Runtime capability. IPluginV3 objects provided to TensorRT build and execution phases must have this. 
*/ + kRUNTIME(2); + + public final int value; + private PluginCapabilityType(int v) { this.value = v; } + private PluginCapabilityType(PluginCapabilityType e) { this.value = e.value; } + public PluginCapabilityType intern() { for (PluginCapabilityType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +/** + * \enum TensorRTPhase + * + * \brief Indicates a phase of operation of TensorRT + * */ +@Namespace("nvinfer1") public enum TensorRTPhase { + /** Build phase of TensorRT */ + kBUILD(0), + /** Execution phase of TensorRT */ + kRUNTIME(1); + + public final int value; + private TensorRTPhase(int v) { this.value = v; } + private TensorRTPhase(TensorRTPhase e) { this.value = e.value; } + public TensorRTPhase intern() { for (TensorRTPhase e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../nvinfer/IPluginCreatorInterface.java + + // Targeting ../nvinfer/IPluginCreator.java + // namespace v_1_0 + +/** + * \class IPluginCreatorInterface + * + * \brief Base class for all plugin creator versions. + * + * @see IPluginCreator and IPluginRegistry + * */ + + +//! +//! +//! +//! +//! + +/** + * \class IPluginCreator + * + * \brief Plugin creator class for user implemented layers. + * + * @see IPlugin and IPluginFactory + * + * @deprecated Deprecated in TensorRT 10.0. Please implement IPluginCreatorV3One instead along with IPluginV3 plugins + * instead. + * */ // namespace nvinfer1 @@ -762,15 +1027,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferRuntimeCommon.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_RUNTIME_COMMON_H @@ -787,13 +1057,14 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * \file NvInferRuntimeCommon.h * * This file provides the nvinfer1::IPluginRegistry interface, which will be moved to the NvInferRuntime.h header - * in TensorRT 9.0. + * in a future release. * - * \warning This file will be removed in TensorRT 9.0. + * \warning This file will be removed in a future release. * * \warning Do not directly include this file. Instead include NvInferRuntime.h * */ // #include "NvInferRuntimeBase.h" +// #undef NV_INFER_INTERNAL_INCLUDE_RUNTIME_BASE // #include "NvInferRuntimePlugin.h" // Targeting ../nvinfer/IPluginRegistry.java @@ -807,20 +1078,25 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferLegacyDims.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_LEGACY_DIMS_H // #define NV_INFER_LEGACY_DIMS_H - +// #include "NvInferRuntimeBase.h" //! @@ -830,7 +1106,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { //! //! //! -// #include "NvInferRuntimeCommon.h" +// #undef NV_INFER_INTERNAL_INCLUDE_RUNTIME_BASE /** * \file NvInferLegacyDims.h @@ -865,15 +1141,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferRuntime.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_RUNTIME_H @@ -913,20 +1194,16 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * network operations that are DLA compatible and the resulting serialized engine can be executed using standalone * DLA runtime APIs. See sampleCudla for an example of integrating cuDLA APIs with TensorRT APIs. * */ - @Namespace("nvinfer1") public enum EngineCapability { /** * Standard: TensorRT flow without targeting the safety runtime. * This flow supports both DeviceType::kGPU and DeviceType::kDLA. * */ - kSTANDARD(0), - - /** @deprecated Deprecated in TensorRT 8.0. Superseded by kSTANDARD. */ //! //! - kDEFAULT(kSTANDARD.value), + kSTANDARD(0), /** * Safety: TensorRT flow with restrictions targeting the safety runtime. @@ -934,24 +1211,18 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * This flow supports only DeviceType::kGPU. * * This flag is only supported in NVIDIA Drive(R) products. 
*/ - kSAFETY(1), - - /** @deprecated Deprecated in TensorRT 8.0. Superseded by kSAFETY. */ //! //! - kSAFE_GPU(kSAFETY.value), + kSAFETY(1), /** * DLA Standalone: TensorRT flow with restrictions targeting external, to TensorRT, DLA runtimes. * See DLA documentation for list of supported layers and formats. * This flow supports only DeviceType::kDLA. * */ - kDLA_STANDALONE(2), - - /** @deprecated Deprecated in TensorRT 8.0. Superseded by kDLA_STANDALONE. */ - kSAFE_DLA(kDLA_STANDALONE.value); + kDLA_STANDALONE(2); public final int value; private EngineCapability(int v) { this.value = v; } @@ -1010,6 +1281,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** * \enum TensorLocation + * * \brief The location for tensor data storage, device or host. * */ @Namespace("nvinfer1") public enum TensorLocation { @@ -1041,20 +1313,145 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IPluginV2DynamicExt.java +// Targeting ../nvinfer/IPluginResourceContext.java + + +// Targeting ../nvinfer/IPluginCapability.java + + + // namespace v_1_0 + +/** + * \class IPluginCapability + * + * \brief Base class for plugin capability interfaces + * + * IPluginCapability represents a split in TensorRT V3 plugins to sub-objects that expose different types of + * capabilites a plugin may have, as opposed to a single interface which defines all capabilities and behaviors of a + * plugin. + * + * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. + * + * @see PluginCapabilityType + * */ +// Targeting ../nvinfer/IPluginV3.java + + + + // namespace v_1_0 + +/** + * \class IPluginV3 + * + * \brief Plugin class for the V3 generation of user-implemented layers. + * + * IPluginV3 acts as a wrapper around the plugin capability interfaces that define the actual behavior of the plugin. 
+ * + * @see IPluginCapability + * @see IPluginCreatorV3One + * @see IPluginRegistry + * */ +// Targeting ../nvinfer/IPluginV3OneCore.java + + +// Targeting ../nvinfer/IPluginV3OneBuild.java + + +// Targeting ../nvinfer/IPluginV3OneRuntime.java + + + // namespace v_1_0 + +/** + * \class IPluginV3OneCore + * + * \brief A plugin capability interface that enables the core capability (PluginCapabilityType::kCORE). + * + * @see IPluginCapability + * @see PluginCapabilityType + * @see IPluginV3::getCapabilityInterface() + * */ + + +//! +//! +//! +//! + +/** + * \class IPluginV3OneBuild + * + * \brief A plugin capability interface that enables the build capability (PluginCapabilityType::kBUILD). Exposes + * methods that allow the expression of the build time properties and behavior of a plugin. + * + * @see IPluginCapability + * @see PluginCapabilityType + * @see IPluginV3::getCapabilityInterface() + * */ + + +//! +//! +//! +//! + +/** + * \class IPluginV3OneRuntime + * + * \brief A plugin capability interface that enables the runtime capability (PluginCapabilityType::kRUNTIME). Exposes + * methods that allow the expression of the runtime properties and behavior of a plugin. + * + * @see IPluginCapability + * @see PluginCapabilityType + * @see IPluginV3::getCapabilityInterface() + * */ +// Targeting ../nvinfer/IPluginCreatorV3One.java + + + // namespace v_1_0 + +/** + * \class IPluginCreatorV3One + * + * \brief A plugin creator class capable of producing IPluginV3 objects + * + * @see IPluginV3 + * @see IPluginRegistry + * */ // Targeting ../nvinfer/IProfiler.java + // namespace v_1_0 + +/** + * \class IProfiler + * + * \brief Application-implemented interface for profiling. + * + * When this class is added to an execution context, the profiler will be called once per layer for each invocation of + * executeV2()/enqueueV3(). 
+ * + * It is not recommended to run inference with profiler enabled when the inference execution time is critical since the + * profiler may affect execution time negatively. + * */ + + +//! +//! +//! +//! /** * \enum WeightsRole + * * \brief How a layer uses particular Weights. * * The power weights of an IScaleLayer are omitted. Refitting those is not supported. * */ @Namespace("nvinfer1") public enum WeightsRole { - /** kernel for IConvolutionLayer, IDeconvolutionLayer, or IFullyConnectedLayer */ + /** kernel for IConvolutionLayer or IDeconvolutionLayer */ kKERNEL(0), - /** bias for IConvolutionLayer, IDeconvolutionLayer, or IFullyConnectedLayer */ + /** bias for IConvolutionLayer or IDeconvolutionLayer */ kBIAS(1), /** shift part of IScaleLayer */ kSHIFT(2), @@ -1127,7 +1524,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** Maximum number of elements in TempfileControlFlag enum. @see TempfileControlFlag */ -/** \brief Represents a collection of one or more TempfileControlFlag values combined using bitwise-OR operations. +/** + * \brief Represents a collection of one or more TempfileControlFlag values combined using bitwise-OR operations. * * @see TempfileControlFlag, * IRuntime::setTempfileControlFlags(), @@ -1185,18 +1583,21 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * * \brief List of tactic sources for TensorRT. * - * @see TacticSources, IBuilderConfig::setTacticSources(), IBuilderConfig::getTacticSources(), - * PreviewFeature::kDISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805 + * @see TacticSources, IBuilderConfig::setTacticSources(), IBuilderConfig::getTacticSources() * */ @Namespace("nvinfer1") public enum TacticSource { - /** cuBLAS tactics. Enabled by default. - * \note Disabling kCUBLAS will cause the cublas handle passed to plugins in attachToContext to be null. */ + /** cuBLAS tactics. Disabled by default. 
+ * \note Disabling kCUBLAS will cause the cuBLAS handle passed to plugins in attachToContext to be null. + * @deprecated Deprecated in TensorRT 10.0. */ kCUBLAS(0), - /** cuBLAS LT tactics. - * Enabled for x86 platforms and only enabled for non-x86 platforms when CUDA >= 11.0 by default. */ + + /** cuBLAS LT tactics. Enabled by default. + * @deprecated Deprecated in TensorRT 9.0. */ kCUBLAS_LT(1), - /** cuDNN tactics. Enabled by default. - * \note Disabling kCUDNN will cause the cuDNN handle passed to plugins in attachToContext to be null. */ + + /** cuDNN tactics. Disabled by default. + * \note Disabling kCUDNN will cause the cuDNN handle passed to plugins in attachToContext to be null. + * @deprecated Deprecated in TensorRT 10.0. */ kCUDNN(2), /** Enables convolution tactics implemented with edge mask tables. These tactics tradeoff memory for performance by @@ -1245,12 +1646,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** Do not print any layer information. */ kNONE(1), /** Print detailed layer information including layer names and layer parameters. */ - kDETAILED(2), - - /** @deprecated Deprecated in TensorRT 8.0. Superseded by kLAYER_NAMES_ONLY. */ - kDEFAULT(kLAYER_NAMES_ONLY.value), - /** @deprecated Deprecated in TensorRT 8.0. Superseded by kDETAILED. */ - kVERBOSE(kDETAILED.value); + kDETAILED(2); public final int value; private ProfilingVerbosity(int v) { this.value = v; } @@ -1261,12 +1657,109 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** Maximum number of profile verbosity levels in ProfilingVerbosity enum. @see ProfilingVerbosity */ + +/** + * \brief Represents one or more SerializationFlag values using binary OR + * operations, e.g., 1U << SerializationFlag::kEXCLUDE_LEAN_RUNTIME + * + * @see ISerializationConfig::setFlags(), ISerializationConfig::getFlags() + * */ + + +//! +//! +//! +//! 
+ +/** + * \enum SerializationFlag + * + * \brief List of valid flags that the engine can enable when serializing the bytes. + * + * @see ISerializationConfig::setFlags(), ISerializationConfig::getFlags() + * */ +@Namespace("nvinfer1") public enum SerializationFlag { + /** Exclude the weights that can be refitted. */ + kEXCLUDE_WEIGHTS(0), + /** Exclude the lean runtime. */ + kEXCLUDE_LEAN_RUNTIME(1); + + public final int value; + private SerializationFlag(int v) { this.value = v; } + private SerializationFlag(SerializationFlag e) { this.value = e.value; } + public SerializationFlag intern() { for (SerializationFlag e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +/** Maximum number of serialization flags in SerializationFlag enum. @see SerializationFlag */ + +// Targeting ../nvinfer/ISerializationConfig.java + + + +/** + * \enum ExecutionContextAllocationStrategy + * + * \brief Different memory allocation behaviors for IExecutionContext. + * + * IExecutionContext requires a block of device memory for internal activation tensors during inference. The user can + * either let the execution context manage the memory in various ways or allocate the memory themselves. + * + * @see ICudaEngine::createExecutionContext() + * @see IExecutionContext::setDeviceMemory() + * */ +@Namespace("nvinfer1") public enum ExecutionContextAllocationStrategy { + /** Default static allocation with the maximum size across all profiles. */ + kSTATIC(0), + /** Reallocate for a profile when it's selected. */ + kON_PROFILE_CHANGE(1), + /** The user supplies custom allocation to the execution context. 
*/ + kUSER_MANAGED(2); + + public final int value; + private ExecutionContextAllocationStrategy(int v) { this.value = v; } + private ExecutionContextAllocationStrategy(ExecutionContextAllocationStrategy e) { this.value = e.value; } + public ExecutionContextAllocationStrategy intern() { for (ExecutionContextAllocationStrategy e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} + +/** + * \brief Maximum number of memory allocation strategies in ExecutionContextAllocationStrategy enum. + * + * @see ExecutionContextAllocationStrategy + * */ + // Targeting ../nvinfer/ICudaEngine.java // Targeting ../nvinfer/IOutputAllocator.java + // namespace v_1_0 + +/** + * \class IOutputAllocator + * + * \brief Callback from ExecutionContext::enqueueV3() + * + * @see IExecutionContext::enqueueV3() + * */ +// Targeting ../nvinfer/IDebugListener.java + + + // namespace v_1_0 + +/** + * \class IDebugListener + * + * \brief User-implemented callback for notification when value of a debug tensor is updated. + * */ + + +//! +//! +//! +//! // Targeting ../nvinfer/IExecutionContext.java // class IExecutionContext @@ -1375,24 +1868,73 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/ILoggerFinder.java +// Targeting ../nvinfer/IGpuAsyncAllocator.java + + + // namespace v_1_0 + +/** + * \class IGpuAsyncAllocator + * + * \brief Application-implemented class for controlling asynchronous (stream ordered) memory allocation on the GPU. + * + * \warning The lifetime of an IGpuAsyncAllocator object must exceed that of all objects that use it. + * + * The advantage of deriving from IGpuAsyncAllocator instead of IGpuAllocator is that you only have + * to override two methods: allocateAsync() and deallocateAsync() to implement an allocator with + * asynchronous capability, whereas deriving from IGpuAllocator requires overriding four methods, + * including two deprecated methods. 
+ * + * @see IGpuAllocator */ // namespace nvinfer1 +/** + * \brief Return the library major version number. + * */ + +//! +//! +public static native @NoException(true) int getInferLibMajorVersion(); +/** + * \brief Return the library minor version number. + * */ + +//! +//! +public static native @NoException(true) int getInferLibMinorVersion(); +/** + * \brief Return the library patch version number. + * */ + +//! +//! +public static native @NoException(true) int getInferLibPatchVersion(); +/** + * \brief Return the library build version number. + * */ +public static native @NoException(true) int getInferLibBuildVersion(); + // #endif // NV_INFER_RUNTIME_H // Parsed from NvInfer.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ // #ifndef NV_INFER_H @@ -1449,8 +1991,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { @Namespace("nvinfer1") public enum LayerType { /** Convolution layer. */ kCONVOLUTION(0), - /** Fully connected layer. */ - kFULLY_CONNECTED(1), + /** Cast layer */ + kCAST(1), /** Activation layer. */ kACTIVATION(2), /** Pooling layer. */ @@ -1487,62 +2029,60 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { kRAGGED_SOFTMAX(18), /** Constant layer. */ kCONSTANT(19), - /** RNNv2 layer. */ - kRNN_V2(20), /** Identity layer. */ - kIDENTITY(21), + kIDENTITY(20), /** PluginV2 layer. */ - kPLUGIN_V2(22), + kPLUGIN_V2(21), /** Slice layer. */ - kSLICE(23), + kSLICE(22), /** Shape layer. */ - kSHAPE(24), + kSHAPE(23), /** Parametric ReLU layer. */ - kPARAMETRIC_RELU(25), + kPARAMETRIC_RELU(24), /** Resize Layer. */ - kRESIZE(26), + kRESIZE(25), /** Loop Trip limit layer */ - kTRIP_LIMIT(27), + kTRIP_LIMIT(26), /** Loop Recurrence layer */ - kRECURRENCE(28), + kRECURRENCE(27), /** Loop Iterator layer */ - kITERATOR(29), + kITERATOR(28), /** Loop output layer */ - kLOOP_OUTPUT(30), + kLOOP_OUTPUT(29), /** Select layer. 
*/ - kSELECT(31), + kSELECT(30), /** Fill layer */ - kFILL(32), + kFILL(31), /** Quantize layer */ - kQUANTIZE(33), + kQUANTIZE(32), /** Dequantize layer */ - kDEQUANTIZE(34), + kDEQUANTIZE(33), /** Condition layer */ - kCONDITION(35), + kCONDITION(34), /** Conditional Input layer */ - kCONDITIONAL_INPUT(36), + kCONDITIONAL_INPUT(35), /** Conditional Output layer */ - kCONDITIONAL_OUTPUT(37), + kCONDITIONAL_OUTPUT(36), /** Scatter layer */ - kSCATTER(38), + kSCATTER(37), /** Einsum layer */ - kEINSUM(39), + kEINSUM(38), /** Assertion layer */ - kASSERTION(40), + kASSERTION(39), /** OneHot layer */ - kONE_HOT(41), + kONE_HOT(40), /** NonZero layer */ - kNON_ZERO(42), + kNON_ZERO(41), /** Grid sample layer */ - kGRID_SAMPLE(43), + kGRID_SAMPLE(42), /** NMS layer */ - kNMS(44), + kNMS(43), /** Reverse sequence layer */ - kREVERSE_SEQUENCE(45), + kREVERSE_SEQUENCE(44), /** Normalization layer */ - kNORMALIZATION(46), - /** Cast layer */ - kCAST(47); + kNORMALIZATION(45), + /** PluginV3 layer. */ + kPLUGIN_V3(46); public final int value; private LayerType(int v) { this.value = v; } @@ -1599,7 +2139,11 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** Scaled tanh activation: alpha*tanh(beta*x) */ kSCALED_TANH(10), /** Thresholded ReLU activation: x>alpha ? x : 0 */ - kTHRESHOLDED_RELU(11); + kTHRESHOLDED_RELU(11), + /** GELU erf activation: 0.5 * x * (1 + erf(sqrt(0.5) * x)) */ + kGELU_ERF(12), + /** GELU tanh activation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (0.044715F * pow(x, 3) + x))) */ + kGELU_TANH(13); public final int value; private ActivationType(int v) { this.value = v; } @@ -1626,8 +2170,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * \brief Enumerates the modes of padding to perform in convolution, deconvolution and pooling layer, * padding mode takes precedence if setPaddingMode() and setPrePadding() are also used. 
* - * There are three padding styles, EXPLICIT, SAME, and CAFFE, with each style having two variants. - * The EXPLICIT and CAFFE styles determine if the final sampling location is used or not. + * There are two padding styles, EXPLICIT and SAME with each style having two variants. + * The EXPLICIT style determine if the final sampling location is used or not. * The SAME style determine if the asymmetry in the padding is on the pre or post padding. * *

{@code
@@ -1649,18 +2193,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
  *  
{@code
  *          O = floor((M - DK) / S) + 1
  *  }
- * - CAFFE_ROUND_DOWN: - *
{@code
- *          O = floor((I + B * 2 - DK) / S) + 1
- *  }
* - EXPLICIT_ROUND_UP: *
{@code
  *          O = ceil((M - DK) / S) + 1
  *  }
- * - CAFFE_ROUND_UP: - *
{@code
- *          O = ceil((I + B * 2 - DK) / S) + 1
- *  }
* - SAME_UPPER: *
{@code
  *          O = ceil(I / S)
@@ -1678,9 +2214,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
  * 
  *  Formulas for Deconvolution:
  *      - EXPLICIT_ROUND_DOWN:
- *      - CAFFE_ROUND_DOWN:
  *      - EXPLICIT_ROUND_UP:
- *      - CAFFE_ROUND_UP:
  *  
{@code
  *          O = (I - 1) * S + DK - (B + A)
  *  }
@@ -1722,14 +2256,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * A = floor(P / 2) * B = P - A * }
- * - CAFFE_ROUND_DOWN: - *
{@code
- *          EXPLICIT_ROUND_DOWN - ((EXPLICIT_ROUND_DOWN - 1) * S >= I + B)
- *  }
- * - CAFFE_ROUND_UP: - *
{@code
- *          EXPLICIT_ROUND_UP - ((EXPLICIT_ROUND_UP - 1) * S >= I + B)
- *  }
* * Pooling Example 1: *
{@code
@@ -1793,54 +2319,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
  *  
{@code
  *      Given I = {6, 6}, B = {3, 3}, A = {3, 3}, S = {2, 2}, F = {3, 3}. What is O?
  *  }
- * - * - CAFFE_ROUND_DOWN: - *
{@code
- *      Computation:
- *          M = {6, 6} + {3, 3} + {3, 3} ==> {12, 12}
- *          EXPLICIT_ROUND_DOWN ==> floor((M - F) / S) + 1
- *                              ==> floor(({12, 12} - {3, 3}) / {2, 2}) + {1, 1}
- *                              ==> {5, 5}
- *          DIFF = (((EXPLICIT_ROUND_DOWN - 1) * S >= I + B) ? {1, 1} : {0, 0})
- *            ==> ({5, 5} - {1, 1}) * {2, 2} >= {6, 6} + {3, 3} ? {1, 1} : {0,0}
- *            ==> {0, 0}
- *          O ==> EXPLICIT_ROUND_DOWN - DIFF
- *            ==> {5, 5} - {0, 0}
- *            ==> {5, 5}
- *  }
- * - CAFFE_ROUND_UP: - *
{@code
- *      Computation:
- *          M = {6, 6} + {3, 3} + {3, 3} ==> {12, 12}
- *          EXPLICIT_ROUND_UP ==> ceil((M - F) / S) + 1
- *                            ==> ceil(({12, 12} - {3, 3}) / {2, 2}) + {1, 1}
- *                            ==> {6, 6}
- *          DIFF = (((EXPLICIT_ROUND_UP - 1) * S >= I + B) ? {1, 1} : {0, 0})
- *            ==> ({6, 6} - {1, 1}) * {2, 2} >= {6, 6} + {3, 3} ? {1, 1} : {0,0}
- *            ==> {1, 1}
- *          O ==> EXPLICIT_ROUND_UP - DIFF
- *            ==> {6, 6} - {1, 1}
- *            ==> {5, 5}
- *  }
- * - * The sample points are {0, 2, 4, 6, 8} in each dimension.
- * CAFFE_ROUND_DOWN and CAFFE_ROUND_UP have two restrictions each on usage with pooling operations. - * This will cause getDimensions to return an empty dimension and also to reject the network - * at validation time.
- * For more information on original reference code, see - * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cpp - * - * - Restriction 1: - *
{@code
- *      CAFFE_ROUND_DOWN: B >= F is an error if (B - S) < F
- *      CAFFE_ROUND_UP: (B + S) >= (F + 1) is an error if B < (F + 1)
- *  }
- * - * - Restriction 2: - *
{@code
- *      CAFFE_ROUND_DOWN: (B - S) >= F is an error if B >= F
- *      CAFFE_ROUND_UP: B >= (F + 1) is an error if (B + S) >= (F + 1)
- *  }
* */ @Namespace("nvinfer1") public enum PaddingMode { /** Use explicit padding, rounding output size down. */ @@ -1850,11 +2328,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** Use SAME padding, with prePadding <= postPadding. */ kSAME_UPPER(2), /** Use SAME padding, with prePadding >= postPadding. */ - kSAME_LOWER(3), - /** Use CAFFE padding, rounding output size down, uses prePadding value. */ - kCAFFE_ROUND_DOWN(4), - /** Use CAFFE padding, rounding output size up, uses prePadding value. */ - kCAFFE_ROUND_UP(5); + kSAME_LOWER(3); public final int value; private PaddingMode(int v) { this.value = v; } @@ -1871,9 +2345,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IConvolutionLayer.java -// Targeting ../nvinfer/IFullyConnectedLayer.java - - // Targeting ../nvinfer/IActivationLayer.java @@ -1884,9 +2355,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * \brief The type of pooling to perform in a pooling layer. * */ @Namespace("nvinfer1") public enum PoolingType { - kMAX(0), // Maximum over elements - kAVERAGE(1), // Average over elements. If the tensor is padded, the count includes the padding - kMAX_AVERAGE_BLEND(2);// Blending between max and average pooling: (1-blendFactor)*maxPool + blendFactor*avgPool + /** Maximum over elements */ + kMAX(0), + /** Average over elements. If the tensor is padded, the count includes the padding */ + kAVERAGE(1), + /** Blending between max and average pooling: (1-blendFactor)*maxPool + blendFactor*avgPool */ + kMAX_AVERAGE_BLEND(2); public final int value; private PoolingType(int v) { this.value = v; } @@ -1953,9 +2427,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * * Operations kAND, kOR, and kXOR must have inputs of DataType::kBOOL. * - * Operation kPOW must have inputs of DataType::kFLOAT, DataType::kHALF, or DataType::kINT8. 
+ * Operation kPOW must have inputs of floating-point type or DataType::kINT8. * - * All other operations must have inputs of DataType::kFLOAT, DataType::kHALF, DataType::kINT8, or DataType::kINT32. + * All other operations must have inputs of floating-point type, DataType::kINT8, DataType::kINT32, or + * DataType::kINT64. * * @see IElementWiseLayer * */ @@ -2034,212 +2509,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IGatherLayer.java - -/** - * \enum RNNOperation - * - * \brief Enumerates the RNN operations that may be performed by an RNN layer. - * - * __Equation definitions__ - * - * The equations below have the following naming convention: - * - * ~~~ - * t := current time step - * - * i := input gate - * o := output gate - * f := forget gate - * z := update gate - * r := reset gate - * c := cell gate - * h := hidden gate - * - * g[t] denotes the output of gate g at timestep t, e.g. - * f[t] is the output of the forget gate f. - * - * X[t] := input tensor for timestep t - * C[t] := cell state for timestep t - * H[t] := hidden state for timestep t - * - * W[g] := W (input) parameter weight matrix for gate g - * R[g] := U (recurrent) parameter weight matrix for gate g - * Wb[g] := W (input) parameter bias vector for gate g - * Rb[g] := U (recurrent) parameter bias vector for gate g - * - * Unless otherwise specified, all operations apply pointwise - * to elements of each operand tensor. - * - * ReLU(X) := max(X, 0) - * tanh(X) := hyperbolic tangent of X - * sigmoid(X) := 1 / (1 + exp(-X)) - * exp(X) := e^X - * - * A.B denotes matrix multiplication of A and B. - * A*B denotes pointwise multiplication of A and B. 
- * ~~~ - * - * __Equations__ - * - * Depending on the value of RNNOperation chosen, each sub-layer of the RNN - * layer will perform one of the following operations: - * - * ~~~ - * ::kRELU - * - * H[t] := ReLU(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i]) - * - * ::kTANH - * - * H[t] := tanh(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i]) - * - * ::kLSTM - * - * i[t] := sigmoid(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i]) - * f[t] := sigmoid(W[f].X[t] + R[f].H[t-1] + Wb[f] + Rb[f]) - * o[t] := sigmoid(W[o].X[t] + R[o].H[t-1] + Wb[o] + Rb[o]) - * c[t] := tanh(W[c].X[t] + R[c].H[t-1] + Wb[c] + Rb[c]) - * - * C[t] := f[t]*C[t-1] + i[t]*c[t] - * H[t] := o[t]*tanh(C[t]) - * - * ::kGRU - * - * z[t] := sigmoid(W[z].X[t] + R[z].H[t-1] + Wb[z] + Rb[z]) - * r[t] := sigmoid(W[r].X[t] + R[r].H[t-1] + Wb[r] + Rb[r]) - * h[t] := tanh(W[h].X[t] + r[t]*(R[h].H[t-1] + Rb[h]) + Wb[h]) - * - * H[t] := (1 - z[t])*h[t] + z[t]*H[t-1] - * ~~~ - * - * @see IRNNv2Layer - * */ -@Namespace("nvinfer1") public enum RNNOperation { - /** Single gate RNN w/ ReLU activation function. */ - kRELU(0), - /** Single gate RNN w/ TANH activation function. */ - kTANH(1), - /** Four-gate LSTM network w/o peephole connections. */ - kLSTM(2), - /** Three-gate network consisting of Gated Recurrent Units. */ - kGRU(3); - - public final int value; - private RNNOperation(int v) { this.value = v; } - private RNNOperation(RNNOperation e) { this.value = e.value; } - public RNNOperation intern() { for (RNNOperation e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** - * Maximum number of elements in RNNOperation enum. - * - * @see RNNOperation - * */ - - -/** - * \enum RNNDirection - * - * \brief Enumerates the RNN direction that may be performed by an RNN layer. - * - * @see IRNNv2Layer - * */ -@Namespace("nvinfer1") public enum RNNDirection { - /** Network iterations from first input to last input. 
*/ - kUNIDIRECTION(0), - /** Network iterates from first to last and vice versa and outputs concatenated. */ - kBIDIRECTION(1); - - public final int value; - private RNNDirection(int v) { this.value = v; } - private RNNDirection(RNNDirection e) { this.value = e.value; } - public RNNDirection intern() { for (RNNDirection e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** - * Maximum number of elements in RNNDirection enum. - * - * @see RNNDirection - * */ - - -/** - * \enum RNNInputMode - * - * \brief Enumerates the RNN input modes that may occur with an RNN layer. - * - * If the RNN is configured with RNNInputMode::kLINEAR, then for each gate {@code g} in the first layer of the RNN, - * the input vector {@code X[t]} (length {@code E}) is left-multiplied by the gate's corresponding weight matrix {@code W[g]} - * (dimensions {@code HxE}) as usual, before being used to compute the gate output as described by \ref RNNOperation. - * - * If the RNN is configured with RNNInputMode::kSKIP, then this initial matrix multiplication is "skipped" - * and {@code W[g]} is conceptually an identity matrix. In this case, the input vector {@code X[t]} must have length {@code H} - * (the size of the hidden state). - * - * @see IRNNv2Layer - * */ -@Namespace("nvinfer1") public enum RNNInputMode { - /** Perform the normal matrix multiplication in the first recurrent layer. */ - kLINEAR(0), - /** No operation is performed on the first recurrent layer. */ - kSKIP(1); - - public final int value; - private RNNInputMode(int v) { this.value = v; } - private RNNInputMode(RNNInputMode e) { this.value = e.value; } - public RNNInputMode intern() { for (RNNInputMode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** - * Maximum number of elements in RNNInputMode enum. 
- * - * @see RNNInputMode - * */ - - -/** - * \enum RNNGateType - * - * \brief Identifies an individual gate within an RNN cell. - * - * @see RNNOperation - * */ -@Namespace("nvinfer1") public enum RNNGateType { - /** Input gate (i). */ - kINPUT(0), - /** Output gate (o). */ - kOUTPUT(1), - /** Forget gate (f). */ - kFORGET(2), - /** Update gate (z). */ - kUPDATE(3), - /** Reset gate (r). */ - kRESET(4), - /** Cell gate (c). */ - kCELL(5), - /** Hidden gate (h). */ - kHIDDEN(6); - - public final int value; - private RNNGateType(int v) { this.value = v; } - private RNNGateType(RNNGateType e) { this.value = e.value; } - public RNNGateType intern() { for (RNNGateType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** - * Maximum number of elements in RNNGateType enum. - * - * @see RNNGateType - * */ - -// Targeting ../nvinfer/IRNNv2Layer.java +// Targeting ../nvinfer/IPluginV2Layer.java -// Targeting ../nvinfer/IPluginV2Layer.java +// Targeting ../nvinfer/IPluginV3Layer.java @@ -2250,13 +2523,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * * Operations kNOT must have inputs of DataType::kBOOL. * - * Operation kSIGN must have inputs of DataType::kFLOAT, DataType::kHALF, DataType::kINT8, or DataType::kINT32. + * Operation kSIGN and kABS must have inputs of floating-point type, DataType::kINT8, DataType::kINT32 or + * DataType::kINT64. * - * Operation kISINF must have inputs of DataType::kFLOAT or DataType::kHALF. + * Operation kISINF must have inputs of floating-point type. * - * All other operations must have inputs of DataType::kFLOAT, DataType::kHALF, or DataType::kINT8. - * - * Operations kSIGN and kROUND are not supported in implicit batch mode. + * All other operations must have inputs of floating-point type. 
* * @see IUnaryLayer * */ @@ -2386,7 +2658,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { @Namespace("nvinfer1") public enum SampleMode { /** Fail with error when the coordinates are out of bounds. */ kSTRICT_BOUNDS(0), - kDEFAULT(kSTRICT_BOUNDS.value), /** @deprecated Use kSTRICT_BOUNDS. */ /** Coordinates wrap around periodically. */ kWRAP(1), /** Out of bounds indices are clamped to bounds. */ @@ -2402,15 +2673,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { private SampleMode(int v) { this.value = v; } private SampleMode(SampleMode e) { this.value = e.value; } public SampleMode intern() { for (SampleMode e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** @deprecated Deprecated in TensorRT 8.5. Superseded by SampleMode. */ - - -//! -//! -//! + @Override public String toString() { return intern().name(); } +} /** * Maximum number of elements in SampleMode enum. @@ -2537,8 +2801,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { public InterpolationMode intern() { for (InterpolationMode e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } - -/** @deprecated Deprecated in TensorRT 8.5. Superseded by InterpolationMode. */ /** * Maximum number of elements in InterpolationMode enum. * @@ -2666,7 +2928,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { -/** Enum that describes kinds of loop outputs. */ +/** + * \enum Enum that describes kinds of loop outputs. + * */ @Namespace("nvinfer1") public enum LoopOutput { /** Output value is value of tensor for last iteration. */ kLAST_VALUE(0), @@ -2691,10 +2955,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * */ -/** Enum that describes kinds of trip limits. */ +/** + * \enum Enum that describes kinds of trip limits. 
+ * */ @Namespace("nvinfer1") public enum TripLimit { - /** Tensor is scalar of type kINT32 that contains the trip count. */ + /** Tensor is a scalar of type kINT32 or kINT64 that contains the trip count. */ kCOUNT(0), /** Tensor is a scalar of type kBOOL. Loop terminates when value is false. */ kWHILE(1); @@ -2760,11 +3026,27 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * @see IFillLayer * */ @Namespace("nvinfer1") public enum FillOperation { - /** Generate evenly spaced numbers over a specified interval. */ + /** Compute each value via an affine function of its indices. + * For example, suppose the parameters for the IFillLayer are: + * + * * Dimensions = [3,4] + * * Alpha = 1 + * * Beta = [100,10] + * + * Element [i,j] of the output is Alpha + Beta[0]*i + Beta[1]*j. + * Thus the output matrix is: + * + * 1 11 21 31 + * 101 111 121 131 + * 201 211 221 231 + * + * A static beta b is implicitly a 1D tensor, i.e. Beta = [b]. */ kLINSPACE(0), - /** Generate a tensor with random values drawn from a uniform distribution. */ + + /** Randomly draw values from a uniform distribution. */ kRANDOM_UNIFORM(1), - /** Generate a tensor with random values drawn from a normal distribution. */ + + /** Randomly draw values from a normal distribution. */ kRANDOM_NORMAL(2); public final int value; @@ -2794,6 +3076,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** + * \enum ScatterMode + * * \brief Control form of IScatterLayer * * @see IScatterLayer @@ -2828,6 +3112,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // class IGridSampleLayer /** + * \enum BoundingBoxFormat + * * \brief Representation of bounding box data used for the Boxes input tensor in INMSLayer * * @see INMSLayer @@ -2865,14 +3151,18 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { /** - * enum CalibrationAlgoType + * \enum CalibrationAlgoType * * \brief Version of calibration algorithm to use. 
* */ @Namespace("nvinfer1") public enum CalibrationAlgoType { + /** Legacy calibration */ kLEGACY_CALIBRATION(0), + /** Legacy entropy calibration */ kENTROPY_CALIBRATION(1), + /** Entropy calibration */ kENTROPY_CALIBRATION_2(2), + /** Minmax calibration */ kMINMAX_CALIBRATION(3); public final int value; @@ -2894,15 +3184,72 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IInt8EntropyCalibrator.java + // namespace v_1_0 + +/** + * \class IInt8EntropyCalibrator + * + * \brief Entropy calibrator. + * + * This is the Legacy Entropy calibrator. It is less complicated than the legacy calibrator and + * produces better results. + * + * \note To ensure compatibility of source code with future versions of TensorRT, use IEntropyCalibrator, not + * v_1_0::IEntropyCalibrator + * */ // Targeting ../nvinfer/IInt8EntropyCalibrator2.java + // namespace v_1_0 + +/** + * \class IInt8EntropyCalibrator2 + * + * \brief Entropy calibrator 2. + * + * This is the preferred calibrator. This is the required calibrator for DLA, as it supports per + * activation tensor scaling. + * + * \note To ensure compatibility of source code with future versions of TensorRT, use IEntropyCalibrator2, not + * v_1_0::IEntropyCalibrator2 + * */ // Targeting ../nvinfer/IInt8MinMaxCalibrator.java + // namespace v_1_0 + +/** + * \class IInt8MinMaxCalibrator + * + * \brief MinMax Calibrator. + * + * It supports per activation tensor scaling. + * + * \note To ensure compatibility of source code with future versions of TensorRT, use IMinMaxCalibrator>, not + * v_1_0::IMinMaxCalibrator + * */ // Targeting ../nvinfer/IInt8LegacyCalibrator.java + // namespace v_1_0 + +/** + * \class IInt8LegacyCalibrator + * + * \brief Legacy calibrator. + * + * This calibrator requires user parameterization, + * and is provided as a fallback option if the other calibrators yield poor results. 
+ * + * \note To ensure compatibility of source code with future versions of TensorRT, use ILegacyCalibrator, not + * v_1_0::ILegacyCalibrator + * */ + + +//! +//! +//! +//! // Targeting ../nvinfer/IAlgorithmIOInfo.java @@ -2918,6 +3265,23 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IAlgorithmSelector.java + // namespace v_1_0 + +/** + * \class IAlgorithmSelector + * + * \brief Interface implemented by application for selecting and reporting algorithms of a layer provided by the + * builder. + * \note A layer in context of algorithm selection may be different from ILayer in INetworkDefiniton. + * For example, an algorithm might be implementing a conglomeration of multiple ILayers in INetworkDefinition. + * \note To ensure compatibility of source code with future versions of TensorRT, use IAlgorithmSelector, not + * v_1_0::IAlgorithmSelector + * */ + + +//! +//! +//! /** * \brief Represents one or more QuantizationFlag values using binary OR @@ -2982,44 +3346,31 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { @Namespace("nvinfer1") public enum BuilderFlag { /** Enable FP16 layer selection, with FP32 fallback. */ kFP16(0), + /** Enable Int8 layer selection, with FP32 fallback with FP16 fallback if kFP16 also specified. */ kINT8(1), + /** Enable debugging of layers via synchronizing after every layer. */ kDEBUG(2), - /** Enable layers marked to execute on GPU if layer cannot execute on DLA. -//! -//! -//! -//! */ - kGPU_FALLBACK(3), - /** Legacy flag with effect similar to setting all of these three flags: - * - * * kPREFER_PRECISION_CONSTRAINTS - * * kDIRECT_IO - * * kREJECT_EMPTY_ALGORITHMS - * - * except that if the direct I/O requirement cannot be met and kDIRECT_IO was not explicitly set, - * instead of the build failing, the build falls back as if kDIRECT_IO was not set. - * - * @deprecated Deprecated in TensorRT 8.2. 
- * */ - kSTRICT_TYPES(4), + /** Enable layers marked to execute on GPU if layer cannot execute on DLA. */ + kGPU_FALLBACK(3), /** Enable building a refittable engine. */ - kREFIT(5), + kREFIT(4), + /** Disable reuse of timing information across identical layers. */ - kDISABLE_TIMING_CACHE(6), + kDISABLE_TIMING_CACHE(5), /** Allow (but not require) computations on tensors of type DataType::kFLOAT to use TF32. * TF32 computes inner products by rounding the inputs to 10-bit mantissas before * multiplying, but accumulates the sum using 23-bit mantissas. Enabled by default. */ - kTF32(7), + kTF32(6), /** Allow the builder to examine weights and use optimized functions when weights have suitable sparsity. */ //! - kSPARSE_WEIGHTS(8), + kSPARSE_WEIGHTS(7), /** Change the allowed parameters in the EngineCapability::kSTANDARD flow to * match the restrictions that EngineCapability::kSAFETY check against for DeviceType::kGPU @@ -3027,59 +3378,114 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * is forced to true if EngineCapability::kSAFETY at build time if it is unset. * * This flag is only supported in NVIDIA Drive(R) products. */ - kSAFETY_SCOPE(9), + kSAFETY_SCOPE(8), /** Require that layers execute in specified precisions. Build fails otherwise. */ - kOBEY_PRECISION_CONSTRAINTS(10), + kOBEY_PRECISION_CONSTRAINTS(9), /** Prefer that layers execute in specified precisions. * Fall back (with warning) to another precision if build would otherwise fail. */ - kPREFER_PRECISION_CONSTRAINTS(11), + kPREFER_PRECISION_CONSTRAINTS(10), /** Require that no reformats be inserted between a layer and a network I/O tensor * for which ITensor::setAllowedFormats was called. * Build fails if a reformat is required for functional correctness. */ - kDIRECT_IO(12), + kDIRECT_IO(11), /** Fail if IAlgorithmSelector::selectAlgorithms returns an empty set of algorithms. */ //! 
- kREJECT_EMPTY_ALGORITHMS(13), - - /** Enable heuristic-based tactic selection for shorter engine generation time. The engine may not - * be as performant as when built with a profiling-based builder. - * - * This flag is only supported by NVIDIA Ampere and later GPUs. - * @deprecated Superseded by builder optimization level 2. Deprecated in TensorRT 8.6 */ - -//! - kENABLE_TACTIC_HEURISTIC(14), + kREJECT_EMPTY_ALGORITHMS(12), /** Restrict to lean runtime operators to provide version forward compatibility * for the plan. * - * Using this flag with ICudaEngine::serialize() and BuilderFlag::kREFIT would result in error. * This flag is only supported by NVIDIA Volta and later GPUs. - * This flag is not supported in NVIDIA Drive(R) products. - * This flag is not supported with implicit batch mode. Network must be created with - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH. */ + * This flag is not supported in NVIDIA Drive(R) products. */ //! -//! - kVERSION_COMPATIBLE(15), + kVERSION_COMPATIBLE(13), /** Exclude lean runtime from the plan when version forward compatability is enabled. * By default, this flag is unset, so the lean runtime will be included in the plan. * - * If BuilderFlag::kVERSION_COMPATIBLE is not set then the value of this flag will be ignored. - * - * This flag is not supported with implicit batch mode. Network must be created with - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH. */ - kEXCLUDE_LEAN_RUNTIME(16), + * If BuilderFlag::kVERSION_COMPATIBLE is not set then the value of this flag will be ignored. */ + +//! +//! + kEXCLUDE_LEAN_RUNTIME(14), /** Enable FP8 layer selection, with FP32 fallback. - * \warning kFP8 is not supported yet and will result in an error or undefined behavior. */ - kFP8(17); + * + * This flag is not supported with hardware-compatibility mode. + * + * @see HardwareCompatibilityLevel */ + kFP8(15), + + /** Emit error when a tactic being timed is not present in the timing cache. 
+ * This flag has an effect only when IBuilderConfig has an associated ITimingCache. */ + kERROR_ON_TIMING_CACHE_MISS(16), + + /** Enable DataType::kBF16 layer selection, with FP32 fallback. + * This flag is only supported by NVIDIA Ampere and later GPUs. */ + kBF16(17), + + /** Disable caching of JIT-compilation results during engine build. + * By default, JIT-compiled code will be serialized as part of the timing cache, which may significantly increase + * the cache size. Setting this flag prevents the code from being serialized. This flag has an effect only when + * BuilderFlag::DISABLE_TIMING_CACHE is not set. */ + kDISABLE_COMPILATION_CACHE(18), + + /** Strip the refittable weights from the engine plan file. */ + kSTRIP_PLAN(19), + + /** @deprecated Deprecated in TensorRT 10.0. Superseded by kSTRIP_PLAN. */ + kWEIGHTLESS(kSTRIP_PLAN.value), + + /** Create a refittable engine under the assumption that the refit weights will be identical to those provided at + * build time. The resulting engine will have the same performance as a non-refittable one. All refittable weights + * can be refitted through the refit API, but if the refit weights are not identical to the build-time weights, + * behavior is undefined. When used alongside 'kSTRIP_PLAN', this flag will result in a small plan file for which + * weights are later supplied via refitting. This enables use of a single set of weights with different inference + * backends, or with TensorRT plans for multiple GPU architectures. */ + + +//! +//! +//! +//! +//! +//! +//! +//! + kREFIT_IDENTICAL(20), + + /** + * \brief Enable weight streaming for the current engine. + * + * Weight streaming from the host enables execution of models that do not fit + * in GPU memory by allowing TensorRT to intelligently stream network weights + * from the CPU DRAM. Please see ICudaEngine::getMinimumWeightStreamingBudget + * for the default memory budget when this flag is enabled. 
+ * + * Enabling this feature changes the behavior of + * IRuntime::deserializeCudaEngine to allocate the entire network’s weights + * on the CPU DRAM instead of GPU memory. Then, + * ICudaEngine::createExecutionContext will determine the optimal split of + * weights between the CPU and GPU and place weights accordingly. + * + * Future TensorRT versions may enable this flag by default. + * + * \warning Enabling this flag may marginally increase build time. + * + * \warning Enabling this feature will significantly increase the latency of + * ICudaEngine::createExecutionContext. + * + * @see IRuntime::deserializeCudaEngine, + * ICudaEngine::getMinimumWeightStreamingBudget, + * ICudaEngine::setWeightStreamingBudget + * */ + kWEIGHT_STREAMING(21); public final int value; private BuilderFlag(int v) { this.value = v; } @@ -3108,7 +3514,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { @Namespace("nvinfer1") public enum MemoryPoolType { /** * kWORKSPACE is used by TensorRT to store intermediate buffers within an operation. - * This is equivalent to the deprecated IBuilderConfig::setMaxWorkspaceSize and overrides that value. * This defaults to max device memory. Set to a smaller value to restrict tactics that use over the * threshold en masse. For more targeted removal of tactics use the IAlgorithmSelector * interface. @@ -3123,7 +3528,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * kDLA_MANAGED_SRAM is a fast software managed RAM used by DLA to communicate within a layer. * The size of this pool must be at least 4 KiB and must be a power of 2. * This defaults to 1 MiB. - * Orin has capacity of 1 MiB per core, and Xavier shares 4 MiB across all of its accelerator cores. + * Orin has capacity of 1 MiB per core. 
* */ @@ -3160,7 +3565,27 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * This defaults to 75% of totalGlobalMem as reported by cudaGetDeviceProperties when * cudaGetDeviceProperties.embedded is true, and 100% otherwise. * */ - kTACTIC_DRAM(4); + + +//! +//! +//! +//! + kTACTIC_DRAM(4), + + /** + * kTACTIC_SHARED_MEMORY defines the maximum sum of shared memory reserved by the driver and + * used for executing CUDA kernels. Adjust this value to restrict tactics that exceed the + * specified threshold en masse. The default value is device max capability. This value must + * be less than 1GiB. + * + * The driver reserved shared memory can be queried from cuDeviceGetAttribute(&reservedShmem, + * CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK). + * + * Updating this flag will override the shared memory limit set by \ref HardwareCompatibilityLevel, + * which defaults to 48KiB - reservedShmem. + * */ + kTACTIC_SHARED_MEMORY(5); public final int value; private MemoryPoolType(int v) { this.value = v; } @@ -3185,52 +3610,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * They are provided as opt-in features for at least one release. * */ @Namespace("nvinfer1") public enum PreviewFeature { - /** - * Optimize runtime dimensions with TensorRT's DL Compiler. - * Potentially reduces run time and decreases device memory usage and engine size. - * Models most likely to benefit from enabling kFASTER_DYNAMIC_SHAPES_0805 are transformer-based models, - * and models containing dynamic control flows. - * - * The default value for this flag is on. - * - * @deprecated Turning it off is deprecated in TensorRT 8.6. The flag kFASTER_DYNAMIC_SHAPES_0805 will be removed in 9.0. - * */ - - -//! -//! -//! -//! -//! -//! - kFASTER_DYNAMIC_SHAPES_0805(0), - - /** - * Disable usage of cuDNN/cuBLAS/cuBLASLt tactics in the TensorRT core library. 
- * - * When the flag is enabled, TensorRT core will not use these tactics even if they are specified in - * \ref IBuilderConfig::setTacticSources(), but cudnnContext and cublasContext handles will still be passed to - * plugins via IPluginV2Ext::attachToContext() if the appropriate tactic sources are set. - * - * This allows users to experiment with disabling external library tactics without having to modify their - * application's plugins to support nullptr handles. - * - * The default value for this flag is on. - * - * @see TacticSource - * */ - - -//! -//! - kDISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805(1), - /** * Allows optimization profiles to be shared across execution contexts. - * This flag defaults to false and will become the default behavior in TensorRT 9.0. - * At that point this flag will do nothing. + * + * @deprecated Deprecated in TensorRT 10.0. The default value for this flag is on and can not be changed. * */ - kPROFILE_SHARING_0806(2); + kPROFILE_SHARING_0806(0); public final int value; private PreviewFeature(int v) { this.value = v; } @@ -3245,18 +3630,33 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * */ // namespace impl -/** Describes requirements of compatibility with GPU architectures other than that of the GPU on which the engine was - * built. Levels except kNONE are only supported for engines built on NVIDIA Ampere and later GPUs. - * Note that compatibility with future hardware depends on CUDA forward compatibility support. */ +/** + * \enum HardwareCompatibilityLevel + * + * \brief Describes requirements of compatibility with GPU architectures other than that of the GPU on which the engine was + * built. + * + * Levels except kNONE are only supported for engines built on NVIDIA Ampere and later GPUs. + * + * \warning Note that compatibility with future hardware depends on CUDA forward compatibility support. 
+ * */ @Namespace("nvinfer1") public enum HardwareCompatibilityLevel { /** Do not require hardware compatibility with GPU architectures other than that of the GPU on which the engine was * built. */ + +//! +//! kNONE(0), - /** Require that the engine is compatible with Ampere and newer GPUs. This will limit the max shared memory usage to - * 48KiB, may reduce the number of available tactics for each layer, and may prevent some fusions from occurring. - * Thus this can decrease the performance, especially for tf32 models. - * This option will disable cuDNN, cuBLAS, and cuBLAS LT as tactic sources. */ + /** Require that the engine is compatible with Ampere and newer GPUs. This will limit the combined usage of driver + * reserved and backend kernel max shared memory to 48KiB, may reduce the number of available tactics for each + * layer, and may prevent some fusions from occurring. Thus this can decrease the performance, especially for tf32 + * models. + * This option will disable cuDNN, cuBLAS, and cuBLAS LT as tactic sources. + * + * The driver reserved shared memory can be queried from cuDeviceGetAttribute(&reservedShmem, + * CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK). + * */ kAMPERE_PLUS(1); public final int value; @@ -3271,42 +3671,73 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { * @see HardwareCompatibilityLevel * */ +// Targeting ../nvinfer/IProgressMonitor.java + + // class IProgressMonitor + // namespace v_1_0 + +/** + * \class IProgressMonitor + * + * \brief Application-implemented progress reporting interface for TensorRT. + * + * The IProgressMonitor is a user-defined object that TensorRT uses to report back when an internal algorithm has + * started or finished a phase to help provide feedback on the progress of the optimizer. + * + * The IProgressMonitor will trigger its start function when a phase is entered and will trigger its finish function + * when that phase is exited. Each phase consists of one or more steps. 
When each step is completed, the stepComplete + * function is triggered. This will allow an application using the builder to communicate progress relative to when the + * optimization step is expected to complete. + * + * The implementation of IProgressMonitor must be thread-safe so that it can be called from multiple internal threads. + * The lifetime of the IProgressMonitor must exceed the lifetime of all TensorRT objects that use it. + * + * \note To ensure compatibility of source code with future versions of TensorRT, use IProgressMonitor, not + * v_1_0::IProgressMonitor + * */ + + +//! +//! +//! +//! // Targeting ../nvinfer/IBuilderConfig.java -/** \brief Represents one or more NetworkDefinitionCreationFlag flags +/** + * \brief Represents one or more NetworkDefinitionCreationFlag flags * using binary OR operations. - * e.g., 1U << NetworkDefinitionCreationFlag::kEXPLICIT_BATCH + * e.g., 1U << NetworkDefinitionCreationFlag::kSTRONGLY_TYPED * * @see IBuilder::createNetworkV2 * */ + +//! //! //! //! -/** \enum NetworkDefinitionCreationFlag +/** + * \enum NetworkDefinitionCreationFlag * * \brief List of immutable network properties expressed at network creation time. * NetworkDefinitionCreationFlag is used with createNetworkV2() to specify immutable properties of the network. - * Creating a network without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag has been deprecated. * * @see IBuilder::createNetworkV2 * */ @Namespace("nvinfer1") public enum NetworkDefinitionCreationFlag { - /** Mark the network to be an explicit batch network. - * Dynamic shape support requires that the kEXPLICIT_BATCH flag is set. - * With dynamic shapes, any of the input dimensions can vary at run-time, - * and there are no implicit dimensions in the network specification. - * Varying dimensions are specified by using the wildcard dimension value -1. */ - -//! + /** Ignored because networks are always "explicit batch" in TensorRT 10.0. + * + * @deprecated Deprecated in TensorRT 10.0. 
*/ kEXPLICIT_BATCH(0), - /** Deprecated. This flag has no effect now, but is only kept for backward compatability. - * */ - kEXPLICIT_PRECISION(1); + /** Mark the network to be strongly typed. + * Every tensor in the network has a data type defined in the network following only type inference rules and the + * inputs/operator annotations. Setting layer precision and layer output types is not allowed, and the network + * output types will be inferred based on the input types and the type inference rules. */ + kSTRONGLY_TYPED(1); public final int value; private NetworkDefinitionCreationFlag(int v) { this.value = v; } @@ -3385,15 +3816,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Parsed from NvInferImpl.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_IMPL_H @@ -3401,6 +3837,13 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // #include "NvInferLegacyDims.h" // #include "NvInferRuntimeCommon.h" + +// @cond SuppressDoxyWarnings + + + + + // Targeting ../nvinfer/IPlugin.java @@ -3410,6 +3853,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/IPluginLayer.java + // namespace v_1_0 /** enum class nvinfer1::ActivationType */ ; @@ -3463,20 +3907,14 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { ; /** enum class nvinfer1::ResizeSelector */ ; -/** enum class nvinfer1::RNNDirection */ -; -/** enum class nvinfer1::RNNGateType */ -; -/** enum class nvinfer1::RNNInputMode */ -; -/** enum class nvinfer1::RNNOperation */ -; /** enum class nvinfer1::ScaleMode */ ; /** enum class nvinfer1::ScatterMode */ ; /** enum class nvinfer1::SampleMode */ ; +/** enum class nvinfer1::SerializationFlag */ +; /** enum class nvinfer1::TensorIOMode */ ; /** enum class nvinfer1::TensorLocation */ @@ -3493,6 +3931,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { ; /** enum class nvinfer1::HardwareCompatibilityLevel */ ; +/** enum class nvinfer1::ExecutionContextAllocationStrategy */ +; //! 
@@ -3537,9 +3977,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/VConvolutionLayer.java -// Targeting ../nvinfer/VFullyConnectedLayer.java - - // Targeting ../nvinfer/VActivationLayer.java @@ -3567,15 +4004,15 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/VGatherLayer.java -// Targeting ../nvinfer/VRNNv2Layer.java - - // Targeting ../nvinfer/VPluginLayer.java // Targeting ../nvinfer/VPluginV2Layer.java +// Targeting ../nvinfer/VPluginV3Layer.java + + // Targeting ../nvinfer/VUnaryLayer.java @@ -3711,6 +4148,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // Targeting ../nvinfer/VBuilderConfig.java +// Targeting ../nvinfer/VSerializationConfig.java + + // Targeting ../nvinfer/VBuilder.java @@ -3723,185 +4163,4 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer { // #endif // NV_INFER_RUNTIME_IMPL_H -// Parsed from NvUtils.h - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary - * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. - */ - -// #ifndef NV_UTILS_H -// #define NV_UTILS_H - - - -//! -//! -//! -// #include "NvInfer.h" - -/** - * \file NvUtils.h - * - * This file includes various utility functions - * */ - -/** - * @param input The input weights to reshape. - * @param shape The shape of the weights. - * @param shapeOrder The order of the dimensions to process for the output. - * @param data The location where the output data is placed. 
- * @param nbDims The number of dimensions to process. - * - * \brief Reformat the input weights of the given shape based on the new - * order of dimensions. - * - * Take the weights specified by \p input with the dimensions specified by - * \p shape and re-order the weights based on the new dimensions specified - * by \p shapeOrder. The size of each dimension and the input data is not - * modified. The output volume pointed to by \p data must be the same as - * he \p input volume. - * - * Example usage: - * float *out = new float[N*C*H*W]; - * Weights input{DataType::kFLOAT, {0 ... N*C*H*W-1}, N*C*H*W size}; - * int32_t order[4]{1, 0, 3, 2}; - * int32_t shape[4]{C, N, W, H}; - * reshapeWeights(input, shape, order, out, 4); - * Weights reshaped{input.type, out, input.count}; - * - * Input Matrix{3, 2, 3, 2}: - * { 0 1}, { 2 3}, { 4 5} <-- {0, 0, *, *} - * { 6 7}, { 8 9}, {10 11} <-- {0, 1, *, *} - * {12 13}, {14 15}, {16 17} <-- {1, 0, *, *} - * {18 19}, {20 21}, {22 23} <-- {1, 1, *, *} - * {24 25}, {26 27}, {28 29} <-- {2, 0, *, *} - * {30 31}, {32 33}, {34 35} <-- {2, 1, *, *} - * - * Output Matrix{2, 3, 2, 3}: - * { 0 2 4}, { 1 3 5} <-- {0, 0, *, *} - * {12 14 16}, {13 15 17} <-- {0, 1, *, *} - * {24 26 28}, {25 27 29} <-- {0, 2, *, *} - * { 6 8 10}, { 7 9 11} <-- {1, 0, *, *} - * {18 20 22}, {19 21 23} <-- {1, 1, *, *} - * {30 32 34}, {31 33 35} <-- {1, 2, *, *} - * - * @return True on success, false on failure. - * - * @deprecated Deprecated in TensorRT 8.0. - * - * \warning This file will be removed in TensorRT 10.0. - * */ - - -//! -//! -//! -//! -//! -//! -//! -//! -//! -//! -//! 
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights( - @Const @ByRef Weights input, @Const IntPointer shape, @Const IntPointer shapeOrder, Pointer data, int nbDims); -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights( - @Const @ByRef Weights input, @Const IntBuffer shape, @Const IntBuffer shapeOrder, Pointer data, int nbDims); -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights( - @Const @ByRef Weights input, @Const int[] shape, @Const int[] shapeOrder, Pointer data, int nbDims); - -/** - * @param input The input data to re-order. - * @param order The new order of the data sub-buffers. - * @param num The number of data sub-buffers to re-order. - * @param size The size of each data sub-buffer in bytes. - * - * \brief Takes an input stream and re-orders \p num chunks of the data - * given the \p size and \p order. - * - * In some frameworks, the ordering of the sub-buffers within a dimension - * is different than the way that TensorRT expects them. - * TensorRT expects the gate/bias sub-buffers for LSTM's to be in fico order. - * TensorFlow however formats the sub-buffers in icfo order. - * This helper function solves this in a generic fashion. 
- * - * Example usage output of reshapeWeights above: - * int32_t indir[1]{1, 0} - * int32_t stride = W*H; - * for (int32_t x = 0, y = N*C; x < y; ++x) - * reorderSubBuffers(out + x * stride, indir, H, W); - * - * Input Matrix{2, 3, 2, 3}: - * { 0 2 4}, { 1 3 5} <-- {0, 0, *, *} - * {12 14 16}, {13 15 17} <-- {0, 1, *, *} - * {24 26 28}, {25 27 29} <-- {0, 2, *, *} - * { 6 8 10}, { 7 9 11} <-- {1, 0, *, *} - * {18 20 22}, {19 21 23} <-- {1, 1, *, *} - * {30 32 34}, {31 33 35} <-- {1, 2, *, *} - * - * Output Matrix{2, 3, 2, 3}: - * { 1 3 5}, { 0 2 4} <-- {0, 0, *, *} - * {13 15 17}, {12 14 16} <-- {0, 1, *, *} - * {25 27 29}, {24 26 28} <-- {0, 2, *, *} - * { 7 9 11}, { 6 8 10} <-- {1, 0, *, *} - * {19 21 23}, {18 20 22} <-- {1, 1, *, *} - * {31 33 35}, {30 32 34} <-- {1, 2, *, *} - * - * @return True on success, false on failure. - * - * @see reshapeWeights() - * - * @deprecated Deprecated in TensorRT 8.0. - * - * \warning This file will be removed in TensorRT 10.0. - * */ - - -//! -//! -//! -//! -//! -//! -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers( - Pointer input, @Const IntPointer order, int num, int size); -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers( - Pointer input, @Const IntBuffer order, int num, int size); -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers( - Pointer input, @Const int[] order, int num, int size); - -/** - * @param input The input data to transpose. - * @param type The type of the data to transpose. - * @param num The number of data sub-buffers to transpose. - * @param height The size of the height dimension to transpose. - * @param width The size of the width dimension to transpose. - * - * \brief Transpose \p num sub-buffers of \p height * \p width. - * - * @return True on success, false on failure. 
- * - * @deprecated Deprecated in TensorRT 8.0. - * - * \warning This file will be removed in TensorRT 10.0. - * */ -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean transposeSubBuffers( - Pointer input, DataType type, int num, int height, int width); -@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean transposeSubBuffers( - Pointer input, @Cast("nvinfer1::DataType") int type, int num, int height, int width); - - // namespace utils - // namespace nvinfer1 -// #endif // NV_UTILS_H - - } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java index aa9b88e3758..c84012c5256 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.global; @@ -26,15 +26,20 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin // Parsed from NvInferPlugin.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_PLUGIN_H @@ -51,214 +56,6 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin * * This is the API for the Nvidia provided TensorRT plugins. * */ - /** - * \brief Create a plugin layer that fuses the RPN and ROI pooling using user-defined parameters. - * Registered plugin type "RPROI_TRT". Registered plugin version "1". - * @param featureStride Feature stride. - * @param preNmsTop Number of proposals to keep before applying NMS. - * @param nmsMaxOut Number of remaining proposals after applying NMS. - * @param iouThreshold IoU threshold. - * @param minBoxSize Minimum allowed bounding box size before scaling. - * @param spatialScale Spatial scale between the input image and the last feature map. - * @param pooling Spatial dimensions of pooled ROIs. - * @param anchorRatios Aspect ratios for generating anchor windows. - * @param anchorScales Scales for generating anchor windows. - * - * @return Returns a FasterRCNN fused RPN+ROI pooling plugin. Returns nullptr on invalid inputs. - * - * @deprecated Deprecated in TensorRT 8.5. Use RPROIPluginCreator::createPlugin() to create an instance of - * "RPROI_TRT" version 1 plugin. - * */ - - - //! - //! - //! 
- public static native @Deprecated IPluginV2 createRPNROIPlugin(int featureStride, int preNmsTop, int nmsMaxOut, - float iouThreshold, float minBoxSize, float spatialScale, @ByVal DimsHW pooling, - @ByVal Weights anchorRatios, @ByVal Weights anchorScales); - - /** - * \brief The Normalize plugin layer normalizes the input to have L2 norm of 1 with scale learnable. - * Registered plugin type "Normalize_TRT". Registered plugin version "1". - * @param scales Scale weights that are applied to the output tensor. - * @param acrossSpatial Whether to compute the norm over adjacent channels (acrossSpatial is true) or nearby - * spatial locations (within channel in which case acrossSpatial is false). - * @param channelShared Whether the scale weight(s) is shared across channels. - * @param eps Epsilon for not dividing by zero. - * - * @deprecated Deprecated in TensorRT 8.5. Use NormalizePluginCreator::createPlugin() to create an instance of - * "Normalize_TRT" version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createNormalizePlugin( - @Const Weights scales, @Cast("bool") boolean acrossSpatial, @Cast("bool") boolean channelShared, float eps); - - /** - * \brief The PriorBox plugin layer generates the prior boxes of designated sizes and aspect ratios across all - * dimensions (H x W). PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer. - * Registered plugin type "PriorBox_TRT". Registered plugin version "1". - * - * @deprecated Deprecated in TensorRT 8.5. Use PriorBoxPluginCreator::createPlugin() to create an instance of - * "PriorBox_TRT" version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createPriorBoxPlugin(@ByVal PriorBoxParameters param); - - /** - * \brief The Grid Anchor Generator plugin layer generates the prior boxes of - * designated sizes and aspect ratios across all dimensions (H x W) for all feature maps. 
- * GridAnchorParameters defines a set of parameters for creating the GridAnchorGenerator plugin layer. - * Registered plugin type "GridAnchor_TRT". Registered plugin version "1". - * - * @deprecated Deprecated in TensorRT 8.5. Use GridAnchorPluginCreator::createPlugin() to create an instance of - * "GridAnchor_TRT" version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createAnchorGeneratorPlugin( - GridAnchorParameters param, int numLayers); - - /** - * \brief The DetectionOutput plugin layer generates the detection output based on location and confidence - * predictions by doing non maximum suppression. DetectionOutputParameters defines a set of parameters for creating - * the DetectionOutput plugin layer. Registered plugin type "NMS_TRT". Registered plugin version "1". - * - * @deprecated Deprecated in TensorRT 8.5. Use NMSPluginCreator::createPlugin() to create an instance of "NMS_TRT" - * version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createNMSPlugin(@ByVal DetectionOutputParameters param); - - /** - * \brief The Reorg plugin reshapes input of shape CxHxW into a (C*stride*stride)x(H/stride)x(W/stride) shape, used - * in YOLOv2. It does that by taking 1 x stride x stride slices from tensor and flattening them into - * (stride x stride) x 1 x 1 shape. Registered plugin type "Reorg_TRT". Registered plugin version "1". - * @param stride Strides in H and W, it should divide both H and W. Also stride * stride should be less than or - * equal to C. - * - * @deprecated Deprecated in TensorRT 8.5. Use ReorgPluginCreator::createPlugin() to create an instance of - * "Reorg_TRT" version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createReorgPlugin(int stride); - - /** - * \brief The Region plugin layer performs region proposal calculation: generate 5 bounding boxes per cell (for - * yolo9000, generate 3 bounding boxes per cell). 
For each box, calculating its probablities of objects detections - * from 80 pre-defined classifications (yolo9000 has 9416 pre-defined classifications, and these 9416 items are - * organized as work-tree structure). RegionParameters defines a set of parameters for creating the Region plugin - * layer. Registered plugin type "Region_TRT". Registered plugin version "1". - * - * @deprecated Deprecated in TensorRT 8.5. Use RegionPluginCreator::createPlugin() to create an instance of - * "Region_TRT" version 1 plugin. - * */ - - - //! - //! - //! - //! - //! - public static native @Deprecated IPluginV2 createRegionPlugin(@ByVal RegionParameters params); - - /** - * \brief The BatchedNMS Plugin performs non_max_suppression on the input boxes, per batch, across all classes. - * It greedily selects a subset of bounding boxes in descending order of - * score. Prunes away boxes that have a high intersection-over-union (IOU) - * overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], - * where (y1, x1) and (y2, x2) are the coordinates of any - * diagonal pair of box corners and the coordinates can be provided as normalized - * (i.e., lying in the interval [0, 1]) or absolute. - * The plugin expects two inputs. - * Input0 is expected to be 4-D float boxes tensor of shape [batch_size, num_boxes, - * q, 4], where q can be either 1 (if shareLocation is true) or num_classes. - * Input1 is expected to be a 3-D float scores tensor of shape [batch_size, num_boxes, num_classes] - * representing a single score corresponding to each box. - * The plugin returns four outputs. - * num_detections : A [batch_size] int32 tensor indicating the number of valid - * detections per batch item. Can be less than keepTopK. Only the top num_detections[i] entries in - * nmsed_boxes[i], nmsed_scores[i] and nmsed_classes[i] are valid. - * nmsed_boxes : A [batch_size, max_detections, 4] float32 tensor containing - * the co-ordinates of non-max suppressed boxes. 
- * nmsed_scores : A [batch_size, max_detections] float32 tensor containing the - * scores for the boxes. - * nmsed_classes : A [batch_size, max_detections] float32 tensor containing the - * classes for the boxes. - * - * Registered plugin type "BatchedNMS_TRT". Registered plugin version "1". - * - * The batched NMS plugin can require a lot of workspace due to intermediate buffer usage. To get the - * estimated workspace size for the plugin for a batch size, use the API {@code plugin->getWorkspaceSize(batchSize)}. - * - * @deprecated Deprecated in TensorRT 8.5. Use BatchedNMSPluginCreator::createPlugin() to create an instance of - * "BatchedNMS_TRT" version 1 plugin. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createBatchedNMSPlugin(@ByVal NMSParameters param); - - /** - * \brief The Split Plugin performs a split operation on the input tensor. It - * splits the input tensor into several output tensors, each of a length corresponding to output_lengths. - * The split occurs along the axis specified by axis. - * @param axis The axis to split on. - * @param output_lengths The lengths of the output tensors. - * @param noutput The number of output tensors. - * - * @deprecated Deprecated in TensorRT 8.5 along with the "Split" plugin. Use INetworkDefinition::addSlice() to add - * slice layer(s) as necessary to accomplish the required effect. - * */ - - - //! - //! - //! - public static native @Deprecated IPluginV2 createSplitPlugin(int axis, IntPointer output_lengths, int noutput); - public static native @Deprecated IPluginV2 createSplitPlugin(int axis, IntBuffer output_lengths, int noutput); - public static native @Deprecated IPluginV2 createSplitPlugin(int axis, int[] output_lengths, int noutput); - - /** - * \brief The Instance Normalization Plugin computes the instance normalization of an input tensor. - * The instance normalization is calculated as found in the paper https://arxiv.org/abs/1607.08022. 
- * The calculation is y = scale * (x - mean) / sqrt(variance + epsilon) + bias where mean and variance - * are computed per instance per channel. - * @param epsilon The epsilon value to use to avoid division by zero. - * @param scale_weights The input 1-dimensional scale weights of size C to scale. - * @param bias_weights The input 1-dimensional bias weights of size C to offset. - * - * @deprecated Deprecated in TensorRT 8.5. Use InstanceNormalizationPluginCreator::createPlugin() to create an - * instance of "InstanceNormalization_TRT" version 1 plugin. - * */ - - - //! - //! - public static native @Deprecated IPluginV2 createInstanceNormalizationPlugin( - float epsilon, @ByVal Weights scale_weights, @ByVal Weights bias_weights); - /** * \brief Initialize and register all the existing TensorRT plugins to the Plugin Registry with an optional * namespace. The plugin library author should ensure that this function name is unique to the library. This @@ -275,15 +72,20 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin // Parsed from NvInferPluginUtils.h /* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary + * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. 
Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // #ifndef NV_INFER_PLUGIN_UTILS_H @@ -302,9 +104,6 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin * This is the API for the Nvidia provided TensorRT plugin utilities. * It lists all the parameters utilized by the TensorRT plugins. * */ -// Targeting ../nvinfer_plugin/Quadruple.java - - // Targeting ../nvinfer_plugin/PriorBoxParameters.java @@ -317,7 +116,10 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin /** * \enum CodeTypeSSD + * * \brief The type of encoding used for decoding the bounding boxes and loc_data. + * + * @deprecated Deprecated in TensorRT 10.0. DetectionOutput plugin is deprecated. * */ @Namespace("nvinfer1::plugin") public enum CodeTypeSSD { /** Use box corners. */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java index cf5588c39a7..f1da601b313 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.global; @@ -60,6 +60,7 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { // #include "NvInfer.h" // #include +// #include //! 
@@ -120,7 +121,7 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { /** * \enum ErrorCode * - * \brief The type of error that the parser may return + * \brief The type of error that the parser or refitter may return * */ @Namespace("nvonnxparser") public enum ErrorCode { kSUCCESS(0), @@ -131,7 +132,13 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { kINVALID_GRAPH(5), kINVALID_NODE(6), kUNSUPPORTED_GRAPH(7), - kUNSUPPORTED_NODE(8); + kUNSUPPORTED_NODE(8), + kUNSUPPORTED_NODE_ATTR(9), + kUNSUPPORTED_NODE_INPUT(10), + kUNSUPPORTED_NODE_DATATYPE(11), + kUNSUPPORTED_NODE_DYNAMIC(12), + kUNSUPPORTED_NODE_SHAPE(13), + kREFIT_FAILED(14); public final int value; private ErrorCode(int v) { this.value = v; } @@ -156,9 +163,9 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { @Namespace("nvonnxparser") public enum OnnxParserFlag { /** Parse the ONNX model into the INetworkDefinition with the intention of using TensorRT's native layer - * implementation over the plugin implementation for InstanceNormalization nodes. This flag is planned to be - * deprecated in TensorRT 8.7 and removed in TensorRT 9.0. This flag is required when building version-compatible - * or hardware-compatible engines. There may be performance degradations when this flag is enabled. */ + * implementation over the plugin implementation for InstanceNormalization nodes. + * This flag is required when building version-compatible or hardware-compatible engines. + * This flag is set to be ON by default. 
*/ kNATIVE_INSTANCENORM(0); public final int value; @@ -180,10 +187,14 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { // Targeting ../nvonnxparser/IParser.java +// Targeting ../nvonnxparser/IParserRefitter.java + + // namespace nvonnxparser public static native Pointer createNvOnnxParser_INTERNAL(Pointer network, Pointer logger, int version); +public static native Pointer createNvOnnxParserRefitter_INTERNAL(Pointer refitter, Pointer logger, int version); public static native int getNvOnnxParserVersion(); /** @@ -201,8 +212,25 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser { * * @see IParser * */ + + +//! +//! +//! +//! @Namespace("nvonnxparser") public static native IParser createParser(@ByRef INetworkDefinition network, @ByRef ILogger logger); +/** + * \brief Create a new ONNX refitter object + * + * @param refitter The Refitter object used to refit the model + * @param logger The logger to use + * @return a new ParserRefitter object or NULL if an error occurred + * + * @see IParserRefitter + * */ +@Namespace("nvonnxparser") public static native IParserRefitter createParserRefitter(@ByRef IRefitter refitter, @ByRef ILogger logger); + // namespace // namespace nvonnxparser diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvparsers.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvparsers.java deleted file mode 100644 index 21959971994..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvparsers.java +++ /dev/null @@ -1,246 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.global; - -import org.bytedeco.tensorrt.nvparsers.*; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import 
static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -public class nvparsers extends org.bytedeco.tensorrt.presets.nvparsers { - static { Loader.load(); } - -// Parsed from NvCaffeParser.h - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary - * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. - */ - -// #ifndef NV_CAFFE_PARSER_H -// #define NV_CAFFE_PARSER_H - - - -//! -//! -//! - -//! -//! -//! -// #include "NvInfer.h" - -/** - * \file NvCaffeParser.h - * - * This is the API for the Caffe Parser - * -

- * - * \namespace nvcaffeparser1 - * - * \brief The TensorRT Caffe parser API namespace. - * */ -// Targeting ../nvparsers/IBlobNameToTensor.java - - -// Targeting ../nvparsers/IBinaryProtoBlob.java - - -// Targeting ../nvparsers/IPluginFactoryV2.java - - -// Targeting ../nvparsers/ICaffeParser.java - - - -/** - * \brief Creates a ICaffeParser object. - * - * @return A pointer to the ICaffeParser object is returned. - * - * @see nvcaffeparser1::ICaffeParser - * - * @deprecated ICaffeParser will be removed in TensorRT 9.0. Plan to migrate your workflow to - * use nvonnxparser::IParser for deployment. - * */ - - -//! -//! -//! -@Namespace("nvcaffeparser1") public static native @NoException(true) ICaffeParser createCaffeParser(); - -/** - * \brief Shuts down protocol buffers library. - * - * \note No part of the protocol buffers library can be used after this function is called. - * */ -@Namespace("nvcaffeparser1") public static native @NoException(true) void shutdownProtobufLibrary(); - // namespace nvcaffeparser1 - -/** - * Internal C entry point for creating ICaffeParser. - * \private - * */ -public static native @NoException(true) Pointer createNvCaffeParser_INTERNAL(); -// #endif - - -// Parsed from NvUffParser.h - -/* - * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: LicenseRef-NvidiaProprietary - * - * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual - * property and proprietary rights in and to this material, related - * documentation and any modifications thereto. Any use, reproduction, - * disclosure or distribution of this material and related documentation - * without an express license agreement from NVIDIA CORPORATION or - * its affiliates is strictly prohibited. - */ - -// #ifndef NV_UFF_PARSER_H -// #define NV_UFF_PARSER_H - - - -//! -//! -//! 
-// #include "NvInfer.h" - -/** - * \file NvUffParser.h - * - * This is the API for the UFF Parser - * */ - -// Current supported Universal Framework Format (UFF) version for the parser. -public static final int UFF_REQUIRED_VERSION_MAJOR = 0; -public static final int UFF_REQUIRED_VERSION_MINOR = 6; - - -//! -//! -//! -public static final int UFF_REQUIRED_VERSION_PATCH = 9; - -/** - * \namespace nvuffparser - * - * \brief The TensorRT UFF parser API namespace. - * */ - -/** - * \enum UffInputOrder - * \brief The different possible supported input order. - * */ -@Namespace("nvuffparser") public enum UffInputOrder { - /** NCHW order. */ - kNCHW(0), - /** NHWC order. */ - kNHWC(1), - /** NC order. */ - kNC(2); - - public final int value; - private UffInputOrder(int v) { this.value = v; } - private UffInputOrder(UffInputOrder e) { this.value = e.value; } - public UffInputOrder intern() { for (UffInputOrder e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} - -/** - * \enum FieldType - * \brief The possible field types for custom layer. - * */ - -@Namespace("nvuffparser") public enum FieldType { - /** FP32 field type. */ - kFLOAT(0), - /** INT32 field type. */ - kINT32(1), - /** char field type. String for length>1. */ - kCHAR(2), - /** nvinfer1::Dims field type. */ - kDIMS(4), - /** nvinfer1::DataType field type. */ - kDATATYPE(5), - kUNKNOWN(6); - - public final int value; - private FieldType(int v) { this.value = v; } - private FieldType(FieldType e) { this.value = e.value; } - public FieldType intern() { for (FieldType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../nvparsers/FieldMap.java - - -// Targeting ../nvparsers/FieldCollection.java - - -// Targeting ../nvparsers/IUffParser.java - - - -/** - * \brief Creates a IUffParser object. 
- * - * @return A pointer to the IUffParser object is returned. - * - * @see nvuffparser::IUffParser - * - * @deprecated IUffParser will be removed in TensorRT 9.0. Plan to migrate your workflow to - * use nvonnxparser::IParser for deployment. - * */ - - -//! -//! -//! -@Namespace("nvuffparser") public static native @NoException(true) IUffParser createUffParser(); - -/** - * \brief Shuts down protocol buffers library. - * - * \note No part of the protocol buffers library can be used after this function is called. - * */ - - // namespace nvuffparser - -/** - * Internal C entry point for creating IUffParser - * \private - * */ -public static native @NoException(true) Pointer createNvUffParser_INTERNAL(); - -// #endif /* !NV_UFF_PARSER_H */ - - -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java index 11a8f686b80..e448ede3a85 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,10 +20,11 @@ /** * \class Dims2 + * * \brief Descriptor for two-dimensional data. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class Dims2 extends Dims32 { +public class Dims2 extends Dims64 { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public Dims2(Pointer p) { super(p); } @@ -54,6 +55,6 @@ public class Dims2 extends Dims32 { * @param d0 The first element. * @param d1 The second element. 
* */ - public Dims2(int d0, int d1) { super((Pointer)null); allocate(d0, d1); } - private native void allocate(int d0, int d1); + public Dims2(@Cast("int64_t") long d0, @Cast("int64_t") long d1) { super((Pointer)null); allocate(d0, d1); } + private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java index 6fcd03ea064..fb6c4e32ce2 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -21,6 +21,7 @@ /** * \class Dims3 + * * \brief Descriptor for three-dimensional data. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) @@ -56,6 +57,6 @@ public class Dims3 extends Dims2 { * @param d1 The second element. * @param d2 The third element. 
* */ - public Dims3(int d0, int d1, int d2) { super((Pointer)null); allocate(d0, d1, d2); } - private native void allocate(int d0, int d1, int d2); + public Dims3(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2) { super((Pointer)null); allocate(d0, d1, d2); } + private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java index c53e4ef8f4c..b82c5ad9c31 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -21,6 +21,7 @@ /** * \class Dims4 + * * \brief Descriptor for four-dimensional data. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) @@ -57,6 +58,6 @@ public class Dims4 extends Dims3 { * @param d2 The third element. * @param d3 The fourth element. 
* */ - public Dims4(int d0, int d1, int d2, int d3) { super((Pointer)null); allocate(d0, d1, d2, d3); } - private native void allocate(int d0, int d1, int d2, int d3); + public Dims4(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2, @Cast("int64_t") long d3) { super((Pointer)null); allocate(d0, d1, d2, d3); } + private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2, @Cast("int64_t") long d3); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java similarity index 61% rename from tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java rename to tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java index 1284a403697..0102c504119 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -23,36 +23,38 @@ * \class Dims * \brief Structure to define the dimensions of a tensor. * - * TensorRT can also return an invalid dims structure. This structure is represented by nbDims == -1 - * and d[i] == 0 for all d. + * TensorRT can also return an "invalid dims" structure. This structure is + * represented by nbDims == -1 and d[i] == 0 for all i. * - * TensorRT can also return an "unknown rank" dims structure. This structure is represented by nbDims == -1 - * and d[i] == -1 for all d. + * TensorRT can also return an "unknown rank" dims structure. This structure is + * represented by nbDims == -1 and d[i] == -1 for all i. 
* */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class Dims32 extends Pointer { +public class Dims64 extends Pointer { static { Loader.load(); } /** Default native constructor. */ - public Dims32() { super((Pointer)null); allocate(); } + public Dims64() { super((Pointer)null); allocate(); } /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Dims32(long size) { super((Pointer)null); allocateArray(size); } + public Dims64(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public Dims32(Pointer p) { super(p); } + public Dims64(Pointer p) { super(p); } private native void allocate(); private native void allocateArray(long size); - @Override public Dims32 position(long position) { - return (Dims32)super.position(position); + @Override public Dims64 position(long position) { + return (Dims64)super.position(position); } - @Override public Dims32 getPointer(long i) { - return new Dims32((Pointer)this).offsetAddress(i); + @Override public Dims64 getPointer(long i) { + return new Dims64((Pointer)this).offsetAddress(i); } /** The maximum rank (number of dimensions) supported for a tensor. */ @MemberGetter public static native int MAX_DIMS(); public static final int MAX_DIMS = MAX_DIMS(); + /** The rank (number of dimensions). */ - public native int nbDims(); public native Dims32 nbDims(int setter); + public native int nbDims(); public native Dims64 nbDims(int setter); + /** The extent of each dimension. 
*/ - public native int d(int i); public native Dims32 d(int i, int setter); - @MemberGetter public native IntPointer d(); + public native @Cast("int64_t") long d(int i); public native Dims64 d(int i, long setter); + @MemberGetter public native @Cast("int64_t*") LongPointer d(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java index f6bca198b8a..f25cf54d6d5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -22,7 +22,7 @@ /** * \class DimsExprs * - * Analog of class Dims with expressions instead of constants for the dimensions. + * \brief Analog of class Dims with expressions instead of constants for the dimensions. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class DimsExprs extends Pointer { diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java index bda46b1ed0c..a286876eba8 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -21,6 +21,7 @@ /** * \class DimsHW + * * \brief Descriptor for two-dimensional spatial data. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) @@ -60,8 +61,8 @@ public class DimsHW extends Dims2 { //! //! //! 
- public DimsHW(int height, int width) { super((Pointer)null); allocate(height, width); } - private native void allocate(int height, int width); + public DimsHW(@Cast("int64_t") long height, @Cast("int64_t") long width) { super((Pointer)null); allocate(height, width); } + private native void allocate(@Cast("int64_t") long height, @Cast("int64_t") long width); /** * \brief Get the height. @@ -73,7 +74,7 @@ public class DimsHW extends Dims2 { //! //! //! - public native @ByRef IntPointer h(); + public native @Cast("int64_t*") @ByRef LongPointer h(); /** * \brief Get the height. @@ -91,7 +92,7 @@ public class DimsHW extends Dims2 { //! //! //! - public native @ByRef IntPointer w(); + public native @Cast("int64_t*") @ByRef LongPointer w(); /** * \brief Get the width. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java index c52ec48e986..e91f83256d1 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,9 +20,9 @@ /** - * \class DynamicPluginTensorDesc + * \struct DynamicPluginTensorDesc * - * Summarizes tensors that a plugin might see for an input or output. + * \brief Summarizes tensors that a plugin might see for an input or output. 
* */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class DynamicPluginTensorDesc extends Pointer { @@ -46,8 +46,11 @@ public class DynamicPluginTensorDesc extends Pointer { public native @ByRef PluginTensorDesc desc(); public native DynamicPluginTensorDesc desc(PluginTensorDesc setter); /** Lower bounds on tensor’s dimensions */ - public native @ByRef @Cast("nvinfer1::Dims*") Dims32 min(); public native DynamicPluginTensorDesc min(Dims32 setter); + public native @ByRef @Cast("nvinfer1::Dims*") Dims64 min(); public native DynamicPluginTensorDesc min(Dims64 setter); /** Upper bounds on tensor’s dimensions */ - public native @ByRef @Cast("nvinfer1::Dims*") Dims32 max(); public native DynamicPluginTensorDesc max(Dims32 setter); + public native @ByRef @Cast("nvinfer1::Dims*") Dims64 max(); public native DynamicPluginTensorDesc max(Dims64 setter); + + /** Optimum value of tensor’s dimensions specified for auto-tuning */ + public native @ByRef @Cast("nvinfer1::Dims*") Dims64 opt(); public native DynamicPluginTensorDesc opt(Dims64 setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java index 6cdf5a5b30c..d7ea6966af5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java index 00081e768d1..5c4a2632c8b 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java @@ -1,4 +1,4 
@@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -21,9 +21,11 @@ /** * \class IAlgorithm + * * \brief Describes a variation of execution of a layer. * An algorithm is represented by IAlgorithmVariant and the IAlgorithmIOInfo for each of its inputs and outputs. - * An algorithm can be selected or reproduced using AlgorithmSelector::selectAlgorithms()." + * An algorithm can be selected or reproduced using AlgorithmSelector::selectAlgorithms(). + * * @see IAlgorithmIOInfo, IAlgorithmVariant, IAlgorithmSelector::selectAlgorithms() * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. @@ -34,22 +36,6 @@ public class IAlgorithm extends INoCopy { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IAlgorithm(Pointer p) { super(p); } - /** - * \brief Returns the format of an Algorithm input or output. Algorithm inputs are incrementally numbered first, - * followed by algorithm outputs. - * @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs - * and the outputs. - * - * @return a reference to IAlgorithmIOInfo specified by index or the first algorithm if index is out of range. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by IAlgorithm::getAlgorithmIOInfoByIndex(). - * */ - - - //! - //! - public native @Const @Deprecated @ByRef @NoException(true) IAlgorithmIOInfo getAlgorithmIOInfo(int index); - /** * \brief Returns the algorithm variant. * */ @@ -76,11 +62,13 @@ public class IAlgorithm extends INoCopy { //! //! //! + //! public native @Cast("std::size_t") @NoException(true) long getWorkspaceSize(); /** * \brief Returns the format of an Algorithm input or output. Algorithm inputs are incrementally numbered first, * followed by algorithm outputs. 
+ * * @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs * and the outputs. * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java index 703e557142e..9ffe58ea67c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -35,16 +35,19 @@ public class IAlgorithmContext extends INoCopy { /** * \brief Return name of the algorithm node. + * * This is a unique identifier for the IAlgorithmContext. * */ + //! //! //! public native @NoException(true) String getName(); /** * \brief Get the minimum / optimum / maximum dimensions for input or output tensor. + * * @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs * and the outputs. * @param select Which of the minimum, optimum, or maximum dimensions to be queried. @@ -53,8 +56,8 @@ public class IAlgorithmContext extends INoCopy { //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, OptProfileSelector select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select); /** * \brief Return number of inputs of the algorithm. 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java index 41156dfb357..966336e96cf 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -35,21 +35,6 @@ public class IAlgorithmIOInfo extends INoCopy { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IAlgorithmIOInfo(Pointer p) { super(p); } - /** - * \brief Return TensorFormat of the input/output of algorithm. - * - * @deprecated Deprecated in TensorRT 8.6. The strides, data type, and vectorization - * information is sufficient to uniquely identify tensor formats. - * - * @return the tensor format - * */ - - - //! - //! - //! - public native @Deprecated @NoException(true) TensorFormat getTensorFormat(); - /** * \brief Return DataType of the input/output of algorithm. * @@ -73,7 +58,7 @@ public class IAlgorithmIOInfo extends INoCopy { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrides(); /** * \brief Return the index of the vectorized dimension or -1 for non-vectorized formats. 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java index c163f42f13a..1b874408b6e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,21 +18,21 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; // IAlgorithm - -/** - * \class IAlgorithmSelector - * - * \brief Interface implemented by application for selecting and reporting algorithms of a layer provided by the - * builder. - * \note A layer in context of algorithm selection may be different from ILayer in INetworkDefiniton. - * For example, an algorithm might be implementing a conglomeration of multiple ILayers in INetworkDefinition. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IAlgorithmSelector extends Pointer { +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IAlgorithmSelector extends IVersionedInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IAlgorithmSelector(Pointer p) { super(p); } + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); /** * \brief Select Algorithms for a layer from the given list of algorithm choices. * @@ -44,10 +44,11 @@ public class IAlgorithmSelector extends Pointer { * * \note TensorRT uses its default algorithm selection to choose from the list provided. 
* If return value is 0, TensorRT's default algorithm selection is used unless - * BuilderFlag::kREJECT_EMPTY_ALGORITHMS (or the deprecated BuilderFlag::kSTRICT_TYPES) is set. + * BuilderFlag::kREJECT_EMPTY_ALGORITHMS is set. * The list of choices is valid only for this specific algorithm context. * */ + //! //! //! @@ -60,6 +61,7 @@ public class IAlgorithmSelector extends Pointer { int nbChoices, IntBuffer selection); public native @NoException(true) int selectAlgorithms(@Const @ByRef IAlgorithmContext context, @Const @ByPtrPtr IAlgorithm choices, int nbChoices, int[] selection); + /** * \brief Called by TensorRT to report choices it made. * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java index c1a7035ce46..cac77e09535 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java index afb245db46f..7692713a663 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IAssertionLayer +/** + * \class IAssertionLayer * * \brief An assertion layer in a network * diff --git 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java index e00586e05bb..21d802657a5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -45,41 +45,6 @@ public class IBuilder extends INoCopy { } - /** - * \brief Set the maximum batch size. This has no effect for networks created with explicit batch dimension mode. - * - * @param batchSize The maximum batch size which can be used at execution time, and also the batch size for which - * the engine will be optimized. - * - * @deprecated Deprecated in TensorRT 8.4. - * - * @see getMaxBatchSize() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setMaxBatchSize(int batchSize); - - /** - * \brief Get the maximum batch size. - * - * @return The maximum batch size. - * - * @deprecated Deprecated in TensorRT 8.4. - * - * @see setMaxBatchSize() - * @see getMaxDLABatchSize() - * */ - - - //! - //! - public native @Deprecated @NoException(true) int getMaxBatchSize(); - /** * \brief Determine whether the platform has fast native fp16. * */ @@ -97,23 +62,8 @@ public class IBuilder extends INoCopy { //! //! //! - //! public native @Cast("bool") @NoException(true) boolean platformHasFastInt8(); - /** - * \brief Destroy this object. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - - //! - //! - //! - public native @Deprecated @NoException(true) void destroy(); - /** * \brief Get the maximum batch size DLA can support. 
* For any tensor the total volume of index dimensions combined(dimensions other than CHW) with the requested @@ -136,10 +86,12 @@ public class IBuilder extends INoCopy { //! //! //! + //! public native @NoException(true) int getNbDLACores(); /** * \brief Set the GPU allocator. + * * @param allocator Set the GPU allocator to be used by the builder. All GPU memory acquired will use this * allocator. If NULL is passed, the default allocator will be used. * @@ -168,45 +120,38 @@ public class IBuilder extends INoCopy { //! //! //! + //! + //! + //! public native @NoException(true) IBuilderConfig createBuilderConfig(); /** - * \brief Builds an engine for the given INetworkDefinition and given IBuilderConfig. + * \brief Create a network definition object * - * It enables the builder to build multiple engines based on the same network definition, but with different - * builder configurations. + * Creates a network definition object with immutable properties specified using the flags parameter. * - * \note This function will synchronize the cuda stream returned by \p config.getProfileStream() before returning. + * createNetworkV2 supports creating network with properties from NetworkDefinitionCreationFlags. * - * @deprecated Deprecated in TensorRT 8.0. Superseded by IBuilder::buildSerializedNetwork(). - * */ - - //! - //! - //! - //! - public native @Deprecated @NoException(true) ICudaEngine buildEngineWithConfig( - @ByRef INetworkDefinition network, @ByRef IBuilderConfig config); - - /** \brief Create a network definition object + * CreateNetworkV2 supports dynamic shapes and explicit batch dimensions by default. * - * Creates a network definition object with immutable properties specified using the flags parameter. - * CreateNetworkV2 supports dynamic shapes and explicit batch dimensions when used with - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. - * Creating a network without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag has been deprecated. 
+ * createNetworkV2 with NetworkDefinitionCreationFlag::kSTRONGLY_TYPED flag supports creating a strongly typed plan + * where tensor data types are inferred from network input types and operator type specification. * * @param flags Bitset of NetworkDefinitionCreationFlags specifying network properties combined with bitwise OR. - * e.g., 1U << NetworkDefinitionCreationFlag::kEXPLICIT_BATCH + * e.g., 1U << NetworkDefinitionCreationFlag::kSTRONGLY_TYPED * * @see INetworkDefinition, NetworkDefinitionCreationFlags * */ + + //! //! //! //! public native @NoException(true) INetworkDefinition createNetworkV2(@Cast("nvinfer1::NetworkDefinitionCreationFlags") int flags); - /** \brief Create a new optimization profile. + /** + * \brief Create a new optimization profile. * * If the network has any dynamic input tensors, the appropriate calls to setDimensions() must be made. * Likewise, if there are any shape input tensors, the appropriate calls to setShapeValues() are required. @@ -222,6 +167,7 @@ public class IBuilder extends INoCopy { //! //! //! + //! public native @NoException(true) IOptimizationProfile createOptimizationProfile(); /** @@ -234,10 +180,10 @@ public class IBuilder extends INoCopy { * * If an error recorder is not set, messages will be sent to the global log stream. * - * @param recorder The error recorder to register with this interface. */ - // - /** @see getErrorRecorder() - /** */ + * @param recorder The error recorder to register with this interface. + * + * @see getErrorRecorder() + * */ //! @@ -308,7 +254,6 @@ public class IBuilder extends INoCopy { //! //! //! - //! public native @NoException(true) IHostMemory buildSerializedNetwork(@ByRef INetworkDefinition network, @ByRef IBuilderConfig config); /** @@ -327,8 +272,6 @@ public class IBuilder extends INoCopy { * false otherwise. * * \note This function will synchronize the cuda stream returned by \p config.getProfileStream() before returning. 
- * - * This function is only supported in NVIDIA Drive(R) products. * */ @@ -344,6 +287,8 @@ public class IBuilder extends INoCopy { * */ + //! + //! //! //! //! @@ -351,7 +296,9 @@ public class IBuilder extends INoCopy { /** * \brief Set the maximum number of threads. + * * @param maxThreads The maximum number of threads that can be used by the builder. + * * @return True if successful, false otherwise. * * The default value is 1 and includes the current thread. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java index 785b6833109..09793b00c82 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -17,7 +17,7 @@ import static org.bytedeco.cuda.global.nvrtc.*; import static org.bytedeco.tensorrt.global.nvinfer.*; - // namespace impl + /** * \class IBuilderConfig @@ -45,44 +45,6 @@ public class IBuilderConfig extends INoCopy { } - /** - * \brief Set the number of minimization iterations used when timing layers. - * - * When timing layers, the builder minimizes over a set of average times for layer execution. This parameter - * controls the number of iterations used in minimization. The builder may sometimes run layers for more - * iterations to improve timing accuracy if this parameter is set to a small value and the runtime of the - * layer is short. - * - * @see getMinTimingIterations() - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by setAvgTimingIterations(). - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setMinTimingIterations(int minTiming); - - /** - * \brief Query the number of minimization iterations. 
- * - * By default the minimum number of iterations is 1. - * - * @see setMinTimingIterations() - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by getAvgTimingIterations(). - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getMinTimingIterations(); - /** * \brief Set the number of averaging iterations used when timing layers. * @@ -161,54 +123,13 @@ public class IBuilderConfig extends INoCopy { * */ - //! - //! - //! - //! - //! - public native @NoException(true) IInt8Calibrator getInt8Calibrator(); - - /** - * \brief Set the maximum workspace size. - * - * @param workspaceSize The maximum GPU temporary memory which the engine can use at execution time. - * - * @see getMaxWorkspaceSize() - * - * @deprecated Deprecated in TensorRT 8.3. Superseded by IBuilderConfig::setMemoryPoolLimit() with - * MemoryPoolType::kWORKSPACE. - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setMaxWorkspaceSize(@Cast("std::size_t") long workspaceSize); - - /** - * \brief Get the maximum workspace size. - * - * By default the workspace size is the size of total global memory in the device. - * - * @return The maximum workspace size. - * - * @see setMaxWorkspaceSize() - * - * @deprecated Deprecated in TensorRT 8.3. Superseded by IBuilderConfig::getMemoryPoolLimit() with - * MemoryPoolType::kWORKSPACE. - * */ - - //! //! //! //! //! //! - public native @Cast("std::size_t") @Deprecated @NoException(true) long getMaxWorkspaceSize(); + public native @NoException(true) IInt8Calibrator getInt8Calibrator(); /** * \brief Set the build mode flags to turn on builder options for this network. @@ -290,22 +211,25 @@ public class IBuilderConfig extends INoCopy { //! //! //! + //! 
public native @Cast("bool") @NoException(true) boolean getFlag(BuilderFlag builderFlag); public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::BuilderFlag") int builderFlag); /** * \brief Set the device that this layer must execute on. + * * @param layer which layer to execute. * @param deviceType that this layer must execute on. * If DeviceType is not set or is reset, TensorRT will use the default DeviceType set in the builder. * * \note The device type for a layer must be compatible with the safety flow (if specified). - * For example a layer cannot be marked for DLA execution while the builder is configured for kSAFE_GPU. + * For example a layer cannot be marked for DLA execution while the builder is configured for kSAFETY. * * @see getDeviceType() * */ + //! //! //! public native @NoException(true) void setDeviceType(@Const ILayer layer, DeviceType deviceType); @@ -313,17 +237,22 @@ public class IBuilderConfig extends INoCopy { /** * \brief Get the device that this layer executes on. + * * @return Returns DeviceType of the layer. * */ + //! + //! //! //! public native @NoException(true) DeviceType getDeviceType(@Const ILayer layer); /** * \brief whether the DeviceType has been explicitly set for this layer + * * @return true if device type is not default + * * @see setDeviceType() getDeviceType() resetDeviceType() * */ @@ -340,12 +269,14 @@ public class IBuilderConfig extends INoCopy { * */ + //! //! //! public native @NoException(true) void resetDeviceType(@Const ILayer layer); /** * \brief Checks if a layer can run on DLA. + * * @return status true if the layer can on DLA else returns false. * */ @@ -355,10 +286,12 @@ public class IBuilderConfig extends INoCopy { //! //! //! + //! public native @Cast("bool") @NoException(true) boolean canRunOnDLA(@Const ILayer layer); /** * \brief Sets the DLA core used by the network. Defaults to -1. + * * @param dlaCore The DLA core to execute the engine on, in the range [0,getNbDlaCores()). 
* * This function is used to specify which DLA core to use via indexing, if multiple DLA cores are available. @@ -369,16 +302,19 @@ public class IBuilderConfig extends INoCopy { * */ + //! //! //! public native @NoException(true) void setDLACore(int dlaCore); /** * \brief Get the DLA core that the engine executes on. + * * @return assigned DLA core or -1 for DLA not present or unset. * */ + //! //! //! public native @NoException(true) int getDLACore(); @@ -386,6 +322,7 @@ public class IBuilderConfig extends INoCopy { /** * \brief Sets the default DeviceType to be used by the builder. It ensures that all the layers that can run on * this device will run on it, unless setDeviceType is used to override the default DeviceType for a layer. + * * @see getDefaultDeviceType() * */ @@ -419,26 +356,8 @@ public class IBuilderConfig extends INoCopy { //! //! //! - //! public native @NoException(true) void reset(); - /** - * \brief Delete this IBuilderConfig. - * - * De-allocates any internally allocated memory. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void destroy(); - /** * \brief Set the cuda stream that is used to profile this network. * @@ -467,6 +386,7 @@ public class IBuilderConfig extends INoCopy { //! //! //! + //! public native @NoException(true) CUstream_st getProfileStream(); /** @@ -477,6 +397,7 @@ public class IBuilderConfig extends INoCopy { * a single optimization profile are not supported for refittable engines. * * @param profile The new optimization profile, which must satisfy profile->isValid() == true + * * @return The index of the optimization profile (starting from 0) if the input is valid, or -1 if the input is * not valid. * */ @@ -553,6 +474,7 @@ public class IBuilderConfig extends INoCopy { //! //! //! + //! 
public native @NoException(true) IAlgorithmSelector getAlgorithmSelector(); /** @@ -563,6 +485,7 @@ public class IBuilderConfig extends INoCopy { * * @param profile The new calibration profile, which must satisfy profile->isValid() == true or be nullptr. * MIN and MAX values will be overwritten by kOPT. + * * @return True if the calibration profile was set correctly. * */ @@ -890,6 +813,8 @@ public class IBuilderConfig extends INoCopy { //! //! //! + //! + //! public native @Cast("bool") @NoException(true) boolean getPreviewFeature(PreviewFeature feature); public native @Cast("bool") @NoException(true) boolean getPreviewFeature(@Cast("nvinfer1::PreviewFeature") int feature); @@ -904,6 +829,19 @@ public class IBuilderConfig extends INoCopy { * which is currently 5. Setting it to greater than the maximum level results in behavior identical to the * maximum level. * + * Below are the descriptions about each builder optimization level: + * + * - Level 0: This enables the fastest compilation by disabling dynamic kernel generation and selecting the first + * tactic that succeeds in execution. This will also not respect a timing cache. + * - Level 1: Available tactics are sorted by heuristics, but only the top are tested to select the best. If a + * dynamic kernel is generated its compile optimization is low. + * - Level 2: Available tactics are sorted by heuristics, but only the fastest tactics are tested to select the + * best. + * - Level 3: Apply heuristics to see if a static precompiled kernel is applicable or if a new one has to be + * compiled dynamically. + * - Level 4: Always compiles a dynamic kernel. + * - Level 5: Always compiles a dynamic kernel and compares it to static kernels. + * * @param level The optimization level to set to. Must be non-negative. * * @see getBuilderOptimizationLevel @@ -924,13 +862,16 @@ public class IBuilderConfig extends INoCopy { * @see setBuilderOptimizationLevel * */ + + //! //! //! //! //! 
public native @NoException(true) int getBuilderOptimizationLevel(); - /** \brief Set the hardware compatibility level. + /** + * \brief Set the hardware compatibility level. * * Hardware compatibility allows an engine to run on GPU * architectures other than that of the GPU where the engine was @@ -1054,5 +995,37 @@ public class IBuilderConfig extends INoCopy { * * @see setMaxAuxStreams() * */ + + + //! + //! + //! + //! + //! public native @NoException(true) int getMaxAuxStreams(); + + /** + * \brief Sets the progress monitor for building a network. + * + * @param monitor The progress monitor to assign to the IBuilderConfig. + * + * The progress monitor signals to the application when different phases of + * the compiler are being executed. Setting to nullptr unsets the monitor so + * that the application is not signaled. + * + * @see IBuilderConfig::getProgressMonitor + * */ + + + //! + //! + //! + public native @NoException(true) void setProgressMonitor(IProgressMonitor monitor); + + /** + * @return The progress monitor set by the application or nullptr. + * + * @see IBuilderConfig::setProgressMonitor + * */ + public native @NoException(true) IProgressMonitor getProgressMonitor(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java index 2511d27f8cd..48df20fabd2 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -33,9 +33,14 @@ public class ICastLayer extends ILayer { /** * \brief Set cast layer output type. + * + * @param toType The DataType of the output tensor. + * + * Set the output type of the cast layer. * */ + //! //! //! 
public native @NoException(true) void setToType(DataType toType); @@ -43,6 +48,9 @@ public class ICastLayer extends ILayer { /** * \brief Return cast layer output type. + * + * @return toType parameter set during layer creation or by setToType(). + * The return value is the output type of the cast layer. * */ public native @NoException(true) DataType getToType(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java index 9c360dfbce9..886633afef0 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -42,7 +42,6 @@ public class IConcatenationLayer extends ILayer { * * The default axis is the number of tensor dimensions minus three, or zero if the tensor has fewer than three * dimensions. For example, for a tensor with dimensions NCHW, it is C. - * For implicit batch mode, the number of tensor dimensions does NOT include the implicit batch dimension. * * When running this layer on the DLA, the concatenation axis must be the third to last axis, e.g. C if tensor * dimensions are NCHW. 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java index 8b937eeef9a..b6932232e0f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * This layer represents a condition input to an IIfConditional. + * \class IConditionLayer + * + * \brief This layer represents a condition input to an IIfConditional. * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IConditionLayer extends IIfConditionalBoundaryLayer { diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java index cbf4bc0aac4..126fd50d14f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -36,9 +36,8 @@ public class IConstantLayer extends ILayer { /** * \brief Set the weights for the layer. * - * If weights.type is DataType::kINT32, the output is a tensor of 32-bit indices. - * Otherwise the output is a tensor of real values and the output type will be - * follow TensorRT's normal precision rules. + * The output type is weights.type. If the network is weakly typed and the weights have a real type, + * the output type might be different per TensorRT's type conversion rules. 
* * @see getWeights() * */ @@ -75,7 +74,7 @@ public class IConstantLayer extends ILayer { //! //! //! - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Get the dimensions for the layer. @@ -84,5 +83,5 @@ public class IConstantLayer extends ILayer { * * @see getDimensions * */ - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java index 0ba0ebd05bd..c13fb3e7337 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -37,38 +37,6 @@ public class IConvolutionLayer extends ILayer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IConvolutionLayer(Pointer p) { super(p); } - /** - * \brief Set the HW kernel size of the convolution. - * - * If executing this layer on DLA, both height and width of kernel size must be in the range [1,32]. - * - * @see getKernelSize() - * - * @deprecated Superseded by setKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize); - - /** - * \brief Get the HW kernel size of the convolution. - * - * @see setKernelSize() - * - * @deprecated Superseded by getKernelSizeNd. 
Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getKernelSize(); - /** * \brief Set the number of output maps for the convolution. * @@ -81,7 +49,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setNbOutputMaps(int nbOutputMaps); + public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps); /** * \brief Get the number of output maps for the convolution. @@ -94,84 +62,10 @@ public class IConvolutionLayer extends ILayer { //! //! //! - //! - //! - public native @NoException(true) int getNbOutputMaps(); - - /** - * \brief Get the stride of the convolution. - * - * Default: (1,1) - * - * If executing this layer on DLA, both height and width of stride must be in the range [1,8]. - * - * @see getStride() - * - * @deprecated Superseded by setStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride); - - /** - * \brief Get the stride of the convolution. - * - * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getStride(); - - /** - * \brief Set the padding of the convolution. - * - * The input will be zero-padded by this number of elements in the height and width directions. - * Padding is symmetric. - * - * Default: (0,0) - * - * If executing this layer on DLA, both height and width of padding must be in the range [0,31], - * and the padding size must be less than the kernel size. - * - * @see getPadding() - * - * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! 
- public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding); - - /** - * \brief Get the padding of the convolution. If the padding is asymmetric, the pre-padding is returned. - * - * @see setPadding() - * - * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - //! //! //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getPadding(); + public native @Cast("int64_t") @NoException(true) long getNbOutputMaps(); /** * \brief Set the number of groups for a convolution. @@ -193,7 +87,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setNbGroups(int nbGroups); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); /** * \brief Get the number of groups of the convolution. @@ -206,7 +100,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) int getNbGroups(); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); /** * \brief Set the kernel weights for the convolution. @@ -270,42 +164,6 @@ public class IConvolutionLayer extends ILayer { //! public native @ByVal @NoException(true) Weights getBiasWeights(); - /** - * \brief Set the dilation for a convolution. - * - * Default: (1,1) - * - * If executing this layer on DLA, both height and width must be in the range [1,32]. - * - * @see getDilation() - * - * @deprecated Superseded by setDilationNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setDilation(@ByVal DimsHW dilation); - - /** - * \brief Get the dilation for a convolution. - * - * @see setDilation() - * - * @deprecated Superseded by getDilationNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! 
- public native @Deprecated @ByVal @NoException(true) DimsHW getDilation(); - /** * \brief Set the multi-dimension pre-padding of the convolution. * @@ -323,7 +181,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the pre-padding. @@ -338,7 +196,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); /** * \brief Set the multi-dimension post-padding of the convolution. @@ -357,7 +215,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the post-padding. @@ -371,7 +229,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); /** * \brief Set the padding mode. @@ -419,7 +277,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize); + public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize); /** * \brief Get the multi-dimension kernel size of the convolution. @@ -433,7 +291,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! 
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd(); /** * \brief Set the multi-dimension stride of the convolution. @@ -443,14 +301,14 @@ public class IConvolutionLayer extends ILayer { * If executing this layer on DLA, only support 2D stride, both height and width of stride must be in the range * [1,8]. * - * @see getStrideNd() setStride() getStride() + * @see getStrideNd() * */ //! //! //! - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); /** * \brief Get the multi-dimension stride of the convolution. @@ -465,7 +323,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); /** * \brief Set the multi-dimension padding of the convolution. @@ -486,7 +344,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the multi-dimension padding of the convolution. @@ -502,7 +360,7 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); /** * \brief Set the multi-dimension dilation of the convolution. @@ -511,19 +369,19 @@ public class IConvolutionLayer extends ILayer { * * If executing this layer on DLA, only support 2D padding, both height and width must be in the range [1,32]. 
* - * @see getDilation() + * @see getDilationNd() * */ //! //! //! - public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation); + public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation); /** * \brief Get the multi-dimension dilation of the convolution. * - * @see setDilation() + * @see setDilationNd() * */ @@ -532,7 +390,8 @@ public class IConvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd(); + //! + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd(); /** * \brief Append or replace an input of this layer with a specific tensor @@ -545,6 +404,7 @@ public class IConvolutionLayer extends ILayer { * Input 0 is the input activation tensor. * Input 1 is the kernel tensor. If used, the kernel weights parameter must be set to empty weights. * Input 2 is the bias tensor. If used, the bias parameter must be set to empty weights. + * * @see getKernelWeights(), setKernelWeights(), getBiasWeights(), setBiasWeights() * */ } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java index ff1d64f7f1a..cb5c0ad5869 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -45,145 +45,6 @@ public class ICudaEngine extends INoCopy { } - /** - * \brief Get the number of binding indices. - * - * There are separate binding indices for each optimization profile. - * This method returns the total over all profiles. 
- * If the engine has been built for K profiles, the first getNbBindings() / K bindings are used by profile - * number 0, the following getNbBindings() / K bindings are used by profile number 1 etc. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getNbIOTensors. - * - * @see getBindingIndex() - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getNbBindings(); - - /** - * \brief Retrieve the binding index for a named tensor. - * - * IExecutionContext::enqueueV2() and IExecutionContext::executeV2() require an array of buffers. - * - * Engine bindings map from tensor names to indices in this array. - * Binding indices are assigned at engine build time, and take values in the range [0 ... n-1] where n is the total - * number of inputs and outputs. - * - * To get the binding index of the name in an optimization profile with index k > 0, - * mangle the name by appending " [profile k]", as described for method getBindingName(). - * - * @param name The tensor name. - * @return The binding index for the named tensor, or -1 if the provided name does not map to an input or output - * tensor. - * - * \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by name-based methods. Use them instead of binding-index - * based methods. - * - * @see getNbBindings() getBindingName() - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getBindingIndex(String name); - public native @Deprecated @NoException(true) int getBindingIndex(@Cast("const char*") BytePointer name); - - /** - * \brief Retrieve the name corresponding to a binding index. - * - * This is the reverse mapping to that provided by getBindingIndex(). - * - * For optimization profiles with an index k > 0, the name is mangled by appending - * " [profile k]", with k written in decimal. 
For example, if the tensor in the - * INetworkDefinition had the name "foo", and bindingIndex refers to that tensor in the - * optimization profile with index 3, getBindingName returns "foo [profile 3]". - * - * @param bindingIndex The binding index. - * @return The name corresponding to the index, or nullptr if the index is out of range. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by name-based methods. Use them instead of binding-index - * based methods. - * - * @see getBindingIndex() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) String getBindingName(int bindingIndex); - - /** - * \brief Determine whether a binding is an input binding. - * - * @param bindingIndex The binding index. - * @return True if the index corresponds to an input binding and the index is in range. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorIOMode(). - * - * @see getTensorIOMode() - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean bindingIsInput(int bindingIndex); - - /** - * \brief Get the dimensions of a binding. - * - * @param bindingIndex The binding index. - * @return The dimensions of the binding if the index is in range, otherwise Dims(). - * Has -1 for any dimension that varies within the optimization profile. - * - * For example, suppose an INetworkDefinition has an input with shape [-1,-1] - * that becomes a binding b in the engine. If the associated optimization profile - * specifies that b has minimum dimensions as [6,9] and maximum dimensions [7,9], - * getBindingDimensions(b) returns [-1,9], despite the second dimension being - * dynamic in the INetworkDefinition. - * - * Because each optimization profile has separate bindings, the returned value can - * differ across profiles. Consider another binding b' for the same network input, - * but for another optimization profile. 
If that other profile specifies minimum - * dimensions [5,8] and maximum dimensions [5,9], getBindingDimensions(b') returns [5,-1]. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorShape(). - * - * @see getTensorShape() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex); - /** * \brief Get shape of an input or output tensor. * @@ -201,27 +62,8 @@ public class ICudaEngine extends INoCopy { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName); - - /** - * \brief Determine the required data type for a buffer from its binding index. - * - * @param bindingIndex The binding index. - * @return The type of the data in the buffer. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorDataType(). - * - * @see getTensorDataType() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) DataType getBindingDataType(int bindingIndex); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName); /** * \brief Determine the required data type for a buffer from its tensor name. @@ -239,29 +81,9 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! public native @NoException(true) DataType getTensorDataType(String tensorName); public native @NoException(true) @Cast("nvinfer1::DataType") int getTensorDataType(@Cast("const char*") BytePointer tensorName); - /** - * \brief Get the maximum batch size which can be used for inference. 
Should only be called if the engine is built - * from an INetworkDefinition with implicit batch dimension mode. - * - * @return The maximum batch size for this engine. - * - * \warning For an engine built from an INetworkDefinition with explicit batch dimension mode, this will always - * return 1. - * - * @deprecated Deprecated in TensorRT 8.4. - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getMaxBatchSize(); - /** * \brief Get the number of layers in the network. * @@ -298,31 +120,16 @@ public class ICudaEngine extends INoCopy { public native @NoException(true) IHostMemory serialize(); /** - * \brief Create an execution context. - * - * The execution context created will call setOptimizationProfile(0) implicitly if there are - * no other execution contexts assigned to optimization profile 0. This functionality is - * deprecated in TensorRT 8.6 and will instead default all optimization profiles to 0 starting - * in TensorRT 9.0. - * If an error recorder has been set for the engine, it will also be passed to the execution context. - * - * @see IExecutionContext. - * @see IExecutionContext::setOptimizationProfile() - * */ - - - //! - //! - //! - //! - public native @NoException(true) IExecutionContext createExecutionContext(); - - /** - * \brief Destroy this object; + * \brief Create an execution context and specify the strategy for allocating internal activation memory. * - * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}. + * The default value for the allocation strategy is ExecutionContextAllocationStrategy::kSTATIC, which means the + * context will pre-allocate a block of device memory that is sufficient for all profiles. The newly created + * execution context will be assigned optimization profile 0. If an error recorder has been set for the engine, it + * will also be passed to the execution context. * - * \warning Calling destroy on a managed pointer will result in a double-free error. 
+ * @see IExecutionContext + * @see IExecutionContext::setOptimizationProfileAsync() + * @see ExecutionContextAllocationStrategy * */ @@ -332,30 +139,11 @@ public class ICudaEngine extends INoCopy { //! //! //! - public native @Deprecated @NoException(true) void destroy(); - - /** - * \brief Get location of binding - * - * This lets you know whether the binding should be a pointer to device or host memory. - * - * @param bindingIndex The binding index. - * @return The location of the bound tensor with given index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorLocation(). - * - * @see ITensor::setLocation() ITensor::getLocation() - * @see getTensorLocation() - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) TensorLocation getLocation(int bindingIndex); + public native @NoException(true) IExecutionContext createExecutionContext( + ExecutionContextAllocationStrategy strategy/*=nvinfer1::ExecutionContextAllocationStrategy::kSTATIC*/); + public native @NoException(true) IExecutionContext createExecutionContext(); + public native @NoException(true) IExecutionContext createExecutionContext( + @Cast("nvinfer1::ExecutionContextAllocationStrategy") int strategy/*=nvinfer1::ExecutionContextAllocationStrategy::kSTATIC*/); /** * \brief Get whether an input or output tensor must be on GPU or CPU. @@ -415,24 +203,30 @@ public class ICudaEngine extends INoCopy { * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. * */ + + //! + //! //! //! 
public native @NoException(true) TensorIOMode getTensorIOMode(String tensorName); public native @NoException(true) @Cast("nvinfer1::TensorIOMode") int getTensorIOMode(@Cast("const char*") BytePointer tensorName); - /** \brief create an execution context without any device memory allocated + /** + * \brief create an execution context without any device memory allocated * * The memory for execution of this device context must be supplied by the application. + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by createExecutionContext() with parameter. * */ //! //! //! - public native @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory(); + public native @Deprecated @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory(); /** - * \brief Return the amount of device memory required by an execution context. + * \brief Return the maximum device memory required by the context over all profiles. * * @see IExecutionContext::setDeviceMemory() * */ @@ -444,31 +238,21 @@ public class ICudaEngine extends INoCopy { public native @Cast("size_t") @NoException(true) long getDeviceMemorySize(); /** - * \brief Return true if an engine can be refit. + * \brief Return the maximum device memory required by the context for a profile. * - * @see nvinfer1::createInferRefitter() + * @see IExecutionContext::setDeviceMemory() * */ //! //! //! - //! - //! - //! - public native @Cast("bool") @NoException(true) boolean isRefittable(); + public native @Cast("size_t") @NoException(true) long getDeviceMemorySizeForProfile(int profileIndex); /** - * \brief Return the number of bytes per component of an element. - * - * The vector component size is returned if getBindingVectorizedDim() != -1. - * - * @param bindingIndex The binding Index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorBytesPerComponent(). + * \brief Return true if an engine can be refit. 
* - * @see getBindingVectorizedDim() - * @see getTensorBytesPerComponent() + * @see nvinfer1::createInferRefitter() * */ @@ -478,7 +262,7 @@ public class ICudaEngine extends INoCopy { //! //! //! - public native @Deprecated @NoException(true) int getBindingBytesPerComponent(int bindingIndex); + public native @Cast("bool") @NoException(true) boolean isRefittable(); /** * \brief Return the number of bytes per component of an element, or -1 if the provided name does not map to an @@ -531,27 +315,6 @@ public class ICudaEngine extends INoCopy { public native @NoException(true) int getTensorBytesPerComponent(String tensorName, int profileIndex); public native @NoException(true) int getTensorBytesPerComponent(@Cast("const char*") BytePointer tensorName, int profileIndex); - /** - * \brief Return the number of components included in one element. - * - * The number of elements in the vectors is returned if getBindingVectorizedDim() != -1. - * - * @param bindingIndex The binding Index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorComponentsPerElement(). - * - * @see getBindingVectorizedDim() - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getBindingComponentsPerElement(int bindingIndex); - /** * \brief Return the number of components included in one element, or -1 if the provided name does not map to an * input or output tensor. @@ -598,27 +361,9 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! public native @NoException(true) int getTensorComponentsPerElement(String tensorName, int profileIndex); public native @NoException(true) int getTensorComponentsPerElement(@Cast("const char*") BytePointer tensorName, int profileIndex); - /** - * \brief Return the binding format. - * - * @param bindingIndex The binding Index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorFormat(). - * - * @see getTensorFormat() - * */ - - - //! - //! - //! - //! 
- public native @Deprecated @NoException(true) TensorFormat getBindingFormat(int bindingIndex); - /** * \brief Return the tensor format, or TensorFormat::kLINEAR if the provided name does not map to an input or * output tensor. @@ -654,38 +399,9 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! public native @NoException(true) TensorFormat getTensorFormat(String tensorName, int profileIndex); public native @NoException(true) @Cast("nvinfer1::TensorFormat") int getTensorFormat(@Cast("const char*") BytePointer tensorName, int profileIndex); - /** - * \brief Return the human readable description of the tensor format, or nullptr if the provided name does not - * map to an input or output tensor. - * - * The description includes the order, vectorization, data type, and strides. - * Examples are shown as follows: - * Example 1: kCHW + FP32 - * "Row major linear FP32 format" - * Example 2: kCHW2 + FP16 - * "Two wide channel vectorized row major FP16 format" - * Example 3: kHWC8 + FP16 + Line Stride = 32 - * "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0" - * - * @param bindingIndex The binding Index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorFormatDesc(). - * - * @see getTensorFormatDesc() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) String getBindingFormatDesc(int bindingIndex); - /** * \brief Return the human readable description of the tensor format, or empty string if the provided name does not * map to an input or output tensor. @@ -693,9 +409,9 @@ public class ICudaEngine extends INoCopy { * The description includes the order, vectorization, data type, and strides. 
* Examples are shown as follows: * Example 1: kCHW + FP32 - * "Row major linear FP32 format" + * "Row-major linear FP32 format" * Example 2: kCHW2 + FP16 - * "Two wide channel vectorized row major FP16 format" + * "Two-wide channel vectorized row-major FP16 format" * Example 3: kHWC8 + FP16 + Line Stride = 32 * "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0" * @@ -722,9 +438,9 @@ public class ICudaEngine extends INoCopy { * The description includes the order, vectorization, data type, and strides. * Examples are shown as follows: * Example 1: kCHW + FP32 - * "Row major linear FP32 format" + * "Row-major linear FP32 format" * Example 2: kCHW2 + FP16 - * "Two wide channel vectorized row major FP16 format" + * "Two-wide channel vectorized row-major FP16 format" * Example 3: kHWC8 + FP16 + Line Stride = 32 * "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0" * @@ -740,30 +456,9 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! public native @NoException(true) String getTensorFormatDesc(String tensorName, int profileIndex); public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDesc(@Cast("const char*") BytePointer tensorName, int profileIndex); - /** - * \brief Return the dimension index that the buffer is vectorized, or -1 is the name is not found. - * - * Specifically -1 is returned if scalars per vector is 1. - * - * @param bindingIndex The binding Index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorVectorizedDim(). - * - * @see getTensorVectorizedDim() - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) int getBindingVectorizedDim(int bindingIndex); - /** * \brief Return the dimension index that the buffer is vectorized, or -1 if the provided name does not * map to an input or output tensor. @@ -829,7 +524,7 @@ public class ICudaEngine extends INoCopy { * * @return Number of optimization profiles. It is always at least 1. 
* - * @see IExecutionContext::setOptimizationProfile() */ + * @see IExecutionContext::setOptimizationProfileAsync() */ //! @@ -839,53 +534,8 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! - //! - //! - //! public native @NoException(true) int getNbOptimizationProfiles(); - /** - * \brief Get the minimum / optimum / maximum dimensions for a particular input binding under an optimization - * profile. - * - * @param bindingIndex The input binding index, which must belong to the given profile, - * or be between 0 and bindingsPerProfile-1 as described below. - * - * @param profileIndex The profile index, which must be between 0 and getNbOptimizationProfiles()-1. - * - * @param select Whether to query the minimum, optimum, or maximum dimensions for this binding. - * - * @return The minimum / optimum / maximum dimensions for this binding in this profile. - * If the profileIndex or bindingIndex are invalid, return Dims with nbDims=-1. - * - * For backwards compatibility with earlier versions of TensorRT, if the bindingIndex - * does not belong to the current optimization profile, but is between 0 and bindingsPerProfile-1, - * where bindingsPerProfile = getNbBindings()/getNbOptimizationProfiles, - * then a corrected bindingIndex is used instead, computed by: - * - * profileIndex * bindingsPerProfile + bindingIndex % bindingsPerProfile - * - * Otherwise the bindingIndex is considered invalid. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getProfileShape(). - * - * @see getProfileShape() - * */ - - - //! - //! - //! - //! - //! - //! - //! 
- public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions( - int bindingIndex, int profileIndex, OptProfileSelector select); - public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions( - int bindingIndex, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); - /** * \brief Get the minimum / optimum / maximum dimensions for an input tensor given its name under an optimization * profile. @@ -910,107 +560,24 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! - //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(String tensorName, int profileIndex, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(String tensorName, int profileIndex, OptProfileSelector select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); /** - * \brief Get minimum / optimum / maximum values for an input shape binding under an optimization profile. - * - * @param profileIndex The profile index (must be between 0 and getNbOptimizationProfiles()-1) + * \brief Get the minimum / optimum / maximum values (not dimensions) for an input tensor given + * its name under an optimization profile. These correspond to the values set using + * IOptimizationProfile::setShapeValues when the engine was built. * - * @param inputIndex The input index (must be between 0 and getNbBindings() - 1) - * - * @param select Whether to query the minimum, optimum, or maximum shape values for this binding. 
- * - * @return If the binding is an input shape binding, return a pointer to an array that has - * the same number of elements as the corresponding tensor, i.e. 1 if dims.nbDims == 0, or dims.d[0] - * if dims.nbDims == 1, where dims = getBindingDimensions(inputIndex). The array contains - * the elementwise minimum / optimum / maximum values for this shape binding under the profile. - * If either of the indices is out of range, or if the binding is not an input shape binding, return - * nullptr. - * - * For backwards compatibility with earlier versions of TensorRT, a bindingIndex that does not belong - * to the profile is corrected as described for getProfileDimensions(). - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getShapeValues(). Difference between Execution and shape - * tensor is superficial since TensorRT 8.5. - * - * @see getProfileDimensions() getShapeValues() - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @Const @Deprecated @NoException(true) IntPointer getProfileShapeValues( - int profileIndex, int inputIndex, OptProfileSelector select); - public native @Const @Deprecated @NoException(true) IntBuffer getProfileShapeValues( - int profileIndex, int inputIndex, @Cast("nvinfer1::OptProfileSelector") int select); - - /** - * \brief True if tensor is required as input for shape calculations or output from them. - * - * TensorRT evaluates a network in two phases: - * - * 1. Compute shape information required to determine memory allocation requirements - * and validate that runtime sizes make sense. - * - * 2. Process tensors on the device. - * - * Some tensors are required in phase 1. These tensors are called "shape tensors", and always - * have type Int32 and no more than one dimension. These tensors are not always shapes - * themselves, but might be used to calculate tensor shapes for phase 2. 
- * - * isShapeBinding(i) returns true if the tensor is a required input or an output computed in phase 1. - * isExecutionBinding(i) returns true if the tensor is a required input or an output computed in phase 2. - * - * For example, if a network uses an input tensor with binding i as an addend - * to an IElementWiseLayer that computes the "reshape dimensions" for IShuffleLayer, - * then isShapeBinding(i) == true. - * - * It's possible to have a tensor be required by both phases. For instance, a tensor - * can be used for the "reshape dimensions" and as the indices for an IGatherLayer - * collecting floating-point data. - * - * It's also possible to have a tensor be required by neither phase, but nonetheless - * shows up in the engine's inputs. For example, if an input tensor is used only - * as an input to IShapeLayer, only its shape matters and its values are irrelevant. - * - * @deprecated Use name-based isShapeInferenceIO() instead to know whether a tensor is a shape tensor. + * @param tensorName The name of an input tensor. * - * @see isExecutionBinding() isShapeInferenceIO() - * */ - - - //! - //! - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean isShapeBinding(int bindingIndex); - - /** - * \brief True if pointer to tensor data is required for execution phase, false if nullptr can be supplied. + * @param profileIndex The profile index, which must be between 0 and getNbOptimizationProfiles()-1. * - * For example, if a network uses an input tensor with binding i ONLY as the "reshape dimensions" - * input of IShuffleLayer, then isExecutionBinding(i) is false, and a nullptr can be - * supplied for it when calling IExecutionContext::execute or IExecutionContext::enqueue. + * @param select Whether to query the minimum, optimum, or maximum values for this input tensor. * - * @deprecated No name-based equivalent replacement. Use getTensorLocation() instead to know the location of tensor - * data. 
Distinction between execution binding and shape binding is superficial since TensorRT 8.5. + * @return The minimum / optimum / maximum values for an input tensor in this profile. + * If the profileIndex is invalid or the provided name does not map to an input tensor, return nullptr. * - * @see isShapeBinding() getTensorLocation() + * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. * */ @@ -1018,26 +585,31 @@ public class ICudaEngine extends INoCopy { //! //! //! - public native @Cast("bool") @Deprecated @NoException(true) boolean isExecutionBinding(int bindingIndex); + public native @Const @NoException(true) IntPointer getProfileTensorValues(String tensorName, int profileIndex, OptProfileSelector select); + public native @Const @NoException(true) IntBuffer getProfileTensorValues(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); /** * \brief Determine what execution capability this engine has. * * If the engine has EngineCapability::kSTANDARD, then all engine functionality is valid. * If the engine has EngineCapability::kSAFETY, then only the functionality in safe engine is valid. - * If the engine has EngineCapability::kDLA_STANDALONE, then only serialize, destroy, and const-accessor functions are - * valid. + * If the engine has EngineCapability::kDLA_STANDALONE, then only serialize, destroy, and const-accessor functions + * are valid. * * @return The EngineCapability flag that the engine was built for. * */ + + //! + //! //! //! //! //! public native @NoException(true) EngineCapability getEngineCapability(); - /** \brief Set the ErrorRecorder for this interface + /** + * \brief Set the ErrorRecorder for this interface * * Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution. * This function will call incRefCount of the registered ErrorRecorder at least once. 
Setting @@ -1046,10 +618,10 @@ public class ICudaEngine extends INoCopy { * * If an error recorder is not set, messages will be sent to the global log stream. * - * @param recorder The error recorder to register with this interface. */ - // - /** @see getErrorRecorder() - /** */ + * @param recorder The error recorder to register with this interface. + * + * @see getErrorRecorder() + * */ //! @@ -1076,30 +648,27 @@ public class ICudaEngine extends INoCopy { //! //! //! - //! public native @NoException(true) IErrorRecorder getErrorRecorder(); /** * \brief Query whether the engine was built with an implicit batch dimension. * - * @return True if tensors have implicit batch dimension, false otherwise. - * - * This is an engine-wide property. Either all tensors in the engine - * have an implicit batch dimension or none of them do. - * - * hasImplicitBatchDimension() is true if and only if the INetworkDefinition - * from which this engine was built was created with createNetworkV2() without - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. + * @return Always false since TensorRT 10.0 does not support an implicit batch dimension. * * @see createNetworkV2 + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch is no supported since TensorRT 10.0. * */ + + //! //! //! //! - public native @Cast("bool") @NoException(true) boolean hasImplicitBatchDimension(); + public native @Cast("bool") @Deprecated @NoException(true) boolean hasImplicitBatchDimension(); - /** \brief return the tactic sources required by this engine. + /** + * \brief return the tactic sources required by this engine. * * The value returned is equal to zero or more tactics sources set * at build time via setTacticSources() in IBuilderConfig. Sources @@ -1110,12 +679,15 @@ public class ICudaEngine extends INoCopy { * @see IBuilderConfig::setTacticSources() * */ + + //! //! //! //! 
public native @Cast("nvinfer1::TacticSources") @NoException(true) int getTacticSources(); - /** \brief Return the \ref ProfilingVerbosity the builder config was set to when the engine was built. + /** + * \brief Return the \ref ProfilingVerbosity the builder config was set to when the engine was built. * * @return the profiling verbosity the builder config was set to when the engine was built. * @@ -1165,12 +737,15 @@ public class ICudaEngine extends INoCopy { * @see getNbIOTensors() * */ + + //! //! //! //! public native @NoException(true) String getIOTensorName(int index); - /** \brief Return the hardware compatibility level of this engine. + /** + * \brief Return the hardware compatibility level of this engine. * * @return hardwareCompatibilityLevel The level of hardware * compatibility. @@ -1196,5 +771,180 @@ public class ICudaEngine extends INoCopy { * * @see IBuilderConfig::setMaxAuxStreams(), IExecutionContext::setAuxStreams() * */ + + + //! + //! + //! public native @NoException(true) int getNbAuxStreams(); + + /** + * \brief Create a serialization configuration object. + * + * @see ISerializationConfig + * */ + + + //! + //! + //! + //! + //! + public native @NoException(true) ISerializationConfig createSerializationConfig(); + + /** + * \brief Serialize the network to a stream with the provided SerializationConfig. + * + * @return An IHostMemory object that contains the serialized engine. + * + * The network may be deserialized with IRuntime::deserializeCudaEngine(). + * + * @see IRuntime::deserializeCudaEngine() + * */ + + + //! + //! + //! + //! + //! + //! + //! + //! + //! + //! + public native @NoException(true) IHostMemory serializeWithConfig(@ByRef ISerializationConfig config); + + /** + * \brief Limit the maximum amount of GPU memory usable for network weights + * in bytes. + * + * @param gpuMemoryBudget This parameter may take on 3 types of values: + * -1: Allows TensorRT to choose the budget according to the streamable weights size. 
+ * Free CUDA memory will be queried at ::createExecutionContext and accordingly: + * * If streamable weights all fit: weight streaming is not required and disabled. + * * Otherwise: Budget is set to getMinimumWeightStreamingBudget + * 0: (default) Disables weight streaming. The execution may fail if the network is too large for GPU memory. + * >0: The maximum bytes of GPU memory that weights can occupy. It must be bounded by + * [getMinimumWeightStreamingBudget, min(getStreamableWeightsSize - 1, free GPU memory)]. + * + * By setting a weight limit, users can expect a GPU memory usage reduction + * of |network weights| - gpuMemoryBudget bytes. Maximum memory savings occur + * when gpuMemoryBudget is set to getMinimumWeightStreamingBudget. + * + * Streaming larger amounts of memory will likely result in lower performance + * except in some boundary cases where streaming weights allows the user to + * run larger batch sizes. The higher throughput offsets the increased + * latency in these cases. Tuning the value of the memory limit is + * recommended for best performance. + * + * \warning If weight streaming is active, then multiple concurrent IExecutionContexts will forced to run serially. + * + * \warning GPU memory for the weights is allocated upon the first IExecutionContext's creation + * and deallocated upon the last one's destruction. + * + * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building. + * + * @return true if the memory limit is valid and the call was successful + * otherwise false. + * + * @see BuilderFlag::kWEIGHT_STREAMING, + * ICudaEngine::getWeightStreamingBudget + * ICudaEngine::getMinimumWeightStreamingBudget, + * ICudaEngine::getStreamableWeightsSize + * */ + + + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean setWeightStreamingBudget(@Cast("int64_t") long gpuMemoryBudget); + + /** + * \brief Returns the current weight streaming device memory budget in bytes. 
+ * + * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building. + * + * @return The weight streaming budget in bytes. Please see ::setWeightStreamingBudget for the possible + * values. + * + * @see BuilderFlag::kWEIGHT_STREAMING, + * ICudaEngine::setWeightStreamingBudget, + * ICudaEngine::getMinimumWeightStreamingBudget, + * ICudaEngine::getStreamableWeightsSize + * */ + + + //! + //! + //! + //! + //! + //! + //! + public native @Cast("int64_t") @NoException(true) long getWeightStreamingBudget(); + + /** + * \brief The minimum number of bytes of GPU memory required by network + * weights for successful weight streaming. + * + * This is a positive integer for engines with streamable weights because a + * staging buffer on the GPU is required to temporarily hold the streamed + * weights. The size of the staging buffer is determined by TensorRT and must + * be at least as large as the size of the largest streamable weight in the + * network. + * + * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building. + * + * + * @return The minimum number of bytes of GPU memory required for streaming. + * + * @see ICudaEngine::setWeightStreamingBudget + * */ + + + //! + //! + //! + //! + //! + //! + //! + public native @Cast("int64_t") @NoException(true) long getMinimumWeightStreamingBudget(); + + /** + * \brief Get the total size in bytes of all streamable weights. + * + * The set of streamable weights is a subset of all network weights. The + * total size may exceed free GPU memory. + * + * Returns 0 if BuilderFlag::kWEIGHT_STREAMING is unset during engine building. + * + * + * @return The total size in bytes of all streamable weights. + * + * @see ICudaEngine::setWeightStreamingBudget + * */ + + + //! + //! + //! + //! + //! + public native @Cast("int64_t") @NoException(true) long getStreamableWeightsSize(); + + /** + * \brief Check if a tensor is marked as a debug tensor. 
+ * + * Determine whether the given name corresponds to a debug tensor. + * + * @return True if tensor is a debug tensor, false otherwise. + * + * @see INetworkDefinition::markDebug + * */ + public native @Cast("bool") @NoException(true) boolean isDebugTensor(String name); + public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Cast("const char*") BytePointer name); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java new file mode 100644 index 00000000000..958d98cedf3 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IDebugListener extends IVersionedInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IDebugListener(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! 
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Callback function that is called when a debug tensor’s value is updated and the debug state of the tensor + * is set to true. Content in the given address is only guaranteed to be valid for the duration of the callback. + * + * @param location TensorLocation of the tensor. + * @param addr pointer to buffer. + * @param type data Type of the tensor. + * @param shape shape of the tensor. + * @param name name of the tensor. + * @param stream Cuda stream object. + * + * @return True on success, false otherwise. + * */ + public native @Cast("bool") boolean processDebugTensor(@Const Pointer addr, TensorLocation location, DataType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 shape, + String name, CUstream_st stream); + public native @Cast("bool") boolean processDebugTensor(@Const Pointer addr, @Cast("nvinfer1::TensorLocation") int location, @Cast("nvinfer1::DataType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 shape, + @Cast("const char*") BytePointer name, CUstream_st stream); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java index 494de1a0f75..061a13992fd 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -32,40 +32,6 @@ public class IDeconvolutionLayer extends ILayer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IDeconvolutionLayer(Pointer p) { super(p); } - /** - * \brief Set the HW kernel size of the convolution. 
- * - * If executing this layer on DLA, both height and width of kernel size must be in the range [1,32], or the - * combinations of [64, 96, 128] in one dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, - * but not [64x64]. - * - * @see getKernelSize() - * - * @deprecated Superseded by setKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize); - - /** - * \brief Get the HW kernel size of the deconvolution. - * - * @see setKernelSize() - * - * @deprecated Superseded by getKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getKernelSize(); - /** * \brief Set the number of output feature maps for the deconvolution. * @@ -78,7 +44,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setNbOutputMaps(int nbOutputMaps); + public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps); /** * \brief Get the number of output feature maps for the deconvolution. @@ -91,89 +57,10 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - //! - public native @NoException(true) int getNbOutputMaps(); - - /** - * \brief Set the stride of the deconvolution. - * - * If executing this layer on DLA, there is one restriction: - * 1) Stride height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one - * dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64]. - * - * @see getStride() - * - * @deprecated Superseded by setStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - //! //! //! - //! 
- public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride); - - /** - * \brief Get the stride of the deconvolution. - * - * Default: (1,1) - * - * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getStride(); - - /** - * \brief Set the padding of the deconvolution. - * - * The output will be trimmed by this number of elements on each side in the height and width directions. - * In other words, it resembles the inverse of a convolution layer with this padding size. - * Padding is symmetric, and negative padding is not supported. - * - * Default: (0,0) - * - * If executing this layer on DLA, both height and width of padding must be 0. - * - * @see getPadding() - * - * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding); - - /** - * \brief Get the padding of the deconvolution. - * - * Default: (0, 0) - * - * @see setPadding() - * - * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getPadding(); + public native @Cast("int64_t") @NoException(true) long getNbOutputMaps(); /** * \brief Set the number of groups for a deconvolution. @@ -195,7 +82,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setNbGroups(int nbGroups); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); /** * \brief Get the number of groups for a deconvolution. @@ -208,7 +95,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! 
- public native @NoException(true) int getNbGroups(); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); /** * \brief Set the kernel weights for the deconvolution. @@ -290,7 +177,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the pre-padding. @@ -305,7 +192,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); /** * \brief Set the multi-dimension post-padding of the deconvolution. @@ -325,7 +212,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the padding. @@ -339,7 +226,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); /** * \brief Set the padding mode. @@ -382,14 +269,14 @@ public class IDeconvolutionLayer extends ILayer { * 2) Kernel height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one * dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64]. * - * @see getKernelSizeNd() setKernelSize() getKernelSize() + * @see getKernelSizeNd() * */ //! //! //! 
- public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize); + public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize); /** * \brief Get the multi-dimension kernel size of the deconvolution. @@ -403,7 +290,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd(); /** * \brief Set the multi-dimension stride of the deconvolution. @@ -415,14 +302,14 @@ public class IDeconvolutionLayer extends ILayer { * 2) Stride height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one * dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64]. * - * @see getStrideNd() setStride() getStride() + * @see getStrideNd() * */ //! //! //! - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); /** * \brief Get the multi-dimension stride of the deconvolution. @@ -437,7 +324,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); /** * \brief Set the multi-dimension padding of the deconvolution. @@ -458,7 +345,7 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the multi-dimension padding of the deconvolution. 
@@ -473,7 +360,8 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); + //! + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); /** * \brief Append or replace an input of this layer with a specific tensor @@ -484,14 +372,18 @@ public class IDeconvolutionLayer extends ILayer { * Input 0 is the input activation tensor. * Input 1 is the kernel tensor. If used, the kernel weights parameter must be set to empty weights. * Input 2 is the bias tensor. If used, the bias parameter must be set to empty weights. + * * @see getKernelWeights(), setKernelWeights(), getBiasWeights(), setBiasWeights() * */ + + //! //! //! //! - /** \brief Set the multi-dimension dilation of the deconvolution. + /** + * \brief Set the multi-dimension dilation of the deconvolution. * * Default: (1, 1, ..., 1) * @@ -502,12 +394,12 @@ public class IDeconvolutionLayer extends ILayer { //! //! //! - public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation); + public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation); /** * \brief Get the multi-dimension dilation of the deconvolution. 
* * @see setDilationNd() * */ - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java index c2497fbb5d3..736b5219365 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -24,29 +24,35 @@ * * \brief A Dequantize layer in a network definition. * - * This layer accepts a signed 8-bit integer input tensor, and uses the configured scale and zeroPt inputs to + * This layer accepts a quantized type input tensor, and uses the configured scale and zeroPt inputs to * dequantize the input according to: * \p output = (\p input - \p zeroPt) * \p scale * * The first input (index 0) is the tensor to be quantized. * The second (index 1) and third (index 2) are the scale and zero point respectively. - * Each of \p scale and \p zeroPt must be either a scalar, or a 1D tensor. + * \p scale and \p zeroPt should have identical dimensions, and rank lower or equal to 2. * - * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be - * DataType::kINT8. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is + * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be identical to + * the input's data type. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is * supported. 
- * The \p scale value must be either a scalar for per-tensor quantization, or a 1D tensor for per-channel - * quantization. All \p scale coefficients must have positive values. The size of the 1-D \p scale tensor must match - * the size of the quantization axis. The size of the \p scale must match the size of the \p zeroPt. + * The \p scale value must be either a scalar for per-tensor quantization, a 1-D tensor for per-channel quantization, + * or a 2-D tensor for block quantization (supported for DataType::kINT4 only). All \p scale coefficients must have + * positive values. The size of the 1-D \p scale tensor must match the size of the quantization axis. For block + * quantization, the shape of \p scale tensor must match the shape of the input, except for one dimension in which + * blocking occurs. The size of \p zeroPt must match the size of \p scale. * * The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply * to the \p zeroPt. - * The output type, if constrained, must be constrained to DataType::kFLOAT or DataType::kHALF. The input type, if - * constrained, must be constrained to DataType::kINT8. The output size is the same as the input size. The quantization - * axis is in reference to the input tensor's dimensions. + * The output type, if constrained, must be constrained to DataType::kFLOAT, DataType::kHALF, or DataType::kBF16. The + * input type, if constrained, must be constrained to DataType::kINT8, DataType::kFP8 or DataType::kINT4. The output + * size is the same as the input size. The quantization axis is in reference to the input tensor's dimensions. * - * IDequantizeLayer only supports DataType::kINT8 precision and will default to this precision during instantiation. - * IDequantizeLayer only supports DataType::kFLOAT or DataType::kHALF output. 
+ * IDequantizeLayer supports DataType::kINT8, DataType::kFP8 or DataType::kINT4 precision and will default to + * DataType::kINT8 precision during instantiation. For strongly typed networks, \p input data type must be same as + * \p zeroPt data type. + * + * IDequantizeLayer supports DataType::kFLOAT, DataType::kHALF, or DataType::kBF16 output. For strongly typed + * networks, \p output data type is inferred from \p scale data type. * * As an example of the operation of this layer, imagine a 4D NCHW activation input which can be quantized using a * single scale coefficient (referred to as per-tensor quantization): @@ -66,11 +72,21 @@ * For each s in S: * output[k,c,r,s] = (\p input[k,c,r,s] - \p zeroPt[k]) * \p scale[k] * + * Block dequantization is supported only for 2-D input tensors with DataType::kINT4 that are rooted at an + * IConstantLayer (i.e. weights). As an example of blocked operation, imagine a 2-D RS weights input with R + * (dimension 0) as the blocking axis and B as the block size. The scale is a 2-D array of coefficients, with + * dimensions (R//B, S). + * For each r in R: + * For each s in S: + * output[r,s] = (\p input[r,s] - \p zeroPt[r//B, s]) * \p scale[r//B, s] + * * \note Only symmetric quantization is supported. * \note Currently the only allowed build-time constant \p scale and \p zeroPt subgraphs are: * 1. Constant -> Quantize * 2. Constant -> Cast -> Quantize * + * \note The input tensor for this layer must not be a scalar. + * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) @@ -99,5 +115,40 @@ public class IDequantizeLayer extends ILayer { * The axis must be a valid axis if the scale tensor has more than one coefficient. * The axis value will be ignored if the scale tensor has exactly one coefficient (per-tensor quantization). * */ + + + //! + //! + //! + //! + //! 
public native @NoException(true) void setAxis(int axis); + + /** + * \brief Set the Dequantize layer output type. + * + * @param toType The DataType of the output tensor. + * + * Set the output type of the dequantize layer. Valid values are DataType::kFLOAT and DataType::kHALF. + * If the network is strongly typed, setToType must be used to set the output type, and use of setOutputType + * is an error. Otherwise, types passed to setOutputType and setToType must be the same. + * + * @see NetworkDefinitionCreationFlag::kSTRONGLY_TYPED + * */ + + + //! + //! + //! + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); + + /** + * \brief Return the Dequantize layer output type. + * + * @return toType parameter set during layer creation or by setToType(). + * The return value is the output type of the quantize layer. + * The default value is DataType::kFLOAT. + * */ + public native @NoException(true) DataType getToType(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java index 25c168a2ae6..e96b8a5d0b9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -22,14 +22,14 @@ /** * \class IDimensionExpr * - * An IDimensionExpr represents an integer expression constructed from constants, + * \brief An IDimensionExpr represents an integer expression constructed from constants, * input dimensions, and binary operations. 
These expressions are can be used - * in overrides of IPluginV2DynamicExt::getOutputDimensions to define output + * in overrides of IPluginV2DynamicExt::getOutputDimensions or IPluginV3OneBuild::getOutputShapes() to define output * dimensions in terms of input dimensions. * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. * - * @see DimensionOperation, IPluginV2DynamicExt::getOutputDimensions + * @see DimensionOperation, IPluginV2DynamicExt::getOutputDimensions, IPluginV3OneBuild::getOutputShapes() * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IDimensionExpr extends INoCopy { @@ -37,10 +37,27 @@ public class IDimensionExpr extends INoCopy { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IDimensionExpr(Pointer p) { super(p); } - /** Return true if expression is a build-time constant. */ + /** + * \brief Return true if expression is a build-time constant. + * */ + + + //! + //! + //! public native @Cast("bool") @NoException(true) boolean isConstant(); - /** If isConstant(), returns value of the constant. - * If !isConstant(), return std::numeric_limits::min(). */ - public native @NoException(true) int getConstantValue(); + /** + * \brief Get the value of the constant. + * + * If isConstant(), returns value of the constant. + * If !isConstant(), return std::numeric_limits::min(). + * */ + public native @Cast("int64_t") @NoException(true) long getConstantValue(); + /** + * \brief Return true if this denotes the value of a size tensor. 
+ * + * @return True if this was created with method IExprBuilder::declareSizeTensor, false otherwise + * */ + public native @Cast("bool") @NoException(true) boolean isSizeTensor(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEinsumLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEinsumLayer.java index 6adacb0f5e8..6379fb8a41c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEinsumLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEinsumLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IEinsumLayer +/** + * \class IEinsumLayer * * \brief An Einsum layer in a network * @@ -35,9 +36,9 @@ * means that those axes will be multiplied. Omitting a label from the output means values along those axes will be * summed. In implicit mode, the indices which appear once in the expression will be part of the output in increasing * alphabetical order. In explicit mode, the output can be controlled by specifying output subscript labels by adding - * an arrow (‘->’) followed by subscripts for the output. - * For example, “ij,jk->ik” is equivalent to “ij,jk”. - * Ellipsis (‘...’) can be used in place of subscripts to broadcast the dimensions. + * an arrow ('->') followed by subscripts for the output. + * For example, "ij,jk->ik" is equivalent to "ij,jk". + * Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions. * See the TensorRT Developer Guide for more details on equation syntax. * * Many common operations can be expressed using the Einsum equation. 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IElementWiseLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IElementWiseLayer.java index ba4ec20059b..76c4b51d97e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IElementWiseLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IElementWiseLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEngineInspector.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEngineInspector.java index 4a92e0a39a5..483bdaf331e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEngineInspector.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IEngineInspector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -149,6 +149,7 @@ public class IEngineInspector extends INoCopy { //! //! //! + //! public native @NoException(true) String getEngineInformation(LayerInformationFormat format); public native @NoException(true) @Cast("const char*") BytePointer getEngineInformation(@Cast("nvinfer1::LayerInformationFormat") int format); @@ -162,10 +163,10 @@ public class IEngineInspector extends INoCopy { * * If an error recorder is not set, messages will be sent to the global log stream. * - * @param recorder The error recorder to register with this interface. */ - // - /** @see getErrorRecorder() - /** */ + * @param recorder The error recorder to register with this interface. + * + * @see getErrorRecorder() + * */ //! 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IErrorRecorder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IErrorRecorder.java index 58d70f6dcbe..0c10cf05401 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IErrorRecorder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IErrorRecorder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,46 +18,33 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; // namespace impl - -/** - * \class IErrorRecorder - * - * \brief Reference counted application-implemented error reporting interface for TensorRT objects. - * - * The error reporting mechanism is a user defined object that interacts with the internal state of the object - * that it is assigned to in order to determine information about abnormalities in execution. The error recorder - * gets both an error enum that is more descriptive than pass/fail and also a string description that gives more - * detail on the exact failure modes. In the safety context, the error strings are all limited to 1024 characters - * in length. - * - * The ErrorRecorder gets passed along to any class that is created from another class that has an ErrorRecorder - * assigned to it. For example, assigning an ErrorRecorder to an IBuilder allows all INetwork's, ILayer's, and - * ITensor's to use the same error recorder. For functions that have their own ErrorRecorder accessor functions. - * This allows registering a different error recorder or de-registering of the error recorder for that specific - * object. - * - * The ErrorRecorder object implementation must be thread safe. All locking and synchronization is pushed to the - * interface implementation and TensorRT does not hold any synchronization primitives when calling the interface - * functions. 
- * - * The lifetime of the ErrorRecorder object must exceed the lifetime of all TensorRT objects that use it. - * */ -@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IErrorRecorder extends Pointer { +@Namespace("nvinfer1::v_1_0") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IErrorRecorder extends IVersionedInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IErrorRecorder(Pointer p) { super(p); } /** - * A typedef of a C-style string for reporting error descriptions. + * \brief Return version information associated with this interface. Applications must not override this method. * */ //! //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); /** - * The length limit for an error description, excluding the '\0' string terminator. + * \brief A typedef of a C-style string for reporting error descriptions. + * */ + + + //! + //! + + /** + * \brief The length limit for an error description in bytes, excluding the '\0' string terminator. + * Only applicable to safe runtime. + * General error recorder implementation can use any size appropriate for the use case. * */ @@ -67,7 +54,7 @@ public class IErrorRecorder extends Pointer { public static final long kMAX_DESC_LENGTH = kMAX_DESC_LENGTH(); /** - * A typedef of a 32bit integer for reference counting. + * \brief A typedef of a 32-bit integer for reference counting. * */ // Public API used to retrieve information from the error recorder. @@ -77,13 +64,18 @@ public class IErrorRecorder extends Pointer { * * Determines the number of errors that occurred between the current point in execution * and the last time that the clear() was executed. Due to the possibility of asynchronous - * errors occuring, a TensorRT API can return correct results, but still register errors - * with the Error Recorder. 
The value of getNbErrors must monotonically increases until clear() - * is called. + * errors occurring, a TensorRT API can return correct results, but still register errors + * with the Error Recorder. The value of getNbErrors() must increment by 1 after each reportError() + * call until clear() is called, or the maximum number of errors that can be stored is exceeded. * * @return Returns the number of errors detected, or 0 if there are no errors. + * If the upper bound of errors that can be stored is exceeded, the upper bound value must + * be returned. + * + * For example, if the error recorder can store up to 16 error descriptions but recordError() has + * been called 20 times, getNbErrors() must return 16. * - * @see clear + * @see clear(), hasOverflowed() * * \u005Cusage * - Allowed context for the API call @@ -109,9 +101,10 @@ public class IErrorRecorder extends Pointer { * The errorIdx specifies what error code from 0 to getNbErrors()-1 that the application * wants to analyze and return the error code enum. * - * @return Returns the enum corresponding to errorIdx. + * @return Returns the enum corresponding to errorIdx if errorIdx is in range (between 0 and getNbErrors()-1). + * ErrorCode::kUNSPECIFIED_ERROR must be returned if errorIdx is not in range. * - * @see getErrorDesc, ErrorCode + * @see getErrorDesc(), ErrorCode * * \u005Cusage * - Allowed context for the API call @@ -137,11 +130,13 @@ public class IErrorRecorder extends Pointer { * For the error specified by the idx value, return the string description of the error. The * error string is a null-terminated C-style string. In the safety context there is a * constant length requirement to remove any dynamic memory allocations and the error message - * may be truncated. The format of the string is " - ". + * will be truncated if it exceeds kMAX_DESC_LENGTH bytes. + * The format of the string is " - ". * - * @return Returns a string representation of the error along with a description of the error. 
+ * @return Returns a string representation of the error along with a description of the error if errorIdx is in + * range (between 0 and getNbErrors()-1). An empty string will be returned if errorIdx is not in range. * - * @see getErrorCode + * @see getErrorCode() * * \u005Cusage * - Allowed context for the API call @@ -183,11 +178,11 @@ public class IErrorRecorder extends Pointer { /** * \brief Clear the error stack on the error recorder. * - * Removes all the tracked errors by the error recorder. This function must guarantee that after + * Removes all the tracked errors by the error recorder. The implementation must guarantee that after * this function is called, and as long as no error occurs, the next call to getNbErrors will return - * zero. + * zero and hasOverflowed will return false. * - * @see getNbErrors + * @see getNbErrors(), hasOverflowed() * * \u005Cusage * - Allowed context for the API call @@ -202,6 +197,7 @@ public class IErrorRecorder extends Pointer { //! //! //! + //! public native @NoException(true) void clear(); // API used by TensorRT to report Error information to the application. @@ -209,8 +205,10 @@ public class IErrorRecorder extends Pointer { /** * \brief Report an error to the error recorder with the corresponding enum and description. * - * @param val The error code enum that is being reported. - * @param desc The string description of the error. + * @param val The error code enum that is being reported. + * @param desc The string description of the error, which will be a NULL-terminated string. + * For safety use cases its length is limited to kMAX_DESC_LENGTH bytes + * (excluding the NULL terminator) and descriptions that exceed this limit will be silently truncated. * * Report an error to the user that has a given value and human readable description. The function returns false * if processing can continue, which implies that the reported error is not fatal. 
This does not guarantee that @@ -221,6 +219,10 @@ public class IErrorRecorder extends Pointer { * * @return True if the error is determined to be fatal and processing of the current function must end. * + * \warning If the error recorder's maximum number of storable errors is exceeded, the error description will be + * silently dropped and the value returned by getNbErrors() will not be incremented. However, the return + * value will still signal whether the error must be considered fatal. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads @@ -241,9 +243,9 @@ public class IErrorRecorder extends Pointer { * * Increments the reference count for the object by one and returns the current value. This reference count allows * the application to know that an object inside of TensorRT has taken a reference to the ErrorRecorder. TensorRT - * guarantees that every call to IErrorRecorder::incRefCount will be paired with a call to - * IErrorRecorder::decRefCount when the reference is released. It is undefined behavior to destruct the - * ErrorRecorder when incRefCount has been called without a corresponding decRefCount. + * guarantees that every call to IErrorRecorder::incRefCount() will be paired with a call to + * IErrorRecorder::decRefCount() when the reference is released. It is undefined behavior to destruct the + * ErrorRecorder when incRefCount() has been called without a corresponding decRefCount(). * * @return The reference counted value after the increment completes. * @@ -259,16 +261,16 @@ public class IErrorRecorder extends Pointer { //! //! //! - public native @Cast("nvinfer1::IErrorRecorder::RefCount") @NoException(true) int incRefCount(); + public native @Cast("nvinfer1::v_1_0::IErrorRecorder::RefCount") @NoException(true) int incRefCount(); /** * \brief Decrements the refcount for the current ErrorRecorder. 
* * Decrements the reference count for the object by one and returns the current value. This reference count allows * the application to know that an object inside of TensorRT has taken a reference to the ErrorRecorder. TensorRT - * guarantees that every call to IErrorRecorder::decRefCount will be preceded by a call to - * IErrorRecorder::incRefCount. It is undefined behavior to destruct the ErrorRecorder when incRefCount has been - * called without a corresponding decRefCount. + * guarantees that every call to IErrorRecorder::decRefCount() will be preceded by a call to + * IErrorRecorder::incRefCount(). It is undefined behavior to destruct the ErrorRecorder when incRefCount() has been + * called without a corresponding decRefCount(). * * @return The reference counted value after the decrement completes. * @@ -277,5 +279,5 @@ public class IErrorRecorder extends Pointer { * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads * when multiple execution contexts are used during runtime. * */ - public native @Cast("nvinfer1::IErrorRecorder::RefCount") @NoException(true) int decRefCount(); + public native @Cast("nvinfer1::v_1_0::IErrorRecorder::RefCount") @NoException(true) int decRefCount(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExecutionContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExecutionContext.java index 5edcefb7262..b12b129aa16 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExecutionContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExecutionContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -48,87 +48,11 @@ public class IExecutionContext extends INoCopy { } - /** - * \brief Synchronously execute inference on a batch. 
- * - * This method requires an array of input and output buffers. The mapping from tensor names to indices - * can be queried using ICudaEngine::getBindingIndex() - * - * @param batchSize The batch size. This is at most the max batch size value supplied to the builder when the - * engine was built. If the network is created with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag, please use - * executeV2() instead, and this batchSize argument has no effect. - * @param bindings An array of pointers to input and output buffers for the network. - * - * @return True if execution succeeded. - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by executeV2() if the network is created with - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. - * - * \warning This function will trigger layer resource updates if hasImplicitBatchDimension() - * returns true and batchSize changes between subsequent calls, possibly resulting - * in performance bottlenecks. - * - * @see ICudaEngine::getBindingIndex() ICudaEngine::getMaxBatchSize() - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean execute(int batchSize, @Cast("void*const*") PointerPointer bindings); - public native @Cast("bool") @Deprecated @NoException(true) boolean execute(int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings); - - /** - * \brief Enqueue inference of a batch on a stream. - * - * This method requires an array of input and output buffers. The mapping from tensor names to indices can be - * queried using ICudaEngine::getBindingIndex() - * - * @param batchSize The batch size. This is at most the max batch size value supplied to the builder when the - * engine was built. If the network is created with NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag, please use - * enqueueV3() instead, and this batchSize argument has no effect. 
- * @param bindings An array of pointers to input and output buffers for the network. - * @param stream A cuda stream on which the inference kernels will be enqueued. - * @param inputConsumed An optional event which will be signaled when the input buffers can be refilled with new - * data. - * - * @return True if the kernels were enqueued successfully. - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by enqueueV2() if the network is created with - * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. - * - * @see ICudaEngine::getBindingIndex() ICudaEngine::getMaxBatchSize() - * - * \warning Calling enqueue() in from the same IExecutionContext object with different CUDA streams concurrently - * results in undefined behavior. To perform inference concurrently in multiple streams, use one execution - * context per stream. - * - * \warning This function will trigger layer resource updates if hasImplicitBatchDimension() - * returns true and batchSize changes between subsequent calls, possibly resulting in performance - * bottlenecks. - * */ - - - //! - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean enqueue( - int batchSize, @Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); - public native @Cast("bool") @Deprecated @NoException(true) boolean enqueue( - int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); - /** * \brief Set the debug sync flag. * * If this flag is set to true, the engine will log the successful execution for each kernel during executeV2(). It - * has no effect when using enqueueV2()/enqueueV3(). + * has no effect when using enqueueV3(). * * @see getDebugSync() * */ @@ -183,26 +107,11 @@ public class IExecutionContext extends INoCopy { //! - //! - //! - //! - public native @Const @ByRef @NoException(true) ICudaEngine getEngine(); - - /** - * \brief Destroy this object. 
- * - * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - //! //! //! //! - //! - public native @Deprecated @NoException(true) void destroy(); + public native @Const @ByRef @NoException(true) ICudaEngine getEngine(); /** * \brief Set the name of the execution context. @@ -238,13 +147,17 @@ public class IExecutionContext extends INoCopy { * \brief Set the device memory for use by this execution context. * * The memory must be aligned with cuda memory alignment property (using cudaGetDeviceProperties()), and its size - * must be at least that returned by getDeviceMemorySize(). Setting memory to nullptr is acceptable if - * getDeviceMemorySize() returns 0. If using enqueueV2()/enqueueV3() to run the network, the memory is in use from - * the invocation of enqueueV2()/enqueueV3() until network execution is complete. If using executeV2(), it is in - * use until executeV2() returns. Releasing or otherwise using the memory for other purposes during this time will - * result in undefined behavior. - * - * @see ICudaEngine::getDeviceMemorySize() ICudaEngine::createExecutionContextWithoutDeviceMemory() + * must be large enough for performing inference with the given network inputs. getDeviceMemorySize() and + * getDeviceMemorySizeForProfile() report upper bounds of the size. Setting memory to nullptr is acceptable if the + * reported size is 0. If using enqueueV3() to run the network, the memory is in use from the invocation of + * enqueueV3() until network execution is complete. If using executeV2(), it is in use until executeV2() returns. + * Releasing or otherwise using the memory for other purposes during this time will result in undefined behavior. 
+ * + * @see ICudaEngine::getDeviceMemorySize() + * @see ICudaEngine::getDeviceMemorySizeForProfile() + * @see ExecutionContextAllocationStrategy + * @see ICudaEngine::createExecutionContext() + * @see ICudaEngine::createExecutionContextWithoutDeviceMemory() * */ @@ -255,38 +168,8 @@ public class IExecutionContext extends INoCopy { //! //! //! - //! public native @NoException(true) void setDeviceMemory(Pointer memory); - /** - * \brief Return the strides of the buffer for the given binding. - * - * The strides are in units of elements, not components or bytes. - * For example, for TensorFormat::kHWC8, a stride of one spans 8 scalars. - * - * Note that strides can be different for different execution contexts - * with dynamic shapes. - * - * If the bindingIndex is invalid or there are dynamic dimensions that have not been - * set yet, returns Dims with Dims::nbDims = -1. - * - * @param bindingIndex The binding index. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorStrides(). - * - * @see getTensorStrides() - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides(int bindingIndex); - /** * \brief Return the strides of the buffer for the given tensor name. * @@ -303,53 +186,15 @@ public class IExecutionContext extends INoCopy { * * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. * */ - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(@Cast("const char*") BytePointer tensorName); - /** - * \brief Select an optimization profile for the current context. - * - * @param profileIndex Index of the profile. 
It must lie between 0 and - * getEngine().getNbOptimizationProfiles() - 1 - * - * The selected profile will be used in subsequent calls to executeV2()/enqueueV2()/enqueueV3(). - * - * When an optimization profile is switched via this API, TensorRT may - * enqueue GPU memory copy operations required to set up the new profile during the subsequent - * enqueueV2()/enqueueV3() operations. To avoid these calls during enqueueV2()/enqueueV3(), use - * setOptimizationProfileAsync() instead. - * - * If the associated CUDA engine does not have inputs with dynamic shapes, this method need not be - * called, in which case the default profile index of 0 will be used (this is particularly - * the case for all safe engines). - * - * setOptimizationProfile() must be called before calling setBindingDimensions() and - * setInputShapeBinding() for all dynamic input tensors or input shape tensors, which in - * turn must be called before executeV2()/enqueueV2()/enqueueV3(). - * - * \warning This function will trigger layer resource updates on the next - * call of enqueueV2()/enqueueV3()/executeV2(), possibly resulting in performance bottlenecks. - * - * @return true if the call succeeded, else false (e.g. input out of range) - * - * @deprecated Superseded by setOptimizationProfileAsync. Deprecated prior to TensorRT 8.0 and will be - * removed in 9.0. - * - * @see ICudaEngine::getNbOptimizationProfiles() IExecutionContext::setOptimizationProfileAsync() - * */ - - - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean setOptimizationProfile(int profileIndex); - + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(@Cast("const char*") BytePointer tensorName); /** * \brief Get the index of the currently selected optimization profile. 
* * If the profile index has not been set yet (implicitly to 0 if no other execution context has been set to * profile 0, or explicitly for all subsequent contexts), an invalid value of -1 will be returned - * and all calls to enqueueV2()/enqueueV3()/executeV2() will fail until a valid profile index has been set. - * This behavior is deprecated in TensorRT 8.6 and in TensorRT 9.0, all profiles will default to optimization + * and all calls to enqueueV3()/executeV2() will fail until a valid profile index has been set. + * This behavior is deprecated in TensorRT 8.6, all profiles will default to optimization * profile 0 and -1 will no longer be returned. * */ @@ -360,56 +205,8 @@ public class IExecutionContext extends INoCopy { //! //! //! - //! - //! - //! - //! public native @NoException(true) int getOptimizationProfile(); - /** - * \brief Set the dynamic dimensions of an input binding. - * - * @param bindingIndex index of an input tensor whose dimensions must be compatible with - * the network definition (i.e. only the wildcard dimension -1 can be replaced with a - * new dimension >= 0). - * - * @param dimensions specifies the dimensions of the input tensor. It must be in the valid - * range for the currently selected optimization profile, and the corresponding engine must - * not be safety-certified. - * - * This method requires the engine to be built without an implicit batch dimension. - * This method will fail unless a valid optimization profile is defined for the current - * execution context (getOptimizationProfile() must not be -1). - * - * For all dynamic non-output bindings (which have at least one wildcard dimension of -1), - * this method needs to be called before either enqueueV2() or executeV2() may be called. - * This can be checked using the method allInputDimensionsSpecified(). 
- * - * \warning This function will trigger layer resource updates on the next - * call of enqueueV2()/executeV2(), possibly resulting in performance bottlenecks, - * if the dimensions are different than the previous set dimensions. - * - * @return false if an error occurs (e.g. bindingIndex is out of range for the currently selected - * optimization profile or binding dimension is inconsistent with min-max range of the - * optimization profile), else true. Note that the network can still be invalid for certain - * combinations of input shapes that lead to invalid output shapes. To confirm the correctness - * of the network input shapes, check whether the output binding has valid - * dimensions using getBindingDimensions() on the output bindingIndex. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by setInputShape(). - * - * @see setInputShape() - * */ - - - //! - //! - //! - //! - //! - //! - public native @Cast("bool") @Deprecated @NoException(true) boolean setBindingDimensions(int bindingIndex, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - /** * \brief Set shape of given input. * @@ -435,50 +232,8 @@ public class IExecutionContext extends INoCopy { //! //! //! - public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); - public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); - - /** - * \brief Get the dynamic dimensions of a binding. - * - * If the engine was built with an implicit batch dimension, same as ICudaEngine::getBindingDimensions. - * - * If setBindingDimensions() has been called on this binding (or if there are no - * dynamic dimensions), all dimensions will be positive. Otherwise, it is necessary to - * call setBindingDimensions() before enqueueV2() or executeV2() may be called. 
- * - * If the bindingIndex is out of range, an invalid Dims with nbDims == -1 is returned. - * The same invalid Dims will be returned if the engine was not built with an implicit - * batch dimension and if the execution context is not currently associated with a valid - * optimization profile (i.e. if getOptimizationProfile() returns -1). - * - * If ICudaEngine::bindingIsInput(bindingIndex) is false, then both - * allInputDimensionsSpecified() and allInputShapesSpecified() must be true - * before calling this method. - * - * @return Currently selected binding dimensions - * - * For backwards compatibility with earlier versions of TensorRT, a bindingIndex that does not belong - * to the current profile is corrected as described for ICudaEngine::getProfileDimensions. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorShape(). - * - * @see ICudaEngine::getProfileDimensions() - * @see getTensorShape() - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex); + public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); /** * \brief Return the shape of the given input or output. @@ -519,99 +274,19 @@ public class IExecutionContext extends INoCopy { //! //! //! - //! - //! - //! - //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName); - - /** - * \brief Set values of input tensor required by shape calculations. 
- * - * @param bindingIndex index of an input tensor for which - * ICudaEngine::isShapeBinding(bindingIndex) and ICudaEngine::bindingIsInput(bindingIndex) - * are both true. - * - * @param data pointer to values of the input tensor. The number of values should be - * the product of the dimensions returned by getBindingDimensions(bindingIndex). - * - * If ICudaEngine::isShapeBinding(bindingIndex) and ICudaEngine::bindingIsInput(bindingIndex) - * are both true, this method must be called before enqueueV2() or executeV2() may be called. - * This method will fail unless a valid optimization profile is defined for the current - * execution context (getOptimizationProfile() must not be -1). - * - * \warning This function will trigger layer resource updates on the next call of - * enqueueV2()/executeV2(), possibly resulting in performance bottlenecks, if the - * shapes are different than the previous set shapes. - * - * @return false if an error occurs (e.g. bindingIndex is out of range for the currently selected - * optimization profile or shape data is inconsistent with min-max range of the - * optimization profile), else true. Note that the network can still be invalid for certain - * combinations of input shapes that lead to invalid output shapes. To confirm the correctness - * of the network input shapes, check whether the output binding has valid - * dimensions using getBindingDimensions() on the output bindingIndex. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by setInputTensorAddress() or setTensorAddress(). - * - * @see setInputTensorAddress() setTensorAddress() - * */ - - - //! - //! - //! - //! - //! - //! - //! 
- public native @Cast("bool") @Deprecated @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const IntPointer data); - public native @Cast("bool") @Deprecated @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const IntBuffer data); - public native @Cast("bool") @Deprecated @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const int[] data); - - /** - * \brief Get values of an input tensor required for shape calculations or an output tensor produced by shape - * calculations. - * - * @param bindingIndex index of an input or output tensor for which - * ICudaEngine::isShapeBinding(bindingIndex) is true. - * - * @param data pointer to where values will be written. The number of values written is - * the product of the dimensions returned by getBindingDimensions(bindingIndex). - * - * If ICudaEngine::bindingIsInput(bindingIndex) is false, then both - * allInputDimensionsSpecified() and allInputShapesSpecified() must be true - * before calling this method. The method will also fail if no valid optimization profile - * has been set for the current execution context, i.e. if getOptimizationProfile() returns -1. - * - * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorAddress() or getOutputTensorAddress(). - * - * @see isShapeBinding() getTensorAddress() getOutputTensorAddress() - * */ - - - //! - //! - //! - //! - //! - //! 
- public native @Cast("bool") @Deprecated @NoException(true) boolean getShapeBinding(int bindingIndex, IntPointer data); - public native @Cast("bool") @Deprecated @NoException(true) boolean getShapeBinding(int bindingIndex, IntBuffer data); - public native @Cast("bool") @Deprecated @NoException(true) boolean getShapeBinding(int bindingIndex, int[] data); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName); /** * \brief Whether all dynamic dimensions of input tensors have been specified * * @return True if all dynamic dimensions of input tensors have been specified - * by calling setBindingDimensions(). + * by calling setInputShape(). * * Trivially true if network has no dynamically shaped input tensors. * * Does not work with name-base interfaces eg. IExecutionContext::setInputShape(). Use * IExecutionContext::inferShapes() instead. - * - * @see setBindingDimensions(bindingIndex,dimensions) * */ @@ -633,7 +308,7 @@ public class IExecutionContext extends INoCopy { * Does not work with name-base interfaces eg. IExecutionContext::setInputShape(). Use * IExecutionContext::inferShapes() instead. * - * @see isShapeBinding(bindingIndex) + * @deprecated Deprecated in TensorRT 10.0. setInputShapeBinding() is removed since TensorRT 10.0. * */ @@ -642,7 +317,8 @@ public class IExecutionContext extends INoCopy { //! //! //! - public native @Cast("bool") @NoException(true) boolean allInputShapesSpecified(); + //! + public native @Cast("bool") @Deprecated @NoException(true) boolean allInputShapesSpecified(); /** * \brief Set the ErrorRecorder for this interface @@ -654,10 +330,10 @@ public class IExecutionContext extends INoCopy { * * If an error recorder is not set, messages will be sent to the global log stream. * - * @param recorder The error recorder to register with this interface. 
*/ - // - /** @see getErrorRecorder() - /** */ + * @param recorder The error recorder to register with this interface. + * + * @see getErrorRecorder() + * */ //! @@ -684,57 +360,20 @@ public class IExecutionContext extends INoCopy { //! //! //! + //! public native @NoException(true) IErrorRecorder getErrorRecorder(); /** - * \brief Synchronously execute inference a network. + * \brief Synchronously execute a network. * - * This method requires an array of input and output buffers. The mapping from tensor names to indices can be - * queried using ICudaEngine::getBindingIndex(). - * This method only works for execution contexts built with full dimension networks. - * @param bindings An array of pointers to input and output buffers for the network. + * This method requires an array of input and output buffers. The mapping + * from indices to tensor names can be queried using ICudaEngine::getIOTensorName(). * - * @return True if execution succeeded. - * - * @see ICudaEngine::getBindingIndex() ICudaEngine::getMaxBatchSize() - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") PointerPointer bindings); - public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") @ByPtrPtr Pointer bindings); - - /** - * \brief Enqueue inference on a stream. - * - * This method requires an array of input and output buffers. The mapping from tensor names to indices can be - * queried using ICudaEngine::getBindingIndex(). - * This method only works for execution contexts built with full dimension networks. * @param bindings An array of pointers to input and output buffers for the network. - * @param stream A cuda stream on which the inference kernels will be enqueued - * @param inputConsumed An optional event which will be signaled when the input buffers can be refilled with new - * data - * - * @return True if the kernels were enqueued successfully. 
- * - * @deprecated Superseded by enqueueV3(). Deprecated in TensorRT 8.5 * - * @see ICudaEngine::getBindingIndex() ICudaEngine::getMaxBatchSize() IExecutionContext::enqueueV3() - * - * \note Calling enqueueV2() with a stream in CUDA graph capture mode has a known issue. If dynamic shapes are - * used, the first enqueueV2() call after a setInputShapeBinding() call will cause failure in stream capture - * due to resource allocation. Please call enqueueV2() once before capturing the graph. + * @return True if execution succeeded. * - * \warning Calling enqueueV2() in from the same IExecutionContext object with different CUDA streams concurrently - * results in undefined behavior. To perform inference concurrently in multiple streams, use one execution - * context per stream. + * @see ICudaEngine::getIOTensorName() * */ @@ -749,8 +388,8 @@ public class IExecutionContext extends INoCopy { //! //! //! - public native @Cast("bool") @Deprecated @NoException(true) boolean enqueueV2(@Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); - public native @Cast("bool") @Deprecated @NoException(true) boolean enqueueV2(@Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); + public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") PointerPointer bindings); + public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") @ByPtrPtr Pointer bindings); /** * \brief Select an optimization profile for the current context with async @@ -767,24 +406,22 @@ public class IExecutionContext extends INoCopy { * application’s responsibility to guarantee that synchronization between * the profile sync stream and the enqueue stream occurs. * - * The selected profile will be used in subsequent calls to executeV2()/enqueueV2()/enqueueV3(). + * The selected profile will be used in subsequent calls to executeV2()/enqueueV3(). 
* If the associated CUDA engine has inputs with dynamic shapes, the optimization profile must - * be set with its corresponding profileIndex before calling execute or enqueue. If no execution - * context is assigned optimization profile 0 and a new context is created for an engine, - * setOptimizationProfile(0) is called implicitly. This functionality is deprecated in TensorRT 8.6 - * and will instead default all optimization profiles to 0 starting in TensorRT 9.0. + * be set with its corresponding profileIndex before calling execute or enqueue. The newly created execution + * context will be assigned optimization profile 0. * * If the associated CUDA engine does not have inputs with dynamic shapes, * this method need not be called, in which case the default profile index * of 0 will be used. * * setOptimizationProfileAsync() must be called before calling - * setBindingDimensions() and setInputShapeBinding() for all dynamic input + * setInputShape() for all dynamic input * tensors or input shape tensors, which in turn must be called before - * executeV2()/enqueueV2()/enqueueV3(). + * executeV2()/enqueueV3(). * * \warning This function will trigger layer resource updates on the next call of - * enqueueV2()/executeV2()/enqueueV3(), possibly resulting in performance bottlenecks. + * executeV2()/enqueueV3(), possibly resulting in performance bottlenecks. * * \warning Not synchronizing the stream used at enqueue with the stream * used to set optimization profile asynchronously using this API will @@ -792,13 +429,13 @@ public class IExecutionContext extends INoCopy { * * @return true if the call succeeded, else false (e.g. input out of range) * - * @see ICudaEngine::getNbOptimizationProfiles() - * @see IExecutionContext::setOptimizationProfile() */ + * @see ICudaEngine::getNbOptimizationProfiles() */ //! //! //! + //! 
public native @Cast("bool") @NoException(true) boolean setOptimizationProfileAsync(int profileIndex, CUstream_st stream); /** @@ -810,12 +447,14 @@ public class IExecutionContext extends INoCopy { * reportToProfiler() needs to be called to obtain the profiling data and report to the profiler attached. * * @see IExecutionContext::getEnqueueEmitsProfile() - * @see IExecutionContext::reportToProfiler() */ + * @see IExecutionContext::reportToProfiler() + * */ //! //! //! + //! public native @NoException(true) void setEnqueueEmitsProfile(@Cast("bool") boolean enqueueEmitsProfile); /** @@ -823,7 +462,8 @@ public class IExecutionContext extends INoCopy { * * @return The enqueueEmitsProfile state. * - * @see IExecutionContext::setEnqueueEmitsProfile() */ + * @see IExecutionContext::setEnqueueEmitsProfile() + * */ //! @@ -833,6 +473,7 @@ public class IExecutionContext extends INoCopy { //! //! //! + //! public native @Cast("bool") @NoException(true) boolean getEnqueueEmitsProfile(); /** @@ -858,7 +499,8 @@ public class IExecutionContext extends INoCopy { * @return true if the call succeeded, else false (e.g. profiler not provided, in CUDA graph capture mode, etc.) * * @see IExecutionContext::setEnqueueEmitsProfile() - * @see IExecutionContext::getEnqueueEmitsProfile() */ + * @see IExecutionContext::getEnqueueEmitsProfile() + * */ //! @@ -895,8 +537,7 @@ public class IExecutionContext extends INoCopy { * Before calling enqueueV3(), each input must have a non-null address and * each output must have a non-null address or an IOutputAllocator to set it later. * - * If the TensorLocation of the tensor is kHOST, the pointer must point to a host buffer of sufficient size. For - * shape tensors, the only supported data type is int32_t. + * If the TensorLocation of the tensor is kHOST, the pointer must point to a host buffer of sufficient size. 
* If the TensorLocation of the tensor is kDEVICE, the pointer must point to a device buffer of sufficient size and * alignment, or be nullptr if the tensor is an output tensor that will be allocated by IOutputAllocator. * @@ -912,7 +553,7 @@ public class IExecutionContext extends INoCopy { * * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. * - * @see setInputTensorAddress() getTensorShape() setOutputAllocator() IOutputAllocator + * @see setInputTensorAddress() setOutputTensorAddress() getTensorShape() setOutputAllocator() IOutputAllocator * */ @@ -950,6 +591,37 @@ public class IExecutionContext extends INoCopy { public native @Const @NoException(true) Pointer getTensorAddress(String tensorName); public native @Const @NoException(true) Pointer getTensorAddress(@Cast("const char*") BytePointer tensorName); + /** + * \brief Set the memory address for a given output tensor. + * + * @param tensorName The name of an output tensor. + * @param data The pointer to the buffer to which to write the output. + * + * @return True on success, false if the provided name does not map to an output tensor, does not meet alignment + * requirements, or some other error occurred. + * + * Output addresses can also be set using method setTensorAddress. This method is provided for applications which + * prefer to use different methods for setting input and output tensors. + * + * See setTensorAddress() for alignment and data type constraints. + * + * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator. + * + * @see setTensorAddress() + * */ + + + //! + //! + //! + //! + //! + //! + //! + //! 
+ public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(String tensorName, Pointer data); + public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(@Cast("const char*") BytePointer tensorName, Pointer data); + /** * \brief Set memory address for given input. * @@ -1043,12 +715,33 @@ public class IExecutionContext extends INoCopy { //! //! //! - //! public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") PointerPointer tensorNames); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr BytePointer tensorNames); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr ByteBuffer tensorNames); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr byte[] tensorNames); + /** + * \brief Recompute the internal activation buffer sizes based on the current input shapes, and return the total + * amount of memory required. + * + * Users can allocate the device memory based on the size returned and provided the memory to TRT with + * IExecutionContext::setDeviceMemory(). Must specify all input shapes and the optimization profile to use before + * calling this function, otherwise the partition will be invalidated. + * + * @return Total amount of memory required on success, 0 if error occurred. + * + * @see IExecutionContext::setDeviceMemory() + * */ + + + //! + //! + //! + //! + //! + //! + public native @Cast("size_t") @NoException(true) long updateDeviceMemorySizeForShapes(); + /** * \brief Mark input as consumed. * @@ -1182,6 +875,8 @@ public class IExecutionContext extends INoCopy { //! //! //! + //! + //! public native @NoException(true) IGpuAllocator getTemporaryStorageAllocator(); /** @@ -1196,14 +891,23 @@ public class IExecutionContext extends INoCopy { * behavior. 
* Input tensor can be released after the setInputConsumedEvent whereas output tensors require stream * synchronization. + * + * \warning Using default stream may lead to performance issues due to additional cudaDeviceSynchronize() calls by + * TensorRT to ensure correct synchronizations. Please use non-default stream instead. + * + * \warning If the Engine is streaming weights, enqueueV3 will become synchronous, and + * the graph will not be capturable. * */ + + //! //! //! //! public native @Cast("bool") @NoException(true) boolean enqueueV3(CUstream_st stream); - /** \brief Set the maximum size for persistent cache usage. + /** + * \brief Set the maximum size for persistent cache usage. * * This function sets the maximum persistent L2 cache that this execution context may use for activation caching. * Activation caching is not supported on all architectures - see "How TensorRT uses Memory" in the developer guide @@ -1241,7 +945,7 @@ public class IExecutionContext extends INoCopy { /** * \brief Set the verbosity of the NVTX markers in the execution context. * - * Building with kDETAILED verbosity will generally increase latency in enqueueV2/enqueueV3(). Call this method + * Building with kDETAILED verbosity will generally increase latency in enqueueV3(). Call this method * to select NVTX verbosity in this execution context at runtime. * * The default is the verbosity with which the engine was built, and the verbosity may not be raised above that @@ -1310,5 +1014,86 @@ public class IExecutionContext extends INoCopy { * * @see enqueueV3(), IBuilderConfig::setMaxAuxStreams(), ICudaEngine::getNbAuxStreams() * */ + + + //! + //! + //! + //! public native @NoException(true) void setAuxStreams(@ByPtrPtr CUstream_st auxStreams, int nbStreams); + + /** + * \brief Set DebugListener for this execution context. + * + * @param listener DebugListener for this execution context. + * + * @return true if succeed, false if failure. + * */ + + + //! + //! + //! 
+ public native @Cast("bool") @NoException(true) boolean setDebugListener(IDebugListener listener); + + /** + * \brief Get the DebugListener of this execution context. + * + * @return DebugListener of this execution context. + * */ + + + //! + //! + //! + //! + //! + //! + public native @NoException(true) IDebugListener getDebugListener(); + + /** + * \brief Set debug state of tensor given the tensor name. + * + * Turn the debug state of a tensor on or off. + * A tensor with the parameter tensor name must exist in the network, and the tensor must have + * been marked as a debug tensor during build time. Otherwise, an error is thrown. + * + * @param name Name of target tensor. + * + * @param flag True if turning on debug state, false if turning off debug state of tensor + * The default is off. + * + * @return True if successful, false otherwise. + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean setTensorDebugState(String name, @Cast("bool") boolean flag); + public native @Cast("bool") @NoException(true) boolean setTensorDebugState(@Cast("const char*") BytePointer name, @Cast("bool") boolean flag); + + /** + * Turn the debug state of all debug tensors on or off. + * + * @param flag true if turning on debug state, false if turning off debug state. + * + * @return true if successful, false otherwise. + * + * The default is off. */ + + + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean setAllTensorsDebugState(@Cast("bool") boolean flag); + + /** + * Get the debug state. + * + * @return true if there is a debug tensor with the given name and it has debug state turned on. 
+ * */ + public native @Cast("bool") @NoException(true) boolean getDebugState(String name); + public native @Cast("bool") @NoException(true) boolean getDebugState(@Cast("const char*") BytePointer name); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExprBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExprBuilder.java index 65f7bd5bca7..b2f4ec92001 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExprBuilder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IExprBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -22,15 +22,15 @@ /** * \class IExprBuilder * - * Object for constructing IDimensionExpr. + * \brief Object for constructing IDimensionExpr. * * There is no public way to construct an IExprBuilder. It appears as an argument to - * method IPluginV2DynamicExt::getOutputDimensions(). Overrides of that method can use - * that IExprBuilder argument to construct expressions that define output dimensions - * in terms of input dimensions. + * method IPluginV2DynamicExt::getOutputDimensions() and IPluginV3OneBuild::getOutputShapes(). Overrides of that + * method can use that IExprBuilder argument to construct expressions that define output dimensions in terms of input + * dimensions. * * Clients should assume that any values constructed by the IExprBuilder are destroyed - * after IPluginV2DynamicExt::getOutputDimensions() returns. + * after IPluginV2DynamicExt::getOutputDimensions() or IPluginV3OneBuild::getOutputShapes() returns. * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. * @@ -42,13 +42,49 @@ public class IExprBuilder extends INoCopy { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public IExprBuilder(Pointer p) { super(p); } - /** Return pointer to IDimensionExp for given value. */ - public native @Const @NoException(true) IDimensionExpr constant(int value); + /** + * \brief Return pointer to IDimensionExp for given value. + * */ + + + //! + //! + //! + public native @Const @NoException(true) IDimensionExpr constant(@Cast("int64_t") long value); - /** Return pointer to IDimensionExp that represents the given operation applied to first and second. - * Returns nullptr if op is not a valid DimensionOperation. */ + /** + * \brief Get the operation. + * + * Return pointer to IDimensionExp that represents the given operation applied to first and second. + * Returns nullptr if op is not a valid DimensionOperation. + * */ public native @Const @NoException(true) IDimensionExpr operation( DimensionOperation op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second); public native @Const @NoException(true) IDimensionExpr operation( @Cast("nvinfer1::DimensionOperation") int op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second); + /** + * \brief Declare a size tensor at the given output index, with the specified auto-tuning formula and upper bound. + * + * A size tensor allows a plugin to have output dimensions that cannot be computed solely from input dimensions. + * For example, suppose a plugin implements the equivalent of INonZeroLayer for 2D input. The plugin can + * have one output for the indices of non-zero elements, and a second output containing the number of non-zero + * elements. Suppose the input has size [M,N] and has K non-zero elements. The plugin can write K to the second + * output. When telling TensorRT that the first output has shape [2,K], plugin uses IExprBuilder::constant() and + * IExprBuilder::declareSizeTensor(1,...) to create the IDimensionExpr that respectively denote 2 and K. 
+ *
+ * TensorRT also needs to know the value of K to use for auto-tuning and an upper bound on K so that it can
+ * allocate memory for the output tensor. In the example, suppose typically half of the plugin's input elements
+ * are non-zero, and all the elements might be nonzero. Then using M*N/2 might be a good expression for the opt
+ * parameter, and M*N for the upper bound. IDimensionExpr for these expressions can be constructed from
+ * IDimensionExpr for the input dimensions.
+ *
+ * @param outputIndex index of a plugin output that is a size tensor.
+ * @param opt formula for computing auto-tuning value. Must not depend on a size tensor.
+ * @param upper Upper bound on the size tensor.
+ *
+ * @return IDimensionExpr denoting the value of the size tensor.
+ *
+ * @see IPluginV3OneBuild::getOutputShapes()
+ * */
+ public native @Const IDimensionExpr declareSizeTensor(int outputIndex, @Const @ByRef IDimensionExpr opt, @Const @ByRef IDimensionExpr upper);
 }
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFillLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFillLayer.java
index 158bfa8a80d..3a23d6a5e1f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFillLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFillLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
 
 package org.bytedeco.tensorrt.nvinfer;
 
@@ -20,30 +20,40 @@
 
 /**
- * \brief Generate an output tensor with specified mode.
+ * \class IFillLayer
 *
- * The fill layer has two variants, static and dynamic. Static fill specifies its parameters
- * at layer creation time via Dims and the get/set accessor functions of the IFillLayer.
- * Dynamic fill specifies one or more of its parameters as ITensors, by using ILayer::setInput to add
- * a corresponding input. The corresponding static parameter is used if an input is missing or null.
+ * \brief Generate a tensor according to a specified mode. * - * The shape of the output is specified by the parameter \p Dimension, or if non-null and present, - * the first input, which must be a 1D Int32 shape tensor. Thus an application can determine if the - * IFillLayer has a dynamic output shape based on whether it has a non-null first input. + * The fill layer generates a tensor with values that are drawn from a random distribution + * or an affine function of their indices, as specified by the FillMode. * - * Alpha and Beta are treated differently based on the Fill Operation specified. See details in - * IFillLayer::setAlpha(), IFillLayer::setBeta(), and IFillLayer::setInput(). + * When an IFillLayer is initially added to a network, all of its parameters are static. + * Each parameter may be changed to dynamic by setting a corresponding input. + * A parameter is considered dynamic even if that input is the output of an IConstantLayer. + * The inputs for each parameter are: * - * A fill layer can produce a shape tensor if the following restrictions are met: + * - 0: Dimensions + * - 1: Alpha + * - 2: Beta + * + * The parameter Dimensions describes the shape of the output. If the Dimensions input is provided, + * it must be a 1D tensor of type Int32 or Int64 whose length is computable by constant folding. + * + * The meanings of Alpha and Beta depend on the mode, as described in IFillLayer::setAlpha(), + * IFillLayer::setBeta(), and IFillLayer::setInput(). Parameters Alpha and Beta must both be static + * or both be dynamic. + * + * An IFillLayer can produce a shape tensor if the following restrictions are met: * * * The FillOperation is kLINSPACE. - * * The output is an Int32 or Float tensor within the volume limit of a shape tensor. - * * There is at most one input, and if so, that input is input 0. - * * If input 0 exists, the length of the output tensor must be computable by constant folding. + * * The output has type Int32, Int64, or Float. 
+ * * The volume of the output is within the volume limit imposed on shape tensors. + * * If input 0 exists, the values of input 0 must be computable by constant folding. * * @see FillOperation * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. */ + * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. + * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IFillLayer extends ILayer { static { Loader.load(); } @@ -66,7 +76,7 @@ public class IFillLayer extends ILayer { //! //! //! - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Get the output tensor's dimensions. @@ -83,7 +93,7 @@ public class IFillLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); /** * \brief Set the fill operation for the layer. @@ -122,9 +132,9 @@ public class IFillLayer extends ILayer { * kRANDOM_UNIFORM | the minimum value, defaults to 0.0; * kRANDOM_NORMAL | the mean of the normal distribution, default is 0.0; * - * If a second input had been used to create this layer, that input is reset to null by this method. + * If input 1 exists, it is reset to null by this method. * - * @see getAlpha */ + * @see getAlpha, setAlphaInt64 */ // @@ -165,7 +175,7 @@ public class IFillLayer extends ILayer { * kRANDOM_UNIFORM | the maximal value, defaults to 1.0; * kRANDOM_NORMAL | the standard deviation of the normal distribution, default is 1.0; * - * If a third input had been used to create this layer, that input is reset to null by this method. 
+ * If input 2 exists, it is reset to null by this method. * * @see getBeta * */ @@ -186,7 +196,7 @@ public class IFillLayer extends ILayer { * If the third input is present and non-null, * this function returns -1.0. * - * @see setBeta + * @see setBeta, setBetaInt64 * */ @@ -202,38 +212,179 @@ public class IFillLayer extends ILayer { //! //! //! + //! + //! + //! public native @NoException(true) double getBeta(); /** - * \brief replace an input of this layer with a specific tensor. + * \brief Replace an input of this layer with a specific tensor. * * @param index the index of the input to set. * @param tensor the new input tensor * - * Indices for kLINSPACE are described as: + * The three inputs correspond to these setters of IFillLayer: * - * - 0: Shape tensor, represents the output tensor's dimensions. - * - 1: Start, a scalar, represents the start value. - * - 2: Delta, a 1D tensor, length equals to shape tensor's nbDims, represents the delta value for each dimension. + * - 0: setDimensions + * - 1: setAlpha + * - 2: setBeta * - * Indices for kRANDOM_UNIFORM are described as: + * The following descriptions give more intuitive names for the inputs. * - * - 0: Shape tensor, represents the output tensor's dimensions. - * - 1: Minimum, a scalar, represents the minimum random value. - * - 2: Maximum, a scalar, represents the maximal random value. + * Indices for kLINSPACE are: * - * Indices for kRANDOM_NORMAL are described as: + * - 0: Shape, a 1D shape tensor, specifies the output tensor's dimensions. + * - 1: Start, a scalar, specifies the start value. + * - 2: Delta, a 1D tensor, specifies the delta value for each dimension. * - * - 0: Shape tensor, represents the output tensor's dimensions. - * - 1: Mean, a scalar, represents the mean of the normal distribution,. - * - 2: Scale, a scalar, represents the standard deviation of the normal distribution. 
+ * Indices for kRANDOM_UNIFORM are: + * + * - 0: Shape, a 1D shape tensor, specifies the output tensor's dimensions. + * - 1: Minimum, a scalar, specifies the minimum random value. + * - 2: Maximum, a scalar, specifies the maximal random value. + * + * Indices for kRANDOM_NORMAL are: + * + * - 0: Shape, a 1D shape tensor, specifies the output tensor's dimensions. + * - 1: Mean, a scalar, specifies the mean of the normal distribution,. + * - 2: Scale, a scalar, specifies the standard deviation of the normal distribution. * * Using the corresponding setter resets the input to null. * - * If either inputs 1 or 2, is non-null, then both must be non-null and have the same data type. + * If either inputs 1 or 2 is non-null, then both must be non-null and have the same data type. * * If this function is called for an index greater or equal to getNbInputs(), * then afterwards getNbInputs() returns index + 1, and any missing intervening * inputs are set to null. * */ + + + //! + //! + //! + //! + //! + + /** + * \brief Set the alpha parameter with int64 datatype. + * + * @param alpha has different meanings for each operator: + * + * Operation | Usage + * kLINSPACE | the start value, defaults to 0; + * kRANDOM_UNIFORM | the minimum value, defaults to 0; + * kRANDOM_NORMAL | the mean of the normal distribution, default is 0; + * + * If a third input had been used to create this layer, that input is reset to null by this method. + * + * @see getAlphaInt64 */ + // + + + //! + //! + //! + //! + //! + public native @NoException(true) void setAlphaInt64(@Cast("int64_t") long alpha); + + /** + * \brief Get the value of alpha parameter with int64 datatype. + * + * @return A int64 value of alpha. + * + * If the second input is present and non-null, + * this function returns -1. + * + * @see setAlphaInt64 + * */ + + + //! + //! + //! + //! + //! + //! 
+ public native @Cast("int64_t") @NoException(true) long getAlphaInt64(); + + /** + * \brief Set the beta parameter with int64 datatype. + * + * @param beta has different meanings for each operator: + * + * Operation | Usage + * kLINSPACE | the delta value, defaults to 1; + * kRANDOM_UNIFORM | the maximal value, defaults to 1; + * kRANDOM_NORMAL | the standard deviation of the normal distribution, default is 1; + * + * If a third input had been used to create this layer, that input is reset to null by this method. + * + * @see getBetaInt64 + * */ + + + //! + //! + //! + //! + //! + public native @NoException(true) void setBetaInt64(@Cast("int64_t") long beta); + + /** + * \brief Get the value of beta parameter with int64 datatype. + * + * @return A int64 value of beta. + * + * If the third input is present and non-null, + * this function returns -1.0. + * + * @see setBetaInt64 + * */ + + + //! + //! + public native @Cast("int64_t") @NoException(true) long getBetaInt64(); + + /** + * \brief Return true if alpha/beta have type int64, false if they have type double. + * */ + + + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean isAlphaBetaInt64(); + + /** + * \brief Set the fill layer output type. + * + * @param toType The DataType of the output tensor. + * + * Set the output type of the fill layer. Valid values are DataType::kFLOAT, DataType::kINT32, + * and DataType::kINT64. + * If the network is strongly typed, setToType must be used to set the output type, and use of setOutputType + * is an error. Otherwise, types passed to setOutputType and setToType must be the same. + * + * @see NetworkDefinitionCreationFlag::kSTRONGLY_TYPED + * */ + + + //! + //! + //! + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); + + /** + * \brief Get the fill layer output type. 
+ * + * @return toType parameter set during layer creation or by setToType(). + * The return value is the output type of the fill layer. + * The default value is DataType::kFLOAT. + * */ + public native @NoException(true) DataType getToType(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFullyConnectedLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFullyConnectedLayer.java deleted file mode 100644 index 0239dc61fb9..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IFullyConnectedLayer.java +++ /dev/null @@ -1,161 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvinfer; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; - -import static org.bytedeco.tensorrt.global.nvinfer.*; - - -/** \class IFullyConnectedLayer - * - * \brief A fully connected layer in a network definition. - * This layer expects an input tensor of three or more non-batch dimensions. The input is automatically - * reshaped into an {@code MxV} tensor {@code X}, where {@code V} is a product of the last three dimensions and {@code M} - * is a product of the remaining dimensions (where the product over 0 dimensions is defined as 1). For example: - * - * - If the input tensor has shape {@code {C, H, W}}, then the tensor is reshaped into {@code {1, C*H*W}}. - * - If the input tensor has shape {@code {P, C, H, W}}, then the tensor is reshaped into {@code {P, C*H*W}}. 
- * - * The layer then performs the following operation: - * - * ~~~ - * Y := matmul(X, W^T) + bias - * ~~~ - * - * Where {@code X} is the {@code MxV} tensor defined above, {@code W} is the {@code KxV} weight tensor - * of the layer, and {@code bias} is a row vector size {@code K} that is broadcasted to - * {@code MxK}. {@code K} is the number of output channels, and configurable via - * setNbOutputChannels(). If {@code bias} is not specified, it is implicitly {@code 0}. - * - * The {@code MxK} result {@code Y} is then reshaped such that the last three dimensions are {@code {K, 1, 1}} and - * the remaining dimensions match the dimensions of the input tensor. For example: - * - * - If the input tensor has shape {@code {C, H, W}}, then the output tensor will have shape {@code {K, 1, 1}}. - * - If the input tensor has shape {@code {P, C, H, W}}, then the output tensor will have shape {@code {P, K, 1, 1}}. - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by IMatrixMultiplyLayer. - * */ -@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IFullyConnectedLayer extends ILayer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IFullyConnectedLayer(Pointer p) { super(p); } - - /** - * \brief Set the number of output channels {@code K} from the fully connected layer. - * - * If executing this layer on DLA, number of output channels must in the range [1,8192]. - * - * @see getNbOutputChannels() - * */ - - - //! - //! - //! - public native @NoException(true) void setNbOutputChannels(int nbOutputs); - - /** - * \brief Get the number of output channels {@code K} from the fully connected layer. - * - * @see setNbOutputChannels() - * */ - - - //! - //! - //! 
- public native @NoException(true) int getNbOutputChannels(); - - /** - * \brief Set the kernel weights, given as a {@code KxC} matrix in row-major order. - * - * @see getKernelWeights() - * */ - - - //! - //! - //! - public native @NoException(true) void setKernelWeights(@ByVal Weights weights); - - /** - * \brief Get the kernel weights. - * - * @see setKernelWeights() - * */ - - - //! - //! - //! - //! - public native @ByVal @NoException(true) Weights getKernelWeights(); - - /** - * \brief Set the bias weights. - * - * Bias is optional. To omit bias, set the count value in the weights structure to zero. - * - * @see getBiasWeightsWeights() - * */ - - - //! - //! - //! - public native @NoException(true) void setBiasWeights(@ByVal Weights weights); - - /** - * \brief Get the bias weights. - * - * @see setBiasWeightsWeights() - * */ - - - //! - //! - //! - //! - //! - //! - //! - public native @ByVal @NoException(true) Weights getBiasWeights(); - - /** - * \brief Append or replace an input of this layer with a specific tensor - * - * @param index the index of the input to modify. - * @param tensor the new input tensor - * - * Only index 0 (data input) is valid, unless explicit-quantization mode is enabled. - * In explicit-quantization mode, input with index 1 is the kernel-weights tensor, if present. - * The kernel-weights tensor must be a build-time constant (computable at build-time via constant-folding) - * and an output of a dequantize layer. - * If input index 1 is used then the kernel-weights parameter must be set to empty Weights. - * - * @see getKernelWeights(), setKernelWeights() - * - * The indices are as follows: - * - * - 0: The input activation tensor. - * - 1: The kernel weights tensor (a constant tensor). - * - * If this function is called with the value 1, then the function getNbInputs() changes - * from returning 1 to 2. 
*/ -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGatherLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGatherLayer.java index 0fe4a19cdea..7f67dfa1f9f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGatherLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGatherLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -60,7 +60,7 @@ * GatherMode::kELEMENT: * The output dimensions match the dimensions of the indices tensor. * - * The types of Data and Output must be the same, and Indices shall be DataType::kINT32. + * The types of Data and Output must be the same, and Indices shall be DataType::kINT32 or DataType::kINT64. * * How the elements of Data are gathered depends on the mode: * @@ -90,7 +90,6 @@ * Notes: * * For modes GatherMode::kND and GatherMode::kELEMENT, the first nbElementWiseDims dimensions of data and index must * be equal. If not, an error will be reported at build time or run time. - * * Only mode GatherMode::kDEFAULT supports an implicit batch dimensions or broadcast on the elementwise dimensions. * * If an axis of Data has dynamic length, using a negative index for it has undefined behavior. * * No DLA support * * Zero will be stored for OOB access @@ -117,15 +116,21 @@ public class IGatherLayer extends ILayer { //! //! //! + //! public native @NoException(true) void setGatherAxis(int axis); /** * \brief Get the axis to gather on. + * * \warning Undefined behavior when used with GatherMode::kND. * * @see setGatherAxis() * */ + + //! + //! + //! //! //! //! @@ -133,17 +138,19 @@ public class IGatherLayer extends ILayer { //! public native @NoException(true) int getGatherAxis(); - /** \brief Set the number of leading dimensions of indices tensor to be handled elementwise. 
+ /** + * \brief Set the number of leading dimensions of indices tensor to be handled elementwise. + * * The gathering of indexing starts from the dimension of data[NbElementWiseDims:]. * The NbElementWiseDims must be less than the Rank of the data input. + * * @param elementWiseDims number of dims to be handled as elementwise. * * Default: 0 * * The value of nbElementWiseDims and GatherMode are checked during network validation: * - * GatherMode::kDEFAULT: nbElementWiseDims must be 0 if there is an implicit batch dimension. It can be 0 or 1 if - * there is not an implicit batch dimension. + * GatherMode::kDEFAULT: nbElementWiseDims can be 0 or 1. * GatherMode::kND: nbElementWiseDims can be between 0 and one less than rank(data). * GatherMode::kELEMENT: nbElementWiseDims must be 0 * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAllocator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAllocator.java index 87c59b529de..d3ff014add6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAllocator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAllocator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,41 +19,73 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** - * \class IGpuAllocator - * - * \brief Application-implemented class for controlling allocation on the GPU. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IGpuAllocator extends Pointer { +/** DO NOT REFER TO namespace v_1_0 IN CODE. ALWAYS USE nvinfer1 INSTEAD. + * The name v_1_0 may change in future versions of TensoRT. 
*/ + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IGpuAllocator extends IVersionedInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IGpuAllocator(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public IGpuAllocator(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public IGpuAllocator position(long position) { - return (IGpuAllocator)super.position(position); - } - @Override public IGpuAllocator getPointer(long i) { - return new IGpuAllocator((Pointer)this).offsetAddress(i); - } /** - * A thread-safe callback implemented by the application to handle acquisition of GPU memory. + * \brief A thread-safe callback implemented by the application to handle acquisition of GPU memory. * - * @param size The size of the memory required. + * @param size The size of the memory block required (in bytes). * @param alignment The required alignment of memory. Alignment will be zero * or a power of 2 not exceeding the alignment guaranteed by cudaMalloc. * Thus this allocator can be safely implemented with cudaMalloc/cudaFree. * An alignment value of zero indicates any alignment is acceptable. * @param flags Reserved for future use. In the current release, 0 will be passed. * - * If an allocation request of size 0 is made, nullptr should be returned. + * @return If the allocation was successful, the start address of a device memory block of the requested size. + * If an allocation request of size 0 is made, nullptr must be returned. + * If an allocation request cannot be satisfied, nullptr must be returned. + * If a non-null address is returned, it is guaranteed to have the specified alignment. + * + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate + * requests. 
+ * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by allocateAsync + * */ + public native @Deprecated @Name("allocate") @NoException(true) Pointer _allocate( + @Cast("const uint64_t") long size, @Cast("const uint64_t") long alignment, @Cast("const nvinfer1::AllocatorFlags") int flags); + + /** + * \brief A thread-safe callback implemented by the application to resize an existing allocation. + * + * Only allocations which were allocated with AllocatorFlag::kRESIZABLE will be resized. + * + * Options are one of: + * * resize in place leaving min(oldSize, newSize) bytes unchanged and return the original address + * * move min(oldSize, newSize) bytes to a new location of sufficient size and return its address + * * return nullptr, to indicate that the request could not be fulfilled. + * + * If nullptr is returned, TensorRT will assume that resize() is not implemented, and that the + * allocation at baseAddr is still valid. + * + * This method is made available for use cases where delegating the resize + * strategy to the application provides an opportunity to improve memory management. + * One possible implementation is to allocate a large virtual device buffer and + * progressively commit physical memory with cuMemMap. CU_MEM_ALLOC_GRANULARITY_RECOMMENDED + * is suggested in this case. + * + * TensorRT may call realloc to increase the buffer by relatively small amounts. + * + * @param baseAddr the address of the original allocation, which will have been returned by previously calling + * allocate() or reallocate() on the same object. + * @param alignment The alignment used by the original allocation. This will be the same value that was previously + * passed to the allocate() or reallocate() call that returned baseAddr. + * @param newSize The new memory size required (in bytes). 
* - * If an allocation request cannot be satisfied, nullptr should be returned. + * @return The address of the reallocated memory, or nullptr. If a non-null address is returned, it is + * guaranteed to have the specified alignment. * - * \note The implementation must guarantee thread safety for concurrent allocate/free/reallocate/deallocate + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate * requests. * * \u005Cusage @@ -69,79 +101,59 @@ public class IGpuAllocator extends Pointer { //! //! //! - //! - @Virtual(true) public native @Name("allocate") @NoException(true) Pointer _allocate(@Cast("const uint64_t") long size, @Cast("const uint64_t") long alignment, @Cast("const nvinfer1::AllocatorFlags") int flags); + public native @NoException(true) Pointer reallocate(Pointer arg0, @Cast("uint64_t") long arg1, @Cast("uint64_t") long arg2); /** - * A thread-safe callback implemented by the application to handle release of GPU memory. + * \brief A thread-safe callback implemented by the application to handle release of GPU memory. * * TensorRT may pass a nullptr to this function if it was previously returned by allocate(). * - * @param memory The acquired memory. - * - * \note The implementation must guarantee thread safety for concurrent allocate/free/reallocate/deallocate - * requests. + * @param memory A memory address that was previously returned by an allocate() or reallocate() call of the same + * allocator object. * - * @see deallocate() + * @return True if the acquired memory is released successfully. * - * @deprecated Deprecated in TensorRT 8.0. Superseded by deallocate. + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate + * requests. * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. + * @deprecated Deprecated in TensorRT 10.0. 
Superseded by deallocateAsync * */ //! //! - @Virtual(true) public native @Deprecated @Name("free") @NoException(true) void _free(Pointer memory); - - /** - * Destructor declared virtual as general good practice for a class with virtual methods. - * TensorRT never calls the destructor for an IGpuAllocator defined by the application. - * */ - - //! //! //! //! //! - //! - //! - //! - //! - //! - public IGpuAllocator() { super((Pointer)null); allocate(); } - private native void allocate(); + public native @Cast("bool") @Deprecated @Name("deallocate") @NoException(true) boolean _deallocate(Pointer memory); /** - * A thread-safe callback implemented by the application to resize an existing allocation. - * - * Only allocations which were allocated with AllocatorFlag::kRESIZABLE will be resized. - * - * Options are one of: - * * resize in place leaving min(oldSize, newSize) bytes unchanged and return the original address - * * move min(oldSize, newSize) bytes to a new location of sufficient size and return its address - * * return nullptr, to indicate that the request could not be fulfilled. - * - * If nullptr is returned, TensorRT will assume that resize() is not implemented, and that the - * allocation at baseAddr is still valid. + * \brief A thread-safe callback implemented by the application to handle stream-ordered acquisition of GPU memory. * - * This method is made available for use cases where delegating the resize - * strategy to the application provides an opportunity to improve memory management. - * One possible implementation is to allocate a large virtual device buffer and - * progressively commit physical memory with cuMemMap. CU_MEM_ALLOC_GRANULARITY_RECOMMENDED - * is suggested in this case. + * The default behavior is to call method allocate(), which is synchronous and thus loses + * any performance benefits of asynchronous allocation. If you want the benefits of asynchronous + * allocation, see discussion of IGpuAsyncAllocator vs. 
IGpuAllocator in the documentation + * for nvinfer1::IGpuAllocator. * - * TensorRT may call realloc to increase the buffer by relatively small amounts. + * @param size The size of the memory block required (in bytes). + * @param alignment The required alignment of memory. Alignment will be zero + * or a power of 2 not exceeding the alignment guaranteed by cudaMalloc. + * Thus this allocator can be safely implemented with cudaMalloc/cudaFree. + * An alignment value of zero indicates any alignment is acceptable. + * @param flags Reserved for future use. In the current release, 0 will be passed. + * @param stream specifies the cudaStream for asynchronous usage. * - * @param baseAddr the address of the original allocation. - * @param alignment The alignment used by the original allocation. - * @param newSize The new memory size required. - * @return the address of the reallocated memory + * @return If the allocation was successful, the start address of a device memory block of the requested size. + * If an allocation request of size 0 is made, nullptr must be returned. + * If an allocation request cannot be satisfied, nullptr must be returned. + * If a non-null address is returned, it is guaranteed to have the specified alignment. * - * \note The implementation must guarantee thread safety for concurrent allocate/free/reallocate/deallocate + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate * requests. * * \u005Cusage @@ -149,7 +161,6 @@ public class IGpuAllocator extends Pointer { * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. * */ - //! //! //! @@ -158,27 +169,45 @@ public class IGpuAllocator extends Pointer { //! //! //! - @Virtual public native @NoException(true) Pointer reallocate(Pointer arg0, @Cast("uint64_t") long arg1, @Cast("uint64_t") long arg2); - + //! 
+ public native @NoException(true) Pointer allocateAsync( + @Cast("const uint64_t") long size, @Cast("const uint64_t") long alignment, @Cast("const nvinfer1::AllocatorFlags") int flags, CUstream_st arg3); /** - * A thread-safe callback implemented by the application to handle release of GPU memory. + * \brief A thread-safe callback implemented by the application to handle stream-ordered release of GPU memory. + * + * The default behavior is to call method deallocate(), which is synchronous and thus loses + * any performance benefits of asynchronous deallocation. If you want the benefits of asynchronous + * deallocation, see discussion of IGpuAsyncAllocator vs. IGpuAllocator in the documentation + * for nvinfer1::IGpuAllocator. * * TensorRT may pass a nullptr to this function if it was previously returned by allocate(). * - * @param memory The acquired memory. + * @param memory A memory address that was previously returned by an allocate() or reallocate() call of the same + * allocator object. + * @param stream specifies the cudaStream for asynchronous usage. + * * @return True if the acquired memory is released successfully. * - * \note The implementation must guarantee thread safety for concurrent allocate/free/reallocate/deallocate + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate * requests. * - * \note If user-implemented free() might hit an error condition, the user should override deallocate() as the - * primary implementation and override free() to call deallocate() for backwards compatibility. - * - * @see free() + * \note The implementation is not required to be asynchronous. It is permitted to synchronize, + * albeit doing so will lose the performance advantage of asynchronous deallocation. + * Either way, it is critical that it not actually free the memory until the current + * stream position is reached. 
* * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. * */ - @Virtual public native @Cast("bool") @Name("deallocate") @NoException(true) boolean _deallocate(Pointer memory); + + + //! + //! + public native @Cast("bool") @NoException(true) boolean deallocateAsync(Pointer memory, CUstream_st arg1); + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAsyncAllocator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAsyncAllocator.java new file mode 100644 index 00000000000..56f36a2faf9 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGpuAsyncAllocator.java @@ -0,0 +1,171 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +/** DO NOT REFER TO namespace v_1_0 IN CODE. ALWAYS USE nvinfer1 INSTEAD. + * The name v_1_0 may change in future versions of TensoRT. */ + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IGpuAsyncAllocator extends IGpuAllocator { + static { Loader.load(); } + /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ + public IGpuAsyncAllocator(Pointer p) { super(p); } + + + /** + * \brief A thread-safe callback implemented by the application to handle stream-ordered asynchronous + * acquisition of GPU memory. + * + * @param size The size of the memory block required (in bytes). + * @param alignment The required alignment of memory. Alignment will be zero + * or a power of 2 not exceeding the alignment guaranteed by cudaMalloc. + * Thus this allocator can be safely implemented with cudaMalloc/cudaFree. + * An alignment value of zero indicates any alignment is acceptable. + * @param flags Reserved for future use. In the current release, 0 will be passed. + * + * @param stream Specifies the cudastream for the asynchronous allocation. If nullptr or 0 is + * passed, the default stream will be used. + * + * @return If the allocation was successful, the start address of a device memory block of the requested size. + * If an allocation request of size 0 is made, nullptr must be returned. + * If an allocation request cannot be satisfied, nullptr must be returned. + * If a non-null address is returned, it is guaranteed to have the specified alignment. + * + * \note The implementation must guarantee thread safety for concurrent allocateAsync/deallocateAsync + * requests. + * + * \note The implementation is not required to be asynchronous. It is permitted to synchronize, + * albeit doing so will lose the performance advantage of asynchronous allocation. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. + * */ + + + //! + //! + //! + //! + //! + //! + //! + //! 
+ public native @NoException(true) Pointer allocateAsync(@Cast("const uint64_t") long size, @Cast("const uint64_t") long alignment, @Cast("const nvinfer1::AllocatorFlags") int flags, + CUstream_st arg3); + + /** + * \brief A thread-safe callback implemented by the application to handle stream-ordered asynchronous + * release of GPU memory. + * + * TensorRT may pass a nullptr to this function if it was previously returned by allocate(). + * + * @param memory A memory address that was previously returned by an allocate() or reallocate() call of the same + * allocator object. + * + * @param stream Specifies the cudastream for the asynchronous deallocation. If nullptr or 0 is + * passed, the default stream will be used. + * + * @return True if the acquired memory is released successfully. + * + * \note The implementation must guarantee thread safety for concurrent allocateAsync/deallocateAsync + * requests. + * + * \note The implementation is not required to be asynchronous. It is permitted to synchronize, + * albeit doing so will lose the performance advantage of asynchronous deallocation. + * Either way, it is critical that it not actually free the memory until the current + * stream position is reached. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. */ + + + //! + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean deallocateAsync(Pointer memory, CUstream_st arg1); + + /** + * \brief A thread-safe callback implemented by the application to handle acquisition of GPU memory. + * + * @param size The size of the memory block required (in bytes). + * @param alignment The required alignment of memory. Alignment will be zero + * or a power of 2 not exceeding the alignment guaranteed by cudaMalloc. + * Thus this allocator can be safely implemented with cudaMalloc/cudaFree. 
+ * An alignment value of zero indicates any alignment is acceptable. + * @param flags Reserved for future use. In the current release, 0 will be passed. + * + * @return If the allocation was successful, the start address of a device memory block of the requested size. + * If an allocation request of size 0 is made, nullptr must be returned. + * If an allocation request cannot be satisfied, nullptr must be returned. + * If a non-null address is returned, it is guaranteed to have the specified alignment. + * + * \note The implementation must guarantee thread safety for concurrent allocateAsync/deallocateAsync/reallocate requests. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. + * @deprecated Deprecated in TensorRT 10.0. Superseded by allocateAsync + * */ + + + //! + //! + //! + //! + //! + //! + //! + public native @Deprecated @Name("allocate") @NoException(true) Pointer _allocate( + @Cast("const uint64_t") long size, @Cast("const uint64_t") long alignment, @Cast("const nvinfer1::AllocatorFlags") int flags); + + /** + * \brief A thread-safe callback implemented by the application to handle release of GPU memory. + * + * TensorRT may pass a nullptr to this function if it was previously returned by allocate(). + * + * @param memory A memory address that was previously returned by an allocate() or reallocate() call of the same + * allocator object. + * + * @return True if the acquired memory is released successfully. + * + * \note The implementation must guarantee thread safety for concurrent allocate/reallocate/deallocate + * requests. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads. + * @deprecated Deprecated in TensorRT 10.0. Superseded by deallocateAsync + * */ + + + //! + //! 
+ public native @Cast("bool") @Deprecated @Name("deallocate") @NoException(true) boolean _deallocate(Pointer memory); + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGridSampleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGridSampleLayer.java index 1afe04abf9f..ff37e2a88c9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGridSampleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IGridSampleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IGridSampleLayer +/** + * \class IGridSampleLayer * * \brief A GridSample layer in a network definition. * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IHostMemory.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IHostMemory.java index bca8e20ece0..956abad14e4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IHostMemory.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IHostMemory.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -55,18 +55,5 @@ public class IHostMemory extends INoCopy { public native @Cast("std::size_t") @NoException(true) long size(); /** The type of the memory that was allocated. */ - - //! - //! - //! - //! public native @NoException(true) DataType type(); - /** - * Destroy the allocated memory. - * - * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}. 
- * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - public native @Deprecated @NoException(true) void destroy(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIdentityLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIdentityLayer.java index 8bbb9646dff..b4aa568a9a5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIdentityLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIdentityLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -39,10 +39,9 @@ * Two types are compatible if they are identical, or are both in {kFLOAT, kHALF}. * Implicit conversion between incompatible types, i.e. without using setOutputType, * is recognized as incorrect as of TensorRT 8.4, but is retained for API compatibility - * within TensorRT 8.x releases. In a future major release the behavior will change - * to record an error if the network output tensor type is incompatible with the layer - * output type. E.g., implicit conversion from kFLOAT to kINT32 will not be allowed, - * and instead such a conversion will require calling setOutputType(DataType::kINT32). + * within TensorRT 8.x releases. TensorRT 10.0 onwards it is an error if the network output tensor type is incompatible + * with the layer output type. E.g., implicit conversion from kFLOAT to kINT32 is not allowed, Use + * setOutputType(DataType::kINT32) to explict convert kFLOAT to kINT32. * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. 
* */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditional.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditional.java index 9a476dbc5f0..b45379209e0 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditional.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * Helper for constructing conditionally-executed subgraphs. + * \class IIfConditional + * + * \brief Helper for constructing conditionally-executed subgraphs. * * An If-conditional conditionally executes part of the network according * to the following pseudo-code: @@ -32,13 +34,13 @@ * Emit output * * Condition is a 0D boolean tensor (representing a scalar). - * trueSubgraph represents a network subgraph that is executed when condition is evaluated to True. - * falseSubgraph represents a network subgraph that is executed when condition is evaluated to False. + * trueSubgraph represents a network subgraph that is executed when condition evaluates to True. + * falseSubgraph represents a network subgraph that is executed when condition evaluates to False. * * The following constraints apply to If-conditionals: * - Both the trueSubgraph and falseSubgraph must be defined. * - The number of output tensors in both subgraphs is the same. - * - The type and shape of each output tensor from true/false subgraphs are the same. + * - Corresponding output tensors from the true/false subgraphs have the same type and shape. 
* */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IIfConditional extends INoCopy { diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalBoundaryLayer.java index 4cfabcc6e81..cfbf6f52f83 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalBoundaryLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalBoundaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * This is a base class for Conditional boundary layers. + * \class IIfConditionalBoundaryLayer + * + * \brief This is a base class for Conditional boundary layers. * * Boundary layers are used to demarcate the boundaries of Conditionals. * */ @@ -30,6 +32,8 @@ public class IIfConditionalBoundaryLayer extends ILayer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IIfConditionalBoundaryLayer(Pointer p) { super(p); } - /** Return pointer to the IIfConditional associated with this boundary layer. */ + /** + * \brief Get a pointer to the IIfConditional associated with this boundary layer. 
+ * */ public native @NoException(true) IIfConditional getConditional(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalInputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalInputLayer.java index f3795917cb7..60d61f51c6f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalInputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalInputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * This layer represents an input to an IIfConditional. + * \class IIfConditionalInputLayer + * + * \brief This layer represents an input to an IIfConditional. * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IIfConditionalInputLayer extends IIfConditionalBoundaryLayer { diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalOutputLayer.java index 38ed7a1099d..a3f4925524a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalOutputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIfConditionalOutputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * This layer represents an output of an IIfConditional. + * \class IIfConditionalOutputLayer + * + * \brief This layer represents an output of an IIfConditional. * * An IIfConditionalOutputLayer has exactly one output. 
* */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8Calibrator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8Calibrator.java index 7f1dd22789a..99feb7e7048 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8Calibrator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8Calibrator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -31,7 +31,7 @@ * on subsequent runs. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IInt8Calibrator extends Pointer { +public class IInt8Calibrator extends IVersionedInterface { static { Loader.load(); } /** Default native constructor. */ public IInt8Calibrator() { super((Pointer)null); allocate(); } @@ -52,6 +52,8 @@ public class IInt8Calibrator extends Pointer { * \brief Get the batch size used for calibration batches. * * @return The batch size. + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch support is removed in TensorRT 10.0. * */ @@ -60,7 +62,8 @@ public class IInt8Calibrator extends Pointer { //! //! //! - @Virtual(true) public native @NoException(true) @Const({false, false, true}) int getBatchSize(); + //! + @Virtual(true) public native @Deprecated @NoException(true) @Const({false, false, true}) int getBatchSize(); /** * \brief Get a batch of input for calibration. @@ -71,6 +74,7 @@ public class IInt8Calibrator extends Pointer { * containing each network input data. * @param names The names of the network input for each pointer in the binding array. * @param nbBindings The number of pointers in the bindings array. + * * @return False if there are no more batches for calibration. 
* * @see getBatchSize() diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator.java index ebe3818256d..40f4196791b 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,12 +18,7 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * Entropy calibrator. This is the Legacy Entropy calibrator. It is less complicated than the legacy calibrator and - * produces better results. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IInt8EntropyCalibrator extends IInt8Calibrator { static { Loader.load(); } /** Default native constructor. */ @@ -41,8 +36,17 @@ public class IInt8EntropyCalibrator extends IInt8Calibrator { return new IInt8EntropyCalibrator((Pointer)this).offsetAddress(i); } + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + /** * Signal that this is the entropy calibrator. 
* */ - @Virtual public native @NoException(true) CalibrationAlgoType getAlgorithm(); + public native @NoException(true) CalibrationAlgoType getAlgorithm(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator2.java index 51716b79b6b..64536837f38 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator2.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8EntropyCalibrator2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,12 +18,7 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * Entropy calibrator 2. This is the preferred calibrator. This is the required calibrator for DLA, as it supports per - * activation tensor scaling. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IInt8EntropyCalibrator2 extends IInt8Calibrator { static { Loader.load(); } /** Default native constructor. */ @@ -41,8 +36,17 @@ public class IInt8EntropyCalibrator2 extends IInt8Calibrator { return new IInt8EntropyCalibrator2((Pointer)this).offsetAddress(i); } + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + /** * Signal that this is the entropy calibrator 2. 
* */ - @Virtual public native @NoException(true) CalibrationAlgoType getAlgorithm(); + public native @NoException(true) CalibrationAlgoType getAlgorithm(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8LegacyCalibrator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8LegacyCalibrator.java index f84d66e8686..76de765a746 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8LegacyCalibrator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8LegacyCalibrator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,28 +18,20 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * Legacy calibrator left for backward compatibility with TensorRT 2.0. This calibrator requires user parameterization, - * and is provided as a fallback option if the other calibrators yield poor results. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IInt8LegacyCalibrator extends IInt8Calibrator { static { Loader.load(); } - /** Default native constructor. */ - public IInt8LegacyCalibrator() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public IInt8LegacyCalibrator(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public IInt8LegacyCalibrator(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public IInt8LegacyCalibrator position(long position) { - return (IInt8LegacyCalibrator)super.position(position); - } - @Override public IInt8LegacyCalibrator getPointer(long i) { - return new IInt8LegacyCalibrator((Pointer)this).offsetAddress(i); - } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); /** * Signal that this is the legacy calibrator. @@ -49,7 +41,7 @@ public class IInt8LegacyCalibrator extends IInt8Calibrator { //! //! //! - @Virtual public native @NoException(true) CalibrationAlgoType getAlgorithm(); + public native @NoException(true) CalibrationAlgoType getAlgorithm(); /** * \brief The quantile (between 0 and 1) that will be used to select the region maximum when the quantile method @@ -62,7 +54,7 @@ public class IInt8LegacyCalibrator extends IInt8Calibrator { //! //! //! - @Virtual(true) public native @NoException(true) @Const({false, false, true}) double getQuantile(); + public native @NoException(true) double getQuantile(); /** * \brief The fraction (between 0 and 1) of the maximum used to define the regression cutoff when using regression @@ -77,7 +69,7 @@ public class IInt8LegacyCalibrator extends IInt8Calibrator { //! //! //! - @Virtual(true) public native @NoException(true) @Const({false, false, true}) double getRegressionCutoff(); + public native @NoException(true) double getRegressionCutoff(); /** * \brief Load a histogram. @@ -97,7 +89,9 @@ public class IInt8LegacyCalibrator extends IInt8Calibrator { //! //! //! 
- @Virtual(true) public native @Const @NoException(true) Pointer readHistogramCache(@Cast("std::size_t*") @ByRef LongPointer length); + public native @Const @NoException(true) Pointer readHistogramCache(@Cast("std::size_t*") @ByRef LongPointer length); + public native @Const @NoException(true) Pointer readHistogramCache(@Cast("std::size_t*") @ByRef LongBuffer length); + public native @Const @NoException(true) Pointer readHistogramCache(@Cast("std::size_t*") @ByRef long[] length); /** * \brief Save a histogram cache. @@ -107,5 +101,5 @@ public class IInt8LegacyCalibrator extends IInt8Calibrator { * * @see readHistogramCache() * */ - @Virtual(true) public native @NoException(true) void writeHistogramCache(@Const Pointer ptr, @Cast("std::size_t") long length); + public native @NoException(true) void writeHistogramCache(@Const Pointer ptr, @Cast("std::size_t") long length); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8MinMaxCalibrator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8MinMaxCalibrator.java index e571ec6cf1e..b4a6e4af63a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8MinMaxCalibrator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IInt8MinMaxCalibrator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,11 +18,7 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * MinMax Calibrator. It supports per activation tensor scaling. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IInt8MinMaxCalibrator extends IInt8Calibrator { static { Loader.load(); } /** Default native constructor. 
*/ @@ -40,8 +36,17 @@ public class IInt8MinMaxCalibrator extends IInt8Calibrator { return new IInt8MinMaxCalibrator((Pointer)this).offsetAddress(i); } + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + /** * Signal that this is the MinMax Calibrator. * */ - @Virtual public native @NoException(true) CalibrationAlgoType getAlgorithm(); + public native @NoException(true) CalibrationAlgoType getAlgorithm(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIteratorLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIteratorLayer.java index 0f02bbcf6d2..feb639df26e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIteratorLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IIteratorLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,25 +19,61 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; +/** + * \class IIteratorLayer + * + * \brief A layer to do iterations. + * + * The iterator layer iterates over a tensor along the given axis and in the given direction. + * It enables each loop iteration to inspect a different slice of the tensor. + * + * @see ILoop::addIterator() + * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IIteratorLayer extends ILoopBoundaryLayer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IIteratorLayer(Pointer p) { super(p); } - /** Set axis to iterate over. */ + /** + * \brief Set axis to iterate over. + * */ + + + //! + //! public native @NoException(true) void setAxis(int axis); - /** Get axis being iterated over. 
*/ + /** + * \brief Get axis being iterated over. + * */ + + + //! + //! + //! public native @NoException(true) int getAxis(); - /** For reverse=false, the layer is equivalent to addGather(tensor, I, 0) where I is a + /** + * \brief Set iteration order to be reverse. + * + * For reverse=false, the layer is equivalent to addGather(tensor, I, 0) where I is a * scalar tensor containing the loop iteration number. * For reverse=true, the layer is equivalent to addGather(tensor, M-1-I, 0) where M is the trip count * computed from TripLimits of kind kCOUNT. - * The default is reverse=false. */ + * The default is reverse=false. + * */ + + + //! + //! + //! public native @NoException(true) void setReverse(@Cast("bool") boolean reverse); - /** True if and only if reversing input. */ + /** + * \brief Check if the iteration order is reverse. + * + * @return True if and only if reversing input. + * */ public native @Cast("bool") @NoException(true) boolean getReverse(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILRNLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILRNLayer.java index 8bd00f97db6..6079be6eba4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILRNLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILRNLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -48,7 +48,7 @@ public class ILRNLayer extends ILayer { //! //! //! - public native @NoException(true) void setWindowSize(int windowSize); + public native @NoException(true) void setWindowSize(@Cast("int64_t") long windowSize); /** * \brief Get the LRN window size. @@ -60,12 +60,14 @@ public class ILRNLayer extends ILayer { //! //! //! - public native @NoException(true) int getWindowSize(); + //! 
+ public native @Cast("int64_t") @NoException(true) long getWindowSize(); /** * \brief Set the LRN alpha value. * * The valid range is [-1e20, 1e20]. + * * @see getAlpha() * */ @@ -85,12 +87,14 @@ public class ILRNLayer extends ILayer { //! //! //! + //! public native @NoException(true) float getAlpha(); /** * \brief Set the LRN beta value. * * The valid range is [0.01, 1e5f]. + * * @see getBeta() * */ @@ -110,12 +114,14 @@ public class ILRNLayer extends ILayer { //! //! //! + //! public native @NoException(true) float getBeta(); /** * \brief Set the LRN K value. * * The valid range is [1e-5, 1e10]. + * * @see getK() * */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILayer.java index fbe296f700b..2c1049cba12 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -91,7 +91,7 @@ public class ILayer extends INoCopy { * @param index The index of the input tensor. * * @return The input tensor, or nullptr if the index is out of range or the tensor is optional - * (\ref ISliceLayer and \ref IRNNv2Layer). + * (\ref ISliceLayer). * */ @@ -112,8 +112,7 @@ public class ILayer extends INoCopy { /** * \brief Get the layer output corresponding to the given index. * - * @return The indexed output tensor, or nullptr if the index is out of range or the tensor is optional - * (\ref IRNNv2Layer). + * @return The indexed output tensor, or nullptr if the index is out of range or the tensor is optional. * */ @@ -147,12 +146,13 @@ public class ILayer extends INoCopy { //! //! //! + //! 
public native @NoException(true) void setInput(int index, @ByRef ITensor tensor); /** - * \brief Set the computational precision of this layer + * \brief Set the preferred or required computational precision of this layer in a weakly-typed network. * - * Setting the precision allows TensorRT to choose an implementation which run at this computational precision. + * Setting the precision directs TensorRT to choose an implementation that runs at this computational precision. * TensorRT could still choose a non-conforming fastest implementation that ignores the requested precision. * To force choosing an implementation with the requested precision, set exactly one of the following flags, * which differ in what happens if no such implementation exists: @@ -168,6 +168,10 @@ public class ILayer extends INoCopy { * For a IIdentityLayer: If it casts to/from float/half/int8/uint8, the precision must be one of those types, * otherwise it must be either the input or output type. * + * Strongly-typed networks reject calls to method setPrecision. In strongly-typed networks, the computation + * precision is typically controlled by casting the input tensors to the desired type. The exception is + * INormalizationLayer, which has a method setComputePrecision(). + * * @param dataType the computational precision. * * @see getPrecision() precisionIsSet() resetPrecision() @@ -227,15 +231,18 @@ public class ILayer extends INoCopy { //! //! //! + //! + //! public native @NoException(true) void resetPrecision(); /** - * \brief Set the output type of this layer + * \brief Set the output type of this layer in a weakly-typed network. * * Setting the output type constrains TensorRT to choose implementations which generate output data with the * given type. If it is not set, TensorRT will select output type based on layer computational precision. TensorRT * could still choose non-conforming output type based on fastest implementation. 
To force choosing the requested - * output type, set exactly one of the following flags, which differ in what happens if no such implementation exists: + * output type, set exactly one of the following flags, which differ in what happens if no such implementation + * exists: * * * BuilderFlag::kOBEY_PRECISION_CONSTRAINTS - build fails with an error message. * @@ -257,6 +264,14 @@ public class ILayer extends INoCopy { * is marked as a network output, since only setType() [but not setOutputType()] will affect the data * representation in the corresponding output binding. * + * Strongly-typed networks reject calls to method setOutputType. Instead, the output type can be set + * only for layers that define method setToType(). Those layers are: + * + * * ICastLayer + * * IDequantizeLayer + * * IFillLayer + * * IQuantizeLayer + * * @param index the index of the output to set * @param dataType the type of the output * @@ -268,6 +283,7 @@ public class ILayer extends INoCopy { //! //! //! + //! public native @NoException(true) void setOutputType(int index, DataType dataType); public native @NoException(true) void setOutputType(int index, @Cast("nvinfer1::DataType") int dataType); @@ -275,6 +291,7 @@ public class ILayer extends INoCopy { * \brief get the output type of this layer * * @param index the index of the output + * * @return the output precision. If no precision has been set, DataType::kFLOAT will be returned, * unless the output type is inherently DataType::kINT32. * @@ -286,12 +303,14 @@ public class ILayer extends INoCopy { //! //! //! + //! 
public native @NoException(true) DataType getOutputType(int index); /** * \brief whether the output type has been set for this layer * * @param index the index of the output + * * @return whether the output type has been explicitly set * * @see setOutputType() getOutputType() resetOutputType() diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILogger.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILogger.java index d016d79013a..db186e21cd7 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILogger.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILogger.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,7 +25,7 @@ * \brief Application-implemented logging interface for the builder, refitter and runtime. * * The logger used to create an instance of IBuilder, IRuntime or IRefitter is used for all objects created through - * that interface. The logger should be valid until all objects created are released. + * that interface. The logger must be valid until all objects created are released. * * The Logger object implementation must be thread safe. All locking and synchronization is pushed to the * interface implementation and TensorRT does not hold any synchronization primitives when calling the interface @@ -49,7 +49,7 @@ public class ILogger extends Pointer { /** * \enum Severity * - * The severity corresponding to a log message. + * \brief The severity corresponding to a log message. * */ public enum Severity { /** An internal error has occurred. Execution is unrecoverable. */ @@ -71,11 +71,17 @@ public enum Severity { } /** - * A callback implemented by the application to handle logging messages; + * \brief A callback implemented by the application to handle logging messages; * * @param severity The severity of the message. 
* @param msg A null-terminated log message. * + * \warning Loggers used in the safety certified runtime must set a maximum message length and truncate + * messages exceeding this length. It is up to the implementer of the derived class to define + * a suitable limit that will prevent buffer overruns, resource exhaustion, and other security + * vulnerabilities in their implementation. The TensorRT safety certified runtime will never + * emit messages longer than 1024 bytes. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoggerFinder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoggerFinder.java index a8bad672edb..5f5df61252c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoggerFinder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoggerFinder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoop.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoop.java index 9a057eec3c5..65be0a3ae6a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoop.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoop.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,9 +20,13 @@ /** - * Helper for creating a recurrent subgraph. + * \class ILoop * - * An ILoop cannot be added to an INetworkDefinition where hasImplicitBatchDimensions() returns true. + * \brief Helper for creating a recurrent subgraph. + * + * An ILoop defines a loop within a network. 
It supports the implementation of recurrences, + * which are crucial for iterative computations, such as RNNs for natural language processing and + * time-series analysis. * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class ILoop extends INoCopy { @@ -78,13 +82,16 @@ public class ILoop extends INoCopy { * computed from TripLimits of kind kCOUNT. * */ + + //! //! //! //! public native @NoException(true) IIteratorLayer addIterator(@ByRef ITensor tensor, int axis/*=0*/, @Cast("bool") boolean reverse/*=false*/); public native @NoException(true) IIteratorLayer addIterator(@ByRef ITensor tensor); - /** \brief Make an output for this loop, based on the given tensor. + /** + * \brief Make an output for this loop, based on the given tensor. * * axis is the axis for concatenation (if using outputKind of kCONCATENATE or kREVERSE). * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopBoundaryLayer.java index c37ef0531ef..09952dfe769 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopBoundaryLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopBoundaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,12 +19,27 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; +/** + * \class ILoopBoundaryLayer + * + * \brief This is a base class for Loop boundary layers. + * + * The loop boundary layers are used to define loops within a network, enabling the implementation + * of recurrences. The boundary layers for a loop are created by class ILoop. + * + * There are four kinds of boundary layers. + * * ITripLimitLayer: controls the number of loop iterations. + * * IIterationLayer: iterates over an input tensor. 
+ * * IRecurrenceLayer: returns an initial value or value from the previous loop iteration. + * * ILoopOutputLayer: generates an output tensor from the loop iterations. */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class ILoopBoundaryLayer extends ILayer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ILoopBoundaryLayer(Pointer p) { super(p); } - /** Return pointer to ILoop associated with this boundary layer. */ + /** + * \brief Get a pointer to ILoop associated with this boundary layer. + * */ public native @NoException(true) ILoop getLoop(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopOutputLayer.java index b35cf21a43b..1c0594b12ad 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopOutputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ILoopOutputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,7 +20,9 @@ /** - * An ILoopOutputLayer is the sole way to get output from a loop. + * \class ILoopOutputLayer + * + * \brief An ILoopOutputLayer is the sole way to get output from a loop. * * The first input tensor must be defined inside the loop; the output tensor is outside the loop. * The second input tensor, if present, must be defined outside the loop. @@ -42,6 +44,9 @@ public class ILoopOutputLayer extends ILoopBoundaryLayer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ILoopOutputLayer(Pointer p) { super(p); } + /** + * \brief Get which kind a loop output has. + * */ //! @@ -61,9 +66,15 @@ public class ILoopOutputLayer extends ILoopBoundaryLayer { * setAxis(3) causes the output to have dimensions [b,c,d,a]. 
* Default is axis is 0. * */ + + + //! + //! public native @NoException(true) void setAxis(int axis); - /** Get axis being concatenated over. */ + /** + * \brief Get axis being concatenated over. + * */ //! @@ -89,7 +100,7 @@ public class ILoopOutputLayer extends ILoopBoundaryLayer { /** The indices in the kCONCATENATE or kREVERSE cases are as follows: /** /** - 0: Contribution to the output tensor. The contribution must come from inside the loop. - /** - 1: The concatenation length scalar value, must come from outside the loop, as a 0D Int32 shape tensor. + /** - 1: The concatenation length scalar value, must come from outside the loop, as a 0D Int32 or Int64 shape tensor. /** /** If this function is called with the value 1, then the function getNbInputs() changes /** from returning 1 to 2. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IMatrixMultiplyLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IMatrixMultiplyLayer.java index 8bbbb17eda9..caf4145ee46 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IMatrixMultiplyLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IMatrixMultiplyLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -52,8 +52,10 @@ public class IMatrixMultiplyLayer extends ILayer { /** * \brief Set the operation for an input tensor. + * * @param index Input tensor number (0 or 1). * @param op New operation. 
+ * * @see getOperation() * */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INMSLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INMSLayer.java index 64ae9e731a6..f60913d6ecd 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INMSLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INMSLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -29,7 +29,10 @@ * intersection-over-union (IoU) with previously selected boxes is less than or equal to a given threshold. * This layer implements NMS per batch item and per class. * - * For each batch item, the ordering of candidate bounding boxes with the same score is unspecified. + * Per batch item, boxes are initially sorted by their scores without regard to class. Only boxes up to a maximum of the TopK limit are considered for selection (per batch). + * During selection, only overlapping boxes of the same class are compared, so that overlapping boxes of different classes do not suppress each other. + * + * For each batch item, the ordering of candidate bounding boxes with the same score is unspecified, but the ordering will be consistent across different runs for the same inputs. 
* * The layer has the following inputs, in order of input index: * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INetworkDefinition.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INetworkDefinition.java index bc19f06f578..ea253f60c91 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INetworkDefinition.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INetworkDefinition.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,10 +25,8 @@ * \brief A network definition for input to the builder. * * A network definition defines the structure of the network, and combined with a IBuilderConfig, is built - * into an engine using an IBuilder. An INetworkDefinition can either have an implicit batch dimensions, specified - * at runtime, or all dimensions explicit, full dims mode, in the network definition. The former mode, i.e. the - * implicit batch size mode, has been deprecated. The function hasImplicitBatchDimension() can be used to query the - * mode of the network. + * into an engine using an IBuilder. An INetworkDefinition can have all dimensions explicit, full dims mode, in the + * network definition. The former mode, i.e. the implicit batch size mode, has been deprecated. * * A network with implicit batch dimensions returns the dimensions of a layer without the implicit dimension, * and instead the batch is specified at execute/enqueue time. If the network has all dimensions specified, then @@ -63,13 +61,12 @@ public class INetworkDefinition extends INoCopy { * The name of the input tensor is used to find the index into the buffer array for an engine built from * the network. The volume must be less than 2^31 elements. * - * For networks with an implicit batch dimension, this volume includes the batch dimension with its length set - * to the maximum batch size. 
For networks with all explicit dimensions and with wildcard dimensions, the volume + * For networks with wildcard dimensions, the volume * is based on the maxima specified by an IOptimizationProfile.Dimensions are normally non-negative integers. The * exception is that in networks with all explicit dimensions, -1 can be used as a wildcard for a dimension to * be specified at runtime. Input tensors with such a wildcard must have a corresponding entry in the * IOptimizationProfiles indicating the permitted extrema, and the input dimensions must be set by - * IExecutionContext::setBindingDimensions. Different IExecutionContext instances can have different dimensions. + * IExecutionContext::setInputShape. Different IExecutionContext instances can have different dimensions. * Wildcard dimensions are only supported for EngineCapability::kSTANDARD. They are not * supported in safety contexts. DLA does not support Wildcard dimensions. * @@ -100,8 +97,8 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) ITensor addInput(String name, DataType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) ITensor addInput(String name, DataType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Mark a tensor as a network output. @@ -120,26 +117,20 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! public native @NoException(true) void markOutput(@ByRef ITensor tensor); /** - * \brief Add a convolution layer to the network. + * \brief Mark a tensor as a debug tensor. 
* - * @param input The input tensor to the convolution. - * @param nbOutputMaps The number of output feature maps for the convolution. - * @param kernelSize The HW-dimensions of the convolution kernel. - * @param kernelWeights The kernel weights for the convolution. - * @param biasWeights The bias weights for the convolution. Weights{} represents no bias. + * A debug tensor can be optionally emitted at runtime. + * Note that tensor names are required to specify debug + * tensors at runtime. * - * @see IConvolutionLayer - * - * \warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor. - * \warning Int32 tensors are not valid input tensors. + * @param tensor Tensor to be marked as debug * - * @return The new convolution layer, or nullptr if it could not be created. + * @return True if tensor successfully marked (or was already marked), false otherwise. * - * @deprecated Superseded by addConvolutionNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 + * @see unmarkDebug(), IExecutionContext::setDebugListener(), ITensor::setName() * */ @@ -149,51 +140,30 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! - public native @Deprecated @NoException(true) IConvolutionLayer addConvolution( - @ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); + public native @Cast("bool") @NoException(true) boolean markDebug(@ByRef ITensor tensor); /** - * \brief Add a fully connected layer to the network. + * \brief Unmark a tensor as a debug tensor. * - * @param input The input tensor to the layer. - * @param nbOutputs The number of outputs of the layer. - * @param kernelWeights The kernel weights for the fully connected layer. - * @param biasWeights The bias weights for the fully connected layer. Weights{} represents no bias. + * Remove the marking of a tensor as a debug tensor. 
* - * @see IFullyConnectedLayer + * @param tensor Tensor to be unmarked as debug. * - * \warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor. - * \warning Int32 tensors are not valid input tensors. + * @return True if tensor successfully unmarked (or was already unmarked), false otherwise. * - * @return The new fully connected layer, or nullptr if it could not be created. - * - * @deprecated Deprecated in TensorRT 8.4. Superseded by addMatrixMultiply(). + * @see markDebug(), IExecutionContext::setDebugListener() * */ //! //! //! - //! - //! - //! - public native @Deprecated @NoException(true) IFullyConnectedLayer addFullyConnected( - @ByRef ITensor input, int nbOutputs, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); + public native @Cast("bool") @NoException(true) boolean unmarkDebug(@ByRef ITensor tensor); /** - * \brief Add an activation layer to the network. + * \brief Check if a tensor is marked as debug tensor. * - * @param input The input tensor to the layer. - * @param type The type of activation function to apply. - * - * Note that the setAlpha() and setBeta() methods must be used on the - * output for activations that require these parameters. - * - * @see IActivationLayer ActivationType - * \warning Int32 tensors are not valid input tensors. - * - * @return The new activation layer, or nullptr if it could not be created. + * @return true if tensor is marked as debug tensor, false otherwise. * */ @@ -203,22 +173,23 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, ActivationType type); - public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, @Cast("nvinfer1::ActivationType") int type); + //! + public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Const @ByRef ITensor tensor); /** - * \brief Add a pooling layer to the network. 
+ * \brief Add an activation layer to the network. * * @param input The input tensor to the layer. - * @param type The type of pooling to apply. - * @param windowSize The size of the pooling window. + * @param type The type of activation function to apply. * - * @see IPoolingLayer PoolingType - * \warning Int32 tensors are not valid input tensors. + * Note that the setAlpha() and setBeta() methods must be used on the + * output for activations that require these parameters. * - * @return The new pooling layer, or nullptr if it could not be created. + * @see IActivationLayer ActivationType * - * @deprecated Superseded by addPoolingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 + * \warning Int32 and Int64 are valid only for activation type kRELU. + * + * @return The new activation layer, or nullptr if it could not be created. * */ @@ -227,8 +198,8 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @Deprecated @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, PoolingType type, @ByVal DimsHW windowSize); - public native @Deprecated @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @ByVal DimsHW windowSize); + public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, ActivationType type); + public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, @Cast("nvinfer1::ActivationType") int type); /** * \brief Add a LRN layer to the network. @@ -252,14 +223,13 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, int window, float alpha, float beta, float k); + public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, @Cast("int64_t") long window, float alpha, float beta, float k); /** * \brief Add a Scale layer to the network. * * @param input The input tensor to the layer. 
- * This tensor is required to have a minimum of 3 dimensions in implicit batch mode - * and a minimum of 4 dimensions in explicit batch mode. + * This tensor must have at least 4 dimensions. * @param mode The scaling mode. * @param shift The shift value. * @param scale The scale value. @@ -324,40 +294,10 @@ public class INetworkDefinition extends INoCopy { //! //! //! + //! public native @NoException(true) IConcatenationLayer addConcatenation(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs); public native @NoException(true) IConcatenationLayer addConcatenation(@ByPtrPtr ITensor inputs, int nbInputs); - /** - * \brief Add a deconvolution layer to the network. - * - * @param input The input tensor to the layer. - * @param nbOutputMaps The number of output feature maps. - * @param kernelSize The HW-dimensions of the deconvolution kernel. - * @param kernelWeights The kernel weights for the deconvolution. - * @param biasWeights The bias weights for the deconvolution. Weights{} represents no bias. - * - * @see IDeconvolutionLayer - * - * \warning It is an error to specify a wildcard value for the 'C' dimension of the input tensor. - * \warning Int32 tensors are not valid input tensors. - * - * @return The new deconvolution layer, or nullptr if it could not be created. - * - * @deprecated Superseded by addDeconvolutionNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) IDeconvolutionLayer addDeconvolution( - @ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); - /** * \brief Add an elementwise layer to the network. * @@ -410,6 +350,7 @@ public class INetworkDefinition extends INoCopy { * @return The new unary layer, or nullptr if it could not be created * */ + //! //! //! 
@@ -418,27 +359,6 @@ public class INetworkDefinition extends INoCopy { public native @NoException(true) IUnaryLayer addUnary(@ByRef ITensor input, UnaryOperation operation); public native @NoException(true) IUnaryLayer addUnary(@ByRef ITensor input, @Cast("nvinfer1::UnaryOperation") int operation); - /** \brief Add a padding layer to the network. - * - * @param input The input tensor to the layer. - * @param prePadding The padding to apply to the start of the tensor. - * @param postPadding The padding to apply to the end of the tensor. - * - * @see IPaddingLayer - * - * @return The new padding layer, or nullptr if it could not be created. - * - * @deprecated Superseded by addPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) IPaddingLayer addPadding(@ByRef ITensor input, @ByVal DimsHW prePadding, @ByVal DimsHW postPadding); - /** * \brief Add a shuffle layer to the network. * @@ -462,7 +382,7 @@ public class INetworkDefinition extends INoCopy { * * @param indices - tensor containing indices where on_value should be set. * @param values - a 2-element tensor, consisting of [off_value, on_value]. - * @param depth - tensor containing the width of the added one-hot dimension. + * @param depth - a shape tensor containing the width of the added one-hot dimension. * @param axis - the axis to add the one-hot encoding to. * * @see IOneHotLayer @@ -581,28 +501,12 @@ public class INetworkDefinition extends INoCopy { //! //! - //! - //! - public native @NoException(true) ITensor getOutput(int index); - - /** - * \brief Destroy this INetworkDefinition object. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - //! //! //! //! //! - //! - //! - //! 
- public native @Deprecated @NoException(true) void destroy(); + public native @NoException(true) ITensor getOutput(int index); /** * \brief Add a reduce layer to the network. @@ -613,7 +517,6 @@ public class INetworkDefinition extends INoCopy { * The bit in position i of bitmask reduceAxes corresponds to explicit dimension i if result. * E.g., the least significant bit corresponds to the first explicit dimension and the next to least * significant bit corresponds to the second explicit dimension. - * * @param keepDimensions The boolean that specifies whether or not to keep the reduced dimensions in the * output of the layer. * @@ -622,7 +525,7 @@ public class INetworkDefinition extends INoCopy { * * @see IReduceLayer * - * \warning If output is an Int32 shape tensor, ReduceOperation::kAVG is unsupported. + * \warning If output is an Int32 or Int64 shape tensor, ReduceOperation::kAVG is unsupported. * * @return The new reduce layer, or nullptr if it could not be created. * */ @@ -639,7 +542,6 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! public native @NoException(true) IReduceLayer addReduce( @ByRef ITensor input, ReduceOperation operation, @Cast("uint32_t") int reduceAxes, @Cast("bool") boolean keepDimensions); public native @NoException(true) IReduceLayer addReduce( @@ -670,8 +572,6 @@ public class INetworkDefinition extends INoCopy { * * @see ITopKLayer * - * \warning Int32 tensors are not valid input tensors. - * * @return The new TopK layer, or nullptr if it could not be created. * */ @@ -736,6 +636,7 @@ public class INetworkDefinition extends INoCopy { * * \warning The bounds tensor cannot have the last dimension be the wildcard character. * \warning Int32 tensors are not valid input tensors. + * \warning The input and bounds tensors should be 3D tensors. * * @return The new RaggedSoftMax layer, or nullptr if it could not be created. * */ @@ -797,7 +698,6 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! 
public native @NoException(true) INonZeroLayer addNonZero(@ByRef ITensor input); /** @@ -814,9 +714,6 @@ public class INetworkDefinition extends INoCopy { * Otherwise the output is a tensor of real values and the output type will be * follow TensorRT's normal precision rules. * - * If tensors in the network have an implicit batch dimension, the constant - * is broadcast over that dimension. - * * If a wildcard dimension is used, the volume of the runtime dimensions must equal * the number of weights specified. * @@ -829,97 +726,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @NoException(true) IConstantLayer addConstant(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @ByVal Weights weights); - - /** - * \brief Add an \p layerCount deep RNN layer to the network with \p hiddenSize internal states that can - * take a batch with fixed or variable sequence lengths. - * - * @param input The input tensor to the layer (see below). - * @param layerCount The number of layers in the RNN. - * @param hiddenSize Size of the internal hidden state for each layer. - * @param maxSeqLen Maximum sequence length for the input. - * @param op The type of RNN to execute. - * - * By default, the layer is configured with RNNDirection::kUNIDIRECTION and RNNInputMode::kLINEAR. - * To change these settings, use IRNNv2Layer::setDirection() and IRNNv2Layer::setInputMode(). - * - * %Weights and biases for the added layer should be set using - * IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() prior - * to building an engine using this network. - * - * The input tensors must be of the type DataType::kFLOAT or DataType::kHALF. - * The layout of the weights is row major and must be the same datatype as the input tensor. - * \p weights contain 8 matrices and \p bias contains 8 vectors. 
- * - * See IRNNv2Layer::setWeightsForGate() and IRNNv2Layer::setBiasForGate() for details on the required input - * format for \p weights and \p bias. - * - * The \p input ITensor should contain zero or more index dimensions {@code {N1, ..., Np}}, followed by - * two dimensions, defined as follows: - * - {@code S_max} is the maximum allowed sequence length (number of RNN iterations) - * - {@code E} specifies the embedding length (unless RNNInputMode::kSKIP is set, in which case it should match - * getHiddenSize()). - * - * By default, all sequences in the input are assumed to be size \p maxSeqLen. To provide explicit sequence - * lengths for each input sequence in the batch, use IRNNv2Layer::setSequenceLengths(). - * - * The RNN layer outputs up to three tensors. - * - * The first output tensor is the output of the final RNN layer across all timesteps, with dimensions - * {@code {N1, ..., Np, S_max, H}}: - * - * - {@code N1..Np} are the index dimensions specified by the input tensor - * - {@code S_max} is the maximum allowed sequence length (number of RNN iterations) - * - {@code H} is an output hidden state (equal to getHiddenSize() or 2x getHiddenSize()) - * - * The second tensor is the final hidden state of the RNN across all layers, and if the RNN - * is an LSTM (i.e. getOperation() is RNNOperation::kLSTM), then the third tensor is the final cell state - * of the RNN across all layers. Both the second and third output tensors have dimensions - * {@code {N1, ..., Np, L, H}}: - * - * - {@code N1..Np} are the index dimensions specified by the input tensor - * - {@code L} is the number of layers in the RNN, equal to getLayerCount() if getDirection is - * RNNDirection::kUNIDIRECTION, - * and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional - * case, layer {@code l}'s final forward hidden state is stored in {@code L = 2*l}, and - * final backward hidden state is stored in {@code L= 2*l + 1}. 
- * - {@code H} is the hidden state for each layer, equal to getHiddenSize(). - * - * @see IRNNv2Layer - * - * @deprecated Deprecated prior to TensorRT 8.0 and will be removed in 9.0. Superseded by - * INetworkDefinition::addLoop(). - * - * \warning RNN inputs do not support wildcard dimensions or explicit batch size networks. - * \warning Int32 tensors are not valid input tensors, only for sequence lengths. - * - * @return The new RNN layer, or nullptr if it could not be created. - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) IRNNv2Layer addRNNv2( - @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, RNNOperation op); - public native @Deprecated @NoException(true) IRNNv2Layer addRNNv2( - @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, @Cast("nvinfer1::RNNOperation") int op); + public native @NoException(true) IConstantLayer addConstant(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @ByVal Weights weights); /** * \brief Add an identity layer. @@ -1014,10 +821,35 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! public native @NoException(true) IPluginV2Layer addPluginV2(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @ByRef IPluginV2 plugin); public native @NoException(true) IPluginV2Layer addPluginV2(@ByPtrPtr ITensor inputs, int nbInputs, @ByRef IPluginV2 plugin); + /** + * \brief Add a plugin layer implementing the IPluginV3 interface to the network. + * + * @param inputs The input tensors to the layer. + * @param nbInputs The number of input tensors. + * @param shapeInputs Shape tensor inputs to the layer. + * @param nbShapeInputs The number of shape tensor inputs. + * @param plugin The layer plugin. + * + * @see IPluginV3Layer + * + * @return The new plugin layer, or nullptr if it could not be created. + * */ + + + //! + //! + //! + //! + //! + //! 
+ public native @NoException(true) IPluginV3Layer addPluginV3(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @Cast("nvinfer1::ITensor*const*") PointerPointer shapeInputs, + int nbShapeInputs, @ByRef IPluginV3 plugin); + public native @NoException(true) IPluginV3Layer addPluginV3(@ByPtrPtr ITensor inputs, int nbInputs, @ByPtrPtr ITensor shapeInputs, + int nbShapeInputs, @ByRef IPluginV3 plugin); + /** * \brief Add a slice layer to the network. * @@ -1042,7 +874,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @ByVal @Cast("nvinfer1::Dims*") Dims32 start, @ByVal @Cast("nvinfer1::Dims*") Dims32 size, @ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 start, @Cast("const nvinfer1::Dims*") @ByRef Dims64 size, @Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); /** * \brief Sets the name of the network. @@ -1110,21 +942,43 @@ public class INetworkDefinition extends INoCopy { //! //! //! - //! public native @NoException(true) IShapeLayer addShape(@ByRef ITensor input); /** * \brief Query whether the network was created with an implicit batch dimension. * - * @return True if tensors have implicit batch dimension, false otherwise. + * @return Always false since TensorRT 10.0 does not support an implicit batch dimension. + * + * @see createNetworkV2 + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0. + * */ + + + //! + //! + //! + public native @Cast("bool") @Deprecated @NoException(true) boolean hasImplicitBatchDimension(); + + /** + * \brief Get the network definition creation flags for this network definition object. Defaults to 0. * - * This is a network-wide property. Either all tensors in the network - * have an implicit batch dimension or none of them do. 
+ * @return The network definition creation options as a bitmask. + * */ + + + //! + //! + //! + //! + public native @Cast("nvinfer1::NetworkDefinitionCreationFlags") @NoException(true) int getFlags(); + + /** + * \brief Returns true if the network definition creation flag is set * - * hasImplicitBatchDimension() is true if and only if this INetworkDefinition - * was created with createNetworkV2() without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag. + * @see getFlags() * - * @see createNetworkV2 + * @return True if flag is set, false if unset. * */ @@ -1135,7 +989,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @Cast("bool") @NoException(true) boolean hasImplicitBatchDimension(); + public native @Cast("bool") @NoException(true) boolean getFlag(@ByVal NetworkDefinitionCreationFlag networkDefinitionCreationFlag); /** * \brief Enable tensor's value to be computed by IExecutionContext::getShapeBinding. @@ -1148,7 +1002,6 @@ public class INetworkDefinition extends INoCopy { * * \warning It is an error to mark a network input as a shape output. * - * @see isShapeBinding(), getShapeBinding() * */ @@ -1224,7 +1077,7 @@ public class INetworkDefinition extends INoCopy { //! //! public native @NoException(true) IConvolutionLayer addConvolutionNd( - @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); + @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); /** * \brief Add a multi-dimension pooling layer to the network. @@ -1247,8 +1100,8 @@ public class INetworkDefinition extends INoCopy { //! //! //! 
- public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); - public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); + public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); + public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); /** * \brief Add a multi-dimension deconvolution layer to the network. @@ -1279,7 +1132,7 @@ public class INetworkDefinition extends INoCopy { //! //! public native @NoException(true) IDeconvolutionLayer addDeconvolutionNd( - @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); + @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); /** * \brief Add a multi-dimension scale layer to the network. @@ -1313,6 +1166,8 @@ public class INetworkDefinition extends INoCopy { * @return The new Scale layer, or nullptr if it could not be created. * */ + + //! //! //! //! @@ -1323,7 +1178,8 @@ public class INetworkDefinition extends INoCopy { public native @NoException(true) IScaleLayer addScaleNd( @ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis); - /** \brief Add a resize layer to the network. + /** + * \brief Add a resize layer to the network. * * @param input The input tensor to the layer. 
* @@ -1343,13 +1199,13 @@ public class INetworkDefinition extends INoCopy { public native @NoException(true) IResizeLayer addResize(@ByRef ITensor input); /** - * \brief True if network is an explicit precision network + * \brief Add a loop to the network. * - * @deprecated Deprecated in TensorRT 8.0. + * An ILoop provides a way to specify a recurrent subgraph. * - * @see createNetworkV2 + * @return Pointer to ILoop that can be used to add loop-boundary layers for the loop. * - * @return True if network has explicit precision, false otherwise. + * @see ILoop * */ @@ -1358,20 +1214,20 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @Cast("bool") @Deprecated @NoException(true) boolean hasExplicitPrecision(); + public native @NoException(true) ILoop addLoop(); /** - * \brief Add a loop to the network. + * \brief Add an if-then-else to the network. * - * An ILoop provides a way to specify a recurrent subgraph. + * An IIfConditional provides a way to conditionally execute parts of the network. * - * @return Pointer to ILoop that can be used to add loop boundary layers for the loop, - * or nullptr if network has an implicit batch dimension or this version - * of TensorRT does not support loops. + * @return Pointer to the IIfConditional that can be used to add conditional-boundary layers + * for the if-then-else. * - * The network must not have an implicit batch dimension. + * @see IIfConditional * */ + //! //! //! @@ -1384,9 +1240,10 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) ILoop addLoop(); + public native @NoException(true) IIfConditional addIfConditional(); - /** \brief Add a select layer to the network. + /** + * \brief Add a select layer to the network. * * @param condition The condition tensor to the layer. Must have type DataType::kBOOL. * @param thenInput The "then" input tensor to the layer. 
@@ -1414,8 +1271,6 @@ public class INetworkDefinition extends INoCopy { * * then the output dimensions are [1,3,0,9]. * - * The network must not have an implicit batch dimension. - * * The inputs are shape tensors if the output is a shape tensor. * * @see ISelectLayer @@ -1444,6 +1299,8 @@ public class INetworkDefinition extends INoCopy { * The input tensor must be a boolean shape tensor. * */ + + //! //! //! //! @@ -1454,34 +1311,71 @@ public class INetworkDefinition extends INoCopy { public native @NoException(true) IAssertionLayer addAssertion(@ByRef ITensor condition, String message); public native @NoException(true) IAssertionLayer addAssertion(@ByRef ITensor condition, @Cast("const char*") BytePointer message); - /** \brief Add a fill layer to the network. + /** + * \brief Add a fill layer to the network. * - * @param dimensions The output tensor dimensions. + * @param dimensions The output tensor dimensions if input 0 is missing. * @param op The fill operation that the layer applies. * - * \warning For FillOperation::kLINSPACE, dimensions.nbDims must be 1. + * \warning For FillOperation::kLINSPACE, dimensions.nbDims must be 1 for static start/delta. If delta is provided + * as a 1D tensor, the length of delta must match dimensions.nbDims. * * This layer is non-deterministic across subsequent calls as the same inputs will produce different * output tensors if \p op is either FillOperation::kRANDOM_UNIFORM or FillOperation::kRANDOM_NORMAL * due to random state being shared across calls. The output tensors generated are determinstic when * starting from the same initial state. * - * The network must not have an implicit batch dimension. + * @see IFillLayer + * + * @return The new fill layer, or nullptr if it could not be created. + * + * @deprecated Deprecated in TensorRT 9.0. Superseded by three-argument addFill. + * */ + + + //! + //! + //! + //! + //! + //! + //! 
+ public native @Deprecated @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op); + public native @Deprecated @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op); + + /** + * \brief Add a fill layer to the network. + * + * @param dimensions The output tensor dimensions if input 0 is missing. + * @param op The fill operation that the layer applies. + * @param outputType Optional output tensor data type, must be DataType::kFLOAT, DataType::kHALF, DataType::kINT32, + * or DataType::kINT64. This parameter is only used for static alpha/beta. Future calls to set output type using + * setToType or setOutputType must be consistent. + * + * \warning For FillOperation::kLINSPACE, dimensions.nbDims must be 1 for static start/delta. If delta is provided + * as a 1D tensor, the length of delta must match dimensions.nbDims. + * + * This layer is non-deterministic across subsequent calls as the same inputs will produce different + * output tensors if \p op is either FillOperation::kRANDOM_UNIFORM or FillOperation::kRANDOM_NORMAL + * due to random state being shared across calls. The output tensors generated are deterministic when + * starting from the same initial state. * * @see IFillLayer * * @return The new fill layer, or nullptr if it could not be created. * */ + //! //! //! //! //! 
- public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, FillOperation op); - public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @Cast("nvinfer1::FillOperation") int op); + public native @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op, DataType outputType); + public native @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op, @Cast("nvinfer1::DataType") int outputType); - /** \brief Add a padding layer to the network. Only 2D padding is currently supported. + /** + * \brief Add a padding layer to the network. Only 2D padding is currently supported. * * @param input The input tensor to the layer. * @param prePadding The padding to apply to the start of the tensor. @@ -1490,18 +1384,19 @@ public class INetworkDefinition extends INoCopy { * @see IPaddingLayer * * @return The new padding layer, or nullptr if it could not be created. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by addSlice(). * */ + //! //! //! //! //! - public native @Deprecated @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @ByVal @Cast("nvinfer1::Dims*") Dims32 prePadding, @ByVal @Cast("nvinfer1::Dims*") Dims32 postPadding); + //! + public native @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 prePadding, @Cast("const nvinfer1::Dims*") @ByRef Dims64 postPadding); - /** \brief Associate a name with all current uses of the given weights. + /** + * \brief Associate a name with all current uses of the given weights. * * The name must be set after the Weights are used in the network. * Lookup is associative. The name applies to all Weights with matching @@ -1570,6 +1465,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! + //! 
public native @NoException(true) IErrorRecorder getErrorRecorder(); /** @@ -1580,11 +1476,39 @@ public class INetworkDefinition extends INoCopy { * * @see IDequantizeLayer * - * \p input tensor data type must be DataType::kFLOAT. + * \p input tensor data type must be DataType::kINT8/DataType::kFP8. * \p scale tensor data type must be DataType::kFLOAT. The subgraph which terminates with the \p scale tensor must * be a build-time constant. * * @return The new quantization layer, or nullptr if it could not be created. + * + * @deprecated Deprecated in TensorRT 9.0. Superseded by three-argument addDequantize. + * */ + + + //! + //! + //! + //! + //! + //! + public native @Deprecated @NoException(true) IDequantizeLayer addDequantize(@ByRef ITensor input, @ByRef ITensor scale); + + /** + * \brief Add a dequantization layer to the network. + * + * @param input The input tensor to be dequantized. + * @param scale A tensor with the scale value. + * + * @see IDequantizeLayer + * + * \p input tensor data type must be DataType::kINT8/DataType::kFP8/DataType::kINT4. + * \p scale tensor data type defaults to DataType::kFLOAT. For strongly typed networks, it must be the same as the + * output data type. The subgraph which terminates with the \p scale tensor must be a build-time constant. + * \p outputType output tensor data type, default value is DataType::kFLOAT. Future calls to set output type using + * setToType or setOutputType must be consistent. For strongly typed networks, it must be the same as the scale data type. + * + * @return The new quantization layer, or nullptr if it could not be created. * */ @@ -1594,7 +1518,8 @@ public class INetworkDefinition extends INoCopy { //! //! //! 
- public native @NoException(true) IDequantizeLayer addDequantize(@ByRef ITensor input, @ByRef ITensor scale); + public native @NoException(true) IDequantizeLayer addDequantize(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType); + public native @NoException(true) IDequantizeLayer addDequantize(@ByRef ITensor input, @ByRef ITensor scale, @Cast("nvinfer1::DataType") int outputType); /** * \brief Add a Scatter layer to the network with specified mode and axis=0. @@ -1619,6 +1544,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! + //! public native @NoException(true) IScatterLayer addScatter(@ByRef ITensor data, @ByRef ITensor indices, @ByRef ITensor updates, ScatterMode mode); public native @NoException(true) IScatterLayer addScatter(@ByRef ITensor data, @ByRef ITensor indices, @ByRef ITensor updates, @Cast("nvinfer1::ScatterMode") int mode); @@ -1630,11 +1556,13 @@ public class INetworkDefinition extends INoCopy { * * @see IQuantizeLayer * - * \p input tensor data type must be DataType::kFLOAT. + * \p input tensor data type must be DataType::kFLOAT/DataType::kHALF. * \p scale tensor data type must be DataType::kFLOAT. The subgraph which terminates with the \p scale tensor must * be a build-time constant. * * @return The new quantization layer, or nullptr if it could not be created. + * + * @deprecated Deprecated in TensorRT 9.0. Superseded by three-argument addQuantize. * */ @@ -1643,25 +1571,36 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) IQuantizeLayer addQuantize(@ByRef ITensor input, @ByRef ITensor scale); + //! + public native @Deprecated @NoException(true) IQuantizeLayer addQuantize(@ByRef ITensor input, @ByRef ITensor scale); /** - * \brief Add an If-conditional layer to the network. + * \brief Add a quantization layer to the network. * - * An IIfConditional provides a way to conditionally execute parts of the network. + * @param input The input tensor to be quantized. 
+ * @param scale A tensor with the scale value. * - * @see IIfConditional + * @see IQuantizeLayer + * + * \p input tensor data type must be DataType::kFLOAT/DataType::kHALF/DataType::kBF16. + * \p scale tensor data type defaults to DataType::kFLOAT. For strongly typed networks, it must have the same data + * type as the input. The subgraph which terminates with the \p scale tensor must be a build-time constant. + * \p outputType output tensor data type, must be DataType::kINT8 (default), DataType::kFP8 or DataType::kINT4. + * Future calls to set output type using setToType or setOutputType must be consistent. * - * @return The new conditional layer, or nullptr if network has an implicit batch dimension - * or this version of TensorRT does not support conditional execution. + * @return The new quantization layer, or nullptr if it could not be created. * */ + //! //! //! - public native @NoException(true) IIfConditional addIfConditional(); + //! + public native @NoException(true) IQuantizeLayer addQuantize(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType); + public native @NoException(true) IQuantizeLayer addQuantize(@ByRef ITensor input, @ByRef ITensor scale, @Cast("nvinfer1::DataType") int outputType); - /** \brief Add an Einsum layer to the network. + /** + * \brief Add an Einsum layer to the network. * * @param inputs The input tensors to the layer. * @param nbInputs The number of input tensors. @@ -1671,6 +1610,9 @@ public class INetworkDefinition extends INoCopy { * @return The new Einsum layer, or nullptr if it could not be created. * */ + + //! + //! //! //! //! @@ -1679,10 +1621,12 @@ public class INetworkDefinition extends INoCopy { public native @NoException(true) IEinsumLayer addEinsum(@ByPtrPtr ITensor inputs, int nbInputs, String equation); public native @NoException(true) IEinsumLayer addEinsum(@ByPtrPtr ITensor inputs, int nbInputs, @Cast("const char*") BytePointer equation); - /** \brief Add a GridSample layer to the network. 
+ /** + * \brief Add a GridSample layer to the network. * * @param input The input tensor to the layer. * @param grid The grid tensor to the layer. + * * @see IGridSampleLayer * * Creates a GridSample layer with a InterpolationMode::kLINEAR, unaligned corners, @@ -1773,8 +1717,7 @@ public class INetworkDefinition extends INoCopy { //! //! //! - public native @NoException(true) INormalizationLayer addNormalization( - @ByRef ITensor input, @ByRef ITensor scale, @ByRef ITensor bias, @Cast("uint32_t") int axesMask); + public native @NoException(true) INormalizationLayer addNormalization(@ByRef ITensor input, @ByRef ITensor scale, @ByRef ITensor bias, @Cast("uint32_t") int axesMask); /** * \brief Return the builder from which this INetworkDefinition was created. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INoCopy.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INoCopy.java index f808162d646..92551f377f5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INoCopy.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INoCopy.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INonZeroLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INonZeroLayer.java index eeac8e81ec5..6163bee3fcb 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INonZeroLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INonZeroLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INormalizationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INormalizationLayer.java index 
9250c70277d..5e44a49db61 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INormalizationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/INormalizationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; // class IReverseSequenceLayer -/** \class INormalizationLayer +/** + * \class INormalizationLayer * * \brief A normalization layer in a network definition. * @@ -34,48 +35,60 @@ * * Where Mean(X, axes) is a reduction over a set of axes, and Variance(X) = Mean((X - Mean(X, axes)) ^ 2, axes). * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. */ - + * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. + * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class INormalizationLayer extends ILayer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public INormalizationLayer(Pointer p) { super(p); } - /** \brief Set the epsilon value used for the normalization calculation. + /** + * \brief Set the epsilon value used for the normalization calculation. * * The default value of \p eps is 1e-5F. * * @param eps The epsilon value used for the normalization calculation. * */ + + //! //! //! public native @NoException(true) void setEpsilon(float eps); - /** \brief Get the epsilon value used for the normalization calculation. + /** + * \brief Get the epsilon value used for the normalization calculation. * * @return The epsilon value used for the normalization calculation. * */ + + //! //! //! 
public native @NoException(true) float getEpsilon(); - /** \brief Set the reduction axes for the normalization calculation. + /** + * \brief Set the reduction axes for the normalization calculation. * * @param axesMask The axes used for the normalization calculation. * */ + + //! //! //! public native @NoException(true) void setAxes(@Cast("uint32_t") int axesMask); - /** \brief Get the axes value used for the normalization calculation. + /** + * \brief Get the axes value used for the normalization calculation. * * @return The axes used for the normalization calculation. * */ + + //! //! //! //! @@ -84,7 +97,8 @@ public class INormalizationLayer extends ILayer { //! public native @Cast("uint32_t") @NoException(true) int getAxes(); - /** \brief Set the number of groups used to split the channels in the normalization calculation. + /** + * \brief Set the number of groups used to split the channels in the normalization calculation. * * The input tensor channels are divided into \p nbGroups groups, and normalization is performed per group. * The channel dimension is considered to be the second dimension in a [N, C, H, W, ...] formatted tensor. @@ -100,42 +114,60 @@ public class INormalizationLayer extends ILayer { * @param nbGroups The number of groups to split the channels into for the normalization calculation. * */ + + //! //! //! - public native @NoException(true) void setNbGroups(int nbGroups); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); - /** \brief Get the number of groups used to split the channels for the normalization calculation. + /** + * \brief Get the number of groups used to split the channels for the normalization calculation. * * @return The number of groups used to split the channel used for the normalization calculation. * */ + + //! + //! + //! + //! //! //! //! //! //! 
- public native @NoException(true) int getNbGroups(); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); - /** \brief Set the compute precision of this layer. + /** + * \brief Set the compute precision of this layer. * * @param type The datatype used for the compute precision of this layer. * - * By default TensorRT will run the normalization computation in DataType::kFLOAT32 even in mixed precision - * mode regardless of any set builder flags to avoid overflow errors. To override this default, - * use this function to set the desired compute precision. + * By default, to avoid overflow errors, TensorRT will run the normalization computation in DataType::kFLOAT32 + * even in mixed precision mode regardless of builder flags. To override this default, use this method + * to set the desired compute precision. * - * setPrecision() and setOutputPrecision() functions can still be called to control the input and output data types - * to this layer. + * For a weakly typed network: + * + * * Method setOutputType() can still be called to control the output data type. + * + * * Method setPrecision() can still be called. The input data is cast to that precision before + * being cast to the compute precision. + * + * Neither of these two methods are allowed for a strongly typed network. * * Only DataType::kFLOAT32 and DataType::kHALF are valid types for \p type. * */ + + //! //! //! public native @NoException(true) void setComputePrecision(DataType type); public native @NoException(true) void setComputePrecision(@Cast("nvinfer1::DataType") int type); - /** \brief Get the compute precision of this layer. + /** + * \brief Get the compute precision of this layer. * * @return The datatype used for the compute precision of this layer. 
* */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOneHotLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOneHotLayer.java index 48de94784aa..f3378374468 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOneHotLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOneHotLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -32,8 +32,7 @@ * The depth tensor must be a build-time constant, and its value should be positive. * * Output is a tensor with rank = rank(indices)+1, where the added dimension contains the one-hot encoding. * The data types of Output is equal to the Values data type. - * * Axis is a scaler specifying to which dimension of the output one-hot encoding is added. - * Axis defaults to -1, that is the new dimension in the output is its final dimension. + * * Axis is a scalar specifying to which dimension of the output one-hot encoding is added. * Valid range for axis is -rank(indices)-1 <= axis <= rank(indices). * * The output is computed by copying off_values to all output elements, then setting on_value on the indices diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOptimizationProfile.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOptimizationProfile.java index d9a02b89141..274a26027c2 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOptimizationProfile.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOptimizationProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -80,8 +80,8 @@ public class IOptimizationProfile extends INoCopy { //! //! //! 
- public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims); - public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims); + public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); /** * \brief Get the minimum / optimum / maximum dimensions for a dynamic input tensor. @@ -105,25 +105,27 @@ public class IOptimizationProfile extends INoCopy { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(String inputName, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select); + //! + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(String inputName, OptProfileSelector select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select); /** * \brief Set the minimum / optimum / maximum values for an input shape tensor. * * This function must be called three times for every input tensor t that is a shape tensor (t.isShape() == true). - * This implies that the datatype of t is DataType::kINT32, the rank is either 0 or 1, and the dimensions of t - * are fixed at network definition time. 
This function must not be called for any input tensor that is not a - * shape tensor. + * This implies that the dimensions of t are fixed at network definition time and the volume does not exceed 64. + * This function must not be called for any input tensor that is not a shape tensor. * * Each time this function is called for the same input tensor, the same nbValues must be supplied (either 1 * if the tensor rank is 0, or dims.d[0] if the rank is 1). Furthermore, if minVals, optVals, maxVals are the * minimum, optimum, and maximum values, it must be true that minVals[i] <= optVals[i] <= maxVals[i] for * i = 0, ..., nbValues - 1. Execution of the network must be valid for the optVals. * - * Shape tensors are tensors that contribute to shape calculations in some way, and can contain - * any int32_t values appropriate for the network. Shape tensors of other data types (e.g. float) are not - * supported. Examples: + * Shape tensors are tensors that contribute to shape calculations in some way. While input shape tensors can be + * type kBOOL, kINT32, or kINT64, the values used to set the minimum, optimium, and maximum values must fit in int32_t. + * Boolean values are represented as 0 for false and 1 for true. + * + * Examples: * * * A shape tensor used as the second input to IShuffleLayer can contain a -1 wildcard. * The corresponding minVal[i] should be -1. @@ -139,6 +141,7 @@ public class IOptimizationProfile extends INoCopy { * @param inputName The input tensor name * @param select Whether to set the minimum, optimum, or maximum input values. * @param values An array of length nbValues containing the minimum, optimum, or maximum shape tensor elements. + * For multidimensional tensors, the array is in row-major order. * @param nbValues The length of the value array, which must equal the number of shape tensor elements (>= 1) * * @return false if an inconsistency was detected (e.g. 
nbValues does not match a previous call for the same diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOutputAllocator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOutputAllocator.java index cd349502a92..552bceafb60 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOutputAllocator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IOutputAllocator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,28 +18,14 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * \class IOutputAllocator - * - * \brief Callback from ExecutionContext::enqueueV3() - * - * Clients should override the method reallocateOutput. - * - * @see IExecutionContext::enqueueV3() - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IOutputAllocator extends Pointer { +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IOutputAllocator extends IVersionedInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IOutputAllocator(Pointer p) { super(p); } /** - * \brief Return the API version of this IOutputAllocator. - * - * Do not override this method as it is used by the TensorRT library to maintain - * backwards-compatibility with IOutputAllocator. The value will change if Nvidia - * adds additional virtual methods to this class. + * \brief Return version information associated with this interface. Applications must not override this method. * */ @@ -49,10 +35,12 @@ public class IOutputAllocator extends Pointer { //! //! //! 
- public native @NoException(true) int getInterfaceVersion(); + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); /** * \brief Return a pointer to memory for an output tensor, or nullptr if memory cannot be allocated. + * If the requested memory size exceeds the currentMemory size, the currentMemory can be freed as well. + * If currentMemory is known to be big enough, one option is to return currentMemory. * * @param tensorName name of the output tensor. * @param currentMemory points to the address set by IExectionContext::setTensorAddress. @@ -61,11 +49,46 @@ public class IOutputAllocator extends Pointer { * * @return A pointer to memory to use for the output tensor or nullptr. * - * If currentMemory is known to be big enough, one option is to return currentMemory. - * * To preallocate memory and have the engine fail if the preallocation is not big enough, * use IExecutionContext::setTensorAddress to set a pointer to the preallocated memory, * and have reallocateOutput return nullptr if that memory is not big enough. + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by reallocateOutputAsync with cudaStream_t argument + * */ + + + //! + //! + //! + //! + //! + //! + public native @Deprecated @NoException(true) Pointer reallocateOutput( + String tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment); + public native @Deprecated @NoException(true) Pointer reallocateOutput( + @Cast("const char*") BytePointer tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment); + + /** + * \brief Return a pointer to memory for an output tensor, or nullptr if memory cannot be allocated. + * If the requested memory size exceeds the currentMemory size, the currentMemory can be freed as well. + * If currentMemory is known to be big enough, one option is to return currentMemory. + * + * @param tensorName name of the output tensor. 
+ * @param currentMemory points to the address set by IExectionContext::setTensorAddress. + * @param size number of bytes required. Always positive, even for an empty tensor. + * @param alignment required alignment of the allocation. + * @param stream The stream in which to execute the kernels. + * + * @return A pointer to memory to use for the output tensor or nullptr. + * + * To preallocate memory and have the engine fail if the preallocation is not big enough, + * use IExecutionContext::setTensorAddress to set a pointer to the preallocated memory, + * and have reallocateOutputAsync return nullptr if that memory is not big enough. + * + * The default definition exists for sake of backward compatibility with earlier versions of TensorRT. + * Eventually this method will become a pure virtual method that requires an override, and method + * reallocateOutput() will disappear. Code moving away from TensorRT 9.x should override method + * reallocateOutputAsync() and NOT override method reallocateOutput(). * */ @@ -73,8 +96,10 @@ public class IOutputAllocator extends Pointer { //! //! //! - public native @NoException(true) Pointer reallocateOutput(String tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment); - public native @NoException(true) Pointer reallocateOutput(@Cast("const char*") BytePointer tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment); + public native Pointer reallocateOutputAsync( + String tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment, CUstream_st arg4); + public native Pointer reallocateOutputAsync( + @Cast("const char*") BytePointer tensorName, Pointer currentMemory, @Cast("uint64_t") long size, @Cast("uint64_t") long alignment, CUstream_st arg4); /** * \brief Called by TensorRT when the shape of the output tensor is known. 
@@ -84,6 +109,6 @@ public class IOutputAllocator extends Pointer { * @param dims dimensions of the output * @param tensorName name of the tensor * */ - public native @NoException(true) void notifyShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); - public native @NoException(true) void notifyShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); + public native @NoException(true) void notifyShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @NoException(true) void notifyShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPaddingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPaddingLayer.java index 80c3b53d5e4..77b9438abf3 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPaddingLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPaddingLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -35,72 +35,6 @@ public class IPaddingLayer extends ILayer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IPaddingLayer(Pointer p) { super(p); } - /** - * \brief Set the padding that is applied at the start of the tensor. - * - * Negative padding results in trimming the edge by the specified amount - * - * @see getPrePadding - * - * @deprecated Superseded by setPrePaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setPrePadding(@ByVal DimsHW padding); - - /** - * \brief Get the padding that is applied at the start of the tensor. - * - * @see setPrePadding - * - * @deprecated Superseded by getPrePaddingNd. 
Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getPrePadding(); - - /** - * \brief Set the padding that is applied at the end of the tensor. - * - * Negative padding results in trimming the edge by the specified amount - * - * @see getPostPadding - * - * @deprecated Superseded by setPostPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setPostPadding(@ByVal DimsHW padding); - - /** - * \brief Get the padding that is applied at the end of the tensor. - * - * @see setPostPadding - * - * @deprecated Superseded by getPostPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getPostPadding(); - /** * \brief Set the padding that is applied at the start of the tensor. * @@ -116,7 +50,7 @@ public class IPaddingLayer extends ILayer { //! //! //! - public native @NoException(true) void setPrePaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPrePaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the padding that is applied at the start of the tensor. @@ -132,7 +66,7 @@ public class IPaddingLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePaddingNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePaddingNd(); /** * \brief Set the padding that is applied at the end of the tensor. @@ -149,7 +83,7 @@ public class IPaddingLayer extends ILayer { //! //! //! 
- public native @NoException(true) void setPostPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPostPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the padding that is applied at the end of the tensor. @@ -158,5 +92,5 @@ public class IPaddingLayer extends ILayer { * * @see setPostPaddingNd * */ - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPaddingNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPaddingNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IParametricReLULayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IParametricReLULayer.java index fc88c2d3967..9d359930e51 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IParametricReLULayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IParametricReLULayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPlugin.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPlugin.java index 6646199e909..9900b521931 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPlugin.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPlugin.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,8 +18,6 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -// @cond SuppressDoxyWarnings @Namespace("nvinfer1") @Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IPlugin extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCapability.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCapability.java new file mode 100644 index 00000000000..33fa9ab1137 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCapability.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginCapability extends IVersionedInterface { + /** Empty constructor. Calls {@code super((Pointer)null)}. */ + public IPluginCapability() { super((Pointer)null); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IPluginCapability(Pointer p) { super(p); } +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreator.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreator.java index f42e71fe076..02820857d29 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreator.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreator.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,40 +19,17 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** - * \class IPluginCreator - * - * \brief Plugin creator class for user implemented layers. - * - * @see IPlugin and IPluginFactory - * */ - -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IPluginCreator extends Pointer { +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginCreator extends IPluginCreatorInterface { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IPluginCreator(Pointer p) { super(p); } - /** - * \brief Return the version of the API the plugin creator was compiled with. - * - * \u005Cusage - * - Allowed context for the API call - * - Thread-safe: Yes, the implementation provided here is safe to call from any thread. - * */ - - - //! - //! - //! - //! - public native @NoException(true) int getTensorRTVersion(); - /** * \brief Return the plugin name. * - * \warning The string returned must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including + * the NULL terminator. 
* * \u005Cusage * - Allowed context for the API call @@ -71,8 +48,8 @@ public class IPluginCreator extends Pointer { /** * \brief Return the plugin version. * - * \warning The string returned must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including + * the NULL terminator. * * \u005Cusage * - Allowed context for the API call @@ -85,10 +62,12 @@ public class IPluginCreator extends Pointer { //! //! //! + //! public native @NoException(true) String getPluginVersion(); /** - * \brief Return a list of fields that needs to be passed to createPlugin. + * \brief Return a list of fields that need to be passed to createPlugin. + * * @see PluginFieldCollection * * \u005Cusage @@ -102,11 +81,15 @@ public class IPluginCreator extends Pointer { //! //! //! + //! public native @Const @NoException(true) PluginFieldCollection getFieldNames(); /** * \brief Return a plugin object. Return nullptr in case of error. * + * @param name A NULL-terminated name string of length 1024 or less, including the NULL terminator. + * @param fc A pointer to a collection of fields needed for constructing the plugin. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads @@ -115,6 +98,8 @@ public class IPluginCreator extends Pointer { * */ + //! + //! //! //! //! @@ -124,6 +109,12 @@ public class IPluginCreator extends Pointer { /** * \brief Called during deserialization of plugin layer. Return a plugin object. * + * @param name A NULL-terminated name string of length 1024 or less, including the NULL terminator. + * @param serialData The start address of a byte array with the serialized plugin representation. + * @param serialLength The length in bytes of the byte array with the serialized plugin representation. 
+ * + * @return A deserialized plugin object + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads @@ -136,6 +127,7 @@ public class IPluginCreator extends Pointer { //! //! //! + //! public native @NoException(true) IPluginV2 deserializePlugin(String name, @Const Pointer serialData, @Cast("size_t") long serialLength); public native @NoException(true) IPluginV2 deserializePlugin(@Cast("const char*") BytePointer name, @Const Pointer serialData, @Cast("size_t") long serialLength); @@ -143,6 +135,8 @@ public class IPluginCreator extends Pointer { * \brief Set the namespace of the plugin creator based on the plugin * library it belongs to. This can be set while registering the plugin creator. * + * @param pluginNamespace A NULL-terminated namespace string of length 1024 or less, including the NULL terminator + * * @see IPluginRegistry::registerCreator() * * \u005Cusage @@ -163,8 +157,8 @@ public class IPluginCreator extends Pointer { /** * \brief Return the namespace of the plugin creator object. * - * \warning The string returned must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. * * \u005Cusage * - Allowed context for the API call @@ -173,4 +167,8 @@ public class IPluginCreator extends Pointer { * multiple engines concurrently sharing plugins. * */ public native @NoException(true) String getPluginNamespace(); + /** + * \brief Return version information associated with this interface. Applications must not override this method. 
+ * */ + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorInterface.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorInterface.java new file mode 100644 index 00000000000..995150e5f62 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorInterface.java @@ -0,0 +1,27 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginCreatorInterface extends IVersionedInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IPluginCreatorInterface(Pointer p) { super(p); } + +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorV3One.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorV3One.java new file mode 100644 index 00000000000..ac753b0147c --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginCreatorV3One.java @@ -0,0 +1,107 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginCreatorV3One extends IPluginCreatorInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginCreatorV3One(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Return a plugin object. Return nullptr in case of error. + * + * @param name A NULL-terminated name string of length 1024 or less, including the NULL terminator. + * @param fc A pointer to a collection of fields needed for constructing the plugin. 
+ * @param phase The TensorRT phase in which the plugin is being created + * + * When the phase is TensorRTPhase::kRUNTIME, the PluginFieldCollection provided for serialization by the plugin's + * runtime interface will be passed as fc. + * + * \note The returned plugin object must be in an initialized state + * */ + + + //! + //! + //! + public native @NoException(true) IPluginV3 createPlugin( + String name, @Const PluginFieldCollection fc, TensorRTPhase phase); + public native @NoException(true) IPluginV3 createPlugin( + @Cast("const char*") BytePointer name, @Const PluginFieldCollection fc, @Cast("nvinfer1::TensorRTPhase") int phase); + + /** + * \brief Return a list of fields that need to be passed to createPlugin() when creating a plugin for use in the + * TensorRT build phase. + * + * @see PluginFieldCollection + * */ + + + //! + //! + //! + public native @Const @NoException(true) PluginFieldCollection getFieldNames(); + + /** + * \brief Return the plugin name. + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including + * the NULL terminator. + * */ + + + //! + //! + //! + public native @NoException(true) String getPluginName(); + + /** + * \brief Return the plugin version. + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including + * the NULL terminator. + * */ + + + //! + //! + //! + public native @NoException(true) String getPluginVersion(); + + /** + * \brief Return the plugin namespace. + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including + * the NULL terminator. 
+ * */ + public native @NoException(true) String getPluginNamespace(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginExt.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginExt.java index 2b8fa45c684..6d19918cc27 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginExt.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginExt.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginFactory.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginFactory.java index 72b921b2551..0404a6f9f5c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginFactory.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginFactory.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginLayer.java index 744950679b9..a332911fe46 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginRegistry.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginRegistry.java index d205c95dd9a..3951ec8237d 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginRegistry.java +++ 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginRegistry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -35,28 +35,32 @@ * \warning In the automotive safety context, be sure to call IPluginRegistry::setErrorRecorder() to register * an error recorder with the registry before using other methods in the registry. * */ - @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IPluginRegistry extends Pointer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public IPluginRegistry(Pointer p) { super(p); } - /** Pointer for plugin library handle. */ + /** + * \brief Pointer for plugin library handle. + * */ + //! //! //! //! + //! @Namespace @Name("void") @Opaque public static class PluginLibraryHandle extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ public PluginLibraryHandle() { super((Pointer)null); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public PluginLibraryHandle(Pointer p) { super(p); } } + /** - * \brief Register a plugin creator. Returns false if one with same type - * is already registered. + * \brief Register a plugin creator implementing IPluginCreator. Returns false if any plugin creator with the same + * name, version or namespace is already registered. * * \warning The string pluginNamespace must be 1024 bytes or less including the NULL terminator and must be NULL * terminated. @@ -64,22 +68,34 @@ public class IPluginRegistry extends Pointer { * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes; calls to this method will be synchronized by a mutex. + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by + * IPluginRegistry::registerCreator(IPluginCreatorInterface&, AsciiChar const* const). * */ //! //! //! 
- public native @Cast("bool") @NoException(true) boolean registerCreator(@ByRef IPluginCreator creator, String pluginNamespace); - public native @Cast("bool") @NoException(true) boolean registerCreator(@ByRef IPluginCreator creator, @Cast("const char*") BytePointer pluginNamespace); + //! + //! + public native @Cast("bool") @Deprecated @NoException(true) boolean registerCreator( + @ByRef IPluginCreator creator, String pluginNamespace); + public native @Cast("bool") @Deprecated @NoException(true) boolean registerCreator( + @ByRef IPluginCreator creator, @Cast("const char*") BytePointer pluginNamespace); /** * \brief Return all the registered plugin creators and the number of * registered plugin creators. Returns nullptr if none found. * + * \warning If any plugin creators are registered or deregistered after calling this function, the returned pointer + * is not guaranteed to be valid thereafter. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: No + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by IPluginRegistry::getAllCreators(int32_t* const). * */ @@ -87,6 +103,8 @@ public class IPluginRegistry extends Pointer { //! //! //! + //! + //! public native @Cast("nvinfer1::IPluginCreator*const*") PointerPointer getPluginCreatorList(IntPointer numCreators); /** @@ -96,16 +114,24 @@ public class IPluginRegistry extends Pointer { * \warning The strings pluginName, pluginVersion, and pluginNamespace must be 1024 bytes or less including the * NULL terminator and must be NULL terminated. * + * \warning Returns nullptr if a plugin creator with matching name, version, and namespace is found, but is not a + * descendent of IPluginCreator + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by IPluginRegistry::getCreator(AsciiChar const* const, + * AsciiChar const* const, AsciiChar const* const). 
* */ - public native @NoException(true) IPluginCreator getPluginCreator(String pluginName, String pluginVersion, - String pluginNamespace/*=""*/); - public native @NoException(true) IPluginCreator getPluginCreator(String pluginName, String pluginVersion); - public native @NoException(true) IPluginCreator getPluginCreator(@Cast("const char*") BytePointer pluginName, @Cast("const char*") BytePointer pluginVersion, - @Cast("const char*") BytePointer pluginNamespace/*=""*/); - public native @NoException(true) IPluginCreator getPluginCreator(@Cast("const char*") BytePointer pluginName, @Cast("const char*") BytePointer pluginVersion); + public native @Deprecated @NoException(true) IPluginCreator getPluginCreator(String pluginName, + String pluginVersion, String pluginNamespace/*=""*/); + public native @Deprecated @NoException(true) IPluginCreator getPluginCreator(String pluginName, + String pluginVersion); + public native @Deprecated @NoException(true) IPluginCreator getPluginCreator(@Cast("const char*") BytePointer pluginName, + @Cast("const char*") BytePointer pluginVersion, @Cast("const char*") BytePointer pluginNamespace/*=""*/); + public native @Deprecated @NoException(true) IPluginCreator getPluginCreator(@Cast("const char*") BytePointer pluginName, + @Cast("const char*") BytePointer pluginVersion); // @cond SuppressDoxyWarnings @@ -120,14 +146,14 @@ public class IPluginRegistry extends Pointer { * recorder to nullptr unregisters the recorder with the interface, resulting in a call to decRefCount if * a recorder has been registered. * - * @param recorder The error recorder to register with this interface. */ - // - /** @see getErrorRecorder() - /** - /** \u005Cusage - /** - Allowed context for the API call - /** - Thread-safe: No - /** */ + * @param recorder The error recorder to register with this interface. + * + * @see getErrorRecorder() + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: No + * */ //! 
@@ -160,22 +186,25 @@ public class IPluginRegistry extends Pointer { //! //! //! + //! public native @NoException(true) IErrorRecorder getErrorRecorder(); /** - * \brief Deregister a previously registered plugin creator. + * \brief Deregister a previously registered plugin creator implementing IPluginCreator. * * Since there may be a desire to limit the number of plugins, * this function provides a mechanism for removing plugin creators registered in TensorRT. * The plugin creator that is specified by \p creator is removed from TensorRT and no longer tracked. * * @return True if the plugin creator was deregistered, false if it was not found in the registry or otherwise - * could - * not be deregistered. + * could not be deregistered. * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes + * + * @deprecated Deprecated in TensorRT 10.0. Superseded by + * IPluginRegistry::deregisterCreator(IPluginCreatorInterface const&). * */ @@ -183,7 +212,7 @@ public class IPluginRegistry extends Pointer { //! //! //! - public native @Cast("bool") @NoException(true) boolean deregisterCreator(@Const @ByRef IPluginCreator creator); + public native @Cast("bool") @Deprecated @NoException(true) boolean deregisterCreator(@Const @ByRef IPluginCreator creator); /** * \brief Return whether the parent registry will be searched if a plugin is not found in this registry @@ -238,5 +267,134 @@ public class IPluginRegistry extends Pointer { * * @param handle the plugin library handle to deregister. * */ + + + //! + //! + //! + //! public native @NoException(true) void deregisterLibrary(PluginLibraryHandle handle); + + /** + * \brief Register a plugin creator. Returns false if a plugin creator with the same type + * is already registered. + * + * \warning The string pluginNamespace must be 1024 bytes or less including the NULL terminator and must be NULL + * terminated. 
+ * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes; calls to this method will be synchronized by a mutex. + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean registerCreator(@ByRef IPluginCreatorInterface creator, String pluginNamespace); + public native @Cast("bool") @NoException(true) boolean registerCreator(@ByRef IPluginCreatorInterface creator, @Cast("const char*") BytePointer pluginNamespace); + + /** + * \brief Return all registered plugin creators. Returns nullptr if none found. + * + * \warning If any plugin creators are registered or deregistered after calling this function, the returned pointer + * is not guaranteed to be valid thereafter. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: No + * */ + + + //! + //! + //! + //! + public native @Cast("nvinfer1::IPluginCreatorInterface*const*") @NoException(true) PointerPointer getAllCreators(IntPointer numCreators); + + /** + * \brief Return a registered plugin creator based on plugin name, version, and namespace associated with the + * plugin during network creation. + * + * \warning The strings pluginName, pluginVersion, and pluginNamespace must be 1024 bytes or less including the + * NULL terminator and must be NULL terminated. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes + * */ + + + //! + //! + //! + //! + //! 
+ public native @NoException(true) IPluginCreatorInterface getCreator(String pluginName, String pluginVersion, + String pluginNamespace/*=""*/); + public native @NoException(true) IPluginCreatorInterface getCreator(String pluginName, String pluginVersion); + public native @NoException(true) IPluginCreatorInterface getCreator(@Cast("const char*") BytePointer pluginName, @Cast("const char*") BytePointer pluginVersion, + @Cast("const char*") BytePointer pluginNamespace/*=""*/); + public native @NoException(true) IPluginCreatorInterface getCreator(@Cast("const char*") BytePointer pluginName, @Cast("const char*") BytePointer pluginVersion); + + /** + * \brief Deregister a previously registered plugin creator. + * + * Since there may be a desire to limit the number of plugins, + * this function provides a mechanism for removing plugin creators registered in TensorRT. + * The plugin creator that is specified by \p creator is removed from TensorRT and no longer tracked. + * + * @return True if the plugin creator was deregistered, false if it was not found in the registry or otherwise + * could not be deregistered. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean deregisterCreator(@Const @ByRef IPluginCreatorInterface creator); + + /** + * \brief Get a plugin resource + * @param key Key for identifying the resource. Cannot be null. + * @param resource A plugin resource object. The object will only need to be valid until this method returns, as + * only a clone of this object will be registered by TRT. Cannot be null. + * + * @return Registered plugin resource object + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes; calls to this method will be synchronized by a mutex. + * */ + + + //! + //! + //! + //! + //! 
+ public native @NoException(true) IPluginResource acquirePluginResource(String key, IPluginResource resource); + public native @NoException(true) IPluginResource acquirePluginResource(@Cast("const char*") BytePointer key, IPluginResource resource); + + /** + * \brief Decrement reference count for the resource with this key + * If reference count goes to zero after decrement, release() will be invoked on the resource, the key will + * be deregistered and the resource object will be deleted + * + * @param key Key that was used to register the resource. Cannot be null. + * + * @return 0 for success, else non-zero + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes; calls to this method will be synchronized by a mutex. + * */ + public native @NoException(true) int releasePluginResource(String key); + public native @NoException(true) int releasePluginResource(@Cast("const char*") BytePointer key); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResource.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResource.java new file mode 100644 index 00000000000..40ffb0e7e8b --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResource.java @@ -0,0 +1,75 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = 
org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginResource extends IVersionedInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginResource(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + //! + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + /** + * \brief Free the underlying resource + * + * This will only be called for IPluginResource objects that were produced from IPluginResource::clone() + * + * The IPluginResource object on which release() is called must still be in a clone-able state + * after release() returns + * + * @return 0 for success, else non-zero + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: No; this method is not required to be thread-safe + * */ + + + //! + //! + //! + //! + //! + public native @NoException(true) int release(); + + /** + * \brief Clone the resource object + * + * \note Resource initialization (if any) may be skipped for non-cloned objects since only clones will be + * registered by TensorRT + * + * @return Pointer to cloned object. nullptr if there was an issue. + * + * \u005Cusage + * - Allowed context for the API call + * - Thread-safe: Yes; this method is required to be thread-safe and may be called from multiple threads. 
+ * */ + public native @NoException(true) IPluginResource clone(); + + +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResourceContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResourceContext.java new file mode 100644 index 00000000000..7df6b53b7e3 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginResourceContext.java @@ -0,0 +1,55 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +/** + * \class IPluginResourceContext + * + * \brief Interface for plugins to access per context resources provided by TensorRT + * + * There is no public way to construct an IPluginResourceContext. It appears as an argument to + * IPluginV3OneRuntime::attachToContext(). Overrides of that method can use the IPluginResourceContext object to access + * any available per context resources. + * + * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. + * + * @see IPluginV3OneRuntime::attachToContext() + * */ +@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginResourceContext extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IPluginResourceContext(Pointer p) { super(p); } + + /** \brief Get the GPU allocator associated with the resource context + * + * @see IPluginV3OneRuntime::attachToContext() + * */ + + //! + //! + public native @NoException(true) IGpuAllocator getGpuAllocator(); + + /** \brief Get the error recorder associated with the resource context + * + * @see IPluginV3OneRuntime::attachToContext() + * */ + public native @NoException(true) IErrorRecorder getErrorRecorder(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2.java index 8d40a363545..7585c0be894 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IPluginV2 +/** + * \class IPluginV2 * * \brief Plugin class for user-implemented layers. * @@ -45,6 +46,8 @@ public class IPluginV2 extends Pointer { * Do not override this method as it is used by the TensorRT library to maintain backwards-compatibility with * plugins. * + * @return The TensorRT version in the format (major * 100 + minor) * 100 + patch. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, the implementation provided here is safe to call from any thread. @@ -55,14 +58,16 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) int getTensorRTVersion(); /** * \brief Return the plugin type. 
Should match the plugin name returned by the corresponding plugin creator + * * @see IPluginCreator::getPluginName() * - * \warning The string returned must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. * * \u005Cusage * - Allowed context for the API call @@ -75,14 +80,16 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) String getPluginType(); /** * \brief Return the plugin version. Should match the plugin version returned by the corresponding plugin creator + * * @see IPluginCreator::getPluginVersion() * - * \warning The string returned must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. * * \u005Cusage * - Allowed context for the API call @@ -101,7 +108,7 @@ public class IPluginV2 extends Pointer { /** * \brief Get the number of outputs from the layer. * - * @return The number of outputs. + * @return The number of outputs, which is a positive integer. * * This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called * prior to any call to initialize(). @@ -119,14 +126,19 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) int getNbOutputs(); /** * \brief Get the dimension of an output tensor. * - * @param index The index of the output tensor. - * @param inputs The input tensors. - * @param nbInputDims The number of input tensors. + * @param index The index of the output tensor. Will lie in the valid range (between 0 and getNbOutputs()-1 + * inclusive). + * @param inputs The input tensor dimensions. Will be the start address of a Dims array of length nbInputDims. 
+ * @param nbInputDims The number of input tensors. Will be a non-negative integer. + * + * @return The output tensor dimensions if the index is in the valid range. + * An invalid value of Dims{-1, {}} must be returned if the index is not in the valid range. * * This function is called by the implementations of INetworkDefinition and IBuilder. In particular, it is called * prior to any call to initialize(). @@ -136,7 +148,7 @@ public class IPluginV2 extends Pointer { * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads * when building networks on multiple devices sharing the same plugin. * - * \note In any non-IPluginV2DynamicExt plugin, batch size should not be included in the returned dimensions, + * \note In any non-IPluginV2DynamicExt plugin, batch size must not be included in the returned dimensions, * even if the plugin is expected to be run in a network with explicit batch mode enabled. * Please see the TensorRT Developer Guide for more details on how plugin inputs and outputs behave. * */ @@ -149,13 +161,15 @@ public class IPluginV2 extends Pointer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getOutputDimensions(int index, @Cast("const nvinfer1::Dims*") Dims32 inputs, int nbInputDims); + //! + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getOutputDimensions(int index, @Cast("const nvinfer1::Dims*") Dims64 inputs, int nbInputDims); /** * \brief Check format support. * * @param type DataType requested. * @param format PluginFormat requested. + * * @return true if the plugin supports the type-format combination. * * This function is called by the implementations of INetworkDefinition, IBuilder, and @@ -193,13 +207,14 @@ public class IPluginV2 extends Pointer { * This function is called by the builder prior to initialize(). 
It provides an opportunity for the layer to make * algorithm choices on the basis of its weights, dimensions, and maximum batch size. * - * @param inputDims The input tensor dimensions. - * @param nbInputs The number of inputs. - * @param outputDims The output tensor dimensions. - * @param nbOutputs The number of outputs. + * @param inputDims The input tensor dimensions. Will be the start address of a Dims array of length nbInputs. + * @param nbInputs The number of inputs. Will be a non-negative integer. + * @param outputDims The output tensor dimensions. Will be the start address of a Dims array of length nbOutputs. + * @param nbOutputs The number of outputs. Will be a positive integer identical to the return value of + * getNbOutputs(). * @param type The data type selected for the engine. * @param format The format selected for the engine. - * @param maxBatchSize The maximum batch size. + * @param maxBatchSize The maximum batch size. Will be a positive integer. * * The dimensions passed here do not include the outermost batch size (i.e. for 2-D image networks, they will be * 3-dimensional CHW dimensions). @@ -224,9 +239,9 @@ public class IPluginV2 extends Pointer { //! //! //! 
- public native @NoException(true) void configureWithFormat(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configureWithFormat(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, DataType type, @Cast("nvinfer1::PluginFormat") TensorFormat format, int maxBatchSize); - public native @NoException(true) void configureWithFormat(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configureWithFormat(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType") int type, @Cast("nvinfer1::PluginFormat") int format, int maxBatchSize); /** @@ -245,11 +260,13 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) int initialize(); /** * \brief Release resources acquired during plugin layer initialization. This is called when the engine is * destroyed. + * * @see initialize() * * \u005Cusage @@ -266,15 +283,19 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) void terminate(); /** * \brief Find the workspace size required by the layer. * - * This function is called during engine startup, after initialize(). The workspace size returned should be + * This function is called during engine startup, after initialize(). The workspace size returned must be * sufficient for any batch size up to the maximum. * - * @return The workspace size. + * @param maxBatchSize The maximum batch size, which will be a positive integer. + * + * @return The workspace size in bytes, i.e. the device memory size that the plugin requires for its internal + * computations. 
* * \u005Cusage * - Allowed context for the API call @@ -295,10 +316,15 @@ public class IPluginV2 extends Pointer { * \brief Execute the layer. * * @param batchSize The number of inputs in the batch. - * @param inputs The memory for the input tensors. - * @param outputs The memory for the output tensors. - * @param workspace Workspace for execution. - * @param stream The stream in which to execute the kernels. + * @param inputs The memory for the input tensors. Will be an array of device addresses corresponding to input + * tensors of length nbInputs, where nbInputs is the second parameter passed to configureWithFormat(). + * The i-th input tensor will have the dimensions inputDims[i], where inputDims is the first parameter + * that was passed to configureWithFormat(). + * @param outputs The memory for the output tensors. Will be an array of device addresses corresponding to output + * tensors of length getNbOutputs(). + * @param workspace Workspace for execution. Will be the start address of a device buffer whose length will be at + * least getWorkspaceSize(batchSize). + * @param stream The stream in which to execute the kernels. This will be a valid CUDA stream. * * @return 0 for success, else non-zero (which will cause engine termination). * @@ -319,9 +345,9 @@ public class IPluginV2 extends Pointer { CUstream_st stream); /** - * \brief Find the size of the serialization buffer required. + * \brief Find the size of the serialization buffer required to store the plugin configuration in a binary file. * - * @return The size of the serialization buffer. + * @return The size of the serialization buffer in bytes. * * \u005Cusage * - Allowed context for the API call @@ -340,8 +366,8 @@ public class IPluginV2 extends Pointer { /** * \brief Serialize the layer. * - * @param buffer A pointer to a buffer to serialize data. Size of buffer must be equal to value returned by - * getSerializationSize. + * @param buffer A pointer to a host buffer to serialize data. 
Size of buffer will be at least as large as the + * value returned by getSerializationSize. * * @see getSerializationSize() * @@ -371,6 +397,7 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) void destroy(); /** @@ -379,7 +406,10 @@ public class IPluginV2 extends Pointer { * * The TensorRT runtime calls clone() to clone the plugin when an execution context is created for an engine, * after the engine has been created. The runtime does not call initialize() on the cloned plugin, - * so the cloned plugin should be created in an initialized state. + * so the cloned plugin must be created in an initialized state. + * + * @return A cloned plugin object in an initialized state with the same parameters as the current object. + * nullptr must be returned if the cloning fails, e.g. because of resource exhaustion. * * \u005Cusage * - Allowed context for the API call @@ -398,12 +428,12 @@ public class IPluginV2 extends Pointer { /** * \brief Set the namespace that this plugin object belongs to. Ideally, all plugin - * objects from the same plugin library should have the same namespace. + * objects from the same plugin library must have the same namespace. * * @param pluginNamespace The namespace for the plugin object. * - * \warning The string pluginNamespace must be 1024 bytes or less including the NULL terminator and must be NULL - * terminated. + * \warning The string pluginNamespace will be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. * * \u005Cusage * - Allowed context for the API call @@ -415,12 +445,16 @@ public class IPluginV2 extends Pointer { //! //! //! + //! public native @NoException(true) void setPluginNamespace(String pluginNamespace); public native @NoException(true) void setPluginNamespace(@Cast("const char*") BytePointer pluginNamespace); /** * \brief Return the namespace of the plugin object. 
* + * @return The namespace string that was passed to setPluginNamespace(), possibly after truncation to 1024 bytes + * if a longer string was passed. An empty string must be returned as default value. + * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2DynamicExt.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2DynamicExt.java index 7745dbbd9c4..58eb5058ca2 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2DynamicExt.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2DynamicExt.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -22,20 +22,32 @@ /** * \class IPluginV2DynamicExt * - * Similar to IPluginV2Ext, but with support for dynamic shapes. + * \brief Similar to IPluginV2Ext, but with support for dynamic shapes. 
* * Clients should override the public methods, including the following inherited methods: * - * virtual int32_t getNbOutputs() const noexcept = 0; - * virtual nvinfer1::DataType getOutputDataType(int32_t index, nvinfer1::DataType const* inputTypes, int32_t - * nbInputs) const noexcept = 0; virtual size_t getSerializationSize() const noexcept = 0; virtual void - * serialize(void* buffer) const noexcept = 0; virtual void destroy() noexcept = 0; virtual void - * setPluginNamespace(char const* pluginNamespace) noexcept = 0; virtual char const* getPluginNamespace() const - * noexcept = 0; + * * virtual int32_t getNbOutputs() const noexcept = 0; * - * For getOutputDataType, the inputTypes will always be DataType::kFLOAT or DataType::kINT32, + * * virtual DataType getOutputDataType(int32_t index, DataType const* inputTypes, + * int32_t nbInputs) const noexcept = 0; + * + * * virtual size_t getSerializationSize() const noexcept = 0; + * + * * virtual void serialize(void* buffer) const noexcept = 0; + * + * * virtual void destroy() noexcept = 0; + * + * * virtual void setPluginNamespace(char const* pluginNamespace) noexcept = 0; + * + * * virtual char const* getPluginNamespace() const noexcept = 0; + * + * For weakly typed networks, the inputTypes will always be DataType::kFLOAT or DataType::kINT32, * and the returned type is canonicalized to DataType::kFLOAT if it is DataType::kHALF or DataType:kINT8. + * For strongly typed networks, inputTypes are inferred from previous operations, and getOutputDataType + * specifies the returned type based on the inputTypes. * Details about the floating-point precision are elicited later by method supportsFormatCombination. + * + * @deprecated Deprecated in TensorRT 10.0. Please implement IPluginV3 instead. 
* */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IPluginV2DynamicExt extends IPluginV2Ext { @@ -87,7 +99,7 @@ public class IPluginV2DynamicExt extends IPluginV2Ext { int outputIndex, @Const DimsExprs inputs, int nbInputs, @ByRef IExprBuilder exprBuilder); /** - * Limit on number of format combinations accepted. + * \brief Limit on number of format combinations accepted. * */ @@ -123,18 +135,18 @@ public class IPluginV2DynamicExt extends IPluginV2Ext { * * * A definition for a plugin that supports only FP16 NCHW: * - * return inOut.format[pos] == TensorFormat::kLINEAR && inOut.type[pos] == DataType::kHALF; + * return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kHALF; * * * A definition for a plugin that supports only FP16 NCHW for its two inputs, * and FP32 NCHW for its single output: * - * return inOut.format[pos] == TensorFormat::kLINEAR && (inOut.type[pos] == (pos < 2 ? DataType::kHALF : + * return inOut[pos].format == TensorFormat::kLINEAR && (inOut[pos].type == (pos < 2 ? DataType::kHALF : * DataType::kFLOAT)); * * * A definition for a "polymorphic" plugin with two inputs and one output that supports * any format or type, but the inputs and output must have the same format and type: * - * return pos == 0 || (inOut.format[pos] == inOut.format[0] && inOut.type[pos] == inOut.type[0]); + * return pos == 0 || (inOut[pos].format == inOut.format[0] && inOut[pos].type == inOut[0].type); * * Warning: TensorRT will stop asking for formats once it finds kFORMAT_COMBINATION_LIMIT on combinations. * */ @@ -175,9 +187,8 @@ public class IPluginV2DynamicExt extends IPluginV2Ext { * * IExecutionContext will call this during the next subsequent instance enqueue[V2]() or execute[V2]() if: * - The batch size is changed from previous call of execute()/enqueue() if hasImplicitBatchDimension() returns * true. 
- * - The optimization profile is changed via setOptimizationProfile() or setOptimizationProfileAsync(). - * - An input shape binding is changed via setInputShapeBinding(). - * - An input execution binding is changed via setBindingDimensions(). + * - The optimization profile is changed via setOptimizationProfileAsync(). + * - An input execution binding is changed via setInputShape(). * \warning The execution phase is timing critical during IExecutionContext but is not part of the timing loop when * called from IBuilder. Performance bottlenecks of configurePlugin won't show up during engine building but will * be visible during execution after calling functions that trigger layer resource updates. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Ext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Ext.java index 8fd9abcc5d2..85ca9c5b155 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Ext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Ext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,13 +19,14 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IPluginV2Ext +/** + * \class IPluginV2Ext * * \brief Plugin class for user-implemented layers. * * Plugins are a mechanism for applications to implement custom layers. This * interface provides additional capabilities to the IPluginV2 interface by - * supporting different output data types and broadcast across batch. + * supporting different output data types and broadcast across batches. * * @see IPluginV2 * @@ -41,7 +42,15 @@ public class IPluginV2Ext extends IPluginV2 { /** * \brief Return the DataType of the plugin output at the requested index. 
* - * The default behavior should be to return the type of the first input, or DataType::kFLOAT if the layer has no + * @param index The output tensor index in the valid range between 0 and getNbOutputs()-1. + * @param inputTypes The data types of the input tensors, stored in an array of length nbInputs. + * @param nbInputs The number of input tensors. Will be a non-negative integer. + * + * @return The data type of the output tensor with the provided index if the input tensors have the data types + * provided in inputTypes, provided the output tensor index is in the valid range. DataType::kFLOAT must be + * returned if the index is not in the valid range. + * + * The default behavior must be to return the type of the first input, or DataType::kFLOAT if the layer has no * inputs. The returned data type must have a format that is supported by the plugin. * * @see supportsFormat() @@ -54,6 +63,9 @@ public class IPluginV2Ext extends IPluginV2 { * when building networks on multiple devices sharing the same plugin. * */ + + //! + //! //! //! //! @@ -65,11 +77,14 @@ public class IPluginV2Ext extends IPluginV2 { public native @NoException(true) DataType getOutputDataType( int index, @Cast("nvinfer1::DataType*") int[] inputTypes, int nbInputs); - /** \brief Return true if output tensor is broadcast across a batch. + /** + * \brief Return true if the output tensor is broadcast across a batch. * - * @param outputIndex The index of the output - * @param inputIsBroadcasted The ith element is true if the tensor for the ith input is broadcast across a batch. - * @param nbInputs The number of inputs + * @param outputIndex The index of the output tensor, which will be in the valid range between 0 and + * nbOutputs()-1. + * @param inputIsBroadcasted A boolean array of length nbInputs. The i-th element will be true if and only if + * the tensor for the ith input is broadcast across a batch. + * @param nbInputs The number of inputs. Will be a non-negative integer. 
* * The values in inputIsBroadcasted refer to broadcasting at the semantic level, * i.e. are unaffected by whether method canBroadcastInputAcrossBatch requests @@ -79,26 +94,37 @@ public class IPluginV2Ext extends IPluginV2 { * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads * when building networks on multiple devices sharing the same plugin. + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch support is removed in TensorRT 10.0. * */ + + //! //! //! //! //! //! - public native @Cast("bool") @NoException(true) boolean isOutputBroadcastAcrossBatch( + //! + //! + public native @Cast("bool") @Deprecated @NoException(true) boolean isOutputBroadcastAcrossBatch( int outputIndex, @Cast("const bool*") BoolPointer inputIsBroadcasted, int nbInputs); - public native @Cast("bool") @NoException(true) boolean isOutputBroadcastAcrossBatch( + public native @Cast("bool") @Deprecated @NoException(true) boolean isOutputBroadcastAcrossBatch( int outputIndex, @Cast("const bool*") boolean[] inputIsBroadcasted, int nbInputs); - /** \brief Return true if plugin can use input that is broadcast across batch without replication. + /** + * \brief Return true if the plugin can use an input tensor that is broadcast across batch without replication. * - * @param inputIndex Index of input that could be broadcast. + * @param inputIndex Index of input that could be broadcast. Will be in the valid range between 0 and + * nbInputs - 1 where nbInputs is the maximum number of input tensors supported by this plugin. + * + * @return true if the index is in the valid range and the plugin is able to broadcast a single copy of this + * input tensor across the batch. False otherwise. * * For each input whose tensor is semantically broadcast across a batch, * TensorRT calls this method before calling configurePlugin. 
* If canBroadcastInputAcrossBatch returns true, TensorRT will not replicate the input tensor; - * i.e., there will be a single copy that the plugin should share across the batch. + * i.e., there will be a single copy that the plugin must share across the batch. * If it returns false, TensorRT will replicate the input tensor * so that it appears like a non-broadcasted tensor. * @@ -108,6 +134,8 @@ public class IPluginV2Ext extends IPluginV2 { * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads * when building networks on multiple devices sharing the same plugin. + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch support is removed in TensorRT 10.0. * */ @@ -118,7 +146,7 @@ public class IPluginV2Ext extends IPluginV2 { //! //! //! - public native @Cast("bool") @NoException(true) boolean canBroadcastInputAcrossBatch(int inputIndex); + public native @Cast("bool") @Deprecated @NoException(true) boolean canBroadcastInputAcrossBatch(int inputIndex); /** * \brief Configure the layer with input and output data types. @@ -126,20 +154,22 @@ public class IPluginV2Ext extends IPluginV2 { * This function is called by the builder prior to initialize(). It provides an opportunity for the layer to make * algorithm choices on the basis of its weights, dimensions, data types and maximum batch size. * - * @param inputDims The input tensor dimensions. - * @param nbInputs The number of inputs. - * @param outputDims The output tensor dimensions. - * @param nbOutputs The number of outputs. - * @param inputTypes The data types selected for the plugin inputs. - * @param outputTypes The data types selected for the plugin outputs. + * @param inputDims The input tensor dimensions. Will be an array of length nbInputs. + * @param nbInputs The number of inputs. Will be a non-negative integer. + * @param outputDims The output tensor dimensions. Will be an array of length nbOutputs. 
+ * @param nbOutputs The number of outputs. Will be a positive integer. + * @param inputTypes The data types selected for the plugin inputs. Will be an array of length nbInputs. + * @param outputTypes The data types selected for the plugin outputs. Will be an array of length nbOutputs. * @param inputIsBroadcast True for each input that the plugin must broadcast across the batch. + * Will be an array of length nbInputs. * @param outputIsBroadcast True for each output that TensorRT will broadcast across the batch. + * Will be an array of length nbOutputs. * @param floatFormat The format selected for the engine for the floating point inputs/outputs. - * @param maxBatchSize The maximum batch size. + * @param maxBatchSize The maximum batch size. Will be a positive integer. * * The dimensions passed here do not include the outermost batch size (i.e. for 2-D image networks, they will be * 3-dimensional CHW dimensions). When inputIsBroadcast or outputIsBroadcast is true, the outermost batch size for - * that input or output should be treated as if it is one. + * that input or output must be treated as if it is one. * Index 'i' of inputIsBroadcast is true only if the input is semantically broadcast across the batch and * calling canBroadcastInputAcrossBatch with argument 'i' returns true. * Index 'i' of outputIsBroadcast is true only if calling isOutputBroadcastAcrossBatch with argument 'i' @@ -155,31 +185,33 @@ public class IPluginV2Ext extends IPluginV2 { * when building networks on multiple devices sharing the same plugin. However, TensorRT * will not call this method from two threads simultaneously on a given clone of a plugin. 
* */ - public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") IntPointer inputTypes, @Cast("nvinfer1::DataType*") IntPointer outputTypes, @Cast("const bool*") BoolPointer inputIsBroadcast, @Cast("const bool*") BoolPointer outputIsBroadcast, @Cast("nvinfer1::PluginFormat") TensorFormat floatFormat, int maxBatchSize); - public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") IntBuffer inputTypes, @Cast("nvinfer1::DataType*") IntBuffer outputTypes, @Cast("const bool*") boolean[] inputIsBroadcast, @Cast("const bool*") boolean[] outputIsBroadcast, @Cast("nvinfer1::PluginFormat") int floatFormat, int maxBatchSize); - public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") int[] inputTypes, @Cast("nvinfer1::DataType*") int[] outputTypes, @Cast("const bool*") BoolPointer inputIsBroadcast, @Cast("const bool*") BoolPointer outputIsBroadcast, @Cast("nvinfer1::PluginFormat") TensorFormat floatFormat, int maxBatchSize); - public native @NoException(true) void 
configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") IntPointer inputTypes, @Cast("nvinfer1::DataType*") IntPointer outputTypes, @Cast("const bool*") boolean[] inputIsBroadcast, @Cast("const bool*") boolean[] outputIsBroadcast, @Cast("nvinfer1::PluginFormat") int floatFormat, int maxBatchSize); - public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") IntBuffer inputTypes, @Cast("nvinfer1::DataType*") IntBuffer outputTypes, @Cast("const bool*") BoolPointer inputIsBroadcast, @Cast("const bool*") BoolPointer outputIsBroadcast, @Cast("nvinfer1::PluginFormat") TensorFormat floatFormat, int maxBatchSize); - public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims32 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims32 outputDims, int nbOutputs, + public native @NoException(true) void configurePlugin(@Cast("const nvinfer1::Dims*") Dims64 inputDims, int nbInputs, @Cast("const nvinfer1::Dims*") Dims64 outputDims, int nbOutputs, @Cast("nvinfer1::DataType*") int[] inputTypes, @Cast("nvinfer1::DataType*") int[] outputTypes, @Cast("const bool*") boolean[] inputIsBroadcast, @Cast("const bool*") boolean[] outputIsBroadcast, @Cast("nvinfer1::PluginFormat") int floatFormat, int maxBatchSize); /** * \brief Attach the plugin object to an execution context and grant the plugin the access to some context - * 
resource. + * resources. * - * @param cudnn The CUDNN context handle of the execution context - * @param cublas The cublas context handle of the execution context + * @param cudnn The cuDNN context handle of the execution context. Will be a valid cuDNN context handle, or + * nullptr if TacticSource::kCUDNN is disabled. + * @param cublas The cuBLAS context handle of the execution context. Will be a valid cuBLAS context handle, or + * nullptr if TacticSource::kCUBLAS is disabled. * @param allocator The allocator used by the execution context * * This function is called automatically for each plugin when a new execution context is created. If the context @@ -187,10 +219,19 @@ public class IPluginV2Ext extends IPluginV2 { * new resources are assigned to the context. * * If the plugin needs per-context resource, it can be allocated here. - * The plugin can also get context-owned CUDNN and CUBLAS context here. + * The plugin can also get context-owned cuDNN and cuBLAS context here. * - * \note In the automotive safety context, the CUDNN and CUBLAS parameters will be nullptr because CUDNN and CUBLAS - * is not used by the safe runtime. + * \note The TacticSource::kCUDNN and TacticSource::kCUBLAS flag is disabled by default. + * The allocator pointer is unique to each building or execution context instance having overlapping lifetimes. + * It can be used as a key to manage resources across plugin instances sharing the same context. + * Plugins attached to different contexts will have different handles as their execution will not overlap. + * + * @see TacticSources + * @see getPluginCudnnHandle(void* executionContextIdentifier) + * @see getPluginCublasHandle(void* excecutionContextIdentifier) + * + * \note In the automotive safety context, the cuDNN and cuBLAS parameters will be nullptr because cuDNN and cuBLAS + * are not used by the safe runtime. 
* * \u005Cusage * - Allowed context for the API call @@ -210,7 +251,7 @@ public class IPluginV2Ext extends IPluginV2 { /** * \brief Detach the plugin object from its execution context. * - * This function is called automatically for each plugin when a execution context is destroyed or the context + * This function is called automatically for each plugin when an execution context is destroyed or the context * resources are unassigned from the context. * * If the plugin owns per-context resource, it can be released here. @@ -225,15 +266,18 @@ public class IPluginV2Ext extends IPluginV2 { //! //! //! + //! public native @NoException(true) void detachFromContext(); /** * \brief Clone the plugin object. This copies over internal plugin parameters as well and returns a new plugin * object with these parameters. If the source plugin is pre-configured with configurePlugin(), the returned object - * should also be pre-configured. The returned object should allow attachToContext() with a new execution context. + * must also be pre-configured. The returned object must allow attachToContext() with a new execution context. * Cloned plugin objects can share the same per-engine immutable resource (e.g. weights) with the source object * (e.g. via ref-counting) to avoid duplication. * + * @return A pointer to a cloned plugin object if cloning was successful, otherwise nullptr. 
+ * * \u005Cusage * - Allowed context for the API call * - Thread-safe: Yes, this method is required to be thread-safe and may be called from multiple threads diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2IOExt.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2IOExt.java index b5df9714df2..d970f60ed13 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2IOExt.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2IOExt.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \class IPluginV2IOExt +/** + * \class IPluginV2IOExt * * \brief Plugin class for user-implemented layers. * @@ -27,6 +28,8 @@ * capabilities to the IPluginV2Ext interface by extending different I/O data types and tensor formats. * * @see IPluginV2Ext + * + * @deprecated Deprecated in TensorRT 10.0. * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IPluginV2IOExt extends IPluginV2Ext { @@ -76,10 +79,10 @@ public class IPluginV2IOExt extends IPluginV2Ext { * Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs+nbOutputs. * * TensorRT invokes this method to ask if the input/output indexed by pos supports the format/datatype specified - * by inOut[pos].format and inOut[pos].type. The override should return true if that format/datatype at inOut[pos] + * by inOut[pos].format and inOut[pos].type. The override must return true if that format/datatype at inOut[pos] * are supported by the plugin. If support is conditional on other input/output formats/datatypes, the plugin can * make its result conditional on the formats/datatypes in inOut[0..pos-1], which will be set to values - * that the plugin supports. 
The override should not inspect inOut[pos+1..nbInputs+nbOutputs-1], + * that the plugin supports. The override must not inspect inOut[pos+1..nbInputs+nbOutputs-1], * which will have invalid values. In other words, the decision for pos must be based on inOut[0..pos] only. * * Some examples: diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Layer.java index 6e0af9f6dc7..99c55c03ee9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Layer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV2Layer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3.java new file mode 100644 index 00000000000..0c1acc074cf --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3.java @@ -0,0 +1,68 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginV3 extends IVersionedInterface { + static { Loader.load(); } + /** Pointer 
cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginV3(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** \brief Return a pointer to plugin object implementing the specified PluginCapabilityType. + * + * \note IPluginV3 objects added for the build phase (through addPluginV3()) must return valid objects for + * PluginCapabilityType::kCORE, PluginCapabilityType::kBUILD and PluginCapabilityType::kRUNTIME. + * + * \note IPluginV3 objects added for the runtime phase must return valid objects for + * PluginCapabilityType::kCORE and PluginCapabilityType::kRUNTIME. + * + * @see TensorRTPhase + * @see IPluginCreatorV3One::createPlugin() + * */ + + + //! + //! + //! + //! + public native @NoException(true) IPluginCapability getCapabilityInterface(PluginCapabilityType type); + public native @NoException(true) IPluginCapability getCapabilityInterface(@Cast("nvinfer1::PluginCapabilityType") int type); + + /** + * \brief Clone the plugin object. This copies over internal plugin parameters and returns a new plugin object with + * these parameters. The cloned object must be in a fully initialized state. + * + * \note The cloned object must return valid objects through getCapabilityInterface() for at least the same + * PluginCapabilityTypes as the original object. + * + * @return A cloned plugin object in an initialized state with the same parameters as the current object. + * nullptr must be returned if the cloning fails. 
+ * */ + public native @NoException(true) IPluginV3 clone(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3Layer.java new file mode 100644 index 00000000000..dc75dc93e1d --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3Layer.java @@ -0,0 +1,43 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +/** + * \class IPluginV3Layer + * + * \brief Layer type for V3 plugins + * + * @see IPluginV3 + * + * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. + * */ +@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginV3Layer extends ILayer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginV3Layer(Pointer p) { super(p); } + + /** + * \brief Get the plugin for the layer. 
+ * + * @see IPluginV3 + * */ + public native @ByRef @NoException(true) IPluginV3 getPlugin(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneBuild.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneBuild.java new file mode 100644 index 00000000000..404097d95e6 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneBuild.java @@ -0,0 +1,292 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +@Namespace("nvinfer1::v_1_0") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginV3OneBuild extends IPluginCapability { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginV3OneBuild(Pointer p) { super(p); } + + /** + * \brief The default maximum number of format combinations that will be timed by TensorRT during the build phase + * + * @see getFormatCombinationLimit + * */ + + + //! + //! + @MemberGetter public static native int kDEFAULT_FORMAT_COMBINATION_LIMIT(); + public static final int kDEFAULT_FORMAT_COMBINATION_LIMIT = kDEFAULT_FORMAT_COMBINATION_LIMIT(); + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! + //! 
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Configure the plugin. + * + * configurePlugin() can be called multiple times in the build phase during creation of an engine by IBuilder. + * + * configurePlugin() is called when a plugin is being prepared for profiling but not for any + * specific input size. This provides an opportunity for the plugin to make algorithmic choices on the basis of + * input and output formats, along with the bound of possible dimensions. The min, opt and max value of the + * DynamicPluginTensorDesc correspond to the kMIN, kOPT and kMAX value of the current profile that the plugin is + * being profiled for, with the desc.dims field corresponding to the dimensions of plugin specified at network + * creation. Wildcard dimensions may exist during this phase in the desc.dims field. + * + * @param in The input tensors attributes that are used for configuration. + * @param nbInputs Number of input tensors. + * @param out The output tensors attributes that are used for configuration. + * @param nbOutputs Number of output tensors. + * */ + + + //! + //! + //! + //! + //! + //! + public native @NoException(true) int configurePlugin(@Const DynamicPluginTensorDesc in, int nbInputs, + @Const DynamicPluginTensorDesc out, int nbOutputs); + + /** + * \brief Provide the data types of the plugin outputs if the input tensors have the data types provided. + * + * @param outputTypes Pre-allocated array to which the output data types should be written. + * @param nbOutputs The number of output tensors. This matches the value returned from getNbOutputs(). + * @param inputTypes The input data types. + * @param nbInputs The number of input tensors. + * + * @return 0 for success, else non-zero (which will cause engine termination). The returned code will be reported + * through the error recorder. + * + * \note Provide {@code DataType::kFLOAT}s if the layer has no inputs. 
The data type for any size tensor outputs must be + * {@code DataType::kINT32}. The returned data types must each have a format that is supported by the plugin. + * + * \warning DataType:kBOOL and DataType::kUINT8 are not supported. + * */ + + + //! + //! + //! + //! + //! + //! + public native @NoException(true) int getOutputDataTypes( + @Cast("nvinfer1::DataType*") IntPointer outputTypes, int nbOutputs, @Cast("nvinfer1::DataType*") IntPointer inputTypes, int nbInputs); + public native @NoException(true) int getOutputDataTypes( + @Cast("nvinfer1::DataType*") IntBuffer outputTypes, int nbOutputs, @Cast("nvinfer1::DataType*") IntBuffer inputTypes, int nbInputs); + public native @NoException(true) int getOutputDataTypes( + @Cast("nvinfer1::DataType*") int[] outputTypes, int nbOutputs, @Cast("nvinfer1::DataType*") int[] inputTypes, int nbInputs); + + /** + * \brief Provide expressions for computing dimensions of the output tensors from dimensions of the input tensors. + * + * @param inputs Expressions for dimensions of the input tensors + * @param nbInputs The number of input tensors + * @param shapeInputs Expressions for values of the shape tensor inputs + * @param nbShapeInputs The number of shape tensor inputs + * @param outputs Pre-allocated array to which the output dimensions must be written + * @param exprBuilder Object for generating new dimension expressions + * + * \note Any size tensor outputs must be declared to be 0-D. + * + * \note The declaration of shapeInputs as DimsExprs is slightly abusive, because the "dimensions" + * are actually the values of the shape tensor. For example, if the input shape tensor + * is a 2x3 matrix, the DimsExprs will have six "dimensions": the three values from the first + * row of the matrix followed by the three values from the second row of the matrix. + * + * @return 0 for success, else non-zero (which will cause engine termination). Returned code will be reported + * through the error recorder. + * */ + + + //! + //! 
+ //! + //! + //! + //! + //! + //! + //! + //! + //! + //! + //! + public native @NoException(true) int getOutputShapes(@Const DimsExprs inputs, int nbInputs, @Const DimsExprs shapeInputs, + int nbShapeInputs, DimsExprs outputs, int nbOutputs, @ByRef IExprBuilder exprBuilder); + + /** + * \brief Return true if plugin supports the format and datatype for the input/output indexed by pos. + * + * For this method inputs are numbered 0.. (nbInputs - 1) and outputs are numbered nbInputs.. (nbInputs + nbOutputs + * - 1). Using this numbering, pos is an index into InOut, where 0 <= pos < nbInputs + nbOutputs - 1. + * + * TensorRT invokes this method to ask if the input/output indexed by pos supports the format/datatype specified + * by inOut[pos].format and inOut[pos].type. The override should return true if that format/datatype at inOut[pos] + * are supported by the plugin. If support is conditional on other input/output formats/datatypes, the plugin can + * make its result conditional on the formats/datatypes in inOut[0.. pos - 1], which will be set to values + * that the plugin supports. The override should not inspect inOut[pos1.. nbInputs + nbOutputs - 1], + * which will have invalid values. In other words, the decision for pos must be based on inOut[0..pos] only. + * + * Some examples: + * + * * A definition for a plugin that supports only FP16 NCHW: + * + * return inOut.format[pos] == TensorFormat::kLINEAR && inOut.type[pos] == DataType::kHALF; + * + * * A definition for a plugin that supports only FP16 NCHW for its two inputs, + * and FP32 NCHW for its single output: + * + * return inOut.format[pos] == TensorFormat::kLINEAR && (inOut.type[pos] == pos < 2 ? 
DataType::kHALF : + * DataType::kFLOAT); + * + * * A definition for a "polymorphic" plugin with two inputs and one output that supports + * any format or type, but the inputs and output must have the same format and type: + * + * return pos == 0 || (inOut.format[pos] == inOut.format[0] && inOut.type[pos] == inOut.type[0]); + * + * \warning TensorRT will stop querying once it finds getFormatCombinationLimit() of combinations. + * + * @see getFormatCombinationLimit + * */ + + + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean supportsFormatCombination( + int pos, @Const DynamicPluginTensorDesc inOut, int nbInputs, int nbOutputs); + + /** + * \brief Get the number of outputs from the plugin. + * + * @return The number of outputs, which must be a positive integer. + * */ + + + //! + //! + //! + //! + public native @NoException(true) int getNbOutputs(); + + /** + * \brief Find the workspace size required by the layer. + * + * This function is called after the plugin is configured, and possibly during execution. + * The result should be a sufficient workspace size to deal with inputs and outputs of the given size + * or any smaller problem. + * + * @return The workspace size. + * */ + + + //! + //! + //! + //! + //! + //! + public native @Cast("size_t") @NoException(true) long getWorkspaceSize(@Const DynamicPluginTensorDesc inputs, int nbInputs, + @Const DynamicPluginTensorDesc outputs, int nbOutputs); + + /** + * \brief Query for any custom tactics that the plugin intends to use + * + * For each format combination supported by the plugin (up to a maximum indicated by getFormatCombinationLimit()), + * the plugin will be timed for each tactic advertised through this method. + * + * @param tactics Pre-allocated buffer to which the tactic values should be written + * @param nbTactics The number of tactics advertised through getNbTactics() + * + * \note The provided tactic values must be unique and non-zero. 
The tactic value 0 is reserved for the default + * tactic attached to each format combination. + * + * @return 0 for success, else non-zero (which will cause engine termination). The returned code will be reported + * through the error recorder. + * */ + + + //! + //! + public native @NoException(true) int getValidTactics(IntPointer tactics, int nbTactics); + public native @NoException(true) int getValidTactics(IntBuffer tactics, int nbTactics); + public native @NoException(true) int getValidTactics(int[] tactics, int nbTactics); + + /** + * \brief Query for the number of custom tactics the plugin intends to use + * */ + + + //! + //! + //! + //! + public native @NoException(true) int getNbTactics(); + + /** + * \brief Called to query the suffix to use for the timing cache ID. May be called anytime after plugin creation. + * + * @return Suffix to use for timing cache ID, considering only the creation state of the plugin. + * Returning nullptr will disable timing caching for the plugin altogether. + * + * \note If timing caching is enabled for the plugin (by returning non-null), the I/O shape and format information + * will be automatically considered to form the prefix of the timing cache ID. Therefore, only other factors + * determining the creation state of the plugin, such as its attribute values, should be considered to compose the + * return value. + * */ + + + //! + //! + public native @NoException(true) String getTimingCacheID(); + + /** + * \brief Return the maximum number of format combinations that will be timed by TensorRT during the build phase + * */ + + + //! + //! + //! + public native @NoException(true) int getFormatCombinationLimit(); + + /** + * \brief Query for a string representing the configuration of the plugin. May be called anytime after + * plugin creation. + * + * @return A string representing the plugin's creation state, especially with regard to its attribute values. 
+ * */ + public native @NoException(true) String getMetadataString(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneCore.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneCore.java new file mode 100644 index 00000000000..7e9efdb1986 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneCore.java @@ -0,0 +1,80 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IPluginV3OneCore extends IPluginCapability { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginV3OneCore(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Return the plugin name. Should match the plugin name returned by the corresponding plugin creator. + * + * @see IPluginCreatorV3One::getPluginName() + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. + * */ + + + //! + //! + //! + //! 
+ public native @NoException(true) String getPluginName(); + + /** + * \brief Return the plugin version. Should match the plugin version returned by the corresponding plugin creator. + * + * @see IPluginCreatorV3One::getPluginVersion() + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. + * */ + + + //! + //! + //! + //! + public native @NoException(true) String getPluginVersion(); + + /** + * \brief Return the namespace of the plugin object. Should match the plugin namespace returned by the + * corresponding plugin creator. + * + * @see IPluginCreatorV3One::getPluginNamespace() + * + * \warning The string returned must be NULL-terminated and have a length of 1024 bytes or less including the + * NULL terminator. + * */ + public native @NoException(true) String getPluginNamespace(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneRuntime.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneRuntime.java new file mode 100644 index 00000000000..7e64ec22dc7 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPluginV3OneRuntime.java @@ -0,0 +1,142 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class 
IPluginV3OneRuntime extends IPluginCapability { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IPluginV3OneRuntime(Pointer p) { super(p); } + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Set the tactic to be used in the subsequent call to enqueue(). If no custom tactics were advertised, this + * will have a value of 0, which is designated as the default tactic. + * + * @return 0 for success, else non-zero (which will cause engine termination). The returned code will be reported + * through the error recorder. + * */ + + + //! + //! + //! + public native @NoException(true) int setTactic(int tactic); + + /** + * \brief Called when a plugin is being prepared for execution for specific dimensions. This could + * happen multiple times in the execution phase, both during creation of an engine by IBuilder and execution of an + * engine by IExecutionContext. + * * IBuilder will call this function once per profile, with {@code in} resolved to the values specified by the + * kOPT field of the current profile. + * * IExecutionContext will call this during the next subsequent instance of enqueueV3() or executeV2() if: + * - The optimization profile is changed via setOptimizationProfile() or setOptimizationProfileAsync(). + * - An input binding is changed via setInputTensorAddress() or setTensorAddress() or setInputShape(). + * \warning The execution phase is timing critical during IExecutionContext but is not part of the timing loop when + * called from IBuilder. Performance bottlenecks of onShapeChange() will not show up during engine building but + * will be visible during execution if any triggering functions are called. + * + * @param in The input tensors attributes that are used for configuration. 
+ * @param nbInputs Number of input tensors. + * @param out The output tensors attributes that are used for configuration. + * @param nbOutputs Number of output tensors. + * */ + + + //! + //! + //! + //! + public native @NoException(true) int onShapeChange( + @Const PluginTensorDesc in, int nbInputs, @Const PluginTensorDesc out, int nbOutputs); + + /** + * \brief Execute the layer. + * + * @param inputDesc how to interpret the memory for the input tensors. + * @param outputDesc how to interpret the memory for the output tensors. + * @param inputs The memory for the input tensors. + * @param outputs The memory for the output tensors. + * @param workspace Workspace for execution. + * @param stream The stream in which to execute the kernels. + * + * @return 0 for success, else non-zero (which will cause engine termination). The returned code will be reported + * through the error recorder. + * */ + + + //! + //! + //! + //! + //! + //! + //! + public native @NoException(true) int enqueue(@Const PluginTensorDesc inputDesc, @Const PluginTensorDesc outputDesc, + @Cast("const void*const*") PointerPointer inputs, @Cast("void*const*") PointerPointer outputs, Pointer workspace, CUstream_st stream); + public native @NoException(true) int enqueue(@Const PluginTensorDesc inputDesc, @Const PluginTensorDesc outputDesc, + @Cast("const void*const*") @ByPtrPtr Pointer inputs, @Cast("void*const*") @ByPtrPtr Pointer outputs, Pointer workspace, CUstream_st stream); + + /** + * \brief Clone the plugin, attach the cloned plugin object to a execution context and grant the cloned plugin + * access to some context resources. + * + * This function is called automatically for each plugin when a new execution context is created. The plugin may + * use resources provided by the IPluginResourceContext until the plugin is deleted by TensorRT. + * + * If the plugin needs per-context resources, it can be allocated here. 
+ * + * @param context A resource context that exposes methods to get access to execution context specific resources. + * A different resource context is guaranteed for each different execution context to which the + * plugin is attached. + * @see IPluginResourceContext + * + * \note This method should clone the entire IPluginV3 object, not just the runtime interface + * + * @return A clone of the IPluginV3 object whose runtime interface on which this method is invoked, which has + * attached to the provided resource context. + * */ + + + //! + //! + //! + public native @NoException(true) IPluginV3 attachToContext(IPluginResourceContext context); + + /** + * \brief Get the plugin fields which should be serialized. + * + * \note The set of plugin fields returned does not necessarily need to match that advertised through + * getFieldNames() of the corresponding plugin creator. +

+ * \note To serialize arbitrary plugin data, use a PluginField of + * PluginFieldType::kUNKNOWN, with the length of the PluginField set to the correct number of bytes. + * */ + public native @Const @NoException(true) PluginFieldCollection getFieldsToSerialize(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java index d36ceacc6ca..0a8466648d4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -62,116 +62,8 @@ public class IPoolingLayer extends ILayer { //! //! //! - //! public native @NoException(true) PoolingType getPoolingType(); - /** - * \brief Set the window size for pooling. - * - * If executing this layer on DLA, both height and width of window size must be in the range [1,8]. - * - * @see getWindowSize() - * - * @deprecated Superseded by setWindowSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setWindowSize(@ByVal DimsHW windowSize); - - /** - * \brief Get the window size for pooling. - * - * @see setWindowSize() - * - * @deprecated Superseded by getWindowSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getWindowSize(); - - /** - * \brief Set the stride for pooling. - * - * Default: 1 - * - * If executing this layer on DLA, both height and width of stride must be in the range [1,16]. - * - * @see getStride() - * - * @deprecated Superseded by setStrideNd. 
Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride); - - /** - * \brief Get the stride for pooling. - * - * @see setStride() - * - * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getStride(); - - /** - * \brief Set the padding for pooling. - * - * Default: 0 - * - * If executing this layer on DLA, both height and width of padding must be in the range [0,7]. - * - * @see getPadding() - * - * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding); - - /** - * \brief Get the padding for pooling. - * - * Default: 0 - * - * @see setPadding() - * - * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0 - * */ - - - //! - //! - //! - //! - public native @Deprecated @ByVal @NoException(true) DimsHW getPadding(); - /** * \brief Set the blending factor for the max_average_blend mode: * max_average_blendPool = (1-blendFactor)*maxPool + blendFactor*avgPool @@ -203,7 +95,6 @@ public class IPoolingLayer extends ILayer { //! //! //! - //! public native @NoException(true) float getBlendFactor(); /** @@ -213,9 +104,6 @@ public class IPoolingLayer extends ILayer { * * Default: true * - * \note On Xavier, DLA supports only inclusive padding and this must be explicitly - * set to false. - * * @see getAverageCountExcludesPadding() * */ @@ -259,7 +147,7 @@ public class IPoolingLayer extends ILayer { //! //! //! 
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the pre-padding. @@ -274,7 +162,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); /** * \brief Set the multi-dimension post-padding for pooling. @@ -294,7 +182,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the padding. @@ -307,7 +195,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); /** * \brief Set the padding mode. @@ -352,7 +240,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @NoException(true) void setWindowSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); + public native @NoException(true) void setWindowSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); /** * \brief Get the multi-dimension window size for pooling. @@ -366,7 +254,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getWindowSizeNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getWindowSizeNd(); /** * \brief Set the multi-dimension stride for pooling. 
@@ -376,14 +264,14 @@ public class IPoolingLayer extends ILayer { * If executing this layer on DLA, only support 2D stride, both height and width of stride must be in the range * [1,16]. * - * @see getStrideNd() setStride() getStride() + * @see getStrideNd() * */ //! //! //! - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); /** * \brief Get the multi-dimension stride for pooling. @@ -398,7 +286,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); /** * \brief Set the multi-dimension padding for pooling. @@ -420,7 +308,7 @@ public class IPoolingLayer extends ILayer { //! //! //! - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); /** * \brief Get the multi-dimension padding for pooling. 
@@ -429,5 +317,5 @@ public class IPoolingLayer extends ILayer { * * @see setPaddingNd() * */ - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java index 9b6dc222c8d..14b7239ea25 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,35 +18,11 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - -/** - * \class IProfiler - * - * \brief Application-implemented interface for profiling. - * - * When this class is added to an execution context, the profiler will be called once per layer for each invocation of - * executeV2()/enqueueV2()/enqueueV3(). - * - * It is not recommended to run inference with profiler enabled when the inference execution time is critical since the - * profiler may affect execution time negatively. - * */ -@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IProfiler extends Pointer { static { Loader.load(); } - /** Default native constructor. */ - public IProfiler() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public IProfiler(long size) { super((Pointer)null); allocateArray(size); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public IProfiler(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public IProfiler position(long position) { - return (IProfiler)super.position(position); - } - @Override public IProfiler getPointer(long i) { - return new IProfiler((Pointer)this).offsetAddress(i); - } /** * \brief Layer time reporting callback. @@ -55,5 +31,6 @@ public class IProfiler extends Pointer { * with profiling verbosity set to kNONE, the layerName is the decimal index of the layer. * @param ms The time in milliseconds to execute the layer. * */ - @Virtual(true) public native @NoException(true) void reportLayerTime(String layerName, float ms); + public native @NoException(true) void reportLayerTime(String layerName, float ms); + public native @NoException(true) void reportLayerTime(@Cast("const char*") BytePointer layerName, float ms); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java new file mode 100644 index 00000000000..e331660e000 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java @@ -0,0 +1,106 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + // namespace impl +@Namespace("nvinfer1::v_1_0") @Properties(inherit = 
org.bytedeco.tensorrt.presets.nvinfer.class) +public class IProgressMonitor extends IVersionedInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IProgressMonitor(Pointer p) { super(p); } + + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Signal that a phase of the optimizer has started. + * + * @param phaseName The name of this phase for tracking purposes. + * @param parentPhase The parent phase that this phase belongs to, or nullptr if there is no parent. + * @param nbSteps The number of steps that are involved in this phase. + * + * The phaseStart function signals to the application that the current phase is beginning, and that it has a + * certain number of steps to perform. If \p phaseParent is nullptr, then the phaseStart is beginning an + * independent phase, and if \p phaseParent is specified, then the current phase, specified by \p phaseName, is + * within the scope of the parent phase. \p nbSteps will always be a positive number. The phaseStart function + * implies that the first step is being executed. TensorRT will signal when each step is complete. + * + * Phase names are human readable English strings which are unique within a single phase hierarchy but which can be + * reused once the previous instance has completed. Phase names and their hierarchies may change between versions + * of TensorRT. + * + * @see phaseFinish + * */ + + + //! + //! + //! + //! + //! 
+ public native @NoException(true) void phaseStart(String phaseName, String parentPhase, int nbSteps); + public native @NoException(true) void phaseStart(@Cast("const char*") BytePointer phaseName, @Cast("const char*") BytePointer parentPhase, int nbSteps); + + /** + * \brief Signal that a step of an optimizer phase has finished. + * + * @param phaseName The name of the innermost phase being executed. + * @param step The step number that was completed. + * + * The stepComplete function signals to the application that TensorRT has finished the current \p step for the + * phase \p phaseName, and will move onto the next step if there is one. The application can return false for + * TensorRT to exit the build early. The step value will increase on subsequent calls in the range [0, nbSteps). + * + * @return true to continue to the next step or false to stop the build. + * */ + + + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean stepComplete(String phaseName, int step); + public native @Cast("bool") @NoException(true) boolean stepComplete(@Cast("const char*") BytePointer phaseName, int step); + + /** + * \brief Signal that a phase of the optimizer has finished. + * + * @param phaseName The name of the phase that has finished. + * + * The phaseFinish function signals to the application that the phase is complete. This function may be called + * before all steps in the range [0, nbSteps) have been reported to stepComplete. This scenario can be triggered by + * error handling, internal optimizations, or when stepComplete returns false to request cancellation of the build. 
+ * + * @see phaseStart + * */ + public native @NoException(true) void phaseFinish(String phaseName); + public native @NoException(true) void phaseFinish(@Cast("const char*") BytePointer phaseName); + +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java index 144d4107728..bd93c7ca2db 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,32 +25,39 @@ * \brief A Quantize layer in a network definition. * * This layer accepts a floating-point data input tensor, and uses the scale and zeroPt inputs to - * quantize the data to an 8-bit signed integer according to: + * quantize the data according to: * \p output = clamp(round(\p input / \p scale) + \p zeroPt) * * Rounding type is rounding-to-nearest ties-to-even (https://en.wikipedia.org/wiki/Rounding#Round_half_to_even). - * Clamping is in the range [-128, 127]. + * Clamping range according to data type: + * - FP8: [-448, 448] + * - INT4: [-8, 7] + * - INT8: [-128, 127] * * The first input (index 0) is the tensor to be quantized. * The second (index 1) and third (index 2) are the scale and zero point respectively. - * Each of \p scale and \p zeroPt must be either a scalar, or a 1D tensor. + * \p scale and \p zeroPt should have identical dimensions, and rank lower or equal to 2. * - * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be - * DataType::kINT8. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is + * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. 
Its data type must match the + * output data type. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is * supported. - * The \p scale value must be either a scalar for per-tensor quantization, or a 1D tensor for per-channel - * quantization. All \p scale coefficients must have positive values. The size of the 1-D \p scale tensor must match - * the size of the quantization axis. The size of the \p scale must match the size of the \p zeroPt. + * The \p scale value must be a scalar for per-tensor quantization, a 1-D tensor for per-channel quantization, or a + * 2-D tensor for block quantization (supported for DataType::kINT4 only). All \p scale coefficients must have + * positive values. The size of the 1-D \p scale tensor must match the size of the quantization axis. For block + * quantization, the shape of \p scale tensor must match the shape of the input, except for one dimension in which + * blocking occurs. The size of \p zeroPt must match the size of \p scale. * - * The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply + * The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply * to the \p zeroPt. - * The output type, if constrained, must be constrained to DataType::kINT8. The input type, if constrained, must be - * constrained to DataType::kFLOAT or DataType::kHALF. - * The output size is the same as the input size. The quantization axis is in reference to the input tensor's - * dimensions. + * The output type, if constrained, must be constrained to DataType::kINT8, DataType::kFP8 or DataType::kINT4. The + * input type, if constrained, must be constrained to DataType::kFLOAT, DataType::kHALF, or DataType::kBF16. The + * output size is the same as the input size. The quantization axis is in reference to the input tensor's dimensions. 
* - * IQuantizeLayer only supports DataType::kFLOAT precision and will default to this precision during instantiation. - * IQuantizeLayer only supports DataType::kINT8 output. + * IQuantizeLayer supports DataType::kFLOAT, DataType::kHALF, or DataType::kBF16 precision and will default to + * DataType::kFLOAT precision during instantiation. For strongly typed networks, \p input data type must match the + * \p scale data type. + * + * IQuantizeLayer supports DataType::kINT8, DataType::kFP8, or DataType::kINT4 output. * * As an example of the operation of this layer, imagine a 4D NCHW activation input which can be quantized using a * single scale coefficient (referred to as per-tensor quantization): @@ -69,11 +76,20 @@ * For each s in S: * output[k,c,r,s] = clamp(round(\p input[k,c,r,s] / \p scale[k]) + \p zeroPt[k]) * + * Block quantization is supported only for 2-D weight inputs of DataType::kINT4. As an example of blocked + * operation, imagine a 2-D RS weights input, R (dimension 0) as the blocking axis and B as the block size. + * The scale is a 2D array of coefficients, with dimensions (R//B, S). + * For each r in R: + * For each s in S: + * output[r,s] = clamp(round(\p input[r,s] / \p scale[r//B, s]) + \p zeroPt[r//B, s]) + * * \note Only symmetric quantization is supported. * \note Currently the only allowed build-time constant \p scale and \p zeroPt subgraphs are: * 1. Constant -> Quantize * 2. Constant -> Cast -> Quantize * + * \note The input tensor for this layer must not be a scalar. + * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) @@ -102,5 +118,40 @@ public class IQuantizeLayer extends ILayer { * The axis must be a valid axis if the scale tensor has more than one coefficient. * The axis value will be ignored if the scale tensor has exactly one coefficient (per-tensor quantization). 
* */ + + + //! + //! + //! + //! + //! public native @NoException(true) void setAxis(int axis); + + /** + * \brief Set the Quantize layer output type. + * + * @param toType The DataType of the output tensor. + * + * Set the output type of the quantize layer. Valid values are DataType::kINT8 and DataType::kFP8. + * If the network is strongly typed, setToType must be used to set the output type, and use of setOutputType + * is an error. Otherwise, types passed to setOutputType and setToType must be the same. + * + * @see NetworkDefinitionCreationFlag::kSTRONGLY_TYPED + * */ + + + //! + //! + //! + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); + + /** + * \brief Return the Quantize layer output type. + * + * @return toType parameter set during layer creation or by setToType(). + * The return value is the output type of the quantize layer. + * The default value is DataType::kINT8. + * */ + public native @NoException(true) DataType getToType(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java deleted file mode 100644 index 529b7a09bbf..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java +++ /dev/null @@ -1,358 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvinfer; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; - -import static 
org.bytedeco.tensorrt.global.nvinfer.*; - - -/** - * \class IRNNv2Layer - * - * \brief An RNN layer in a network definition, version 2. - * - * This layer supersedes IRNNLayer. - * - * @deprecated Deprecated prior to TensorRT 8.0 and will be removed in 9.0. Superseded by - * INetworkDefinition::addLoop(). - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. - * */ -@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class IRNNv2Layer extends ILayer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IRNNv2Layer(Pointer p) { super(p); } - - /** Get the layer count of the RNN. */ - public native @NoException(true) int getLayerCount(); - /** Get the hidden size of the RNN. */ - public native @NoException(true) int getHiddenSize(); - /** Get the maximum sequence length of the RNN. */ - public native @NoException(true) int getMaxSeqLength(); - /** Get the embedding length of the RNN. -

- //! - //! - //! - //! - //! - //! */ - public native @NoException(true) int getDataLength(); - - /** - * \brief Specify individual sequence lengths in the batch with the ITensor pointed to by - * \p seqLengths. - * - * The \p seqLengths ITensor should be a {N1, ..., Np} tensor, where N1..Np are the index dimensions - * of the input tensor to the RNN. - * - * If this is not specified, then the RNN layer assumes all sequences are size getMaxSeqLength(). - * - * All sequence lengths in \p seqLengths should be in the range [1, getMaxSeqLength()]. Zero-length - * sequences are not supported. - * - * This tensor must be of type DataType::kINT32. - * */ - - - //! - //! - //! - //! - public native @NoException(true) void setSequenceLengths(@ByRef ITensor seqLengths); - - /** - * \brief Get the sequence lengths specified for the RNN. - * - * @return nullptr if no sequence lengths were specified, the sequence length data otherwise. - * - * @see setSequenceLengths() - * */ - - - //! - //! - //! - public native @NoException(true) ITensor getSequenceLengths(); - - /** - * \brief Set the operation of the RNN layer. - * - * @see getOperation(), RNNOperation - * */ - - - //! - //! - //! - public native @NoException(true) void setOperation(RNNOperation op); - public native @NoException(true) void setOperation(@Cast("nvinfer1::RNNOperation") int op); - - /** - * \brief Get the operation of the RNN layer. - * - * @see setOperation(), RNNOperation - * */ - - - //! - //! - //! - public native @NoException(true) RNNOperation getOperation(); - - /** - * \brief Set the input mode of the RNN layer. - * - * @see getInputMode(), RNNInputMode - * */ - - - //! - //! - //! - public native @NoException(true) void setInputMode(RNNInputMode op); - public native @NoException(true) void setInputMode(@Cast("nvinfer1::RNNInputMode") int op); - - /** - * \brief Get the input mode of the RNN layer. - * - * @see setInputMode(), RNNInputMode - * */ - - - //! - //! - //! - //! 
- public native @NoException(true) RNNInputMode getInputMode(); - - /** - * \brief Set the direction of the RNN layer. - * - * The direction determines if the RNN is run as a unidirectional(left to right) or - * bidirectional(left to right and right to left). - * In the RNNDirection::kBIDIRECTION case the output is concatenated together, resulting - * in output size of 2x getHiddenSize(). - * - * @see getDirection(), RNNDirection - * */ - - - //! - //! - //! - public native @NoException(true) void setDirection(RNNDirection op); - public native @NoException(true) void setDirection(@Cast("nvinfer1::RNNDirection") int op); - - /** - * \brief Get the direction of the RNN layer. - * - * @see setDirection(), RNNDirection - * */ - - - //! - //! - //! - //! - //! - //! - //! - //! - //! - public native @NoException(true) RNNDirection getDirection(); - - /** - * \brief Set the weight parameters for an individual gate in the RNN. - * - * The DataType for this structure must be DataType::kFLOAT or DataType::kHALF, and must be the same - * datatype as the input tensor. - * - * Each parameter matrix is row-major in memory, and has the following dimensions: - * - * ~~~ - * Let K := { ::kUNIDIRECTION => 1 - * { ::kBIDIRECTION => 2 - * l := layer index (as described above) - * H := getHiddenSize() - * E := getDataLength() (the embedding length) - * isW := true if the matrix is an input (W) matrix, and false if - * the matrix is a recurrent input (R) matrix. 
- * - * if isW: - * if l < K and ::kSKIP: - * (numRows, numCols) := (0, 0) # input matrix is skipped - * elif l < K and ::kLINEAR: - * (numRows, numCols) := (H, E) # input matrix acts on input data size E - * elif l >= K: - * (numRows, numCols) := (H, K * H) # input matrix acts on previous hidden state - * else: # not isW - * (numRows, numCols) := (H, H) - * ~~~ - * - * In other words, the input weights of the first layer of the RNN (if - * not skipped) transform a {@code getDataLength()}-size column - * vector into a {@code getHiddenSize()}-size column vector. The input - * weights of subsequent layers transform a {@code K*getHiddenSize()}-size - * column vector into a {@code getHiddenSize()}-size column vector. {@code K=2} in - * the bidirectional case to account for the full hidden state being - * the concatenation of the forward and backward RNN hidden states. - * - * The recurrent weight matrices for all layers all have shape {@code (H, H)}, - * both in the unidirectional and bidirectional cases. (In the - * bidirectional case, each recurrent weight matrix for the (forward or - * backward) RNN cell operates on the previous (forward or - * backward) RNN cell's hidden state, which is size {@code H}). - * - * @param layerIndex The index of the layer that contains this gate. - * @param gate The name of the gate within the RNN layer. The gate name must correspond - * to one of the gates used by this layer's #RNNOperation. - * @param isW True if the weight parameters are for the input matrix W[g] - * and false if they are for the recurrent input matrix R[g]. See - * #RNNOperation for equations showing how these matrices are used - * in the RNN gate. - * @param weights The weight structure holding the weight parameters, which are stored - * as a row-major 2D matrix. See See \ref setWeightsForGate() for documentation on the expected - * dimensions of this matrix. - * */ - - - //! - //! - //! 
- public native @NoException(true) void setWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights weights); - public native @NoException(true) void setWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights weights); - - /** - * \brief Get the weight parameters for an individual gate in the RNN. - * - * @see setWeightsForGate() - * */ - - - //! - //! - //! - //! - //! - public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW); - public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW); - - /** - * \brief Set the bias parameters for an individual gate in the RNN. - * - * The DataType for this structure must be DataType::kFLOAT or DataType::kHALF, and must be the same - * datatype as the input tensor. - * - * Each bias vector has a fixed size, getHiddenSize(). - * - * @param layerIndex The index of the layer that contains this gate. See \ref setWeightsForGate() - * for a description of the layer index. - * @param gate The name of the gate within the RNN layer. The gate name must correspond - * to one of the gates used by this layer's #RNNOperation. - * @param isW True if the bias parameters are for the input bias Wb[g] - * and false if they are for the recurrent input bias Rb[g]. See - * #RNNOperation for equations showing how these bias vectors are used - * in the RNN gate. - * @param bias The weight structure holding the bias parameters, which should be an - * array of size getHiddenSize(). - * */ - - - //! - //! - //! 
- public native @NoException(true) void setBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights bias); - public native @NoException(true) void setBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights bias); - - /** - * \brief Get the bias parameters for an individual gate in the RNN. - * - * @see setBiasForGate() - * */ - - - //! - //! - //! - //! - public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW); - public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW); - - /** - * \brief Set the initial hidden state of the RNN with the provided \p hidden ITensor. - * - * The \p hidden ITensor should have the dimensions {@code {N1, ..., Np, L, H}}, where: - * - * - {@code N1..Np} are the index dimensions specified by the input tensor - * - {@code L} is the number of layers in the RNN, equal to getLayerCount() if getDirection is - * RNNDirection::kUNIDIRECTION, - * and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional - * case, layer {@code l}'s final forward hidden state is stored in {@code L = 2*l}, and - * final backward hidden state is stored in {@code L= 2*l + 1}. - * - {@code H} is the hidden state for each layer, equal to getHiddenSize(). - * */ - - - //! - //! - //! - public native @NoException(true) void setHiddenState(@ByRef ITensor hidden); - - /** - * \brief Get the initial hidden state of the RNN. - * - * @see setHiddenState() - * */ - - - //! - //! - //! - //! - //! - public native @NoException(true) ITensor getHiddenState(); - - /** - * \brief Set the initial cell state of the LSTM with the provided \p cell ITensor. 
- * - * The \p cell ITensor should have the dimensions {@code {N1, ..., Np, L, H}}, where: - * - * - {@code N1..Np} are the index dimensions specified by the input tensor - * - {@code L} is the number of layers in the RNN, equal to getLayerCount() if getDirection is - * RNNDirection::kUNIDIRECTION, - * and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional - * case, layer {@code l}'s final forward hidden state is stored in {@code L = 2*l}, and - * final backward hidden state is stored in {@code L= 2*l + 1}. - * - {@code H} is the hidden state for each layer, equal to getHiddenSize(). - * - * It is an error to call setCellState() on an RNN layer that is not configured with RNNOperation::kLSTM. - * */ - - - //! - //! - //! - public native @NoException(true) void setCellState(@ByRef ITensor cell); - - /** - * \brief Get the initial cell state of the RNN. - * - * @see setCellState() - * */ - public native @NoException(true) ITensor getCellState(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java index 104a2b68c41..fb4213576b9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java index 998a7be0619..d0b8fb5e8b3 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE 
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,13 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - +/** + * \class IRecurrenceLayer + * + * \brief A recurrence layer in a network definition. + * + * The recurrence layer allows a loop iteration to compute a result from a value computed in the previous iteration. + * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class IRecurrenceLayer extends ILoopBoundaryLayer { static { Loader.load(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java index 015a91a418a..dd431628f86 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java index e589ac8ddeb..fe27d73aee5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -52,9 +52,11 @@ public class IRefitter extends INoCopy { * * * There is no such layer by that name. * * The layer does not have weights with the specified role. - * * The number of weights is inconsistent with the layer’s original specification. + * * The count of weights is inconsistent with the layer’s original specification. 
+ * * The type of weights is inconsistent with the layer’s original specification. * - * Modifying the weights before method refit() completes will result in undefined behavior. + * Modifying the weights before method refitCudaEngine or refitCudaEngineAsync returns will result in undefined + * behavior. * * \warning The string layerName must be null-terminated, and be at most 4096 bytes including the terminator. * */ @@ -69,14 +71,16 @@ public class IRefitter extends INoCopy { public native @Cast("bool") @NoException(true) boolean setWeights(@Cast("const char*") BytePointer layerName, @Cast("nvinfer1::WeightsRole") int role, @ByVal Weights weights); /** - * \brief Updates associated engine. Return true if successful. + * \brief Refits associated engine. * - * Failure occurs if getMissing() != 0 before the call. + * @return True on success, or false if new weights validation fails or getMissingWeights() != 0 before the call. + * If false is returned, a subset of weights may have been refitted. * * The behavior is undefined if the engine has pending enqueued work. + * Provided weights on CPU or GPU can be unset and released, or updated after refitCudaEngine returns. * - * Extant IExecutionContexts associated with the engine should not be used afterwards. - * Instead, create new IExecutionContexts after refitting. + * IExecutionContexts associated with the engine remain valid for use afterwards. There is no need to set the same + * weights repeatedly for multiple refit calls as the weights memory can be updated directly instead. * */ @@ -130,6 +134,9 @@ public class IRefitter extends INoCopy { * */ + //! + //! + //! //! //! //! 
@@ -138,21 +145,6 @@ public class IRefitter extends INoCopy { public native @NoException(true) int getAll(int size, @Cast("const char**") @ByPtrPtr ByteBuffer layerNames, @Cast("nvinfer1::WeightsRole*") IntBuffer roles); public native @NoException(true) int getAll(int size, @Cast("const char**") @ByPtrPtr byte[] layerNames, @Cast("nvinfer1::WeightsRole*") int[] roles); - /** - * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - - //! - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) void destroy(); - /** * Update dynamic range for a tensor. * @@ -293,9 +285,13 @@ public class IRefitter extends INoCopy { * Possible reasons for rejection are: * * * The name of weights is nullptr or does not correspond to any refittable weights. - * * The number of weights is inconsistent with the original specification. + * * The count of the weights is inconsistent with the count returned from calling getWeightsPrototype() with the + * same name. + * * The type of the weights is inconsistent with the type returned from calling getWeightsPrototype() with the + * same name. * - * Modifying the weights before method refitCudaEngine() completes will result in undefined behavior. + * Modifying the weights before method refitCudaEngine or refitCudaEngineAsync returns will result in undefined + * behavior. * * \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator. * */ @@ -365,6 +361,8 @@ public class IRefitter extends INoCopy { * */ + //! + //! //! //! //! @@ -372,7 +370,9 @@ public class IRefitter extends INoCopy { /** * \brief Set the maximum number of threads. + * * @param maxThreads The maximum number of threads that can be used by the refitter. + * * @return True if successful, false otherwise. * * The default value is 1 and includes the current thread. 
@@ -397,5 +397,184 @@ public class IRefitter extends INoCopy { * * @see setMaxThreads() * */ + + + //! + //! + //! + //! + //! + //! + //! public native @NoException(true) int getMaxThreads(); + + /** + * \brief Specify new weights on a specified device of given name. + * + * @param name The name of the weights to be refitted. + * @param weights The new weights on the specified device. + * @param location The location (host vs. device) of the new weights. + * + * @return True on success, or false if new weights are rejected. + * Possible reasons for rejection are: + * + * * The name of the weights is nullptr or does not correspond to any refittable weights. + * * The count of the weights is inconsistent with the count returned from calling getWeightsPrototype() with the + * same name. + * * The type of the weights is inconsistent with the type returned from calling getWeightsPrototype() with the + * same name. + * + * It is allowed to provide some weights on CPU and others on GPU. + * Modifying the weights before the method refitCudaEngine() or refitCudaEngineAsync() completes will result in + * undefined behavior. + * + * \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator. + * */ + + + //! + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean setNamedWeights(String name, @ByVal Weights weights, TensorLocation location); + public native @Cast("bool") @NoException(true) boolean setNamedWeights(@Cast("const char*") BytePointer name, @ByVal Weights weights, @Cast("nvinfer1::TensorLocation") int location); + + /** + * \brief Get weights associated with the given name. + * + * @param weightsName The name of the weights to be refitted. + * + * @return Weights associated with the given name. + * + * If the weights were never set, returns null weights and reports an error to the refitter errorRecorder. 
+ * + * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator. + * */ + + + //! + //! + //! + //! + //! + //! + public native @ByVal @NoException(true) Weights getNamedWeights(String weightsName); + public native @ByVal @NoException(true) Weights getNamedWeights(@Cast("const char*") BytePointer weightsName); + + /** + * \brief Get location for the weights associated with the given name. + * + * @param weightsName The name of the weights to be refitted. + * + * @return Location for the weights associated with the given name. + * + * If the weights were never set, returns TensorLocation::kHOST and reports an error to the refitter errorRecorder. + * + * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator. + * */ + + + //! + //! + //! + //! + //! + //! + public native @NoException(true) TensorLocation getWeightsLocation(String weightsName); + public native @NoException(true) @Cast("nvinfer1::TensorLocation") int getWeightsLocation(@Cast("const char*") BytePointer weightsName); + + /** + * \brief Unset weights associated with the given name. + * + * @param weightsName The name of the weights to be refitted. + * + * @return False if the weights were never set, returns true otherwise. + * + * Unset weights before releasing them. + * + * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator. + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(String weightsName); + public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(@Cast("const char*") BytePointer weightsName); + + /** + * \brief Set whether to validate weights during refitting. + * + * @param weightsValidation Indicate whether to validate weights during refitting. 
+ * + * When set to true, TensorRT will validate weights during FP32 to FP16/BF16 weights conversions or + * sparsifying weights in the refit call. If provided weights are not proper for some weights transformations, + * TensorRT will issue a warning and continue the transformation for minor issues (such as overflow during + * narrowing conversion), or issue an error and stop the refitting process for severe issues (such as sparsifying + * dense weights). By default the flag is true. Set the flag to false for faster refitting performance. + * */ + + + //! + //! + public native @NoException(true) void setWeightsValidation(@Cast("bool") boolean weightsValidation); + + /** + * \brief Get whether to validate weights values during refitting. + * */ + + + //! + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean getWeightsValidation(); + + /** + * \brief Enqueue weights refitting of the associated engine on the given stream. + * + * @param stream The stream to enqueue the weights updating task. + * + * @return True on success, or false if new weights validation fails or getMissingWeights() != 0 before the call. + * If false is returned, a subset of weights may have been refitted. + * + * The behavior is undefined if the engine has pending enqueued work on a different stream from the provided one. + * Provided weights on CPU can be unset and released, or updated after refitCudaEngineAsync returns. + * Freeing or updating of the provided weights on GPU can be enqueued on the same stream after refitCudaEngineAsync + * returns. + * + * IExecutionContexts associated with the engine remain valid for use afterwards. There is no need to set the same + * weights repeatedly for multiple refit calls as the weights memory can be updated directly instead. The weights + * updating task should use the same stream as the one used for the refit call. + * */ + + + //! + //! + //! + //! + //! + //! 
+ public native @Cast("bool") @NoException(true) boolean refitCudaEngineAsync(CUstream_st stream); + + /** + * \brief Get the Weights prototype associated with the given name. + * + * @param weightsName The name of the weights to be refitted. + * + * @return Weights prototype associated with the given name. + * + * The type and count of weights prototype is the same as weights used for engine building. The values property + * is nullptr for weights prototypes. The count of the weights prototype is -1 when the name of the weights is + * nullptr or does not correspond to any refittable weights. + * + * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator. + * */ + public native @ByVal @NoException(true) Weights getWeightsPrototype(String weightsName); + public native @ByVal @NoException(true) Weights getWeightsPrototype(@Cast("const char*") BytePointer weightsName); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java index 41bc2125d1b..1ac757091df 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -26,13 +26,13 @@ * Resize layer can be used for resizing a N-D tensor. 
* * Resize layer currently supports the following configurations: - * - ResizeMode::kNEAREST - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(8, N) and N > 0 - * - ResizeMode::kLINEAR - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(3, N) and N > 0 + * - InterpolationMode::kNEAREST - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(8, N) and N > 0 + * - InterpolationMode::kLINEAR - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(3, N) and N > 0 * - * Default resize mode is ResizeMode::kNEAREST. + * Default resize mode is InterpolationMode::kNEAREST. * * The coordinates in the output tensor are mapped to coordinates in the input tensor using a function set by calling - * setCoordinateTransformation(). The default for all ResizeMode settings (nearest, linear, bilinear, etc.) is + * setCoordinateTransformation(). The default for all InterpolationMode settings (nearest, linear, bilinear, etc.) is * ResizeCoordinateTransformation::kASYMMETRIC. * * The resize layer provides two ways to resize tensor dimensions. @@ -84,7 +84,7 @@ public class IResizeLayer extends ILayer { //! //! //! - public native @NoException(true) void setOutputDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) void setOutputDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Get the output dimensions. @@ -101,7 +101,7 @@ public class IResizeLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getOutputDimensions(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getOutputDimensions(); /** * \brief Set the resize scales. @@ -169,15 +169,15 @@ public class IResizeLayer extends ILayer { * * Supported resize modes are Nearest Neighbor and Linear. * - * @see ResizeMode + * @see InterpolationMode * */ //! //! //! 
- public native @NoException(true) void setResizeMode(InterpolationMode resizeMode); - public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int resizeMode); + public native @NoException(true) void setResizeMode(InterpolationMode interpolationMode); + public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int interpolationMode); /** * \brief Get resize mode for an input tensor. @@ -191,44 +191,9 @@ public class IResizeLayer extends ILayer { //! //! //! - public native @NoException(true) InterpolationMode getResizeMode(); - - /** - * \brief Set whether to align corners while resizing. - * - * If true, the centers of the 4 corner pixels of both input and output - * tensors are aligned i.e. preserves the values of corner - * pixels. - * - * Default: false. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by IResizeLayer::setCoordinateTransformation(). - * */ - - - //! - //! - //! - //! - public native @Deprecated @NoException(true) void setAlignCorners(@Cast("bool") boolean alignCorners); - - /** - * \brief True if align corners has been set. - * - * @return True if align corners has been set, false otherwise. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by IResizeLayer::getCoordinateTransformation(). - * */ - - //! //! - //! - //! - //! - //! - //! 
- public native @Cast("bool") @Deprecated @NoException(true) boolean getAlignCorners(); + public native @NoException(true) InterpolationMode getResizeMode(); /** * \brief Append or replace an input of this layer with a specific tensor diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java index 76cf0df8854..66da1642d78 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,8 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; // class INMSLayer -/** \class IReverseSequenceLayer +/** + * \class IReverseSequenceLayer * * \brief A ReverseSequence layer in a network definition. * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java index ceca09131c1..bfb8cd472a5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -45,33 +45,9 @@ public class IRuntime extends INoCopy { } - /** - * \brief Deserialize an engine from a stream. - * - * If an error recorder has been set for the runtime, it will also be passed to the engine. - * - * @param blob The memory that holds the serialized engine. - * @param size The size of the memory in bytes. - * @param pluginFactory The plugin factory, if any plugins are used by the network, otherwise nullptr. 
- * - * @return The engine, or nullptr if it could not be deserialized. - * - * @deprecated Deprecated in TensorRT 8.0. - * - * \warning IPluginFactory is no longer supported, therefore pluginFactory must be a nullptr. - * */ - - - //! - //! - //! - //! - //! - public native @Deprecated @NoException(true) ICudaEngine deserializeCudaEngine( - @Const Pointer blob, @Cast("std::size_t") long size, IPluginFactory pluginFactory); - /** * \brief Sets the DLA core used by the network. Defaults to -1. + * * @param dlaCore The DLA core to execute the engine on, in the range [0,getNbDlaCores()). * * This function is used to specify which DLA core to use via indexing, if multiple DLA cores are available. @@ -82,12 +58,14 @@ public class IRuntime extends INoCopy { * */ + //! //! //! public native @NoException(true) void setDLACore(int dlaCore); /** * \brief Get the DLA core that the engine executes on. + * * @return assigned DLA core or -1 for DLA not present or unset. * */ @@ -103,27 +81,14 @@ public class IRuntime extends INoCopy { //! //! - //! - //! - public native @NoException(true) int getNbDLACores(); - - /** - * \brief Destroy this object. - * - * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - //! //! //! - //! - public native @Deprecated @NoException(true) void destroy(); + public native @NoException(true) int getNbDLACores(); /** * \brief Set the GPU allocator. + * * @param allocator Set the GPU allocator to be used by the runtime. All GPU memory acquired will use this * allocator. If NULL is passed, the default allocator will be used. * @@ -183,7 +148,7 @@ public class IRuntime extends INoCopy { public native @NoException(true) IErrorRecorder getErrorRecorder(); /** - * \brief Deserialize an engine from a stream. + * \brief Deserialize an engine from host memory. 
* * If an error recorder has been set for the runtime, it will also be passed to the engine. * @@ -194,11 +159,34 @@ public class IRuntime extends INoCopy { * */ + //! + //! + //! //! //! //! public native @NoException(true) ICudaEngine deserializeCudaEngine(@Const Pointer blob, @Cast("std::size_t") long size); + /** + * \brief Deserialize an engine from a stream. + * + * If an error recorder has been set for the runtime, it will also be passed to the + * engine. + * + * This deserialization path will reduce host memory usage when weight streaming is enabled. + * + * @param streamReader a read-only stream from which TensorRT will deserialize a + * previously serialized engine. + * + * @return The engine, or nullptr if it could not be deserialized. + * */ + + + //! + //! + //! + public native ICudaEngine deserializeCudaEngine(@ByRef IStreamReader streamReader); + /** * \brief get the logger with which the runtime was created * @@ -209,10 +197,12 @@ public class IRuntime extends INoCopy { //! //! //! + //! public native @NoException(true) ILogger getLogger(); /** * \brief Set the maximum number of threads. + * * @param maxThreads The maximum number of threads that can be used by the runtime. * @return True if successful, false otherwise. * diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java index e3061409adb..511e076ec49 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -35,8 +35,7 @@ * * The output size is the same as the input size. 
* - * \note The input tensor for this layer is required to have a minimum of 3 dimensions in implicit batch mode - * and a minimum of 4 dimensions in explicit batch mode. + * \note The input tensor is required to have at least 4 dimensions. * * A scale layer may be used as an INT8 quantization node in a graph, if the output is constrained to INT8 and * the input to FP32. Quantization rounds ties to even, and clamps to [-128, 127]. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java index 57abf942ce7..30ae4bd5e22 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -38,7 +38,7 @@ * Scattermode::kELEMENT: s = q = r * * Output is a tensor with the same dimensions as Data that stores the resulting values of the * transformation. It must not be a shape tensor. - * The types of Data, Update, and Output shall be the same, and Indices shall be DataType::kINT32. + * The types of Data, Update, and Output shall be the same, and Indices shall be DataType::kINT32 or DataType::kINT64. * * The output is computed by copying the data, and then updating elements of it based on indices. * How Indices are interpreted depends upon the ScatterMode. @@ -69,7 +69,7 @@ * for c in [0,n) * for h in [0,n) * for w in [0,n) - * output[n,c,indices[n,c,h,w],w] = updates[n,c,h,w]] + * output[n,c,indices[n,c,h,w],w] = updates[n,c,h,w] * * Writes to the same output element cause undefined behavior. 
* diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java index b08a3d0b273..711da35038e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,6 +20,15 @@ /** + * \class ISelectLayer + * + * \brief Select elements from two data tensors based on a condition tensor. + * + * The select layer makes elementwise selections from two data tensors based on a condition tensor, + * behaving similarly to the numpy.where function with three parameters. + * The three input tensors must share the same rank. Multidirectional broadcasting is supported. + * The output tensor has the dimensions of the inputs AFTER applying the broadcast rule. + * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. 
* */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java new file mode 100644 index 00000000000..f53961f88ba --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java @@ -0,0 +1,123 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +/** + * \class ISerializationConfig + * + * \brief Holds properties for configuring an engine to serialize the binary. + * + * @see SerializationFlag + * */ +@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class ISerializationConfig extends INoCopy { + static { Loader.load(); } + /** Default native constructor. */ + public ISerializationConfig() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ISerializationConfig(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ISerializationConfig(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ISerializationConfig position(long position) { + return (ISerializationConfig)super.position(position); + } + @Override public ISerializationConfig getPointer(long i) { + return new ISerializationConfig((Pointer)this).offsetAddress(i); + } + + + /** + * \brief Set the serialization flags to turn on for this config. + * + * The flags are listed in the SerializationFlag enum. + * + * @param serializationFlags The serialization flags for an engine. + * + * \note This function will override the previous set flags, rather than bitwise ORing the new flag. + * + * @see getFlags() + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean setFlags(@Cast("nvinfer1::SerializationFlags") int serializationFlags); + + /** + * \brief Get the serialization flags for this config. + * + * @return The serialization flags as a bitmask. + * + * @see setFlags() + * */ + + + //! + //! + //! + //! + public native @Cast("nvinfer1::SerializationFlags") @NoException(true) int getFlags(); + + /** + * \brief clear a serialization flag. + * + * clears the serialization flag from the config. + * + * @see setFlags() + * */ + + + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean clearFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean clearFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); + + /** + * \brief Set a serialization flag. + * + * Add the input serialization flag to the already enabled flags. + * + * @see setFlags() + * */ + + + //! + //! + //! + //! 
+ public native @Cast("bool") @NoException(true) boolean setFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean setFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); + + /** + * \brief Returns true if the serialization flag is set + * + * @see getFlags() + * + * @return True if flag is set, false if unset. + * */ + public native @Cast("bool") @NoException(true) boolean getFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java index e152ea02596..95794f3b13a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -23,10 +23,10 @@ * * \brief Layer type for getting shape of a tensor. * - * This layer sets the output to a 1D tensor of type Int32 with the dimensions of the input tensor. + * This layer sets the output to a 1D tensor of type Int64 with the dimensions of the input tensor. * * For example, if the input is a four-dimensional tensor (of any type) with - * dimensions [2,3,5,7], the output tensor is a one-dimensional Int32 tensor + * dimensions [2,3,5,7], the output tensor is a one-dimensional Int64 tensor * of length 4 containing the sequence 2, 3, 5, 7. * * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. 
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java index 6c70b038a1d..3b3f5624078 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -99,7 +99,7 @@ public class IShuffleLayer extends ILayer { //! //! //! - public native @NoException(true) void setReshapeDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) void setReshapeDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Get the reshaped dimensions. @@ -119,7 +119,7 @@ public class IShuffleLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getReshapeDimensions(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getReshapeDimensions(); /** * \brief Append or replace an input of this layer with a specific tensor diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java index f52cb2c9e62..ec94379f9cd 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -42,7 +42,7 @@ * stride = {1, 2} * output = {{1, 5}} * - * When the sliceMode is kCLAMP or kREFLECT, for each input dimension, if its size is 0 then the corresponding output + * When the sampleMode is kCLAMP or kREFLECT, for each 
input dimension, if its size is 0 then the corresponding output * dimension must be 0 too. * * A slice layer can produce a shape tensor if the following conditions are met: @@ -54,7 +54,7 @@ * * The following constraints must be satisfied to execute this layer on DLA: * * start, size, and stride are build time constants, either as static Dims or as constant input tensors. - * * sliceMode is kDEFAULT. + * * sampleMode is kSTRICT_BOUNDS. * * Strides are 1 for all dimensions. * * Slicing is not performed on the first dimension * * The input tensor has four dimensions @@ -83,7 +83,7 @@ public class ISliceLayer extends ILayer { //! //! //! - public native @NoException(true) void setStart(@ByVal @Cast("nvinfer1::Dims*") Dims32 start); + public native @NoException(true) void setStart(@Cast("const nvinfer1::Dims*") @ByRef Dims64 start); /** * \brief Get the start offset for the slice layer. @@ -102,7 +102,7 @@ public class ISliceLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStart(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStart(); /** * \brief Set the dimensions of the output slice. @@ -120,7 +120,7 @@ public class ISliceLayer extends ILayer { //! //! //! - public native @NoException(true) void setSize(@ByVal @Cast("nvinfer1::Dims*") Dims32 size); + public native @NoException(true) void setSize(@Cast("const nvinfer1::Dims*") @ByRef Dims64 size); /** * \brief Get dimensions of the output slice. @@ -139,7 +139,7 @@ public class ISliceLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getSize(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getSize(); /** * \brief Set the stride for computing the output slice data. @@ -157,7 +157,7 @@ public class ISliceLayer extends ILayer { //! //! //! 
- public native @NoException(true) void setStride(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) void setStride(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); /** * \brief Get the stride for the output slice. @@ -174,7 +174,7 @@ public class ISliceLayer extends ILayer { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStride(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStride(); /** * \brief Set the slice mode. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java index 2ad9f2d4604..5bdd53ea5cd 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -28,8 +28,7 @@ * * The output size is the same as the input size. * - * On Xavier, this layer is not supported on DLA. - * Otherwise, the following constraints must be satisfied to execute this layer on DLA: + * The following constraints must be satisfied to execute this layer on DLA: * * Axis must be one of the channel or spatial dimensions. * * There are two classes of supported input sizes: * 1. Non-axis, non-batch dimensions are all 1 and the axis dimension is at most 8192. @@ -50,17 +49,8 @@ public class ISoftMaxLayer extends ILayer { * \brief Set the axis along which softmax is computed. Currently, only one axis can be set. * * The axis is specified by setting the bit corresponding to the axis to 1. - * For example, consider an NCHW tensor as input (three non-batch dimensions). + * For example, consider an NCHW tensor as input. * - * In implicit mode : - * Bit 0 corresponds to the C dimension boolean. 
- * Bit 1 corresponds to the H dimension boolean. - * Bit 2 corresponds to the W dimension boolean. - * By default, softmax is performed on the axis which is the number of axes minus three. It is 0 if - * there are fewer than 3 non-batch axes. For example, if the input is NCHW, the default axis is C. If the input - * is NHW, then the default axis is H. - * - * In explicit mode : * Bit 0 corresponds to the N dimension boolean. * Bit 1 corresponds to the C dimension boolean. * Bit 2 corresponds to the H dimension boolean. @@ -69,8 +59,7 @@ public class ISoftMaxLayer extends ILayer { * there are fewer than 3 axes. For example, if the input is NCHW, the default axis is C. If the input * is NHW, then the default axis is N. * - * For example, to perform softmax on axis R of a NPQRCHW input, set bit 2 with implicit batch mode, - * set bit 3 with explicit batch mode. + * For example, to perform softmax on axis R of a NPQRCHW input, set bit 3. * * @param axes The axis along which softmax is computed. * Here axes is a bitmap. 
For example, when doing softmax along axis 0, bit 0 is set to 1, axes = 1 << axis diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java new file mode 100644 index 00000000000..23845c3d313 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java @@ -0,0 +1,52 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + +@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IStreamReader extends IVersionedInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IStreamReader(Pointer p) { super(p); } + + /** + * TensorRT never calls the destructor for an IStreamReader defined by the + * application. + * */ + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + + + //! + //! + //! + //! + public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo(); + + /** + * \brief Read the next number of bytes in the stream. + * + * @param destination The memory to write to + * @param nbBytes The number of bytes to read + * + * @return The number of bytes read. Negative values will be considered an automatic error. 
+ * */ + public native @Cast("int64_t") long read(Pointer destination, @Cast("int64_t") long nbBytes); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java index fd493572497..e1cf2a68024 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -101,7 +101,8 @@ public class ITensor extends INoCopy { //! //! //! - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + //! + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); /** * \brief Get the dimensions of a tensor. @@ -109,6 +110,7 @@ public class ITensor extends INoCopy { * @return The dimensions of the tensor. * * \warning getDimensions() returns a -1 for dimensions that are derived from a wildcard dimension. + * * @see setDimensions() * */ @@ -118,7 +120,7 @@ public class ITensor extends INoCopy { //! //! //! - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); /** * \brief Set the data type of a tensor. @@ -191,25 +193,19 @@ public class ITensor extends INoCopy { //! //! //! - //! public native @Cast("bool") @NoException(true) boolean isNetworkOutput(); /** - * \brief Set whether to enable broadcast of tensor across the batch. - * - * When a tensor is broadcast across a batch, it has the same value for every member in the batch. - * Memory is only allocated once for the single member. 
- * - * This method is only valid for network input tensors, since the flags of layer output tensors are inferred based - * on layer inputs and parameters. - * If this state is modified for a tensor in the network, the states of all dependent tensors will be recomputed. - * If the tensor is for an explicit batch network, then this function does nothing. + * \brief Set whether to enable broadcast of tensor across the implicit batch dimension. * - * \warning The broadcast flag is ignored when using explicit batch network mode. + * \warning This method has no effect other than issuing a warning. * - * @param broadcastAcrossBatch Whether to enable broadcast of tensor across the batch. + * @param broadcastAcrossBatch Whether to broadcast the tensor across the implicit + * batch dimension that was a feature of TensorRT 9.x and prior. * * @see getBroadcastAcrossBatch() + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0. * */ @@ -218,28 +214,30 @@ public class ITensor extends INoCopy { //! //! //! - public native @NoException(true) void setBroadcastAcrossBatch(@Cast("bool") boolean broadcastAcrossBatch); + public native @Deprecated @NoException(true) void setBroadcastAcrossBatch(@Cast("bool") boolean broadcastAcrossBatch); /** - * \brief Check if tensor is broadcast across the batch. - * - * When a tensor is broadcast across a batch, it has the same value for every member in the batch. - * Memory is only allocated once for the single member. If the network is in explicit batch mode, - * this function returns true if the leading dimension is 1. + * \brief Check if tensor is broadcast across the implicit batch dimension. * - * @return True if tensor is broadcast across the batch, false otherwise. + * @return Always false since TensorRT 10.0 does not support an implicit batch dimension. * * @see setBroadcastAcrossBatch() + * + * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0. 
* */ //! //! - public native @Cast("bool") @NoException(true) boolean getBroadcastAcrossBatch(); + //! + //! + public native @Cast("bool") @Deprecated @NoException(true) boolean getBroadcastAcrossBatch(); /** * \brief Get the storage location of a tensor. + * * @return The location of tensor data. + * * @see setLocation() * */ @@ -248,10 +246,13 @@ public class ITensor extends INoCopy { //! //! //! + //! + //! public native @NoException(true) TensorLocation getLocation(); /** * \brief Set the storage location of a tensor + * * @param location the location of tensor data * * Only network input tensors for storing sequence lengths for RNNv2 are supported. @@ -259,14 +260,17 @@ public class ITensor extends INoCopy { * errors at build time. * * @see getLocation() + * + * @deprecated Deprecated in TensorRT 10.0. RNNv2 is not supported and the location must + * always be TensorLocation::kDEVICE since TensorRT 10.0. * */ //! //! //! - public native @NoException(true) void setLocation(TensorLocation location); - public native @NoException(true) void setLocation(@Cast("nvinfer1::TensorLocation") int location); + public native @Deprecated @NoException(true) void setLocation(TensorLocation location); + public native @Deprecated @NoException(true) void setLocation(@Cast("nvinfer1::TensorLocation") int location); /** * \brief Query whether dynamic range is set. @@ -313,11 +317,12 @@ public class ITensor extends INoCopy { //! //! //! + //! public native @NoException(true) float getDynamicRangeMax(); /** * \brief Set allowed formats for this tensor. By default all formats are allowed. - * Shape tensors (for which isShapeTensor() returns true) may only have row major linear format. + * Shape tensors (for which isShapeTensor() returns true) may only have row-major linear format. 
* * When running network on DLA and the build option kGPU_FALLBACK is not specified, if DLA format(kCHW4 with Int8, * kCHW4 with FP16, kCHW16 with FP16, kCHW32 with Int8) is set, the input format is treated as native DLA format with @@ -327,6 +332,7 @@ public class ITensor extends INoCopy { * @param formats A bitmask of TensorFormat values that are supported for this tensor. * * @see ITensor::getAllowedFormats() + * * @see TensorFormats * */ @@ -339,7 +345,7 @@ public class ITensor extends INoCopy { /** * \brief Get a bitmask of TensorFormat values that the tensor supports. - * For a shape tensor, only row major linear format is allowed. + * For a shape tensor, only row-major linear format is allowed. * * @return The value specified by setAllowedFormats or all possible formats. * @@ -355,14 +361,13 @@ public class ITensor extends INoCopy { //! //! //! - //! public native @Cast("nvinfer1::TensorFormats") @NoException(true) int getAllowedFormats(); /** * \brief Whether the tensor is a shape tensor. * * A shape tensor is a tensor that is related to shape calculations. - * It must have type Int32, Bool, or Float, and its shape must be determinable at build time. + * It must have type Int32, Int64, Bool, or Float, and its shape must be determinable at build time. * Furthermore, it must be needed as a shape tensor, either marked as a network shape * output via markOutputForShapes(), or as a layer input that is required to be a shape * tensor, such as the second input to IShuffleLayer. Some layers are "polymorphic" in @@ -378,15 +383,11 @@ public class ITensor extends INoCopy { * cause all three tensors to be shape tensors, because IShuffleLayer requires that its * second optional input be a shape tensor, and IElementWiseLayer is "polymorphic". * - * If a tensor is a shape tensor and becomes an engine input or output, - * then ICudaEngine::isShapeBinding will be true for that tensor. - * Such a shape tensor must have type Int32. 
- * * It is possible for a tensor to be both a shape tensor and an execution tensor. * * @return True if tensor is a shape tensor, false otherwise. * - * @see INetworkDefinition::markOutputForShapes(), ICudaEngine::isShapeBinding() + * @see INetworkDefinition::markOutputForShapes() * */ @@ -408,8 +409,6 @@ public class ITensor extends INoCopy { * For example, if a partially built network has no path from a tensor to a network output, * isExecutionTensor() returns false. Completing the path would cause it to become true. * - * If a tensor is an execution tensor and becomes an engine input or output, - * then ICudaEngine::isExecutionBinding will be true for that tensor. * * A tensor with isShapeTensor() == false and isExecutionTensor() == false * can still show up as an input to the engine if its dimensions are required. diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java index df22348afce..78ce566d2cb 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java index d1fcd47b849..11efb849e52 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java index 42d4e36beab..96854efd916 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,11 +19,25 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; +/** + * \class ITripLimitLayer + * + * \brief A layer that represents a trip-count limiter. + * + * The trip limit layer sets the execution condition for loops, using kCOUNT to define the number of iterations or + * kWHILE for a conditional loop. A loop can have one of each kind of limit, in which case the loop exits when + * the trip count is reached or the condition becomes false. + * + * See INetworkDefinition::addTripLimit(). + * */ @Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class ITripLimitLayer extends ILoopBoundaryLayer { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public ITripLimitLayer(Pointer p) { super(p); } + /** + * \brief Get a trip limiter type. 
+ * */ public native @NoException(true) TripLimit getTripLimit(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java index 6514e636bda..d291a85ddbf 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java new file mode 100644 index 00000000000..ab01a89a9d2 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + // namespace impl + +/** + * \class IVersionedInterface + * + * \brief An Interface class for version control. + * */ +@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class IVersionedInterface extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public IVersionedInterface(Pointer p) { super(p); } + + /** + * \brief The language used to build the implementation of this Interface. + * + * Applications must not override this method. + * */ + + + //! + //! + @Virtual public native @NoException(true) @Const({false, false, true}) APILanguage getAPILanguage(); + + /** + * \brief Return version information associated with this interface. Applications must not override this method. + * */ + @Virtual(true) public native @ByVal @NoException(true) @Const({false, false, true}) InterfaceInfo getInterfaceInfo(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java new file mode 100644 index 00000000000..38f2a766942 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java @@ -0,0 +1,48 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +/** + * \class InterfaceInfo + * + * \brief Version information associated with a TRT interface + * */ +@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class InterfaceInfo extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public InterfaceInfo() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public InterfaceInfo(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public InterfaceInfo(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public InterfaceInfo position(long position) { + return (InterfaceInfo)super.position(position); + } + @Override public InterfaceInfo getPointer(long i) { + return new InterfaceInfo((Pointer)this).offsetAddress(i); + } + + public native String kind(); public native InterfaceInfo kind(String setter); + public native int major(); public native InterfaceInfo major(int setter); + public native int minor(); public native InterfaceInfo minor(int setter); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java index 70fb96de771..efb35343508 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,6 +19,11 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; +/** + * \struct Permutation + * + * \brief Represents a permutation of dimensions. 
+ * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class Permutation extends Pointer { static { Loader.load(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java index 4b1ab6f3514..7553116c71e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -41,31 +41,13 @@ public class PluginField extends Pointer { return new PluginField((Pointer)this).offsetAddress(i); } - /** - * \brief Plugin field attribute name - * */ - - //! - //! + /** Plugin field attribute name */ public native String name(); public native PluginField name(String setter); - /** - * \brief Plugin field attribute data - * */ - - //! - //! + /** Plugin field attribute data */ public native @Const Pointer data(); public native PluginField data(Pointer setter); - /** - * \brief Plugin field attribute type - * @see PluginFieldType - * */ - - //! - //! 
+ /** Plugin field attribute type */ public native PluginFieldType type(); public native PluginField type(PluginFieldType setter); - /** - * \brief Number of data entries in the Plugin attribute - * */ + /** Number of data entries in the Plugin attribute */ public native int length(); public native PluginField length(int setter); public PluginField(String name_/*=nullptr*/, @Const Pointer data_/*=nullptr*/, diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java index 04b8cae7d96..7110df39297 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,7 +19,11 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** Plugin field collection struct. */ +/** + * \struct PluginFieldCollection + * + * \brief Plugin field collection struct. 
+ * */ @Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class PluginFieldCollection extends Pointer { static { Loader.load(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java index 5931be6747e..03bc6f60b88 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -19,12 +19,13 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; -/** \struct PluginTensorDesc +/** + * \struct PluginTensorDesc * * \brief Fields that a plugin might see for an input or output. * * Scale is only valid when data type is DataType::kINT8. TensorRT will set - * the value to -1.0f if it is invalid. + * the value to -1.0F if it is invalid. * * @see IPluginV2IOExt::supportsFormatCombination * @see IPluginV2IOExt::configurePlugin @@ -48,7 +49,7 @@ public class PluginTensorDesc extends Pointer { } /** Dimensions. */ - public native @ByRef @Cast("nvinfer1::Dims*") Dims32 dims(); public native PluginTensorDesc dims(Dims32 setter); + public native @ByRef @Cast("nvinfer1::Dims*") Dims64 dims(); public native PluginTensorDesc dims(Dims64 setter); /** \warning DataType:kBOOL and DataType::kUINT8 are not supported. */ public native DataType type(); public native PluginTensorDesc type(DataType setter); /** Tensor format. 
*/ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java index 7786fa59f58..84f67117f8a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java index aa1abbcadad..078d5bceb01 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java index e76ca396a4f..07fae30bd89 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,7 +25,6 @@ public class VAlgorithm extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public VAlgorithm(Pointer p) { super(p); } - public native @Const @ByRef @NoException(true) IAlgorithmIOInfo getAlgorithmIOInfo(int index); public native @Const @ByRef @NoException(true) IAlgorithmVariant getAlgorithmVariant(); public native @NoException(true) float getTimingMSec(); public native @Cast("std::size_t") @NoException(true) long getWorkspaceSize(); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java index 7ec610d8542..529f5eaa107 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -26,8 +26,8 @@ public class VAlgorithmContext extends VRoot { public VAlgorithmContext(Pointer p) { super(p); } public native @NoException(true) String getName(); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, OptProfileSelector select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select); public native @NoException(true) int getNbInputs(); public native @NoException(true) int getNbOutputs(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java index 831821a5f37..1a514b72dfa 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,9 +25,8 @@ public class VAlgorithmIOInfo extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VAlgorithmIOInfo(Pointer p) { super(p); } - public native @NoException(true) TensorFormat getTensorFormat(); public native @NoException(true) DataType getDataType(); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides(); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrides(); public native @Cast("int64_t") @NoException(true) long getVectorizedDim(); public native @Cast("int64_t") @NoException(true) long getComponentsPerElement(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java index d3917764604..38174713f91 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java index f311d9f8cf9..39d5960824d 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java index 7eeadeb18c2..5d6d4177490 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,15 +25,12 @@ public class VBuilder extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VBuilder(Pointer p) { super(p); } - public native @NoException(true) void setMaxBatchSize(int batchSize); - public native @NoException(true) int getMaxBatchSize(); public native @Cast("bool") @NoException(true) boolean platformHasFastFp16(); public native @Cast("bool") @NoException(true) boolean platformHasFastInt8(); public native @NoException(true) int getMaxDLABatchSize(); public native @NoException(true) int getNbDLACores(); public native @NoException(true) void setGpuAllocator(IGpuAllocator allocator); public native @NoException(true) IBuilderConfig createBuilderConfig(); - public native @NoException(true) ICudaEngine buildEngineWithConfig(@ByRef INetworkDefinition network, @ByRef IBuilderConfig config); public native @NoException(true) INetworkDefinition createNetworkV2(@Cast("nvinfer1::NetworkDefinitionCreationFlags") int flags); public native @NoException(true) IOptimizationProfile createOptimizationProfile(); public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java index 90f35592ffe..7f6fdd089ba 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,8 +25,6 @@ public class VBuilderConfig extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VBuilderConfig(Pointer p) { super(p); } - public native @NoException(true) void setMinTimingIterations(int minTiming); - public native @NoException(true) int getMinTimingIterations(); public native @NoException(true) void setAvgTimingIterations(int avgTiming); public native @NoException(true) int getAvgTimingIterations(); public native @NoException(true) void setEngineCapability(EngineCapability capability); @@ -34,8 +32,6 @@ public class VBuilderConfig extends VRoot { public native @NoException(true) EngineCapability getEngineCapability(); public native @NoException(true) void setInt8Calibrator(IInt8Calibrator calibrator); public native @NoException(true) IInt8Calibrator getInt8Calibrator(); - public native @NoException(true) void setMaxWorkspaceSize(@Cast("std::size_t") long workspaceSize); - public native @Cast("std::size_t") @NoException(true) long getMaxWorkspaceSize(); public native @NoException(true) void setFlags(@Cast("nvinfer1::BuilderFlags") int builderFlags); public native @Cast("nvinfer1::BuilderFlags") @NoException(true) int getFlags(); public native @NoException(true) void clearFlag(BuilderFlag builderFlag); @@ -101,4 +97,6 @@ public class VBuilderConfig extends VRoot { public native @NoException(true) int getNbPluginsToSerialize(); public native @NoException(true) void setMaxAuxStreams(int nbStreams); public native @NoException(true) int getMaxAuxStreams(); + public native @NoException(true) void setProgressMonitor(IProgressMonitor monitor); + public native 
@NoException(true) IProgressMonitor getProgressMonitor(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java index a14208e81a9..fa548842c17 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java index 0f33506174e..78a20653dea 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java index 86946195d28..2b0a5e46715 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java index 47bfedb9dc9..9991889f5a1 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java 
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java index 20cca99ce3b..87e3ba0e56e 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java index 0f4c77030b0..ec7e7de8e5b 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java index 644e2f2a1f2..c2ab52420e7 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -27,6 +27,6 @@ public class 
VConstantLayer extends VRoot { public native @NoException(true) void setWeights(@ByVal Weights weights); public native @ByVal @NoException(true) Weights getWeights(); - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java index 043a9fec301..c8b92c0fd8c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,35 +25,27 @@ public class VConvolutionLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public VConvolutionLayer(Pointer p) { super(p); } - public native @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize); - public native @ByVal @NoException(true) DimsHW getKernelSize(); - public native @NoException(true) void setNbOutputMaps(int nbOutputMaps); - public native @NoException(true) int getNbOutputMaps(); - public native @NoException(true) void setStride(@ByVal DimsHW stride); - public native @ByVal @NoException(true) DimsHW getStride(); - public native @NoException(true) void setPadding(@ByVal DimsHW padding); - public native @ByVal @NoException(true) DimsHW getPadding(); - public native @NoException(true) void setNbGroups(int nbGroups); - public native @NoException(true) int getNbGroups(); + public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps); + public native @Cast("int64_t") @NoException(true) long getNbOutputMaps(); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); public native @NoException(true) void setKernelWeights(@ByVal Weights weights); public native @ByVal @NoException(true) Weights getKernelWeights(); public native @NoException(true) void setBiasWeights(@ByVal Weights weights); public native @ByVal @NoException(true) Weights getBiasWeights(); - public native @NoException(true) void setDilation(@ByVal DimsHW dilation); - public native @ByVal @NoException(true) DimsHW getDilation(); - public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public 
native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); public native @NoException(true) void setPaddingMode(PaddingMode paddingMode); public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode); public native @NoException(true) PaddingMode getPaddingMode(); - public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd(); - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); - public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd(); + public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd(); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); + public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef 
Dims64 dilation); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java index 51e4225e53a..463bcdd9a0d 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,38 +25,20 @@ public class VCudaEngine extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VCudaEngine(Pointer p) { super(p); } - public native @NoException(true) int getNbBindings(); - public native @NoException(true) int getBindingIndex(String name); - public native @NoException(true) int getBindingIndex(@Cast("const char*") BytePointer name); - public native @NoException(true) String getBindingName(int bindingIndex); - public native @Cast("bool") @NoException(true) boolean bindingIsInput(int bindingIndex); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex); - public native @NoException(true) DataType getBindingDataType(int bindingIndex); - public native @NoException(true) int getMaxBatchSize(); + public native @NoException(true) ICudaEngine getPImpl(); public native @NoException(true) int getNbLayers(); public native @NoException(true) IHostMemory serialize(); - public native @NoException(true) IExecutionContext createExecutionContext(); - public native @NoException(true) TensorLocation getLocation(int bindingIndex); + public native @NoException(true) IExecutionContext createExecutionContext(ExecutionContextAllocationStrategy strategy); + public native @NoException(true) IExecutionContext 
createExecutionContext(@Cast("nvinfer1::ExecutionContextAllocationStrategy") int strategy); public native @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory(); public native @Cast("size_t") @NoException(true) long getDeviceMemorySize(); public native @Cast("bool") @NoException(true) boolean isRefittable(); - public native @NoException(true) int getBindingBytesPerComponent(int bindingIndex); - public native @NoException(true) int getBindingComponentsPerElement(int bindingIndex); - public native @NoException(true) TensorFormat getBindingFormat(int bindingIndex); - public native @NoException(true) String getBindingFormatDesc(int bindingIndex); - public native @NoException(true) int getBindingVectorizedDim(int bindingIndex); public native @NoException(true) String getName(); public native @NoException(true) int getNbOptimizationProfiles(); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions( - int bindingIndex, int profileIndex, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions( - int bindingIndex, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); - public native @Const @NoException(true) IntPointer getProfileShapeValues( - int profileIndex, int inputIndex, OptProfileSelector select); - public native @Const @NoException(true) IntBuffer getProfileShapeValues( - int profileIndex, int inputIndex, @Cast("nvinfer1::OptProfileSelector") int select); - public native @Cast("bool") @NoException(true) boolean isShapeBinding(int bindingIndex); - public native @Cast("bool") @NoException(true) boolean isExecutionBinding(int bindingIndex); + public native @Const @NoException(true) IntPointer getProfileTensorValues( + String tensorName, int profileIndex, OptProfileSelector select); + public native @Const @NoException(true) IntBuffer getProfileTensorValues( + @Cast("const char*") BytePointer tensorName, int profileIndex, 
@Cast("nvinfer1::OptProfileSelector") int select); public native @NoException(true) EngineCapability getEngineCapability(); public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); public native @NoException(true) IErrorRecorder getErrorRecorder(); @@ -64,8 +46,8 @@ public class VCudaEngine extends VRoot { public native @Cast("nvinfer1::TacticSources") @NoException(true) int getTacticSources(); public native @NoException(true) ProfilingVerbosity getProfilingVerbosity(); public native @NoException(true) IEngineInspector createEngineInspector(); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName); public native @NoException(true) DataType getTensorDataType(String tensorName); public native @NoException(true) @Cast("nvinfer1::DataType") int getTensorDataType(@Cast("const char*") BytePointer tensorName); public native @NoException(true) TensorLocation getTensorLocation(String tensorName); @@ -84,14 +66,13 @@ public class VCudaEngine extends VRoot { public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDesc(@Cast("const char*") BytePointer tensorName); public native @NoException(true) int getTensorVectorizedDim(String tensorName); public native @NoException(true) int getTensorVectorizedDim(@Cast("const char*") BytePointer tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape( + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape( String tensorName, int profileIndex, OptProfileSelector select); - public native 
@ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape( + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape( @Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select); public native @NoException(true) int getNbIOTensors(); public native @NoException(true) String getIOTensorName(int index); public native @NoException(true) HardwareCompatibilityLevel getHardwareCompatibilityLevel(); - public native @NoException(true) ICudaEngine getPImpl(); public native @NoException(true) int getNbAuxStreams(); public native @NoException(true) int getTensorBytesPerComponentV2(String tensorName, int profileIndex); @@ -104,4 +85,18 @@ public class VCudaEngine extends VRoot { public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDescV2(@Cast("const char*") BytePointer tensorName, int profileIndex); public native @NoException(true) int getTensorVectorizedDimV2(String tensorName, int profileIndex); public native @NoException(true) int getTensorVectorizedDimV2(@Cast("const char*") BytePointer tensorName, int profileIndex); + + public native @NoException(true) ISerializationConfig createSerializationConfig(); + public native @NoException(true) IHostMemory serializeWithConfig(@ByRef ISerializationConfig config); + + public native @Cast("size_t") @NoException(true) long getDeviceMemorySizeForProfile(int profileIndex); + public native @NoException(true) IRefitter createRefitter(@ByRef ILogger logger); + + public native @Cast("bool") @NoException(true) boolean setWeightStreamingBudget(@Cast("int64_t") long gpuMemoryBudget); + public native @Cast("int64_t") @NoException(true) long getWeightStreamingBudget(); + public native @Cast("int64_t") @NoException(true) long getMinimumWeightStreamingBudget(); + public native @Cast("int64_t") @NoException(true) long getStreamableWeightsSize(); + + public native @Cast("bool") @NoException(true) boolean 
isDebugTensor(String name); + public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Cast("const char*") BytePointer name); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java index c76ca6716ae..93ccaa7621a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,33 +25,27 @@ public class VDeconvolutionLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VDeconvolutionLayer(Pointer p) { super(p); } - public native @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize); - public native @ByVal @NoException(true) DimsHW getKernelSize(); - public native @NoException(true) void setNbOutputMaps(int nbOutputMaps); - public native @NoException(true) int getNbOutputMaps(); - public native @NoException(true) void setStride(@ByVal DimsHW stride); - public native @ByVal @NoException(true) DimsHW getStride(); - public native @NoException(true) void setPadding(@ByVal DimsHW padding); - public native @ByVal @NoException(true) DimsHW getPadding(); - public native @NoException(true) void setNbGroups(int nbGroups); - public native @NoException(true) int getNbGroups(); + public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps); + public native @Cast("int64_t") @NoException(true) long getNbOutputMaps(); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); public native @NoException(true) void setKernelWeights(@ByVal Weights weights); public native @ByVal 
@NoException(true) Weights getKernelWeights(); public native @NoException(true) void setBiasWeights(@ByVal Weights weights); public native @ByVal @NoException(true) Weights getBiasWeights(); - public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); public native @NoException(true) void setPaddingMode(PaddingMode paddingMode); public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode); public native @NoException(true) PaddingMode getPaddingMode(); - public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd(); - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); - public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 
getDilationNd(); + public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd(); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); + public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java index de71528ff90..2303d02fa38 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -27,4 +27,7 @@ public class VDequantizeLayer extends VRoot { public native @NoException(true) int getAxis(); public native @NoException(true) void setAxis(int axis); + public native @NoException(true) DataType getToType(); + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java index 5efaa49e8da..117b48b7d60 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -26,5 +26,6 @@ public class VDimensionExpr extends VRoot { public VDimensionExpr(Pointer p) { super(p); } public native @Cast("bool") boolean isConstant(); - public native int getConstantValue(); + public native @Cast("int64_t") long getConstantValue(); + public native @Cast("bool") boolean isSizeTensor(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java index 322f158d9cc..1abe6059df9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java index 171488f4909..beba6204e6c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java index 95654b852f8..08d714c6504 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,6 +25,7 @@ public class VEngineInspector extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VEngineInspector(Pointer p) { super(p); } + public native @NoException(true) IEngineInspector getPImpl(); public native @Cast("bool") @NoException(true) boolean setExecutionContext(@Const IExecutionContext context); public native @Const @NoException(true) IExecutionContext getExecutionContext(); public native @NoException(true) String getLayerInformation(int layerIndex, LayerInformationFormat format); @@ -33,5 +34,4 @@ public class VEngineInspector extends VRoot { public native @NoException(true) @Cast("const char*") BytePointer getEngineInformation(@Cast("nvinfer1::LayerInformationFormat") int format); public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); public native @NoException(true) IErrorRecorder getErrorRecorder(); - public native @NoException(true) IEngineInspector getPImpl(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java index 4b2db0b596c..7b1c4b8c353 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,12 +25,7 @@ public class VExecutionContext extends VRoot { /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public VExecutionContext(Pointer p) { super(p); } - public native @Cast("bool") @NoException(true) boolean execute(int batchSize, @Cast("void*const*") PointerPointer bindings); - public native @Cast("bool") @NoException(true) boolean execute(int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings); - public native @Cast("bool") @NoException(true) boolean enqueue( - int batchSize, @Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); - public native @Cast("bool") @NoException(true) boolean enqueue( - int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); + public native @NoException(true) IExecutionContext getPImpl(); public native @NoException(true) void setDebugSync(@Cast("bool") boolean sync); public native @Cast("bool") @NoException(true) boolean getDebugSync(); public native @NoException(true) void setProfiler(IProfiler arg0); @@ -40,41 +35,31 @@ public class VExecutionContext extends VRoot { public native @NoException(true) void setName(@Cast("const char*") BytePointer name); public native @NoException(true) String getName(); public native @NoException(true) void setDeviceMemory(Pointer memory); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides(int bindingIndex); - public native @Cast("bool") @NoException(true) boolean setOptimizationProfile(int profileIndex); public native @NoException(true) int getOptimizationProfile(); - public native @Cast("bool") @NoException(true) boolean setBindingDimensions(int bindingIndex, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex); - public native @Cast("bool") @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const IntPointer data); - public native @Cast("bool") @NoException(true) boolean 
setInputShapeBinding(int bindingIndex, @Const IntBuffer data); - public native @Cast("bool") @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const int[] data); - public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, IntPointer data); - public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, IntBuffer data); - public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, int[] data); public native @Cast("bool") @NoException(true) boolean allInputDimensionsSpecified(); public native @Cast("bool") @NoException(true) boolean allInputShapesSpecified(); public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); public native @NoException(true) IErrorRecorder getErrorRecorder(); public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") PointerPointer bindings); public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") @ByPtrPtr Pointer bindings); - public native @Cast("bool") @NoException(true) boolean enqueueV2(@Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); - public native @Cast("bool") @NoException(true) boolean enqueueV2(@Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed); public native @Cast("bool") @NoException(true) boolean setOptimizationProfileAsync(int profileIndex, CUstream_st stream); public native @NoException(true) void setEnqueueEmitsProfile(@Cast("bool") boolean enqueueEmitsProfile); public native @Cast("bool") @NoException(true) boolean getEnqueueEmitsProfile(); public native @Cast("bool") @NoException(true) boolean reportToProfiler(); - public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); - public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const 
char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(String tensorName); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(@Cast("const char*") BytePointer tensorName); + public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(String tensorName); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(@Cast("const char*") BytePointer tensorName); public native @Cast("bool") @NoException(true) boolean setTensorAddress(String tensorName, Pointer data); public native @Cast("bool") @NoException(true) boolean setTensorAddress(@Cast("const char*") BytePointer tensorName, Pointer data); public native @Const @NoException(true) Pointer getTensorAddress(String tensorName); public native @Const @NoException(true) Pointer getTensorAddress(@Cast("const char*") BytePointer tensorName); public native @Cast("bool") @NoException(true) boolean setInputTensorAddress(String tensorName, @Const Pointer data); public native @Cast("bool") @NoException(true) boolean 
setInputTensorAddress(@Cast("const char*") BytePointer tensorName, @Const Pointer data); + public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(String tensorName, Pointer data); + public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(@Cast("const char*") BytePointer tensorName, Pointer data); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") PointerPointer tensorNames); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr BytePointer tensorNames); public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr ByteBuffer tensorNames); @@ -97,6 +82,13 @@ public class VExecutionContext extends VRoot { public native @Cast("bool") @NoException(true) boolean setNvtxVerbosity(ProfilingVerbosity verbosity); public native @Cast("bool") @NoException(true) boolean setNvtxVerbosity(@Cast("nvinfer1::ProfilingVerbosity") int verbosity); public native @NoException(true) ProfilingVerbosity getNvtxVerbosity(); - public native @NoException(true) IExecutionContext getPImpl(); public native @NoException(true) void setAuxStreams(@ByPtrPtr CUstream_st auxStreams, int nbStreams); + public native @Cast("bool") @NoException(true) boolean setDebugListener(IDebugListener listener); + public native @NoException(true) IDebugListener getDebugListener(); + public native @Cast("bool") @NoException(true) boolean setTensorDebugState(String name, @Cast("bool") boolean flag); + public native @Cast("bool") @NoException(true) boolean setTensorDebugState(@Cast("const char*") BytePointer name, @Cast("bool") boolean flag); + public native @Cast("bool") @NoException(true) boolean getDebugState(String name); + public native @Cast("bool") @NoException(true) boolean getDebugState(@Cast("const char*") BytePointer name); + public native @Cast("bool") @NoException(true) boolean setAllTensorsDebugState(@Cast("bool") boolean flag); + public native 
@Cast("size_t") @NoException(true) long updateDeviceMemorySizeForShapes(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java index fa5194f32ef..7113f628ead 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,9 +25,11 @@ public class VExprBuilder extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VExprBuilder(Pointer p) { super(p); } - public native @Const IDimensionExpr constant(int value); + public native @Const IDimensionExpr constant(@Cast("int64_t") long value); public native @Const IDimensionExpr operation( DimensionOperation op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second); public native @Const IDimensionExpr operation( @Cast("nvinfer1::DimensionOperation") int op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second); + public native @Const IDimensionExpr declareSizeTensor( + int outputIndex, @Const @ByRef IDimensionExpr opt, @Const @ByRef IDimensionExpr upper); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java index 08cb919fcfc..f764d46185a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,8 +25,8 @@ public class VFillLayer extends VRoot { /** Pointer cast constructor. 
Invokes {@link Pointer#Pointer(Pointer)}. */ public VFillLayer(Pointer p) { super(p); } - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); public native @NoException(true) void setOperation(FillOperation op); public native @NoException(true) void setOperation(@Cast("nvinfer1::FillOperation") int op); public native @NoException(true) FillOperation getOperation(); @@ -34,4 +34,12 @@ public class VFillLayer extends VRoot { public native @NoException(true) double getAlpha(); public native @NoException(true) void setBeta(double beta); public native @NoException(true) double getBeta(); + public native @NoException(true) void setAlphaInt64(@Cast("int64_t") long alpha); + public native @Cast("int64_t") @NoException(true) long getAlphaInt64(); + public native @NoException(true) void setBetaInt64(@Cast("int64_t") long beta); + public native @Cast("int64_t") @NoException(true) long getBetaInt64(); + public native @Cast("bool") @NoException(true) boolean isAlphaBetaInt64(); + public native @NoException(true) DataType getToType(); + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java index b7edd5a86ca..f648f8e1427 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java index 5732b9b10b8..9d299a3631f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java index 37b1b519308..bbcaeb66f65 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java index 6654ba3e52b..e6f9107809c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java index 859fa0cc397..6fe180b929f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java +++ 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java index d4003bf2dda..8717e14948a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java index 3d5b3b5375c..d7e4bb232aa 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,8 +25,8 @@ public class VLRNLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public VLRNLayer(Pointer p) { super(p); } - public native @NoException(true) void setWindowSize(int windowSize); - public native @NoException(true) int getWindowSize(); + public native @NoException(true) void setWindowSize(@Cast("int64_t") long windowSize); + public native @Cast("int64_t") @NoException(true) long getWindowSize(); public native @NoException(true) void setAlpha(float alpha); public native @NoException(true) float getAlpha(); public native @NoException(true) void setBeta(float beta); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java index cdf47ece051..e3f8409acd6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java index f827c6bc663..ca7c5dfc0a4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java index 56106d38e85..0abb8ae8f30 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 
1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java index 03aaf11d24b..f2660e811e7 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java index 0d74832ae1d..238a009f9ea 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java index b55fcce5221..5d129d454a6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java index d2c56f2168c..0b613e85553 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java 
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,30 +25,23 @@ public class VNetworkDefinition extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VNetworkDefinition(Pointer p) { super(p); } - public native @NoException(true) ITensor addInput(String name, DataType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); + public native @NoException(true) ITensor addInput(String name, DataType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); public native @NoException(true) void markOutput(@ByRef ITensor tensor); - public native @NoException(true) IConvolutionLayer addConvolution(@ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, - @ByVal Weights kernelWeights, @ByVal Weights biasWeights); - public native @NoException(true) IFullyConnectedLayer addFullyConnected( - @ByRef ITensor input, int nbOutputs, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, ActivationType type); public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, @Cast("nvinfer1::ActivationType") int type); - public native @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, PoolingType type, @ByVal DimsHW windowSize); - public native @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, 
@Cast("nvinfer1::PoolingType") int type, @ByVal DimsHW windowSize); - public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, int window, float alpha, float beta, float k); - public native @NoException(true) IScaleLayer addScale(@ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power); - public native @NoException(true) IScaleLayer addScale(@ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power); + public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, @Cast("int64_t") long window, float alpha, float beta, float k); + public native @NoException(true) IScaleLayer addScale( + @ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power); + public native @NoException(true) IScaleLayer addScale( + @ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power); public native @NoException(true) ISoftMaxLayer addSoftMax(@ByRef ITensor input); public native @NoException(true) IConcatenationLayer addConcatenation(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs); public native @NoException(true) IConcatenationLayer addConcatenation(@ByPtrPtr ITensor inputs, int nbInputs); - public native @NoException(true) IDeconvolutionLayer addDeconvolution( - @ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); public native @NoException(true) IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, ElementWiseOperation op); public native @NoException(true) IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, @Cast("nvinfer1::ElementWiseOperation") int op); public native @NoException(true) IUnaryLayer addUnary(@ByRef ITensor input, UnaryOperation operation); public native @NoException(true) 
IUnaryLayer addUnary(@ByRef ITensor input, @Cast("nvinfer1::UnaryOperation") int operation); - public native @NoException(true) IPaddingLayer addPadding(@ByRef ITensor input, @ByVal DimsHW prePadding, @ByVal DimsHW postPadding); public native @NoException(true) IShuffleLayer addShuffle(@ByRef ITensor input); public native @NoException(true) int getNbLayers(); public native @NoException(true) ILayer getLayer(int index); @@ -68,17 +61,17 @@ public class VNetworkDefinition extends VRoot { @ByRef ITensor input0, MatrixOperation op0, @ByRef ITensor input1, MatrixOperation op1); public native @NoException(true) IMatrixMultiplyLayer addMatrixMultiply( @ByRef ITensor input0, @Cast("nvinfer1::MatrixOperation") int op0, @ByRef ITensor input1, @Cast("nvinfer1::MatrixOperation") int op1); - public native @NoException(true) IConstantLayer addConstant(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @ByVal Weights weights); - public native @NoException(true) IRNNv2Layer addRNNv2( - @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, RNNOperation op); - public native @NoException(true) IRNNv2Layer addRNNv2( - @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, @Cast("nvinfer1::RNNOperation") int op); + public native @NoException(true) IConstantLayer addConstant(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @ByVal Weights weights); public native @NoException(true) IIdentityLayer addIdentity(@ByRef ITensor input); public native @NoException(true) void removeTensor(@ByRef ITensor tensor); public native @NoException(true) void unmarkOutput(@ByRef ITensor tensor); public native @NoException(true) IPluginV2Layer addPluginV2(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @ByRef IPluginV2 plugin); public native @NoException(true) IPluginV2Layer addPluginV2(@ByPtrPtr ITensor inputs, int nbInputs, @ByRef IPluginV2 plugin); - public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @ByVal 
@Cast("nvinfer1::Dims*") Dims32 start, @ByVal @Cast("nvinfer1::Dims*") Dims32 size, @ByVal @Cast("nvinfer1::Dims*") Dims32 stride); + public native @NoException(true) IPluginV3Layer addPluginV3(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @Cast("nvinfer1::ITensor*const*") PointerPointer shapeInputs, + int nbShapeInputs, @ByRef IPluginV3 plugin); + public native @NoException(true) IPluginV3Layer addPluginV3(@ByPtrPtr ITensor inputs, int nbInputs, @ByPtrPtr ITensor shapeInputs, + int nbShapeInputs, @ByRef IPluginV3 plugin); + public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 start, @Cast("const nvinfer1::Dims*") @ByRef Dims64 size, @Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); public native @NoException(true) void setName(String name); public native @NoException(true) void setName(@Cast("const char*") BytePointer name); public native @NoException(true) String getName(); @@ -88,22 +81,21 @@ public class VNetworkDefinition extends VRoot { public native @Cast("bool") @NoException(true) boolean unmarkOutputForShapes(@ByRef ITensor tensor); public native @NoException(true) IParametricReLULayer addParametricReLU(@ByRef ITensor input, @ByRef ITensor slope); public native @NoException(true) IConvolutionLayer addConvolutionNd( - @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); - public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); - public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); + @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); 
+ public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); + public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); public native @NoException(true) IDeconvolutionLayer addDeconvolutionNd( - @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); + @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights); public native @NoException(true) IScaleLayer addScaleNd( @ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis); public native @NoException(true) IScaleLayer addScaleNd( @ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis); public native @NoException(true) IResizeLayer addResize(@ByRef ITensor input); - public native @Cast("bool") @NoException(true) boolean hasExplicitPrecision(); public native @NoException(true) ILoop addLoop(); public native @NoException(true) ISelectLayer addSelect(@ByRef ITensor condition, @ByRef ITensor thenInput, @ByRef ITensor elseInput); - public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, FillOperation op); - public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @Cast("nvinfer1::FillOperation") int op); - public native @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @ByVal @Cast("nvinfer1::Dims*") Dims32 prePadding, @ByVal @Cast("nvinfer1::Dims*") Dims32 postPadding); + public native @NoException(true) IFillLayer addFill(@Cast("const 
nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op); + public native @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op); + public native @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 prePadding, @Cast("const nvinfer1::Dims*") @ByRef Dims64 postPadding); public native @Cast("bool") @NoException(true) boolean setWeightsName(@ByVal Weights weights, String name); public native @Cast("bool") @NoException(true) boolean setWeightsName(@ByVal Weights weights, @Cast("const char*") BytePointer name); public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); @@ -130,4 +122,16 @@ public class VNetworkDefinition extends VRoot { public native @NoException(true) ICastLayer addCast(@ByRef ITensor input, DataType toType); public native @NoException(true) ICastLayer addCast(@ByRef ITensor input, @Cast("nvinfer1::DataType") int toType); public native @ByRef @NoException(true) IBuilder getBuilder(); + public native @Cast("nvinfer1::NetworkDefinitionCreationFlags") @NoException(true) int getFlags(); + public native @Cast("bool") @NoException(true) boolean getFlag(NetworkDefinitionCreationFlag networkDefinitionCreationFlag); + public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::NetworkDefinitionCreationFlag") int networkDefinitionCreationFlag); + public native @NoException(true) IQuantizeLayer addQuantizeV2(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType); + public native @NoException(true) IQuantizeLayer addQuantizeV2(@ByRef ITensor input, @ByRef ITensor scale, @Cast("nvinfer1::DataType") int outputType); + public native @NoException(true) IDequantizeLayer addDequantizeV2(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType); + public native @NoException(true) IDequantizeLayer addDequantizeV2(@ByRef ITensor input, @ByRef ITensor scale, 
@Cast("nvinfer1::DataType") int outputType); + public native @NoException(true) IFillLayer addFillV2(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op, DataType outputType); + public native @NoException(true) IFillLayer addFillV2(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op, @Cast("nvinfer1::DataType") int outputType); + public native @Cast("bool") @NoException(true) boolean markDebug(@ByRef ITensor tensor); + public native @Cast("bool") @NoException(true) boolean unmarkDebug(@ByRef ITensor tensor); + public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Const @ByRef ITensor tensor); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java index 9ba91611db6..158352603bb 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java index 0b447211df1..7735c39cdd6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -29,8 +29,8 @@ public class VNormalizationLayer extends VRoot { public native @NoException(true) float getEpsilon(); public native @NoException(true) void setAxes(@Cast("uint32_t") int axesMask); public native 
@Cast("uint32_t") @NoException(true) int getAxes(); - public native @NoException(true) void setNbGroups(int nbGroups); - public native @NoException(true) int getNbGroups(); + public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups); + public native @Cast("int64_t") @NoException(true) long getNbGroups(); public native @NoException(true) void setComputePrecision(DataType type); public native @NoException(true) void setComputePrecision(@Cast("nvinfer1::DataType") int type); public native @NoException(true) DataType getComputePrecision(); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java index a5ce0c38743..5b28db15e2c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java index d88fb92bbfb..aa10afb4f11 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,10 +25,10 @@ public class VOptimizationProfile extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public VOptimizationProfile(Pointer p) { super(p); } - public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims); - public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(String inputName, OptProfileSelector select); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select); + public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(String inputName, OptProfileSelector select); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select); public native @Cast("bool") @NoException(true) boolean setShapeValues( String inputName, OptProfileSelector select, @Const IntPointer values, int nbValues); public native @Cast("bool") @NoException(true) boolean setShapeValues( diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java index ae6f35f03b5..75e6bed3783 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java +++ 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,12 +25,8 @@ public class VPaddingLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VPaddingLayer(Pointer p) { super(p); } - public native @NoException(true) void setPrePadding(@ByVal DimsHW padding); - public native @ByVal @NoException(true) DimsHW getPrePadding(); - public native @NoException(true) void setPostPadding(@ByVal DimsHW padding); - public native @ByVal @NoException(true) DimsHW getPostPadding(); - public native @NoException(true) void setPrePaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePaddingNd(); - public native @NoException(true) void setPostPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPaddingNd(); + public native @NoException(true) void setPrePaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePaddingNd(); + public native @NoException(true) void setPostPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPaddingNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java index a12b4ff502c..6d30fcf197f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE 
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java index b3688bc5d65..e2e3df8b8c4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java index 28619f5ec0e..71a28d647fc 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java similarity index 55% rename from tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java rename to tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java index caecea44990..a6a8de82d67 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -20,15 +20,10 @@ @Namespace("nvinfer1::apiv") @Properties(inherit 
= org.bytedeco.tensorrt.presets.nvinfer.class) -public class VFullyConnectedLayer extends VRoot { +public class VPluginV3Layer extends VRoot { static { Loader.load(); } /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public VFullyConnectedLayer(Pointer p) { super(p); } + public VPluginV3Layer(Pointer p) { super(p); } - public native @NoException(true) void setNbOutputChannels(int nbOutputs); - public native @NoException(true) int getNbOutputChannels(); - public native @NoException(true) void setKernelWeights(@ByVal Weights weights); - public native @ByVal @NoException(true) Weights getKernelWeights(); - public native @NoException(true) void setBiasWeights(@ByVal Weights weights); - public native @ByVal @NoException(true) Weights getBiasWeights(); + public native @ByRef @NoException(true) IPluginV3 getPlugin(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java index b3a3a6751bc..94690b71774 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -28,27 +28,21 @@ public class VPoolingLayer extends VRoot { public native @NoException(true) void setPoolingType(PoolingType type); public native @NoException(true) void setPoolingType(@Cast("nvinfer1::PoolingType") int type); public native @NoException(true) PoolingType getPoolingType(); - public native @NoException(true) void setWindowSize(@ByVal DimsHW windowSize); - public native @ByVal @NoException(true) DimsHW getWindowSize(); - public native @NoException(true) void setStride(@ByVal DimsHW stride); - public native @ByVal @NoException(true) DimsHW getStride(); - public native @NoException(true) 
void setPadding(@ByVal DimsHW padding); - public native @ByVal @NoException(true) DimsHW getPadding(); public native @NoException(true) void setBlendFactor(float blendFactor); public native @NoException(true) float getBlendFactor(); public native @NoException(true) void setAverageCountExcludesPadding(@Cast("bool") boolean exclusive); public native @Cast("bool") @NoException(true) boolean getAverageCountExcludesPadding(); - public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding(); - public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding(); + public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding(); + public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding(); public native @NoException(true) void setPaddingMode(PaddingMode paddingMode); public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode); public native @NoException(true) PaddingMode getPaddingMode(); - public native @NoException(true) void setWindowSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getWindowSizeNd(); - public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd(); - public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding); - public native @ByVal 
@Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd(); + public native @NoException(true) void setWindowSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getWindowSizeNd(); + public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd(); + public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java index 6f8a3996909..2bb6ae90f21 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -27,4 +27,7 @@ public class VQuantizeLayer extends VRoot { public native @NoException(true) int getAxis(); public native @NoException(true) void setAxis(int axis); + public native @NoException(true) DataType getToType(); + public native @NoException(true) void setToType(DataType toType); + public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java deleted file mode 100644 index 94d18c82b23..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java +++ /dev/null @@ -1,55 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package 
org.bytedeco.tensorrt.nvinfer; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; - -import static org.bytedeco.tensorrt.global.nvinfer.*; - - -@Namespace("nvinfer1::apiv") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) -public class VRNNv2Layer extends VRoot { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public VRNNv2Layer(Pointer p) { super(p); } - - public native @NoException(true) int getLayerCount(); - public native @NoException(true) int getHiddenSize(); - public native @NoException(true) int getMaxSeqLength(); - public native @NoException(true) int getDataLength(); - public native @NoException(true) void setSequenceLengths(@ByRef ITensor seqLengths); - public native @NoException(true) ITensor getSequenceLengths(); - public native @NoException(true) void setOperation(RNNOperation op); - public native @NoException(true) void setOperation(@Cast("nvinfer1::RNNOperation") int op); - public native @NoException(true) RNNOperation getOperation(); - public native @NoException(true) void setInputMode(RNNInputMode op); - public native @NoException(true) void setInputMode(@Cast("nvinfer1::RNNInputMode") int op); - public native @NoException(true) RNNInputMode getInputMode(); - public native @NoException(true) void setDirection(RNNDirection op); - public native @NoException(true) void setDirection(@Cast("nvinfer1::RNNDirection") int op); - public native @NoException(true) RNNDirection getDirection(); - public native @NoException(true) void 
setWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights weights); - public native @NoException(true) void setWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights weights); - public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW); - public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW); - public native @NoException(true) void setBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights bias); - public native @NoException(true) void setBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights bias); - public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW); - public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW); - public native @NoException(true) void setHiddenState(@ByRef ITensor hidden); - public native @NoException(true) ITensor getHiddenState(); - public native @NoException(true) void setCellState(@ByRef ITensor cell); - public native @NoException(true) ITensor getCellState(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java index b8004313c57..8ca6ee9d34c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java index 0da9b90d605..86b044e0469 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java index f44e1304ab7..699191ad037 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java index c662fd4c9bd..a771c189a64 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,6 +25,7 @@ public class VRefitter extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public VRefitter(Pointer p) { super(p); } + public native @NoException(true) IRefitter getPImpl(); public native @Cast("bool") @NoException(true) boolean setWeights(String layerName, WeightsRole role, @Const @ByVal Weights weights); public native @Cast("bool") @NoException(true) boolean setWeights(@Cast("const char*") BytePointer layerName, @Cast("nvinfer1::WeightsRole") int role, @Const @ByVal Weights weights); public native @Cast("bool") @NoException(true) boolean refitCudaEngine(); @@ -61,4 +62,17 @@ public class VRefitter extends VRoot { public native @NoException(true) ILogger getLogger(); public native @Cast("bool") @NoException(true) boolean setMaxThreads(int maxThreads); public native @NoException(true) int getMaxThreads(); + public native @Cast("bool") @NoException(true) boolean setNamedWeightsWithLocation(String name, @ByVal Weights weights, TensorLocation location); + public native @Cast("bool") @NoException(true) boolean setNamedWeightsWithLocation(@Cast("const char*") BytePointer name, @ByVal Weights weights, @Cast("nvinfer1::TensorLocation") int location); + public native @ByVal @NoException(true) Weights getNamedWeights(String weightsName); + public native @ByVal @NoException(true) Weights getNamedWeights(@Cast("const char*") BytePointer weightsName); + public native @NoException(true) TensorLocation getWeightsLocation(String weightsName); + public native @NoException(true) @Cast("nvinfer1::TensorLocation") int getWeightsLocation(@Cast("const char*") BytePointer weightsName); + public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(String weightsName); + public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(@Cast("const char*") BytePointer weightsName); + public native @NoException(true) void setWeightsValidation(@Cast("bool") boolean weightsValidation); + public native @Cast("bool") @NoException(true) boolean getWeightsValidation(); + public native @Cast("bool") @NoException(true) boolean 
refitCudaEngineAsync(CUstream_st stream); + public native @ByVal @NoException(true) Weights getWeightsPrototype(String weightsName); + public native @ByVal @NoException(true) Weights getWeightsPrototype(@Cast("const char*") BytePointer weightsName); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java index da319812af2..e793b2952e4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,19 +25,17 @@ public class VResizeLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VResizeLayer(Pointer p) { super(p); } - public native @NoException(true) void setOutputDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getOutputDimensions(); + public native @NoException(true) void setOutputDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getOutputDimensions(); public native @NoException(true) void setScales(@Const FloatPointer scales, int nbScales); public native @NoException(true) void setScales(@Const FloatBuffer scales, int nbScales); public native @NoException(true) void setScales(@Const float[] scales, int nbScales); public native @NoException(true) int getScales(int size, FloatPointer scales); public native @NoException(true) int getScales(int size, FloatBuffer scales); public native @NoException(true) int getScales(int size, float[] scales); - public native @NoException(true) void setResizeMode(InterpolationMode resizeMode); - public native 
@NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int resizeMode); + public native @NoException(true) void setResizeMode(InterpolationMode interpolationMode); + public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int interpolationMode); public native @NoException(true) InterpolationMode getResizeMode(); - public native @NoException(true) void setAlignCorners(@Cast("bool") boolean alignCorners); - public native @Cast("bool") @NoException(true) boolean getAlignCorners(); public native @NoException(true) void setCoordinateTransformation(ResizeCoordinateTransformation coordTransform); public native @NoException(true) void setCoordinateTransformation(@Cast("nvinfer1::ResizeCoordinateTransformation") int coordTransform); public native @NoException(true) ResizeCoordinateTransformation getCoordinateTransformation(); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java index a8553e7fcec..6df98e8bd93 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java index 5b2ed9eb745..fcd30ae2a5b 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java index 1b422afe130..207a5363fe3 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,8 +25,9 @@ public class VRuntime extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VRuntime(Pointer p) { super(p); } - public native @NoException(true) ICudaEngine deserializeCudaEngine( - @Const Pointer blob, @Cast("std::size_t") long size, IPluginFactory pluginFactory); + public native @NoException(true) IRuntime getPImpl(); + public native @NoException(true) ICudaEngine deserializeCudaEngine(@Const Pointer blob, @Cast("std::size_t") long size); + public native @NoException(true) ICudaEngine deserializeCudaEngine(@ByRef IStreamReader streamReader); public native @NoException(true) void setDLACore(int dlaCore); public native @NoException(true) int getDLACore(); public native @NoException(true) int getNbDLACores(); @@ -41,7 +42,6 @@ public class VRuntime extends VRoot { public native @NoException(true) String getTemporaryDirectory(); public native @NoException(true) void setTempfileControlFlags(@Cast("nvinfer1::TempfileControlFlags") int arg0); public native @Cast("nvinfer1::TempfileControlFlags") @NoException(true) int getTempfileControlFlags(); - public native @NoException(true) IRuntime getPImpl(); public native @ByRef @NoException(true) IPluginRegistry getPluginRegistry(); public native @NoException(true) void setPluginRegistryParent(IPluginRegistry parent); public native @NoException(true) IRuntime loadRuntime(String path); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java 
b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java index 308a7749620..59dc6bad167 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java index 3b329ba34cb..8e58010f05a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java index ed4f3a1008c..5df8094cee5 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java new file mode 100644 index 00000000000..22d4f4a1ca6 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvinfer; + +import java.nio.*; +import 
org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; + +import static org.bytedeco.tensorrt.global.nvinfer.*; + + +@Namespace("nvinfer1::apiv") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) +public class VSerializationConfig extends VRoot { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public VSerializationConfig(Pointer p) { super(p); } + + public native @Cast("bool") @NoException(true) boolean setFlags(@Cast("nvinfer1::SerializationFlags") int serializationFlags); + public native @Cast("nvinfer1::SerializationFlags") @NoException(true) int getFlags(); + public native @Cast("bool") @NoException(true) boolean clearFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean clearFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); + public native @Cast("bool") @NoException(true) boolean setFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean setFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); + public native @Cast("bool") @NoException(true) boolean getFlag(SerializationFlag serializationFlag); + public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java index 33b4a3fac44..20243776aa1 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java index 8594cfccf03..83183bdf4bd 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -27,8 +27,8 @@ public class VShuffleLayer extends VRoot { public native @NoException(true) void setFirstTranspose(@Const @ByRef Permutation permutation); public native @Const @ByRef @NoException(true) Permutation getFirstTranspose(); - public native @NoException(true) void setReshapeDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getReshapeDimensions(); + public native @NoException(true) void setReshapeDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getReshapeDimensions(); public native @NoException(true) void setSecondTranspose(@Const @ByRef Permutation permutation); public native @Const @ByRef @NoException(true) Permutation getSecondTranspose(); public native @NoException(true) void setZeroIsPlaceholder(@Cast("bool") boolean zeroIsPlaceholder); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java index 716458b9e67..be345b9571b 
100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -25,12 +25,12 @@ public class VSliceLayer extends VRoot { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public VSliceLayer(Pointer p) { super(p); } - public native @NoException(true) void setStart(@ByVal @Cast("nvinfer1::Dims*") Dims32 start); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStart(); - public native @NoException(true) void setSize(@ByVal @Cast("nvinfer1::Dims*") Dims32 size); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getSize(); - public native @NoException(true) void setStride(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStride(); + public native @NoException(true) void setStart(@Cast("const nvinfer1::Dims*") @ByRef Dims64 start); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStart(); + public native @NoException(true) void setSize(@Cast("const nvinfer1::Dims*") @ByRef Dims64 size); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getSize(); + public native @NoException(true) void setStride(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStride(); public native @NoException(true) void setMode(SampleMode mode); public native @NoException(true) void setMode(@Cast("nvinfer1::SampleMode") int mode); public native @NoException(true) SampleMode getMode(); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java 
index 35d2bd38569..b9ee7e76ee0 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java index a3f1e096983..a61095f42d7 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -28,8 +28,8 @@ public class VTensor extends VRoot { public native @NoException(true) void setName(String name); public native @NoException(true) void setName(@Cast("const char*") BytePointer name); public native @NoException(true) String getName(); - public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions); - public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(); + public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions); + public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(); public native @NoException(true) void setType(DataType type); public native @NoException(true) void setType(@Cast("nvinfer1::DataType") int type); public native @NoException(true) DataType getType(); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java index 7a5b6171598..3d1edd8709b 100644 --- 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java index 63e6c1c9d1a..0fe298bbb2f 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java index d9dd298ad16..84b7b42d0f4 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java index 4c11890d060..b8717de5bf7 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git 
a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java index f37c89937a0..1dedbbfbd55 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java index 8329d57c519..3645248cef1 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,7 +18,7 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - /** Forward declaration of cublasContext to use in other interfaces */ + /** Forward declaration of cublasContext to use in other interfaces. */ @Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class cublasContext extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java index 3fed7c9742a..c70fa7e1ee9 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer; @@ -18,7 +18,7 @@ import static org.bytedeco.tensorrt.global.nvinfer.*; - /** Forward declaration of cudnnContext to use in other interfaces */ + /** Forward declaration of cudnnContext to use in other interfaces. */ @Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class) public class cudnnContext extends Pointer { /** Empty constructor. Calls {@code super((Pointer)null)}. */ diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java index f79c3a19985..a4fd22acb63 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -22,23 +22,16 @@ /** - * \brief The DetectionOutput plugin layer generates the detection output based on location and confidence predictions by doing non maximum suppression. - * This plugin first decodes the bounding boxes based on the anchors generated. It then performs non_max_suppression on the decoded bounding boxes. 
+ * \struct DetectionOutputParameters + * + * \brief The DetectionOutput plugin layer generates the detection output + * based on location and confidence predictions by doing non maximum suppression. + * + * This plugin first decodes the bounding boxes based on the anchors generated. + * It then performs non_max_suppression on the decoded bounding boxes. * DetectionOutputParameters defines a set of parameters for creating the DetectionOutput plugin layer. - * It contains: - * @param shareLocation If true, bounding box are shared among different classes. - * @param varianceEncodedInTarget If true, variance is encoded in target. Otherwise we need to adjust the predicted offset accordingly. - * @param backgroundLabelId Background label ID. If there is no background class, set it as -1. - * @param numClasses Number of classes to be predicted. - * @param topK Number of boxes per image with top confidence scores that are fed into the NMS algorithm. - * @param keepTopK Number of total bounding boxes to be kept per image after NMS step. - * @param confidenceThreshold Only consider detections whose confidences are larger than a threshold. - * @param nmsThreshold Threshold to be used in NMS. - * @param codeType Type of coding method for bbox. - * @param inputOrder Specifies the order of inputs {loc_data, conf_data, priorbox_data}. - * @param confSigmoid Set to true to calculate sigmoid of confidence scores. - * @param isNormalized Set to true if bounding box data is normalized by the network. - * @param isBatchAgnostic Defaults to true. Set to false if prior boxes are unique per batch + * + * @deprecated Deprecated in TensorRT 10.0. DetectionOutput plugin is deprecated. 
* */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class DetectionOutputParameters extends Pointer { @@ -58,18 +51,33 @@ public class DetectionOutputParameters extends Pointer { return new DetectionOutputParameters((Pointer)this).offsetAddress(i); } + /** If true, bounding box are shared among different classes. */ public native @Cast("bool") boolean shareLocation(); public native DetectionOutputParameters shareLocation(boolean setter); + /** If true, variance is encoded in target. + * Otherwise we need to adjust the predicted offset accordingly. */ public native @Cast("bool") boolean varianceEncodedInTarget(); public native DetectionOutputParameters varianceEncodedInTarget(boolean setter); + /** Background label ID. If there is no background class, set it as -1. */ public native int backgroundLabelId(); public native DetectionOutputParameters backgroundLabelId(int setter); + /** Number of classes to be predicted. */ public native int numClasses(); public native DetectionOutputParameters numClasses(int setter); + /** Number of boxes per image with top confidence scores that are fed + * into the NMS algorithm. */ public native int topK(); public native DetectionOutputParameters topK(int setter); + /** Number of total bounding boxes to be kept per image after NMS step. */ public native int keepTopK(); public native DetectionOutputParameters keepTopK(int setter); + /** Only consider detections whose confidences are larger than a threshold. */ public native float confidenceThreshold(); public native DetectionOutputParameters confidenceThreshold(float setter); + /** Threshold to be used in NMS. */ public native float nmsThreshold(); public native DetectionOutputParameters nmsThreshold(float setter); + /** Type of coding method for bbox. 
*/ public native CodeTypeSSD codeType(); public native DetectionOutputParameters codeType(CodeTypeSSD setter); + /** Specifies the order of inputs {loc_data, conf_data, priorbox_data}. */ public native int inputOrder(int i); public native DetectionOutputParameters inputOrder(int i, int setter); @MemberGetter public native IntPointer inputOrder(); + /** Set to true to calculate sigmoid of confidence scores. */ public native @Cast("bool") boolean confSigmoid(); public native DetectionOutputParameters confSigmoid(boolean setter); + /** Set to true if bounding box data is normalized by the network. */ public native @Cast("bool") boolean isNormalized(); public native DetectionOutputParameters isNormalized(boolean setter); + /** Defaults to true. Set to false if prior boxes are unique per batch. */ public native @Cast("bool") boolean isBatchAgnostic(); public native DetectionOutputParameters isBatchAgnostic(boolean setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java index 0f432b13fa4..18499c8bd73 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -21,18 +21,11 @@ import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - /** + * \struct GridAnchorParameters + * * \brief The Anchor Generator plugin layer generates the prior boxes of designated sizes and aspect ratios across all dimensions (H x W). * GridAnchorParameters defines a set of parameters for creating the plugin layer for all feature maps. - * It contains: - * @param minScale Scale of anchors corresponding to finest resolution. 
- * @param maxScale Scale of anchors corresponding to coarsest resolution. - * @param aspectRatios List of aspect ratios to place on each grid point. - * @param numAspectRatios Number of elements in aspectRatios. - * @param H Height of feature map to generate anchors for. - * @param W Width of feature map to generate anchors for. - * @param variance Variance for adjusting the prior boxes. * */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class GridAnchorParameters extends Pointer { @@ -52,12 +45,19 @@ public class GridAnchorParameters extends Pointer { return new GridAnchorParameters((Pointer)this).offsetAddress(i); } + /** Scale of anchors corresponding to finest resolution. */ public native float minSize(); public native GridAnchorParameters minSize(float setter); + /** Scale of anchors corresponding to coarsest resolution. */ public native float maxSize(); public native GridAnchorParameters maxSize(float setter); + /** List of aspect ratios to place on each grid point. */ public native FloatPointer aspectRatios(); public native GridAnchorParameters aspectRatios(FloatPointer setter); + /** Number of elements in aspectRatios. */ public native int numAspectRatios(); public native GridAnchorParameters numAspectRatios(int setter); + /** Height of feature map to generate anchors for. */ public native int H(); public native GridAnchorParameters H(int setter); + /** Width of feature map to generate anchors for. */ public native int W(); public native GridAnchorParameters W(int setter); + /** Variance for adjusting the prior boxes. 
*/ public native float variance(int i); public native GridAnchorParameters variance(int i, float setter); @MemberGetter public native FloatPointer variance(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java index bd01ef451e3..6cbb65786f6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -24,20 +24,9 @@ /** * \brief The NMSParameters are used by the BatchedNMSPlugin for performing * the non_max_suppression operation over boxes for object detection networks. - * @param shareLocation If set to true, the boxes inputs are shared across all - * classes. If set to false, the boxes input should account for per class box data. - * @param backgroundLabelId Label ID for the background class. If there is no background class, set it as -1 - * @param numClasses Number of classes in the network. - * @param topK Number of bounding boxes to be fed into the NMS step. - * @param keepTopK Number of total bounding boxes to be kept per image after NMS step. - * Should be less than or equal to the topK value. - * @param scoreThreshold Scalar threshold for score (low scoring boxes are removed). - * @param iouThreshold scalar threshold for IOU (new boxes that have high IOU overlap - * with previously selected boxes are removed). - * @param isNormalized Set to false, if the box coordinates are not - * normalized, i.e. not in the range [0,1]. Defaults to false. + * + * @deprecated Deprecated in TensorRT 10.0. BatchedNMSPlugin plugin is deprecated. 
* */ - @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class NMSParameters extends Pointer { static { Loader.load(); } @@ -56,12 +45,25 @@ public class NMSParameters extends Pointer { return new NMSParameters((Pointer)this).offsetAddress(i); } + /** If set to true, the boxes inputs are shared across all classes. + * If set to false, the boxes input should account for per class box data. */ public native @Cast("bool") boolean shareLocation(); public native NMSParameters shareLocation(boolean setter); + /** Label ID for the background class. + * If there is no background class, set it as -1 */ public native int backgroundLabelId(); public native NMSParameters backgroundLabelId(int setter); + /** Number of classes in the network. */ public native int numClasses(); public native NMSParameters numClasses(int setter); + /** Number of bounding boxes to be fed into the NMS step. */ public native int topK(); public native NMSParameters topK(int setter); + /** Number of total bounding boxes to be kept per image after NMS step. + * Should be less than or equal to the topK value. */ public native int keepTopK(); public native NMSParameters keepTopK(int setter); + /** Scalar threshold for score (low scoring boxes are removed). */ public native float scoreThreshold(); public native NMSParameters scoreThreshold(float setter); + /** A scalar threshold for IOU (new boxes that have high IOU overlap + * with previously selected boxes are removed). */ public native float iouThreshold(); public native NMSParameters iouThreshold(float setter); + /** Set to false, if the box coordinates are not normalized, + * i.e. not in the range [0,1]. Defaults to false. 
*/ public native @Cast("bool") boolean isNormalized(); public native NMSParameters isNormalized(boolean setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java index 22912f85287..2ff1f84eeae 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -22,24 +22,12 @@ /** + * \struct PriorBoxParameters + * * \brief The PriorBox plugin layer generates the prior boxes of designated sizes and aspect ratios across all - * dimensions (H x W). PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer. It - * contains: - * @param minSize Minimum box size in pixels. Can not be nullptr. - * @param maxSize Maximum box size in pixels. Can be nullptr. - * @param aspectRatios Aspect ratios of the boxes. Can be nullptr. - * @param numMinSize Number of elements in minSize. Must be larger than 0. - * @param numMaxSize Number of elements in maxSize. Can be 0 or same as numMinSize. - * @param numAspectRatios Number of elements in aspectRatios. Can be 0. - * @param flip If true, will flip each aspect ratio. For example, if there is an aspect ratio "r", the aspect ratio - * "1.0/r" will be generated as well. - * @param clip If true, will clip the prior so that it is within [0,1]. - * @param variance Variance for adjusting the prior boxes. - * @param imgH Image height. If 0, then the H dimension of the data tensor will be used. - * @param imgW Image width. If 0, then the W dimension of the data tensor will be used. - * @param stepH Step in H. 
If 0, then (float)imgH/h will be used where h is the H dimension of the 1st input tensor. - * @param stepW Step in W. If 0, then (float)imgW/w will be used where w is the W dimension of the 1st input tensor. - * @param offset Offset to the top left corner of each cell. + * dimensions (H x W). + * + * PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer. * */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class PriorBoxParameters extends Pointer { @@ -59,19 +47,34 @@ public class PriorBoxParameters extends Pointer { return new PriorBoxParameters((Pointer)this).offsetAddress(i); } + /** Minimum box size in pixels. Can not be nullptr. */ public native FloatPointer minSize(); public native PriorBoxParameters minSize(FloatPointer setter); + /** Maximum box size in pixels. Can be nullptr. */ public native FloatPointer maxSize(); public native PriorBoxParameters maxSize(FloatPointer setter); + /** Aspect ratios of the boxes. Can be nullptr. */ public native FloatPointer aspectRatios(); public native PriorBoxParameters aspectRatios(FloatPointer setter); + /** Number of elements in minSize. Must be larger than 0. */ public native int numMinSize(); public native PriorBoxParameters numMinSize(int setter); + /** Number of elements in maxSize. Can be 0 or same as numMinSize. */ public native int numMaxSize(); public native PriorBoxParameters numMaxSize(int setter); + /** Number of elements in aspectRatios. Can be 0. */ public native int numAspectRatios(); public native PriorBoxParameters numAspectRatios(int setter); + /** If true, will flip each aspect ratio. For example, + * if there is an aspect ratio "r", the aspect ratio "1.0/r" will be generated as well. */ public native @Cast("bool") boolean flip(); public native PriorBoxParameters flip(boolean setter); + /** If true, will clip the prior so that it is within [0,1]. 
*/ public native @Cast("bool") boolean clip(); public native PriorBoxParameters clip(boolean setter); + /** Variance for adjusting the prior boxes. */ public native float variance(int i); public native PriorBoxParameters variance(int i, float setter); @MemberGetter public native FloatPointer variance(); + /** Image height. If 0, then the H dimension of the data tensor will be used. */ public native int imgH(); public native PriorBoxParameters imgH(int setter); + /** Image width. If 0, then the W dimension of the data tensor will be used. */ public native int imgW(); public native PriorBoxParameters imgW(int setter); + /** Step in H. If 0, then (float)imgH/h will be used where h is the H dimension of the 1st input tensor. */ public native float stepH(); public native PriorBoxParameters stepH(float setter); + /** Step in W. If 0, then (float)imgW/w will be used where w is the W dimension of the 1st input tensor. */ public native float stepW(); public native PriorBoxParameters stepW(float setter); + /** Offset to the top left corner of each cell. 
*/ public native float offset(); public native PriorBoxParameters offset(float setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java deleted file mode 100644 index 8f0c5c066df..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java +++ /dev/null @@ -1,49 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvinfer_plugin; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; - -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - - -/** - * \brief The Permute plugin layer permutes the input tensor by changing the memory order of the data. - * Quadruple defines a structure that contains an array of 4 integers. They can represent the permute orders or the - * strides in each dimension. - * */ -@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) -public class Quadruple extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public Quadruple() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public Quadruple(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public Quadruple(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public Quadruple position(long position) { - return (Quadruple)super.position(position); - } - @Override public Quadruple getPointer(long i) { - return new Quadruple((Pointer)this).offsetAddress(i); - } - - public native int data(int i); public native Quadruple data(int i, int setter); - @MemberGetter public native IntPointer data(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java index 5f8d90b862a..dd96e607aef 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -22,19 +22,9 @@ /** + * \struct RPROIParams + * * \brief RPROIParams is used to create the RPROIPlugin instance. - * It contains: - * @param poolingH Height of the output in pixels after ROI pooling on feature map. - * @param poolingW Width of the output in pixels after ROI pooling on feature map. - * @param featureStride Feature stride; ratio of input image size to feature map size. Assuming that max pooling layers - * in the neural network use square filters. - * @param preNmsTop Number of proposals to keep before applying NMS. - * @param nmsMaxOut Number of remaining proposals after applying NMS. - * @param anchorsRatioCount Number of anchor box ratios. - * @param anchorsScaleCount Number of anchor box scales. - * @param iouThreshold IoU (Intersection over Union) threshold used for the NMS step. - * @param minBoxSize Minimum allowed bounding box size before scaling, used for anchor box calculation. 
- * @param spatialScale Spatial scale between the input image and the last feature map. * */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class RPROIParams extends Pointer { @@ -54,14 +44,25 @@ public class RPROIParams extends Pointer { return new RPROIParams((Pointer)this).offsetAddress(i); } + /** Height of the output in pixels after ROI pooling on feature map. */ public native int poolingH(); public native RPROIParams poolingH(int setter); + /** Width of the output in pixels after ROI pooling on feature map. */ public native int poolingW(); public native RPROIParams poolingW(int setter); + /** Feature stride; ratio of input image size to feature map size. + * Assuming that max pooling layers in the neural network use square filters. */ public native int featureStride(); public native RPROIParams featureStride(int setter); + /** Number of proposals to keep before applying NMS. */ public native int preNmsTop(); public native RPROIParams preNmsTop(int setter); + /** Number of remaining proposals after applying NMS. */ public native int nmsMaxOut(); public native RPROIParams nmsMaxOut(int setter); + /** Number of anchor box ratios. */ public native int anchorsRatioCount(); public native RPROIParams anchorsRatioCount(int setter); + /** Number of anchor box scales. */ public native int anchorsScaleCount(); public native RPROIParams anchorsScaleCount(int setter); + /** IoU (Intersection over Union) threshold used for the NMS step. */ public native float iouThreshold(); public native RPROIParams iouThreshold(float setter); + /** Minimum allowed bounding box size before scaling, used for anchor box calculation. */ public native float minBoxSize(); public native RPROIParams minBoxSize(float setter); + /** Spatial scale between the input image and the last feature map. 
*/ public native float spatialScale(); public native RPROIParams spatialScale(float setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java index 83eecd3e543..dc55215dc69 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -22,14 +22,12 @@ /** - * \brief The Region plugin layer performs region proposal calculation: generate 5 bounding boxes per cell (for yolo9000, generate 3 bounding boxes per cell). - * For each box, calculating its probablities of objects detections from 80 pre-defined classifications (yolo9000 has 9418 pre-defined classifications, - * and these 9418 items are organized as work-tree structure). + * \brief The Region plugin layer performs region proposal calculation. + * + * Generate 5 bounding boxes per cell (for yolo9000, generate 3 bounding boxes per cell). + * For each box, calculating its probabilities of objects detections from 80 pre-defined classifications + * (yolo9000 has 9418 pre-defined classifications, and these 9418 items are organized as work-tree structure). * RegionParameters defines a set of parameters for creating the Region plugin layer. - * @param num Number of predicted bounding box for each grid cell. - * @param coords Number of coordinates for a bounding box. - * @param classes Number of classifications to be predicted. - * @param smTree Helping structure to do softmax on confidence scores. 
* */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class RegionParameters extends Pointer { @@ -49,8 +47,12 @@ public class RegionParameters extends Pointer { return new RegionParameters((Pointer)this).offsetAddress(i); } + /** Number of predicted bounding box for each grid cell. */ public native int num(); public native RegionParameters num(int setter); + /** Number of coordinates for a bounding box. */ public native int coords(); public native RegionParameters coords(int setter); + /** Number of classifications to be predicted. */ public native int classes(); public native RegionParameters classes(int setter); + /** Helping structure to do softmax on confidence scores. */ public native softmaxTree smTree(); public native RegionParameters smTree(softmaxTree setter); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java index 5a9bce4fbfd..3408cd7a0f2 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvinfer_plugin; @@ -22,7 +22,8 @@ /** - * \brief When performing yolo9000, softmaxTree is helping to do softmax on confidence scores, for element to get the precise classification through word-tree structured classification definition. + * \brief When performing yolo9000, softmaxTree is helping to do softmax on confidence scores, + * for element to get the precise classification through word-tree structured classification definition. 
* */ @Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class) public class softmaxTree extends Pointer { @@ -49,7 +50,6 @@ public class softmaxTree extends Pointer { public native IntPointer group(); public native softmaxTree group(IntPointer setter); public native @Cast("char*") BytePointer name(int i); public native softmaxTree name(int i, BytePointer setter); public native @Cast("char**") PointerPointer name(); public native softmaxTree name(PointerPointer setter); - public native int groups(); public native softmaxTree groups(int setter); public native IntPointer groupSize(); public native softmaxTree groupSize(IntPointer setter); public native IntPointer groupOffset(); public native softmaxTree groupOffset(IntPointer setter); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java index d19d5ba7e5b..948673a72f6 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvonnxparser; @@ -139,18 +139,6 @@ public class IParser extends Pointer { public native @Cast("bool") boolean supportsOperator(String op_name); public native @Cast("bool") boolean supportsOperator(@Cast("const char*") BytePointer op_name); - /** - * \brief destroy this object - * - * \warning deprecated and planned on being removed in TensorRT 10.0 - * */ - - - //! - //! - //! - public native @Deprecated void destroy(); - /** * \brief Get the number of errors that occurred during prior calls to * \p parse @@ -282,6 +270,28 @@ public class IParser extends Pointer { * * @return True if flag is set, false if unset. * */ + + + //! + //! + //! + //! + //! 
public native @Cast("bool") @NoException(true) boolean getFlag(OnnxParserFlag onnxParserFlag); public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvonnxparser::OnnxParserFlag") int onnxParserFlag); + + /** + * \brief Return the i-th output ITensor object for the ONNX layer "name". + * + * Return the i-th output ITensor object for the ONNX layer "name". + * If "name" is not found or i is out of range, return nullptr. + * In the case of multiple nodes sharing the same name this function will return + * the output tensors of the first instance of the node in the ONNX graph. + * + * @param name The name of the ONNX layer. + * + * @param i The index of the output. i must be in range [0, layer.num_outputs). + * */ + public native @Const ITensor getLayerOutputTensor(String name, @Cast("int64_t") long i); + public native @Const ITensor getLayerOutputTensor(@Cast("const char*") BytePointer name, @Cast("int64_t") long i); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java index 33dd653bf0c..d445ae7143a 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvonnxparser; @@ -35,42 +35,74 @@ public class IParserError extends Pointer { public IParserError(Pointer p) { super(p); } /** - * \brief the error code + * \brief the error code. * */ //! //! public native org.bytedeco.tensorrt.global.nvonnxparser.ErrorCode code(); /** - * \brief description of the error + * \brief description of the error. * */ //! //! public native String desc(); /** - * \brief source file in which the error occurred + * \brief source file in which the error occurred. * */ //! //! 
public native String file(); /** - * \brief source line at which the error occurred + * \brief source line at which the error occurred. * */ //! //! public native int line(); /** - * \brief source function in which the error occurred + * \brief source function in which the error occurred. * */ //! //! public native String func(); /** - * \brief index of the ONNX model node in which the error occurred + * \brief index of the ONNX model node in which the error occurred. * */ + + //! + //! public native int node(); + /** + * \brief name of the node in which the error occurred. + * */ + + //! + //! + public native String nodeName(); + /** + * \brief name of the node operation in which the error occurred. + * */ + + //! + //! + public native String nodeOperator(); + /** + * \brief A list of the local function names, from the top level down, constituting the current + * stack trace in which the error occurred. A top-level node that is not inside any + * local function would return a nullptr. + * */ + + //! + //! + public native @Cast("const char*const*") PointerPointer localFunctionStack(); + /** + * \brief The size of the stack of local functions at the point where the error occurred. + * A top-level node that is not inside any local function would correspond to */ + // a stack size of 0. 
+ /** */ + public native int localFunctionStackSize(); } diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java new file mode 100644 index 00000000000..2a5444965b6 --- /dev/null +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java @@ -0,0 +1,111 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.tensorrt.nvonnxparser; + +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.nvrtc.*; +import static org.bytedeco.cuda.global.nvrtc.*; +import org.bytedeco.tensorrt.nvinfer.*; +import static org.bytedeco.tensorrt.global.nvinfer.*; +import org.bytedeco.tensorrt.nvinfer_plugin.*; +import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; + +import static org.bytedeco.tensorrt.global.nvonnxparser.*; + + +/** + * \class IParserRefitter + * + * \brief An interface designed to refit weights from an ONNX model. + * */ +@Namespace("nvonnxparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvonnxparser.class) +public class IParserRefitter extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public IParserRefitter(Pointer p) { super(p); } + + /** + * \brief Load a serialized ONNX model from memory and perform weight refit. 
+ * + * @param serializedOnnxModel Pointer to the serialized ONNX model + * @param serializedOnnxModelSize Size of the serialized ONNX model + * in bytes + * @param modelPath Absolute path to the model file for loading external weights if required + * @return true if all the weights in the engine were refit successfully. + * + * The serialized ONNX model must be identical to the one used to generate the engine + * that will be refit. + * */ + + + //! + //! + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean refitFromBytes( + @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize, String modelPath/*=nullptr*/); + public native @Cast("bool") @NoException(true) boolean refitFromBytes( + @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize); + public native @Cast("bool") @NoException(true) boolean refitFromBytes( + @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize, @Cast("const char*") BytePointer modelPath/*=nullptr*/); + + /** + * \brief Load and parse a ONNX model from disk and perform weight refit. + * + * @param onnxModelFile Path to the ONNX model to load from disk. + * + * @return true if the model was loaded successfully, and if all the weights in the engine were refit successfully. + * + * The provided ONNX model must be identical to the one used to generate the engine + * that will be refit. + * */ + + + //! + //! + //! + public native @Cast("bool") @NoException(true) boolean refitFromFile(String onnxModelFile); + public native @Cast("bool") @NoException(true) boolean refitFromFile(@Cast("const char*") BytePointer onnxModelFile); + + /** + * \brief Get the number of errors that occurred during prior calls to \p refitFromBytes or \p refitFromFile + * + * @see getError() IParserError + * */ + + + //! + //! + //! 
+ public native @NoException(true) int getNbErrors(); + + /** + * \brief Get an error that occurred during prior calls to \p refitFromBytes or \p refitFromFile + * + * @see getNbErrors() IParserError + * */ + + + //! + //! + //! + public native @Const @NoException(true) IParserError getError(int index); + + /** + * \brief Clear errors from prior calls to \p refitFromBytes or \p refitFromFile + * + * @see getNbErrors() getError() IParserError + * */ + public native void clearErrors(); +} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java index 97a429f73bc..d1abb0984ef 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE package org.bytedeco.tensorrt.nvonnxparser; @@ -40,6 +40,8 @@ public class SubGraphCollection_t extends Pointer { public void clear() { resize(0); } public native void resize(@Cast("size_t") long n); + public SubGraph_t front() { return get(0); } + public SubGraph_t back() { return get(size() - 1); } @Index(function = "at") public native @ByRef SubGraph_t get(@Cast("size_t") long i); public native SubGraphCollection_t put(@Cast("size_t") long i, SubGraph_t value); diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java index 8c7d8962468..cadbb523f6c 100644 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java +++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT 
EDIT THIS FILE package org.bytedeco.tensorrt.nvonnxparser; diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java deleted file mode 100644 index 0c7b36671d6..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java +++ /dev/null @@ -1,46 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -@Namespace("nvuffparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class FieldCollection extends Pointer { - static { Loader.load(); } - /** Default native constructor. */ - public FieldCollection() { super((Pointer)null); allocate(); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public FieldCollection(long size) { super((Pointer)null); allocateArray(size); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public FieldCollection(Pointer p) { super(p); } - private native void allocate(); - private native void allocateArray(long size); - @Override public FieldCollection position(long position) { - return (FieldCollection)super.position(position); - } - @Override public FieldCollection getPointer(long i) { - return new FieldCollection((Pointer)this).offsetAddress(i); - } - - public native int nbFields(); public native FieldCollection nbFields(int setter); - public native @Const FieldMap fields(); public native FieldCollection fields(FieldMap setter); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java deleted file mode 100644 index 179854566a3..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java +++ /dev/null @@ -1,69 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -/** - * \class FieldMap - * - * \brief An array of field params used as a layer parameter for plugin layers. - * - * The node fields are passed by the parser to the API through the plugin - * constructor. 
The implementation of the plugin should parse the contents of - * the fieldMap as part of the plugin constructor - * */ -@Namespace("nvuffparser") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class FieldMap extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public FieldMap(Pointer p) { super(p); } - /** Native array allocator. Access with {@link Pointer#position(long)}. */ - public FieldMap(long size) { super((Pointer)null); allocateArray(size); } - private native void allocateArray(long size); - @Override public FieldMap position(long position) { - return (FieldMap)super.position(position); - } - @Override public FieldMap getPointer(long i) { - return new FieldMap((Pointer)this).offsetAddress(i); - } - - public native String name(); public native FieldMap name(String setter); - public native @Const Pointer data(); public native FieldMap data(Pointer setter); - public native FieldType type(); public native FieldMap type(FieldType setter); - public native int length(); public native FieldMap length(int setter); - - /** @deprecated Legacy constructor, retained for ABI compatibility. Deprecated in TensorRT 8.6. - * Use the default constructor instead. 
*/ - public FieldMap(String name, @Const Pointer data, FieldType type, int length/*=1*/) { super((Pointer)null); allocate(name, data, type, length); } - @Deprecated private native void allocate(String name, @Const Pointer data, FieldType type, int length/*=1*/); - public FieldMap(String name, @Const Pointer data, FieldType type) { super((Pointer)null); allocate(name, data, type); } - @Deprecated private native void allocate(String name, @Const Pointer data, FieldType type); - public FieldMap(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type, int length/*=1*/) { super((Pointer)null); allocate(name, data, type, length); } - @Deprecated private native void allocate(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type, int length/*=1*/); - public FieldMap(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type) { super((Pointer)null); allocate(name, data, type); } - @Deprecated private native void allocate(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type); - - /** Default constructor */ - public FieldMap() { super((Pointer)null); allocate(); } - private native void allocate(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java deleted file mode 100644 index 289d2a322f8..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java +++ /dev/null @@ -1,54 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import 
org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -/** - * \class IBinaryProtoBlob - * - * \brief Object used to store and query data extracted from a binaryproto file using the ICaffeParser. - * - * @see nvcaffeparser1::ICaffeParser - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. - * */ -@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class IBinaryProtoBlob extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IBinaryProtoBlob(Pointer p) { super(p); } - - public native @Const @NoException(true) Pointer getData(); - public native @ByVal @NoException(true) Dims4 getDimensions(); - - //! - //! - //! - public native @NoException(true) DataType getDataType(); - /** - * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. 
- * */ - public native @Deprecated @NoException(true) void destroy(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java deleted file mode 100644 index f3388e4b7e6..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java +++ /dev/null @@ -1,51 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -/** - * \class IBlobNameToTensor - * - * \brief Object used to store and query Tensors after they have been extracted from a Caffe model using the ICaffeParser. - * - * \note The lifetime of IBlobNameToTensor is the same as the lifetime of its parent ICaffeParser. - * - * @see nvcaffeparser1::ICaffeParser - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. - * */ -@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class IBlobNameToTensor extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ - public IBlobNameToTensor(Pointer p) { super(p); } - - /** \brief Given a blob name, returns a pointer to a ITensor object. - * - * @param name Caffe blob name for which the user wants the corresponding ITensor. - * - * @return ITensor* corresponding to the queried name. If no such ITensor exists, then nullptr is returned. - * */ - public native @NoException(true) ITensor find(String name); - public native @NoException(true) ITensor find(@Cast("const char*") BytePointer name); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java deleted file mode 100644 index 5fe9f2d4eb9..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java +++ /dev/null @@ -1,207 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - -/** - * \class ICaffeParser - * - * \brief Class used for parsing Caffe models. - * - * Allows users to export models trained using Caffe to TRT. - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. 
- * */ -@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class ICaffeParser extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public ICaffeParser(Pointer p) { super(p); } - - /** - * \brief Parse a prototxt file and a binaryproto Caffe model to extract - * network definition and weights associated with the network, respectively. - * - * @param deploy The plain text, prototxt file used to define the network definition. - * @param model The binaryproto Caffe model that contains the weights associated with the network. - * @param network Network in which the CaffeParser will fill the layers. - * @param weightType The type to which the weights will transformed. - * - * @return A pointer to an IBlobNameToTensor object that contains the extracted data. - * - * @see nvcaffeparser1::IBlobNameToTensor - * */ - - - //! - //! - //! - //! - //! - public native @Const @NoException(true) IBlobNameToTensor parse(String deploy, String model, @ByRef INetworkDefinition network, - DataType weightType); - public native @Const @NoException(true) IBlobNameToTensor parse(@Cast("const char*") BytePointer deploy, @Cast("const char*") BytePointer model, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightType); - - /** - * \brief Parse a deploy prototxt and a binaryproto Caffe model from memory buffers to extract - * network definition and weights associated with the network, respectively. - * - * @param deployBuffer The plain text deploy prototxt used to define the network definition. - * @param deployLength The length of the deploy buffer. - * @param modelBuffer The binaryproto Caffe memory buffer that contains the weights associated with the network. - * @param modelLength The length of the model buffer. - * @param network Network in which the CaffeParser will fill the layers. 
- * @param weightType The type to which the weights will transformed. - * - * @return A pointer to an IBlobNameToTensor object that contains the extracted data. - * - * @see nvcaffeparser1::IBlobNameToTensor - * */ - - - //! - //! - //! - //! - //! - //! - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") BytePointer deployBuffer, @Cast("std::size_t") long deployLength, - @Cast("const uint8_t*") BytePointer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - DataType weightType); - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") ByteBuffer deployBuffer, @Cast("std::size_t") long deployLength, - @Cast("const uint8_t*") ByteBuffer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightType); - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") byte[] deployBuffer, @Cast("std::size_t") long deployLength, - @Cast("const uint8_t*") byte[] modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - DataType weightType); - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") BytePointer deployBuffer, @Cast("std::size_t") long deployLength, - @Cast("const uint8_t*") BytePointer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightType); - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") ByteBuffer deployBuffer, @Cast("std::size_t") long deployLength, - @Cast("const uint8_t*") ByteBuffer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - DataType weightType); - public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") byte[] deployBuffer, @Cast("std::size_t") long 
deployLength, - @Cast("const uint8_t*") byte[] modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightType); - - /** - * \brief Parse and extract data stored in binaryproto file. - * - * The binaryproto file contains data stored in a binary blob. parseBinaryProto() converts it - * to an IBinaryProtoBlob object which gives the user access to the data and meta-data about data. - * - * @param fileName Path to file containing binary proto. - * - * @return A pointer to an IBinaryProtoBlob object that contains the extracted data. - * - * @see nvcaffeparser1::IBinaryProtoBlob - * */ - - - //! - //! - //! - //! - public native @NoException(true) IBinaryProtoBlob parseBinaryProto(String fileName); - public native @NoException(true) IBinaryProtoBlob parseBinaryProto(@Cast("const char*") BytePointer fileName); - - /** - * \brief Set buffer size for the parsing and storage of the learned model. - * - * @param size The size of the buffer specified as the number of bytes. - * - * \note Default size is 2^30 bytes. - * */ - - - //! - //! - //! - //! - public native @NoException(true) void setProtobufBufferSize(@Cast("size_t") long size); - - /** - * \brief Destroy this ICaffeParser object. - * - * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}. - * - * \warning Calling destroy on a managed pointer will result in a double-free error. - * */ - - - //! - //! - //! - public native @Deprecated @NoException(true) void destroy(); - - /** - * \brief Set the IPluginFactoryV2 used to create the user defined pluginV2 objects. - * - * @param factory Pointer to an instance of the user implementation of IPluginFactoryV2. - * */ - - - //! - //! - public native @NoException(true) void setPluginFactoryV2(IPluginFactoryV2 factory); - - /** - * \brief Set the namespace used to lookup and create plugins in the network. 
- * */ - public native @NoException(true) void setPluginNamespace(String libNamespace); - public native @NoException(true) void setPluginNamespace(@Cast("const char*") BytePointer libNamespace); - /** - * \brief Set the ErrorRecorder for this interface - * - * Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution. - * This function will call incRefCount of the registered ErrorRecorder at least once. Setting - * recorder to nullptr unregisters the recorder with the interface, resulting in a call to decRefCount if - * a recorder has been registered. - * - * If an error recorder is not set, messages will be sent to the global log stream. - * - * @param recorder The error recorder to register with this interface. - * - * @see getErrorRecorder() - * */ - - - //! - //! - //! - //! - //! - public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); - - /** - * \brief get the ErrorRecorder assigned to this interface. - * - * Retrieves the assigned error recorder object for the given class. A - * nullptr will be returned if setErrorRecorder has not been called. - * - * @return A pointer to the IErrorRecorder object that has been registered. 
- * - * @see setErrorRecorder() - * */ - public native @NoException(true) IErrorRecorder getErrorRecorder(); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java deleted file mode 100644 index 1233e593f9d..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java +++ /dev/null @@ -1,66 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -/** - * \class IPluginFactoryV2 - * - * \brief Plugin factory used to configure plugins. - * */ -@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class IPluginFactoryV2 extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IPluginFactoryV2(Pointer p) { super(p); } - - /** - * \brief A user implemented function that determines if a layer configuration is provided by an IPluginV2. - * - * @param layerName Name of the layer which the user wishes to validate. - * */ - - - //! - //! - //! 
- public native @Cast("bool") @NoException(true) boolean isPluginV2(String layerName); - public native @Cast("bool") @NoException(true) boolean isPluginV2(@Cast("const char*") BytePointer layerName); - - /** - * \brief Creates a plugin. - * - * @param layerName Name of layer associated with the plugin. - * @param weights Weights used for the layer. - * @param nbWeights Number of weights. - * @param libNamespace Library Namespace associated with the plugin object - * */ - public native @NoException(true) IPluginV2 createPlugin(String layerName, @Const Weights weights, - int nbWeights, String libNamespace/*=""*/); - public native @NoException(true) IPluginV2 createPlugin(String layerName, @Const Weights weights, - int nbWeights); - public native @NoException(true) IPluginV2 createPlugin(@Cast("const char*") BytePointer layerName, @Const Weights weights, - int nbWeights, @Cast("const char*") BytePointer libNamespace/*=""*/); - public native @NoException(true) IPluginV2 createPlugin(@Cast("const char*") BytePointer layerName, @Const Weights weights, - int nbWeights); -} diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java deleted file mode 100644 index 5a9d9ba669e..00000000000 --- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java +++ /dev/null @@ -1,180 +0,0 @@ -// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE - -package org.bytedeco.tensorrt.nvparsers; - -import java.nio.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.annotation.*; - -import static org.bytedeco.javacpp.presets.javacpp.*; -import org.bytedeco.cuda.cudart.*; -import static org.bytedeco.cuda.global.cudart.*; -import org.bytedeco.cuda.cublas.*; -import static org.bytedeco.cuda.global.cublas.*; -import org.bytedeco.cuda.cudnn.*; -import static org.bytedeco.cuda.global.cudnn.*; -import org.bytedeco.cuda.nvrtc.*; -import static 
org.bytedeco.cuda.global.nvrtc.*; -import org.bytedeco.tensorrt.nvinfer.*; -import static org.bytedeco.tensorrt.global.nvinfer.*; -import org.bytedeco.tensorrt.nvinfer_plugin.*; -import static org.bytedeco.tensorrt.global.nvinfer_plugin.*; - -import static org.bytedeco.tensorrt.global.nvparsers.*; - - -/** - * \class IUffParser - * - * \brief Class used for parsing models described using the UFF format. - * - * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI. - * */ -@Namespace("nvuffparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class) -public class IUffParser extends Pointer { - static { Loader.load(); } - /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ - public IUffParser(Pointer p) { super(p); } - - /** - * \brief Register an input name of a UFF network with the associated Dimensions. - * - * @param inputName Input name. - * @param inputDims Input dimensions. - * @param inputOrder Input order on which the framework input was originally. - * */ - - - //! - //! - //! - public native @Cast("bool") @NoException(true) boolean registerInput(String inputName, @ByVal @Cast("nvinfer1::Dims*") Dims32 inputDims, UffInputOrder inputOrder); - public native @Cast("bool") @NoException(true) boolean registerInput(@Cast("const char*") BytePointer inputName, @ByVal @Cast("nvinfer1::Dims*") Dims32 inputDims, @Cast("nvuffparser::UffInputOrder") int inputOrder); - - /** - * \brief Register an output name of a UFF network. - * - * @param outputName Output name. - * */ - - - //! - //! - //! - public native @Cast("bool") @NoException(true) boolean registerOutput(String outputName); - public native @Cast("bool") @NoException(true) boolean registerOutput(@Cast("const char*") BytePointer outputName); - - /** - * \brief Parse a UFF file. - * - * @param file File name of the UFF file. - * @param network Network in which the UFFParser will fill the layers. 
- * @param weightsType The type on which the weights will transformed in. - * */ - - - //! - //! - //! - public native @Cast("bool") @NoException(true) boolean parse(String file, @ByRef INetworkDefinition network, - DataType weightsType/*=nvinfer1::DataType::kFLOAT*/); - public native @Cast("bool") @NoException(true) boolean parse(String file, @ByRef INetworkDefinition network); - public native @Cast("bool") @NoException(true) boolean parse(@Cast("const char*") BytePointer file, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightsType/*=nvinfer1::DataType::kFLOAT*/); - public native @Cast("bool") @NoException(true) boolean parse(@Cast("const char*") BytePointer file, @ByRef INetworkDefinition network); - - /** - * \brief Parse a UFF buffer, useful if the file already live in memory. - * - * @param buffer Buffer of the UFF file. - * @param size Size of buffer of the UFF file. - * @param network Network in which the UFFParser will fill the layers. - * @param weightsType The type on which the weights will transformed in. - * */ - - - //! - //! - public native @Cast("bool") @NoException(true) boolean parseBuffer(String buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network, - DataType weightsType/*=nvinfer1::DataType::kFLOAT*/); - public native @Cast("bool") @NoException(true) boolean parseBuffer(String buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network); - public native @Cast("bool") @NoException(true) boolean parseBuffer(@Cast("const char*") BytePointer buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network, - @Cast("nvinfer1::DataType") int weightsType/*=nvinfer1::DataType::kFLOAT*/); - public native @Cast("bool") @NoException(true) boolean parseBuffer(@Cast("const char*") BytePointer buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network); - - /** - * @deprecated Use {@code delete} instead. Deprecated in TRT 8.0. - * */ - - - //! - //! 
- public native @Deprecated @NoException(true) void destroy(); - - /** - * \brief Return Version Major of the UFF. - * */ - - - //! - //! - public native @NoException(true) int getUffRequiredVersionMajor(); - - /** - * \brief Return Version Minor of the UFF. - * */ - - - //! - //! - public native @NoException(true) int getUffRequiredVersionMinor(); - - /** - * \brief Return Patch Version of the UFF. - * */ - - - //! - //! - public native @NoException(true) int getUffRequiredVersionPatch(); - - /** - * \brief Set the namespace used to lookup and create plugins in the network. - * */ - public native @NoException(true) void setPluginNamespace(String libNamespace); - public native @NoException(true) void setPluginNamespace(@Cast("const char*") BytePointer libNamespace); - /** - * \brief Set the ErrorRecorder for this interface - * - * Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution. - * This function will call incRefCount of the registered ErrorRecorder at least once. Setting - * recorder to nullptr unregisters the recorder with the interface, resulting in a call to decRefCount if - * a recorder has been registered. - * - * If an error recorder is not set, messages will be sent to the global log stream. - * - * @param recorder The error recorder to register with this interface. */ - // - /** @see getErrorRecorder() - /** */ - - - //! - //! - //! - //! - //! - public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder); - - /** - * \brief get the ErrorRecorder assigned to this interface. - * - * Retrieves the assigned error recorder object for the given class. A - * nullptr will be returned if setErrorRecorder has not been called. - * - * @return A pointer to the IErrorRecorder object that has been registered. 
- * - * @see setErrorRecorder() - * */ - public native @NoException(true) IErrorRecorder getErrorRecorder(); -} diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java index 037c418d2b8..9043f0d247d 100644 --- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java +++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2023 Samuel Audet + * Copyright (C) 2018-2024 Samuel Audet * * Licensed either under the Apache License, Version 2.0, or (at your option) * under the terms of the GNU General Public License as published by @@ -48,9 +48,10 @@ value = {"linux-arm64", "linux-ppc64le", "linux-x86_64", "windows-x86_64"}, compiler = "cpp11", include = {"NvInferVersion.h", "NvInferRuntimeBase.h", "NvInferRuntimePlugin.h", "NvInferRuntimeCommon.h", - "NvInferLegacyDims.h", "NvInferRuntime.h", "NvInfer.h", "NvInferImpl.h", "NvUtils.h"}, - link = "nvinfer@.8", - preload = "nvinfer_builder_resource@.8.6.1" + "NvInferLegacyDims.h", "NvInferRuntime.h", "NvInfer.h", "NvInferImpl.h"/*, "NvUtils.h"*/}, + exclude = "NvInferRuntimeBase.h", + link = "nvinfer@.10", + preload = "nvinfer_builder_resource@.10.0.1" ), @Platform( value = "linux-arm64", @@ -123,10 +124,12 @@ public void map(InfoMap infoMap) { .put(new Info("std::size_t").cast().valueTypes("long").pointerTypes("LongPointer", "LongBuffer", "long[]")) .put(new Info("const char", "nvinfer1::AsciiChar").pointerTypes("String", "@Cast(\"const char*\") BytePointer")) - .put(new Info("nvinfer1::IErrorRecorder::ErrorDesc").valueTypes("String", "@Cast(\"const char*\") BytePointer")) + .put(new Info("nvinfer1::IErrorRecorder::ErrorDesc", "nvinfer1::InterfaceKind", + "nvinfer1::v_1_0::IErrorRecorder::ErrorDesc").valueTypes("String", "@Cast(\"const char*\") BytePointer")) + .put(new Info("nvinfer1::NetworkDefinitionCreationFlags").cast().valueTypes("int")) .put(new 
Info("nvinfer1::PluginFormat").cast().valueTypes("TensorFormat", "int").pointerTypes("IntPointer", "IntBuffer", "int[]")) .put(new Info("nvinfer1::safe::IPluginRegistry").pointerTypes("SafeIPluginRegistry")) - .put(new Info("nvinfer1::EnumMax", "nvinfer1::EnumMaxImpl").skip()) + .put(new Info("nvinfer1::EnumMax", "nvinfer1::EnumMaxImpl", "nvinfer1::v_1_0::IPluginResource::operator =").skip()) .put(new Info("nvinfer1::Weights::values").javaText("public native @Const Pointer values(); public native Weights values(Pointer values);")) .put(new Info("nvinfer1::IDimensionExpr", "nvinfer1::IExprBuilder", "nvinfer1::IOptimizationProfile", "nvinfer1::ITensor", "nvinfer1::ILayer", "nvinfer1::IConvolutionLayer", "nvinfer1::IFullyConnectedLayer", "nvinfer1::IActivationLayer", "nvinfer1::IPoolingLayer", @@ -140,12 +143,15 @@ public void map(InfoMap infoMap) { "nvinfer1::IAssertionLayer", "nvinfer1::IConditionLayer", "nvinfer1::IEinsumLayer", "nvinfer1::IIfConditional", "nvinfer1::IIfConditionalBoundaryLayer", "nvinfer1::IIfConditionalInputLayer", "nvinfer1::IIfConditionalOutputLayer", "nvinfer1::IScatterLayer", "nvinfer1::IAlgorithmIOInfo", "nvinfer1::IAlgorithmVariant", "nvinfer1::IAlgorithmContext", "nvinfer1::IAlgorithm", "nvinfer1::ICastLayer", - "nvinfer1::IGridSampleLayer", "nvinfer1::INMSLayer", "nvinfer1::INonZeroLayer", "nvinfer1::INormalizationLayer", "nvinfer1::IReverseSequenceLayer").purify()) + "nvinfer1::IGridSampleLayer", "nvinfer1::INMSLayer", "nvinfer1::INonZeroLayer", "nvinfer1::INormalizationLayer", "nvinfer1::IReverseSequenceLayer", + "nvinfer1::IPluginV3Layer").purify()) .put(new Info("nvinfer1::IGpuAllocator::free").javaNames("_free")) .put(new Info("nvinfer1::IGpuAllocator", "nvinfer1::IProfiler", "nvinfer1::ILogger", "nvinfer1::IInt8Calibrator", "nvinfer1::IInt8EntropyCalibrator", - "nvinfer1::IInt8EntropyCalibrator2", "nvinfer1::IInt8MinMaxCalibrator", "nvinfer1::IInt8LegacyCalibrator").virtualize()) + "nvinfer1::IInt8EntropyCalibrator2", 
"nvinfer1::IInt8MinMaxCalibrator", "nvinfer1::IInt8LegacyCalibrator", "nvinfer1::IVersionedInterface").virtualize()) .put(new Info("nvinfer1::IPluginRegistry::getPluginCreatorList").javaText( "public native @Cast(\"nvinfer1::IPluginCreator*const*\") PointerPointer getPluginCreatorList(IntPointer numCreators);")) + .put(new Info("nvinfer1::IPluginRegistry::getAllCreators").javaText( + "public native @Cast(\"nvinfer1::IPluginCreatorInterface*const*\") @NoException(true) PointerPointer getAllCreators(IntPointer numCreators);")) ; } } diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java index bd450875e6b..f6dc6f08530 100644 --- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java +++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018-2022 Samuel Audet + * Copyright (C) 2018-2024 Samuel Audet * * Licensed either under the Apache License, Version 2.0, or (at your option) * under the terms of the GNU General Public License as published by @@ -36,7 +36,7 @@ inherit = nvinfer.class, value = @Platform( include = {"NvInferPlugin.h", "NvInferPluginUtils.h"}, - link = "nvinfer_plugin@.8"), + link = "nvinfer_plugin@.10"), target = "org.bytedeco.tensorrt.nvinfer_plugin", global = "org.bytedeco.tensorrt.global.nvinfer_plugin") public class nvinfer_plugin implements InfoMapper { diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java index 3fc901ee904..aede025be26 100644 --- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java +++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2019-2023 Samuel Audet + * Copyright (C) 2019-2024 Samuel Audet * * Licensed either under the Apache License, Version 2.0, or (at your 
option) * under the terms of the GNU General Public License as published by @@ -36,7 +36,8 @@ inherit = nvinfer_plugin.class, value = @Platform( include = "NvOnnxParser.h", - link = "nvonnxparser@.8"), + link = "nvonnxparser@.10", + preload = "nvinfer_vc_plugin@.10"), target = "org.bytedeco.tensorrt.nvonnxparser", global = "org.bytedeco.tensorrt.global.nvonnxparser") public class nvonnxparser implements InfoMapper { diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java index b8a9a586f7f..46a1e39fd28 100644 --- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java +++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java @@ -32,13 +32,13 @@ * * @author Samuel Audet */ -@Properties( - inherit = nvinfer_plugin.class, - value = @Platform( - include = {"NvCaffeParser.h", "NvUffParser.h"}, - link = "nvparsers@.8"), - target = "org.bytedeco.tensorrt.nvparsers", - global = "org.bytedeco.tensorrt.global.nvparsers") +//@Properties( +// inherit = nvinfer_plugin.class, +// value = @Platform( +// include = {"NvCaffeParser.h", "NvUffParser.h"}, +// link = "nvparsers@.8"), +// target = "org.bytedeco.tensorrt.nvparsers", +// global = "org.bytedeco.tensorrt.global.nvparsers") public class nvparsers implements InfoMapper { public void map(InfoMap infoMap) { infoMap.put(new Info("nvuffparser::IPluginFactory").pointerTypes("IUffPluginFactory")) diff --git a/tritonserver/README.md b/tritonserver/README.md index 2964ad4a30c..a8c5df099db 100644 --- a/tritonserver/README.md +++ b/tritonserver/README.md @@ -23,7 +23,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * Triton Inference Server 2.41.0 https://github.com/triton-inference-server/server + * Triton Inference Server 2.44.0 https://github.com/triton-inference-server/server Please refer to the parent README.md file for more detailed information about the 
JavaCPP Presets. @@ -51,9 +51,9 @@ This sample intends to show how to call the Java-mapped C API of Triton to execu 1. Get the source code of Triton Inference Server to prepare the model repository: ```bash - $ wget https://github.com/triton-inference-server/server/archive/refs/tags/v2.41.0.tar.gz - $ tar zxvf v2.41.0.tar.gz - $ cd server-2.41.0/docs/examples/model_repository + $ wget https://github.com/triton-inference-server/server/archive/refs/tags/v2.44.0.tar.gz + $ tar zxvf v2.44.0.tar.gz + $ cd server-2.44.0/docs/examples/model_repository $ mkdir models $ cd models; cp -a ../simple . ``` @@ -61,7 +61,7 @@ Now, this `models` directory will be our model repository. 2. Start the Docker container to run the sample (assuming we are under the `models` directory created above): ```bash - $ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/tritonserver:23.12-py3 bash + $ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/tritonserver:24.03-py3 bash $ apt update $ apt install -y openjdk-11-jdk $ wget https://archive.apache.org/dist/maven/maven-3/3.8.4/binaries/apache-maven-3.8.4-bin.tar.gz diff --git a/tritonserver/cppbuild.sh b/tritonserver/cppbuild.sh index 02e9a5fa949..3a3315554df 100755 --- a/tritonserver/cppbuild.sh +++ b/tritonserver/cppbuild.sh @@ -11,9 +11,9 @@ INCLUDE_DEVELOPER_TOOLS_SERVER=${INCLUDE_DEVELOPER_TOOLS_SERVER:=1} if [[ ! -f "/opt/tritonserver/include/triton/developer_tools/generic_server_wrapper.h" ]] && [[ ! 
-f "/opt/tritonserver/lib/libtritondevelopertoolsserver.so" ]] && [[ ${INCLUDE_DEVELOPER_TOOLS_SERVER} -ne 0 ]]; then TOOLS_BRANCH=${TOOLS_BRANCH:="https://github.com/triton-inference-server/developer_tools.git"} - TOOLS_BRANCH_TAG=${TOOLS_BRANCH_TAG:="r23.12"} + TOOLS_BRANCH_TAG=${TOOLS_BRANCH_TAG:="r24.03"} TRITON_CORE_REPO=${TRITON_CORE_REPO:="https://github.com/triton-inference-server/core.git"} - TRITON_CORE_REPO_TAG=${TRITON_CORE_REPO_TAG="r23.12"} + TRITON_CORE_REPO_TAG=${TRITON_CORE_REPO_TAG="r24.03"} TRITON_HOME="/opt/tritonserver" BUILD_HOME="$PWD"/tritonbuild mkdir -p ${BUILD_HOME} && cd ${BUILD_HOME} diff --git a/tritonserver/platform/pom.xml b/tritonserver/platform/pom.xml index 1778ae8c87c..4c6b5bf447b 100644 --- a/tritonserver/platform/pom.xml +++ b/tritonserver/platform/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tritonserver-platform - 2.41-${project.parent.version} + 2.44-${project.parent.version} JavaCPP Presets Platform for Triton Inference Server diff --git a/tritonserver/platform/redist/pom.xml b/tritonserver/platform/redist/pom.xml index 6a778c39536..cab51cb9da6 100644 --- a/tritonserver/platform/redist/pom.xml +++ b/tritonserver/platform/redist/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tritonserver-platform-redist - 2.41-${project.parent.version} + 2.44-${project.parent.version} JavaCPP Presets Platform Redist for Triton Inference Server diff --git a/tritonserver/pom.xml b/tritonserver/pom.xml index 4349d4eedb2..6511114d7e5 100644 --- a/tritonserver/pom.xml +++ b/tritonserver/pom.xml @@ -11,7 +11,7 @@ org.bytedeco tritonserver - 2.41-${project.parent.version} + 2.44-${project.parent.version} JavaCPP Presets for Triton Inference Server diff --git a/tritonserver/samples/simple/pom.xml b/tritonserver/samples/simple/pom.xml index 74ec8ec2dc6..6c0faaaaf93 100644 --- a/tritonserver/samples/simple/pom.xml +++ b/tritonserver/samples/simple/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tritonserver-platform - 2.41-1.5.11-SNAPSHOT + 2.44-1.5.11-SNAPSHOT 
shaded diff --git a/tritonserver/samples/simplecpp/pom.xml b/tritonserver/samples/simplecpp/pom.xml index 39f7f1066c0..0328fde73f5 100644 --- a/tritonserver/samples/simplecpp/pom.xml +++ b/tritonserver/samples/simplecpp/pom.xml @@ -12,7 +12,7 @@ org.bytedeco tritonserver-platform - 2.41-1.5.11-SNAPSHOT + 2.44-1.5.11-SNAPSHOT shaded diff --git a/tritonserver/samples/unsupported/pom.xml b/tritonserver/samples/unsupported/pom.xml index 2b7d378b2c6..4ec045ccb45 100644 --- a/tritonserver/samples/unsupported/pom.xml +++ b/tritonserver/samples/unsupported/pom.xml @@ -18,12 +18,12 @@ org.bytedeco tensorrt-platform - 8.6-1.5.11-SNAPSHOT + 10.0-1.5.11-SNAPSHOT org.bytedeco tritonserver-platform - 2.41-1.5.11-SNAPSHOT + 2.44-1.5.11-SNAPSHOT shaded