{@code +/** #include "tensorflow/lite/c/c_api_types.h" +/** }+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on // IWYU pragma: private, include "third_party/tensorflow/lite/c/c_api_types.h" @@ -357,9 +365,13 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl // #ifdef __cplusplus // #endif -/** \addtogroup c_api_types tensorflow/lite/c/c_api_types.h +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \defgroup c_api_types lite/c/c_api_types.h * \{ */ +// NOLINTEND(whitespace/line_length) +// clang-format on // Define TFL_CAPI_EXPORT macro to export a function properly with a shared // library. @@ -468,6 +480,7 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl * TfLiteDelegate; allows delegation of nodes to alternative backends. * For TF Lite in Play Services, this is an opaque type, * but for regular TF Lite, this is just a typedef for TfLiteDelegate. + * * WARNING: This is an experimental type and subject to change. */ // #if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE // #else @@ -529,11 +542,10 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// \warning Note: Users of TensorFlow Lite should not include this file -// directly, but should instead include -// "third_party/tensorflow/lite/c/c_api.h". Only the TensorFlow Lite -// implementation itself should include this -// file directly. +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/c_api.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. 
// #ifndef TENSORFLOW_LITE_CORE_C_C_API_H_ // #define TENSORFLOW_LITE_CORE_C_C_API_H_ @@ -555,9 +567,10 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl /// /// /// +/// // #include "tensorflow/lite/core/c/registration_external.h" // IWYU pragma: export -/** C API for TensorFlow Lite: +/** C API for TensorFlow Lite. * * The API leans towards simplicity and uniformity instead of convenience, as * most usage will be by language-specific wrappers. It provides largely the @@ -601,7 +614,17 @@ public class tensorflowlite extends org.bytedeco.tensorflowlite.presets.tensorfl * TfLiteInterpreterDelete(interpreter); * TfLiteInterpreterOptionsDelete(options); * TfLiteModelDelete(model); - * */ + * + * */ +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \note Users of TensorFlow Lite should use +/**
{@code +/** #include "tensorflow/lite/c/c_api.h" +/** }+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on // #ifdef __cplusplus // Targeting ../TfLiteModel.java @@ -798,7 +821,6 @@ public static native void TfLiteInterpreterOptionsSetErrorReporter( * interpreter's lifetime. * \warning This is an experimental API and subject to change. */ -/// /// public static native void TfLiteInterpreterOptionsAddRegistrationExternal( TfLiteInterpreterOptions options, @@ -808,9 +830,7 @@ public static native void TfLiteInterpreterOptionsAddRegistrationExternal( * {@code TfLiteInterpreterCancel}. * * By default it is disabled and calling to {@code TfLiteInterpreterCancel} will - * return kTfLiteError. See {@code TfLiteInterpreterCancel}. - * - * \warning This is an experimental API and subject to change. */ + * return kTfLiteError. See {@code TfLiteInterpreterCancel}. */ /// /// @@ -1001,7 +1021,6 @@ public static native int TfLiteInterpreterGetOutputTensorCount( * The ownership of the tensor remains with the TFLite runtime, meaning the * caller should not deallocate the pointer. */ -/// /// /// public static native TfLiteTensor TfLiteInterpreterGetTensor(@Const TfLiteInterpreter interpreter, @@ -1016,9 +1035,7 @@ public static native TfLiteTensor TfLiteInterpreterGetTensor(@Const TfLiteInterp * Non-blocking and thread safe. * * Returns kTfLiteError if cancellation is not enabled via - * {@code TfLiteInterpreterOptionsEnableCancellation}. - * - * \warning This is an experimental API and subject to change. */ + * {@code TfLiteInterpreterOptionsEnableCancellation}. */ /// /// @@ -1398,9 +1415,9 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( // #endif // TENSORFLOW_LITE_CORE_C_REGISTRATION_EXTERNAL_H_ -// Parsed from tensorflow/lite/c/c_api_experimental.h +// Parsed from tensorflow/lite/c/common.h -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1414,17 +1431,30 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// #ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// #define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -// #include "tensorflow/lite/core/c/c_api_experimental.h" +/** \file +/** +/** This file defines common C types and APIs for implementing operations, +/** delegates and other constructs in TensorFlow Lite. The actual operations and +/** delegates can be defined using C++, but the interface between the +/** interpreter and the operations are C. +/** +/** For documentation, see tensorflow/lite/core/c/common.h. +/** +/** See also c_api_opaque.h which has more ABI-stable variants of some of these +/** APIs. */ -// #endif // TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ +// #ifndef TENSORFLOW_LITE_C_COMMON_H_ +// #define TENSORFLOW_LITE_C_COMMON_H_ +// #include "tensorflow/lite/core/c/common.h" -// Parsed from tensorflow/lite/core/c/c_api_experimental.h +// #endif // TENSORFLOW_LITE_C_COMMON_H_ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +// Parsed from tensorflow/lite/core/c/common.h + +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1438,1101 +1468,1189 @@ public static native void TfLiteRegistrationExternalSetInplaceOperator( See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -/** WARNING: Users of TensorFlow Lite should not include this file directly, -/** but should instead include -/** "third_party/tensorflow/lite/c/c_api_experimental.h". -/** Only the TensorFlow Lite implementation itself should include this -/** file directly. */ -// #ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ -// #define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ - -// #include "tensorflow/lite/builtin_ops.h" -// #include "tensorflow/lite/core/c/c_api.h" -// #include "tensorflow/lite/core/c/common.h" - -// #ifdef __cplusplus -// #endif // __cplusplus - -// -------------------------------------------------------------------------- -/** Resets all variable tensors to zero. - * - * WARNING: This is an experimental API and subject to change. */ - -/// -/// -/// -/// -public static native @Cast("TfLiteStatus") int TfLiteInterpreterResetVariableTensors( - TfLiteInterpreter interpreter); +// WARNING: Users of TensorFlow Lite should not include this file directly, but +// should instead include "third_party/tensorflow/lite/c/common.h". +// Only the TensorFlow Lite implementation itself should include this file +// directly. -/** Adds an op registration for a builtin operator. - * - * Op registrations are used to map ops referenced in the flatbuffer model - * to executable function pointers ({@code TfLiteRegistration}s). - * - * NOTE: The interpreter will make a shallow copy of {@code registration} internally, - * so the caller should ensure that its contents (function pointers, etc...) - * remain valid for the duration of the interpreter's lifetime. A common - * practice is making the provided {@code TfLiteRegistration} instance static. - * - * Code that uses this function should NOT call - * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same - * options object. - * - * WARNING: This is an experimental API and subject to change. 
*/ +/** This file defines common C types and APIs for implementing operations, +/** delegates and other constructs in TensorFlow Lite. The actual operations and +/** delegates can be defined using C++, but the interface between the +/** interpreter and the operations are C. +/** +/** Summary of abstractions: +/** * {@code TF_LITE_ENSURE} - self-sufficient error checking +/** * {@code TfLiteStatus} - status reporting +/** * {@code TfLiteIntArray} - stores tensor shapes (dims), +/** * {@code TfLiteContext} - allows an op to access the tensors +/** * {@code TfLiteTensor} - tensor (a multidimensional array) +/** * {@code TfLiteNode} - a single node or operation +/** * {@code TfLiteRegistration} - the implementation of a conceptual operation. +/** * {@code TfLiteDelegate} - allows delegation of nodes to alternative backends. +/** +/** Some abstractions in this file are created and managed by Interpreter. +/** +/** NOTE: The order of values in these structs are "semi-ABI stable". New values +/** should be added only to the end of structs and never reordered. +/** */ +// clang-format off +// NOLINTBEGIN(whitespace/line_length) +/** \note Users of TensorFlow Lite should use +/**
{@code +/** #include "tensorflow/lite/c/common.h" +/** }+/** to access the APIs documented on this page. */ +// NOLINTEND(whitespace/line_length) +// clang-format on -/// -/// -/// -/// -/// -public static native void TfLiteInterpreterOptionsAddBuiltinOp( - TfLiteInterpreterOptions options, @Cast("TfLiteBuiltinOperator") int op, - @Const TfLiteRegistration registration, int min_version, - int max_version); +// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h" -/** Adds an op registration for a custom operator. - * - * Op registrations are used to map ops referenced in the flatbuffer model - * to executable function pointers ({@code TfLiteRegistration}s). - * - * NOTE: The interpreter will make a shallow copy of {@code registration} internally, - * so the caller should ensure that its contents (function pointers, etc...) - * remain valid for the duration of any created interpreter's lifetime. A - * common practice is making the provided {@code TfLiteRegistration} instance static. - * - * The lifetime of the string pointed to by {@code name} must be at least as long - * as the lifetime of the {@code TfLiteInterpreterOptions}. - * - * Code that uses this function should NOT call - * {@code TfLiteInterpreterOptionsSetOpResolver} (or related functions) on the same - * options object. - * - * WARNING: This is an experimental API and subject to change. */ +// #ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ +// #define TENSORFLOW_LITE_CORE_C_COMMON_H_ -/// -/// -/// -/// -/// -public static native void TfLiteInterpreterOptionsAddCustomOp( - TfLiteInterpreterOptions options, @Cast("const char*") BytePointer name, - @Const TfLiteRegistration registration, int min_version, - int max_version); -public static native void TfLiteInterpreterOptionsAddCustomOp( - TfLiteInterpreterOptions options, String name, - @Const TfLiteRegistration registration, int min_version, - int max_version); -// Targeting ../Find_builtin_op_external_Pointer_int_int.java +// #include
- * Attempts to cancel in flight invocation if any.
- * This will not affect calls to {@code Invoke} that happend after this.
- * Non blocking and thread safe.
- * Returns kTfLiteError if cancellation is not enabled, otherwise returns
- * kTfLiteOk.
- * NOTE: Calling this function will cancel in-flight invocations
- * in all SignatureRunners built from the same interpreter.
- *
- * WARNING: This is an experimental API and subject to change. */
-public static native @Cast("TfLiteStatus") int TfLiteSignatureRunnerCancel(
- TfLiteSignatureRunner signature_runner);
-// Forward declaration, to avoid need for dependency on
-// tensorflow/lite/profiling/telemetry/profiler.h.
+// Targeting ../TfLiteSparsity.java
-/** Registers the telemetry profiler to the interpreter.
- * Note: The interpreter does not take the ownership of profiler, but callers
- * must ensure profiler->data outlives the lifespan of the interpreter.
- *
- * WARNING: This is an experimental API and subject to change. */
-public static native void TfLiteInterpreterOptionsSetTelemetryProfiler(
- TfLiteInterpreterOptions options,
- TfLiteTelemetryProfilerStruct profiler);
-// #ifdef __cplusplus // extern "C"
-// #endif // __cplusplus
+// Targeting ../TfLiteCustomAllocation.java
-// #endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_
-// Parsed from tensorflow/lite/c/common.h
+/** The flags used in {@code Interpreter::SetCustomAllocationForTensor}.
+ * Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. */
+/** enum TfLiteCustomAllocationFlags */
+public static final int
+ kTfLiteCustomAllocationFlagsNone = 0,
+ /** Skips checking whether allocation.data points to an aligned buffer as
+ * expected by the TFLite runtime.
+ * NOTE: Setting this flag can cause crashes when calling Invoke().
+ * Use with caution. */
+ kTfLiteCustomAllocationFlagsSkipAlignCheck = 1;
+// Targeting ../TfLiteTensor.java
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+// Targeting ../TfLiteNode.java
+
+
+// #else // defined(TF_LITE_STATIC_MEMORY)?
+// NOTE: This flag is opt-in only at compile time.
+//
+// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
+// contains only the minimum fields required to initialize and prepare a micro
+// inference graph. The fields in this struct have been ordered from
+// largest-to-smallest for optimal struct sizeof.
+//
+// This struct does not use:
+// - allocation
+// - buffer_handle
+// - data_is_stale
+// - delegate
+// - dims_signature
+// - name
+// - sparsity
+
+// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
+// only the minimum fields required to represent a node.
+//
+// This struct does not use:
+// - delegate
+// - intermediates
+// - temporaries
+// Targeting ../TfLiteEvalTensor.java
+
+
+
+// #ifndef TF_LITE_STATIC_MEMORY
+/** Free data memory of tensor {@code t}. */
+public static native void TfLiteTensorDataFree(TfLiteTensor t);
+
+/** Free quantization data. */
+public static native void TfLiteQuantizationFree(TfLiteQuantization quantization);
+
+/** Free sparsity parameters. */
+public static native void TfLiteSparsityFree(TfLiteSparsity sparsity);
+
+/** Free memory of tensor {@code t}. */
+public static native void TfLiteTensorFree(TfLiteTensor t);
+
+/** Set all of a tensor's fields (and free any previously allocated data). */
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") BytePointer buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, @Cast("const char*") BytePointer name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") ByteBuffer buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String name, TfLiteIntArray dims,
+ @ByVal TfLiteQuantizationParams quantization, @Cast("char*") byte[] buffer,
+ @Cast("size_t") long size, @Cast("TfLiteAllocationType") int allocation_type,
+ @Const Pointer allocation, @Cast("bool") boolean is_variable,
+ TfLiteTensor tensor);
+
+/** Copies the contents of {@code src} in {@code dst}.
+ * Function does nothing if either {@code src} or {@code dst} is passed as nullptr and
+ * return {@code kTfLiteOk}.
+ * Returns {@code kTfLiteError} if {@code src} and {@code dst} doesn't have matching data size.
+ * Note function copies contents, so it won't create new data pointer
+ * or change allocation type.
+ * All Tensor related properties will be copied from {@code src} to {@code dst} like
+ * quantization, sparsity, ... */
+public static native @Cast("TfLiteStatus") int TfLiteTensorCopy(@Const TfLiteTensor src, TfLiteTensor dst);
+
+/** Change the size of the memory block owned by {@code tensor} to {@code num_bytes}.
+ * Tensors with allocation types other than {@code kTfLiteDynamic} will be ignored
+ * and a {@code kTfLiteOk} will be returned. {@code tensor}'s internal data buffer will be
+ * assigned a pointer which can safely be passed to free or realloc if
+ * {@code num_bytes} is zero. If {@code preserve_data} is true, tensor data will be
+ * unchanged in the range from the start of the region up to the minimum of the
+ * old and new sizes. In the case of NULL tensor, or an error allocating new
+ * memory, returns {@code kTfLiteError}. */
+public static native @Cast("TfLiteStatus") int TfLiteTensorResizeMaybeCopy(@Cast("size_t") long num_bytes, TfLiteTensor tensor,
+ @Cast("bool") boolean preserve_data);
+
+/** Change the size of the memory block owned by {@code tensor} to {@code num_bytes}.
+ * Tensors with allocation types other than {@code kTfLiteDynamic} will be ignored
+ * and a {@code kTfLiteOk} will be returned. {@code tensor}'s internal data buffer will be
+ * assigned a pointer which can safely be passed to free or realloc if
+ * {@code num_bytes} is zero. Tensor data will be unchanged in the range from the
+ * start of the region up to the minimum of the old and new sizes. In the case
+ * of NULL tensor, or an error allocating new memory, returns {@code kTfLiteError}. */
+
+///
+///
+public static native @Cast("TfLiteStatus") int TfLiteTensorRealloc(@Cast("size_t") long num_bytes, TfLiteTensor tensor);
+// Targeting ../TfLiteDelegateParams.java
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
+// Targeting ../TfLiteOpaqueDelegateParams.java
-/** \file
-/**
-/** This file defines common C types and APIs for implementing operations,
-/** delegates and other constructs in TensorFlow Lite. The actual operations and
-/** delegates can be defined using C++, but the interface between the
-/** interpreter and the operations are C.
-/**
-/** For documentation, see tensorflow/lite/core/c/common.h. */
-// #ifndef TENSORFLOW_LITE_C_COMMON_H_
-// #define TENSORFLOW_LITE_C_COMMON_H_
+// Targeting ../TfLiteContext.java
-// #include "tensorflow/lite/core/c/common.h"
-// #endif // TENSORFLOW_LITE_C_COMMON_H_
+/** {@code TfLiteRegistrationExternal} is an external version of {@code TfLiteRegistration}
+ * for C API which doesn't use internal types (such as {@code TfLiteContext}) but
+ * only uses stable API types (such as {@code TfLiteOpaqueContext}). The purpose of
+ * each field is the exactly the same as with {@code TfLiteRegistration}. */
-// Parsed from tensorflow/lite/core/c/common.h
+/** The valid values of the {@code inplace_operator} field in {@code TfLiteRegistration}.
+ * This allow an op to signal to the runtime that the same data pointer
+ * may be passed as an input and output without impacting the result.
+ * This does not mean that the memory can safely be reused, it is up to the
+ * runtime to determine this, e.g. if another op consumes the same input or not
+ * or if an input tensor has sufficient memory allocated to store the output
+ * data.
+ *
+ * Setting these flags authorizes the runtime to set the data pointers of an
+ * input and output tensor to the same value. In such cases, the memory
+ * required by the output must be less than or equal to that required by the
+ * shared input, never greater. If kTfLiteInplaceOpDataUnmodified is set, then
+ * the runtime can share the same input tensor with multiple operator's
+ * outputs, provided that kTfLiteInplaceOpDataUnmodified is set for all of
+ * them. Otherwise, if an input tensor is consumed by multiple operators, it
+ * may only be shared with the operator which is the last to consume it.
+ *
+ * Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. */
+/** enum TfLiteInPlaceOp */
+public static final int
+ /** The default value. This indicates that the same data pointer cannot safely
+ * be passed as an op's input and output. */
+ kTfLiteInplaceOpNone = 0,
+ /** This indicates that an op's first output's data is identical to its first
+ * input's data, for example Reshape. */
+ kTfLiteInplaceOpDataUnmodified = 1,
+ /** Setting kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput means
+ * that InputN may be shared with OutputN instead of with the first output.
+ * This flag requires one or more of kTfLiteInplaceOpInputNShared to be set. */
+
+///
+ kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput = 2,
+ /** kTfLiteInplaceOpInputNShared indicates that it is safe for an op to share
+ * InputN's data pointer with an output tensor. If
+ * kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is set then
+ * kTfLiteInplaceOpInputNShared indicates that InputN may be shared
+ * with OutputN, otherwise kTfLiteInplaceOpInputNShared indicates that InputN
+ * may be shared with the first output.
+ *
+ * Indicates that an op's first input may be shared with the first output
+ * tensor. kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput has
+ * no impact on the behavior allowed by this flag. */
+ kTfLiteInplaceOpInput0Shared = 4,
+ /** Indicates that an op's second input may be shared with the first output
+ * if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set
+ * or second output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput
+ * is set. */
+ kTfLiteInplaceOpInput1Shared = 8,
+ /** Indicates that an op's third input may be shared with the first output
+ * if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput is not set
+ * or third output if kTfLiteInplaceInputCanBeSharedWithCorrespondingOutput
+ * is
+ * set. */
+ kTfLiteInplaceOpInput2Shared = 16;
+public static native @MemberGetter int kTfLiteInplaceOpMaxValue();
+public static final int
+ /** Placeholder to ensure that enum can hold 64 bit values to accommodate
+ * future fields. */
+ kTfLiteInplaceOpMaxValue = kTfLiteInplaceOpMaxValue();
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/** The number of shareable inputs supported. */
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+///
+///
+@MemberGetter public static native int kTfLiteMaxSharableOpInputs();
+public static final int kTfLiteMaxSharableOpInputs = kTfLiteMaxSharableOpInputs();
+// Targeting ../TfLiteRegistration.java
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
+// Targeting ../TfLiteRegistration_V3.java
-// This file defines common C types and APIs for implementing operations,
-// delegates and other constructs in TensorFlow Lite. The actual operations and
-// delegates can be defined using C++, but the interface between the interpreter
-// and the operations are C.
-//
-// Summary of abstractions
-// TF_LITE_ENSURE - Self-sufficient error checking
-// TfLiteStatus - Status reporting
-// TfLiteIntArray - stores tensor shapes (dims),
-// TfLiteContext - allows an op to access the tensors
-// TfLiteTensor - tensor (a multidimensional array)
-// TfLiteNode - a single node or operation
-// TfLiteRegistration - the implementation of a conceptual operation.
-// TfLiteDelegate - allows delegation of nodes to alternative backends.
-//
-// Some abstractions in this file are created and managed by Interpreter.
-//
-// NOTE: The order of values in these structs are "semi-ABI stable". New values
-// should be added only to the end of structs and never reordered.
-/** WARNING: Users of TensorFlow Lite should not include this file directly,
-/** but should instead include
-/** "third_party/tensorflow/lite/c/common.h".
-/** Only the TensorFlow Lite implementation itself should include this
-/** file directly. */
-// IWYU pragma: private, include "third_party/tensorflow/lite/c/common.h"
+// Targeting ../TfLiteRegistration_V2.java
-// #ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_
-// #define TENSORFLOW_LITE_CORE_C_COMMON_H_
-// #include
+ * Sets the delegate buffer handle for the given tensor.
+ *
+ * This function sets the buffer handle for a tensor that is used by other
+ * computing hardware such as EdgeTpu. For example, EdgeTpu delegate imports a
+ * tensor's memory into EdgeTpu's virtual address and returns a buffer handle.
+ * Then EdgeTpu delegate calls this API to associate the tensor with the buffer
+ * handle.
+ *
+ * WARNING: This is an experimental API and subject to change. */
-// Returns how stable a tensor data values are across runs.
-public static native @Cast("TfLiteRunStability") int TfLiteTensorGetDataStability(@Const TfLiteTensor t);
+///
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterSetBufferHandle(
+ TfLiteInterpreter interpreter, TfLiteTensor tensor,
+ @Cast("TfLiteBufferHandle") int buffer_handle, @Cast("TfLiteOpaqueDelegate*") TfLiteOpaqueDelegateStruct delegate);
-// Returns the operation step when the data of a tensor is populated.
-//
-// Some operations can precompute their results before the evaluation step. This
-// makes the data available earlier for subsequent operations.
-public static native @Cast("TfLiteRunStep") int TfLiteTensorGetDataKnownStep(@Const TfLiteTensor t);
+/** Gets the delegate buffer handle, and the delegate which can process
+ * the buffer handle.
+ *
+ * WARNING: This is an experimental API and subject to change. */
-// Returns the operation steop when the shape of a tensor is computed.
-//
-// Some operations can precompute the shape of their results before the
-// evaluation step. This makes the shape available earlier for subsequent
-// operations.
-public static native @Cast("TfLiteRunStep") int TfLiteTensorGetShapeKnownStep(@Const TfLiteTensor t);
+///
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle(
+ TfLiteInterpreter interpreter, int tensor_index,
+ @Cast("TfLiteBufferHandle*") IntPointer buffer_handle, @Cast("TfLiteOpaqueDelegate**") PointerPointer delegate);
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle(
+ TfLiteInterpreter interpreter, int tensor_index,
+ @Cast("TfLiteBufferHandle*") IntPointer buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate);
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle(
+ TfLiteInterpreter interpreter, int tensor_index,
+ @Cast("TfLiteBufferHandle*") IntBuffer buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate);
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterGetBufferHandle(
+ TfLiteInterpreter interpreter, int tensor_index,
+ @Cast("TfLiteBufferHandle*") int[] buffer_handle, @Cast("TfLiteOpaqueDelegate**") @ByPtrPtr TfLiteOpaqueDelegateStruct delegate);
-// #ifdef __cplusplus // extern "C"
+/** Sets whether buffer handle output is allowed.
+ * When using hardware delegation, Interpreter will make the data of output
+ * tensors available in {@code tensor->data} by default. If the application can
+ * consume the buffer handle directly (e.g. reading output from OpenGL
+ * texture), it can set this flag to false, so Interpreter won't copy the
+ * data from buffer handle to CPU memory.
+ *
+ * WARNING: This is an experimental API and subject to change. */
-// #include
+ * Attempts to cancel in flight invocation if any.
+ * This will not affect calls to {@code Invoke} that happend after this.
+ * Non blocking and thread safe.
+ * Returns kTfLiteError if cancellation is not enabled, otherwise returns
+ * kTfLiteOk.
+ * NOTE: Calling this function will cancel in-flight invocations
+ * in all SignatureRunners built from the same interpreter.
+ *
+ * WARNING: This is an experimental API and subject to change. */
+public static native @Cast("TfLiteStatus") int TfLiteSignatureRunnerCancel(
+ TfLiteSignatureRunner signature_runner);
+// Forward declaration, to avoid need for dependency on
+// tensorflow/lite/profiling/telemetry/profiler.h.
-// Concrete implementations extend `AbstractVariantData` with CRPT.
+/** Registers the telemetry profiler to the interpreter.
+ * Note: The interpreter does not take the ownership of profiler, but callers
+ * must ensure profiler->data outlives the lifespan of the interpreter.
+ *
+ * WARNING: This is an experimental API and subject to change. */
-// Analogous to `TfLiteTensorRealloc` for allocation of tensors whose
-// data member points to an arbitrary C++ object. `VariantType` refers
-// to the erased type of said object and `VariantArgs` refers to
-// a list of argument types with which to construct a new `VariantType`.
-// `VariantArgs` must match a constructor of `VariantType`.
+///
+public static native void TfLiteInterpreterOptionsSetTelemetryProfiler(
+ TfLiteInterpreterOptions options,
+ TfLiteTelemetryProfilerStruct profiler);
+/** Ensures the data of the tensor at the given index is readable.
+ * Note: If a delegate has been used, and {@code SetAllowBufferHandleOutput(true)}
+ * has been called, tensor outputs may be stored as delegate buffer handles
+ * whose data is not directly readable until this method has been called. In
+ * such cases, this method will copy the data from the delegate buffer handle
+ * to CPU memory.
+ *
+ * WARNING: This is an experimental API and subject to change. */
+public static native @Cast("TfLiteStatus") int TfLiteInterpreterEnsureTensorDataIsReadable(
+ TfLiteInterpreter interpreter, int tensor_index);
+// #ifdef __cplusplus // extern "C"
// #endif // __cplusplus
-// #endif // TENSORFLOW_LITE_CORE_C_COMMON_H_
+
+// #endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_
// Parsed from tensorflow/lite/core/api/error_reporter.h
@@ -3440,9 +3558,6 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String
// No string mapping is included here, since the TF Lite packed representation
// doesn't correspond to a C++ type well.
-// Targeting ../TfLiteTypeToType.java
-
-
@@ -3686,9 +3801,9 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String
// #include "tensorflow/lite/core/c/common.h"
// #include "tensorflow/lite/schema/schema_generated.h"
// #include "tensorflow/lite/util.h"
-// Targeting ../ValueHasher.java
-
+// Some versions of gcc don't support partial specialization in class scope,
+// so these are defined in a namescope.
// Targeting ../MutableOpResolver.java
@@ -3954,7 +4069,7 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String
/** For documentation, see third_party/tensorflow/lite/core/interpreter_builder.h. */
-// #include "tensorflow/lite/core/interpreter_builder.h"
+// #include "tensorflow/lite/core/interpreter_builder.h" // IWYU pragma: export
// namespace tflite
// #endif // TENSORFLOW_LITE_INTERPRETER_BUILDER_H_
@@ -4086,7 +4201,9 @@ public static native void TfLiteTensorReset(@Cast("TfLiteType") int type, String
// #ifndef TENSORFLOW_LITE_KERNELS_REGISTER_H_
// #define TENSORFLOW_LITE_KERNELS_REGISTER_H_
-// #include "tensorflow/lite/core/kernels/register.h"
+/** For documentation, see third_party/tensorflow/lite/core/kernels/register.h */
+
+// #include "tensorflow/lite/core/kernels/register.h" // IWYU pragma: export
// namespace builtin
// namespace ops
diff --git a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
index e0d38f01f3d..2258881116d 100644
--- a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
+++ b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
@@ -53,10 +53,10 @@
"tensorflow/lite/c/c_api.h",
"tensorflow/lite/core/c/c_api.h",
"tensorflow/lite/core/c/registration_external.h",
- "tensorflow/lite/c/c_api_experimental.h",
- "tensorflow/lite/core/c/c_api_experimental.h",
"tensorflow/lite/c/common.h",
"tensorflow/lite/core/c/common.h",
+ "tensorflow/lite/c/c_api_experimental.h",
+ "tensorflow/lite/core/c/c_api_experimental.h",
"tensorflow/lite/core/api/error_reporter.h",
"tensorflow/lite/core/api/op_resolver.h",
"tensorflow/lite/core/api/profiler.h",
diff --git a/tensorrt/README.md b/tensorrt/README.md
index 76f5b8dcf46..8a71be6966c 100644
--- a/tensorrt/README.md
+++ b/tensorrt/README.md
@@ -17,7 +17,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:
- * TensorRT 8.6.1.6 https://developer.nvidia.com/tensorrt
+ * TensorRT 10.0.1.6 https://developer.nvidia.com/tensorrt
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
@@ -46,7 +46,7 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
- *
- * \namespace nvcaffeparser1
- *
- * \brief The TensorRT Caffe parser API namespace.
- * */
-// Targeting ../nvparsers/IBlobNameToTensor.java
-
-
-// Targeting ../nvparsers/IBinaryProtoBlob.java
-
-
-// Targeting ../nvparsers/IPluginFactoryV2.java
-
-
-// Targeting ../nvparsers/ICaffeParser.java
-
-
-
-/**
- * \brief Creates a ICaffeParser object.
- *
- * @return A pointer to the ICaffeParser object is returned.
- *
- * @see nvcaffeparser1::ICaffeParser
- *
- * @deprecated ICaffeParser will be removed in TensorRT 9.0. Plan to migrate your workflow to
- * use nvonnxparser::IParser for deployment.
- * */
-
-
-//!
-//!
-//!
-@Namespace("nvcaffeparser1") public static native @NoException(true) ICaffeParser createCaffeParser();
-
-/**
- * \brief Shuts down protocol buffers library.
- *
- * \note No part of the protocol buffers library can be used after this function is called.
- * */
-@Namespace("nvcaffeparser1") public static native @NoException(true) void shutdownProtobufLibrary();
- // namespace nvcaffeparser1
-
-/**
- * Internal C entry point for creating ICaffeParser.
- * \private
- * */
-public static native @NoException(true) Pointer createNvCaffeParser_INTERNAL();
-// #endif
-
-
-// Parsed from NvUffParser.h
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
- *
- * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
- * property and proprietary rights in and to this material, related
- * documentation and any modifications thereto. Any use, reproduction,
- * disclosure or distribution of this material and related documentation
- * without an express license agreement from NVIDIA CORPORATION or
- * its affiliates is strictly prohibited.
- */
-
-// #ifndef NV_UFF_PARSER_H
-// #define NV_UFF_PARSER_H
-
-
-
-//!
-//!
-//!
-// #include "NvInfer.h"
-
-/**
- * \file NvUffParser.h
- *
- * This is the API for the UFF Parser
- * */
-
-// Current supported Universal Framework Format (UFF) version for the parser.
-public static final int UFF_REQUIRED_VERSION_MAJOR = 0;
-public static final int UFF_REQUIRED_VERSION_MINOR = 6;
-
-
-//!
-//!
-//!
-public static final int UFF_REQUIRED_VERSION_PATCH = 9;
-
-/**
- * \namespace nvuffparser
- *
- * \brief The TensorRT UFF parser API namespace.
- * */
-
-/**
- * \enum UffInputOrder
- * \brief The different possible supported input order.
- * */
-@Namespace("nvuffparser") public enum UffInputOrder {
- /** NCHW order. */
- kNCHW(0),
- /** NHWC order. */
- kNHWC(1),
- /** NC order. */
- kNC(2);
-
- public final int value;
- private UffInputOrder(int v) { this.value = v; }
- private UffInputOrder(UffInputOrder e) { this.value = e.value; }
- public UffInputOrder intern() { for (UffInputOrder e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/**
- * \enum FieldType
- * \brief The possible field types for custom layer.
- * */
-
-@Namespace("nvuffparser") public enum FieldType {
- /** FP32 field type. */
- kFLOAT(0),
- /** INT32 field type. */
- kINT32(1),
- /** char field type. String for length>1. */
- kCHAR(2),
- /** nvinfer1::Dims field type. */
- kDIMS(4),
- /** nvinfer1::DataType field type. */
- kDATATYPE(5),
- kUNKNOWN(6);
-
- public final int value;
- private FieldType(int v) { this.value = v; }
- private FieldType(FieldType e) { this.value = e.value; }
- public FieldType intern() { for (FieldType e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-// Targeting ../nvparsers/FieldMap.java
-
-
-// Targeting ../nvparsers/FieldCollection.java
-
-
-// Targeting ../nvparsers/IUffParser.java
-
-
-
-/**
- * \brief Creates a IUffParser object.
- *
- * @return A pointer to the IUffParser object is returned.
- *
- * @see nvuffparser::IUffParser
- *
- * @deprecated IUffParser will be removed in TensorRT 9.0. Plan to migrate your workflow to
- * use nvonnxparser::IParser for deployment.
- * */
-
-
-//!
-//!
-//!
-@Namespace("nvuffparser") public static native @NoException(true) IUffParser createUffParser();
-
-/**
- * \brief Shuts down protocol buffers library.
- *
- * \note No part of the protocol buffers library can be used after this function is called.
- * */
-
- // namespace nvuffparser
-
-/**
- * Internal C entry point for creating IUffParser
- * \private
- * */
-public static native @NoException(true) Pointer createNvUffParser_INTERNAL();
-
-// #endif /* !NV_UFF_PARSER_H */
-
-
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java
index 11a8f686b80..e448ede3a85 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims2.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -20,10 +20,11 @@
/**
* \class Dims2
+ *
* \brief Descriptor for two-dimensional data.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class Dims2 extends Dims32 {
+public class Dims2 extends Dims64 {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Dims2(Pointer p) { super(p); }
@@ -54,6 +55,6 @@ public class Dims2 extends Dims32 {
* @param d0 The first element.
* @param d1 The second element.
* */
- public Dims2(int d0, int d1) { super((Pointer)null); allocate(d0, d1); }
- private native void allocate(int d0, int d1);
+ public Dims2(@Cast("int64_t") long d0, @Cast("int64_t") long d1) { super((Pointer)null); allocate(d0, d1); }
+ private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java
index 6fcd03ea064..fb6c4e32ce2 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims3.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -21,6 +21,7 @@
/**
* \class Dims3
+ *
* \brief Descriptor for three-dimensional data.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
@@ -56,6 +57,6 @@ public class Dims3 extends Dims2 {
* @param d1 The second element.
* @param d2 The third element.
* */
- public Dims3(int d0, int d1, int d2) { super((Pointer)null); allocate(d0, d1, d2); }
- private native void allocate(int d0, int d1, int d2);
+ public Dims3(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2) { super((Pointer)null); allocate(d0, d1, d2); }
+ private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java
index c53e4ef8f4c..b82c5ad9c31 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims4.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -21,6 +21,7 @@
/**
* \class Dims4
+ *
* \brief Descriptor for four-dimensional data.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
@@ -57,6 +58,6 @@ public class Dims4 extends Dims3 {
* @param d2 The third element.
* @param d3 The fourth element.
* */
- public Dims4(int d0, int d1, int d2, int d3) { super((Pointer)null); allocate(d0, d1, d2, d3); }
- private native void allocate(int d0, int d1, int d2, int d3);
+ public Dims4(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2, @Cast("int64_t") long d3) { super((Pointer)null); allocate(d0, d1, d2, d3); }
+ private native void allocate(@Cast("int64_t") long d0, @Cast("int64_t") long d1, @Cast("int64_t") long d2, @Cast("int64_t") long d3);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java
similarity index 61%
rename from tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java
rename to tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java
index 1284a403697..0102c504119 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims32.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Dims64.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -23,36 +23,38 @@
* \class Dims
* \brief Structure to define the dimensions of a tensor.
*
- * TensorRT can also return an invalid dims structure. This structure is represented by nbDims == -1
- * and d[i] == 0 for all d.
+ * TensorRT can also return an "invalid dims" structure. This structure is
+ * represented by nbDims == -1 and d[i] == 0 for all i.
*
- * TensorRT can also return an "unknown rank" dims structure. This structure is represented by nbDims == -1
- * and d[i] == -1 for all d.
+ * TensorRT can also return an "unknown rank" dims structure. This structure is
+ * represented by nbDims == -1 and d[i] == -1 for all i.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class Dims32 extends Pointer {
+public class Dims64 extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
- public Dims32() { super((Pointer)null); allocate(); }
+ public Dims64() { super((Pointer)null); allocate(); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
- public Dims32(long size) { super((Pointer)null); allocateArray(size); }
+ public Dims64(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Dims32(Pointer p) { super(p); }
+ public Dims64(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(long size);
- @Override public Dims32 position(long position) {
- return (Dims32)super.position(position);
+ @Override public Dims64 position(long position) {
+ return (Dims64)super.position(position);
}
- @Override public Dims32 getPointer(long i) {
- return new Dims32((Pointer)this).offsetAddress(i);
+ @Override public Dims64 getPointer(long i) {
+ return new Dims64((Pointer)this).offsetAddress(i);
}
/** The maximum rank (number of dimensions) supported for a tensor. */
@MemberGetter public static native int MAX_DIMS();
public static final int MAX_DIMS = MAX_DIMS();
+
/** The rank (number of dimensions). */
- public native int nbDims(); public native Dims32 nbDims(int setter);
+ public native int nbDims(); public native Dims64 nbDims(int setter);
+
/** The extent of each dimension. */
- public native int d(int i); public native Dims32 d(int i, int setter);
- @MemberGetter public native IntPointer d();
+ public native @Cast("int64_t") long d(int i); public native Dims64 d(int i, long setter);
+ @MemberGetter public native @Cast("int64_t*") LongPointer d();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java
index f6bca198b8a..f25cf54d6d5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsExprs.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -22,7 +22,7 @@
/**
* \class DimsExprs
*
- * Analog of class Dims with expressions instead of constants for the dimensions.
+ * \brief Analog of class Dims with expressions instead of constants for the dimensions.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class DimsExprs extends Pointer {
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java
index bda46b1ed0c..a286876eba8 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DimsHW.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -21,6 +21,7 @@
/**
* \class DimsHW
+ *
* \brief Descriptor for two-dimensional spatial data.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
@@ -60,8 +61,8 @@ public class DimsHW extends Dims2 {
//!
//!
//!
- public DimsHW(int height, int width) { super((Pointer)null); allocate(height, width); }
- private native void allocate(int height, int width);
+ public DimsHW(@Cast("int64_t") long height, @Cast("int64_t") long width) { super((Pointer)null); allocate(height, width); }
+ private native void allocate(@Cast("int64_t") long height, @Cast("int64_t") long width);
/**
* \brief Get the height.
@@ -73,7 +74,7 @@ public class DimsHW extends Dims2 {
//!
//!
//!
- public native @ByRef IntPointer h();
+ public native @Cast("int64_t*") @ByRef LongPointer h();
/**
* \brief Get the height.
@@ -91,7 +92,7 @@ public class DimsHW extends Dims2 {
//!
//!
//!
- public native @ByRef IntPointer w();
+ public native @Cast("int64_t*") @ByRef LongPointer w();
/**
* \brief Get the width.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java
index c52ec48e986..e91f83256d1 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/DynamicPluginTensorDesc.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -20,9 +20,9 @@
/**
- * \class DynamicPluginTensorDesc
+ * \struct DynamicPluginTensorDesc
*
- * Summarizes tensors that a plugin might see for an input or output.
+ * \brief Summarizes tensors that a plugin might see for an input or output.
* */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class DynamicPluginTensorDesc extends Pointer {
@@ -46,8 +46,11 @@ public class DynamicPluginTensorDesc extends Pointer {
public native @ByRef PluginTensorDesc desc(); public native DynamicPluginTensorDesc desc(PluginTensorDesc setter);
/** Lower bounds on tensor’s dimensions */
- public native @ByRef @Cast("nvinfer1::Dims*") Dims32 min(); public native DynamicPluginTensorDesc min(Dims32 setter);
+ public native @ByRef @Cast("nvinfer1::Dims*") Dims64 min(); public native DynamicPluginTensorDesc min(Dims64 setter);
/** Upper bounds on tensor’s dimensions */
- public native @ByRef @Cast("nvinfer1::Dims*") Dims32 max(); public native DynamicPluginTensorDesc max(Dims32 setter);
+ public native @ByRef @Cast("nvinfer1::Dims*") Dims64 max(); public native DynamicPluginTensorDesc max(Dims64 setter);
+
+ /** Optimum value of tensor’s dimensions specified for auto-tuning */
+ public native @ByRef @Cast("nvinfer1::Dims*") Dims64 opt(); public native DynamicPluginTensorDesc opt(Dims64 setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java
index 6cdf5a5b30c..d7ea6966af5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IActivationLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java
index 00081e768d1..5c4a2632c8b 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithm.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -21,9 +21,11 @@
/**
* \class IAlgorithm
+ *
* \brief Describes a variation of execution of a layer.
* An algorithm is represented by IAlgorithmVariant and the IAlgorithmIOInfo for each of its inputs and outputs.
- * An algorithm can be selected or reproduced using AlgorithmSelector::selectAlgorithms()."
+ * An algorithm can be selected or reproduced using AlgorithmSelector::selectAlgorithms().
+ *
* @see IAlgorithmIOInfo, IAlgorithmVariant, IAlgorithmSelector::selectAlgorithms()
*
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
@@ -34,22 +36,6 @@ public class IAlgorithm extends INoCopy {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IAlgorithm(Pointer p) { super(p); }
- /**
- * \brief Returns the format of an Algorithm input or output. Algorithm inputs are incrementally numbered first,
- * followed by algorithm outputs.
- * @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs
- * and the outputs.
- *
- * @return a reference to IAlgorithmIOInfo specified by index or the first algorithm if index is out of range.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by IAlgorithm::getAlgorithmIOInfoByIndex().
- * */
-
-
- //!
- //!
- public native @Const @Deprecated @ByRef @NoException(true) IAlgorithmIOInfo getAlgorithmIOInfo(int index);
-
/**
* \brief Returns the algorithm variant.
* */
@@ -76,11 +62,13 @@ public class IAlgorithm extends INoCopy {
//!
//!
//!
+ //!
public native @Cast("std::size_t") @NoException(true) long getWorkspaceSize();
/**
* \brief Returns the format of an Algorithm input or output. Algorithm inputs are incrementally numbered first,
* followed by algorithm outputs.
+ *
* @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs
* and the outputs.
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java
index 703e557142e..9ffe58ea67c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmContext.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -35,16 +35,19 @@ public class IAlgorithmContext extends INoCopy {
/**
* \brief Return name of the algorithm node.
+ *
* This is a unique identifier for the IAlgorithmContext.
* */
+ //!
//!
//!
public native @NoException(true) String getName();
/**
* \brief Get the minimum / optimum / maximum dimensions for input or output tensor.
+ *
* @param index Index of the input or output of the algorithm. Incremental numbers assigned to indices of inputs
* and the outputs.
* @param select Which of the minimum, optimum, or maximum dimensions to be queried.
@@ -53,8 +56,8 @@ public class IAlgorithmContext extends INoCopy {
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, OptProfileSelector select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select);
/**
* \brief Return number of inputs of the algorithm.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java
index 41156dfb357..966336e96cf 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmIOInfo.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -35,21 +35,6 @@ public class IAlgorithmIOInfo extends INoCopy {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IAlgorithmIOInfo(Pointer p) { super(p); }
- /**
- * \brief Return TensorFormat of the input/output of algorithm.
- *
- * @deprecated Deprecated in TensorRT 8.6. The strides, data type, and vectorization
- * information is sufficient to uniquely identify tensor formats.
- *
- * @return the tensor format
- * */
-
-
- //!
- //!
- //!
- public native @Deprecated @NoException(true) TensorFormat getTensorFormat();
-
/**
* \brief Return DataType of the input/output of algorithm.
*
@@ -73,7 +58,7 @@ public class IAlgorithmIOInfo extends INoCopy {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrides();
/**
* \brief Return the index of the vectorized dimension or -1 for non-vectorized formats.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java
index c163f42f13a..1b874408b6e 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmSelector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -18,21 +18,21 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
// IAlgorithm
-
-/**
- * \class IAlgorithmSelector
- *
- * \brief Interface implemented by application for selecting and reporting algorithms of a layer provided by the
- * builder.
- * \note A layer in context of algorithm selection may be different from ILayer in INetworkDefiniton.
- * For example, an algorithm might be implementing a conglomeration of multiple ILayers in INetworkDefinition.
- * */
-@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class IAlgorithmSelector extends Pointer {
+@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class IAlgorithmSelector extends IVersionedInterface {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IAlgorithmSelector(Pointer p) { super(p); }
+ /**
+ * \brief Return version information associated with this interface. Applications must not override this method.
+ * */
+
+ //!
+ //!
+ //!
+ //!
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo();
/**
* \brief Select Algorithms for a layer from the given list of algorithm choices.
*
@@ -44,10 +44,11 @@ public class IAlgorithmSelector extends Pointer {
*
* \note TensorRT uses its default algorithm selection to choose from the list provided.
* If return value is 0, TensorRT's default algorithm selection is used unless
- * BuilderFlag::kREJECT_EMPTY_ALGORITHMS (or the deprecated BuilderFlag::kSTRICT_TYPES) is set.
+ * BuilderFlag::kREJECT_EMPTY_ALGORITHMS is set.
* The list of choices is valid only for this specific algorithm context.
* */
+
//!
//!
//!
@@ -60,6 +61,7 @@ public class IAlgorithmSelector extends Pointer {
int nbChoices, IntBuffer selection);
public native @NoException(true) int selectAlgorithms(@Const @ByRef IAlgorithmContext context, @Const @ByPtrPtr IAlgorithm choices,
int nbChoices, int[] selection);
+
/**
* \brief Called by TensorRT to report choices it made.
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java
index c1a7035ce46..cac77e09535 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAlgorithmVariant.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java
index afb245db46f..7692713a663 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IAssertionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,7 +19,8 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
-/** \class IAssertionLayer
+/**
+ * \class IAssertionLayer
*
* \brief An assertion layer in a network
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java
index e00586e05bb..21d802657a5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilder.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -45,41 +45,6 @@ public class IBuilder extends INoCopy {
}
- /**
- * \brief Set the maximum batch size. This has no effect for networks created with explicit batch dimension mode.
- *
- * @param batchSize The maximum batch size which can be used at execution time, and also the batch size for which
- * the engine will be optimized.
- *
- * @deprecated Deprecated in TensorRT 8.4.
- *
- * @see getMaxBatchSize()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setMaxBatchSize(int batchSize);
-
- /**
- * \brief Get the maximum batch size.
- *
- * @return The maximum batch size.
- *
- * @deprecated Deprecated in TensorRT 8.4.
- *
- * @see setMaxBatchSize()
- * @see getMaxDLABatchSize()
- * */
-
-
- //!
- //!
- public native @Deprecated @NoException(true) int getMaxBatchSize();
-
/**
* \brief Determine whether the platform has fast native fp16.
* */
@@ -97,23 +62,8 @@ public class IBuilder extends INoCopy {
//!
//!
//!
- //!
public native @Cast("bool") @NoException(true) boolean platformHasFastInt8();
- /**
- * \brief Destroy this object.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
-
-
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void destroy();
-
/**
* \brief Get the maximum batch size DLA can support.
* For any tensor the total volume of index dimensions combined(dimensions other than CHW) with the requested
@@ -136,10 +86,12 @@ public class IBuilder extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) int getNbDLACores();
/**
* \brief Set the GPU allocator.
+ *
* @param allocator Set the GPU allocator to be used by the builder. All GPU memory acquired will use this
* allocator. If NULL is passed, the default allocator will be used.
*
@@ -168,45 +120,38 @@ public class IBuilder extends INoCopy {
//!
//!
//!
+ //!
+ //!
+ //!
public native @NoException(true) IBuilderConfig createBuilderConfig();
/**
- * \brief Builds an engine for the given INetworkDefinition and given IBuilderConfig.
+ * \brief Create a network definition object
*
- * It enables the builder to build multiple engines based on the same network definition, but with different
- * builder configurations.
+ * Creates a network definition object with immutable properties specified using the flags parameter.
*
- * \note This function will synchronize the cuda stream returned by \p config.getProfileStream() before returning.
+ * createNetworkV2 supports creating network with properties from NetworkDefinitionCreationFlags.
*
- * @deprecated Deprecated in TensorRT 8.0. Superseded by IBuilder::buildSerializedNetwork().
- * */
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) ICudaEngine buildEngineWithConfig(
- @ByRef INetworkDefinition network, @ByRef IBuilderConfig config);
-
- /** \brief Create a network definition object
+ * CreateNetworkV2 supports dynamic shapes and explicit batch dimensions by default.
*
- * Creates a network definition object with immutable properties specified using the flags parameter.
- * CreateNetworkV2 supports dynamic shapes and explicit batch dimensions when used with
- * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag.
- * Creating a network without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag has been deprecated.
+ * createNetworkV2 with NetworkDefinitionCreationFlag::kSTRONGLY_TYPED flag supports creating a strongly typed plan
+ * where tensor data types are inferred from network input types and operator type specification.
*
* @param flags Bitset of NetworkDefinitionCreationFlags specifying network properties combined with bitwise OR.
- * e.g., 1U << NetworkDefinitionCreationFlag::kEXPLICIT_BATCH
+ * e.g., 1U << NetworkDefinitionCreationFlag::kSTRONGLY_TYPED
*
* @see INetworkDefinition, NetworkDefinitionCreationFlags
* */
+
+ //!
//!
//!
//!
public native @NoException(true) INetworkDefinition createNetworkV2(@Cast("nvinfer1::NetworkDefinitionCreationFlags") int flags);
- /** \brief Create a new optimization profile.
+ /**
+ * \brief Create a new optimization profile.
*
* If the network has any dynamic input tensors, the appropriate calls to setDimensions() must be made.
* Likewise, if there are any shape input tensors, the appropriate calls to setShapeValues() are required.
@@ -222,6 +167,7 @@ public class IBuilder extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) IOptimizationProfile createOptimizationProfile();
/**
@@ -234,10 +180,10 @@ public class IBuilder extends INoCopy {
*
* If an error recorder is not set, messages will be sent to the global log stream.
*
- * @param recorder The error recorder to register with this interface. */
- //
- /** @see getErrorRecorder()
- /** */
+ * @param recorder The error recorder to register with this interface.
+ *
+ * @see getErrorRecorder()
+ * */
//!
@@ -308,7 +254,6 @@ public class IBuilder extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) IHostMemory buildSerializedNetwork(@ByRef INetworkDefinition network, @ByRef IBuilderConfig config);
/**
@@ -327,8 +272,6 @@ public class IBuilder extends INoCopy {
* false otherwise.
*
* \note This function will synchronize the cuda stream returned by \p config.getProfileStream() before returning.
- *
- * This function is only supported in NVIDIA Drive(R) products.
* */
@@ -344,6 +287,8 @@ public class IBuilder extends INoCopy {
* */
+ //!
+ //!
//!
//!
//!
@@ -351,7 +296,9 @@ public class IBuilder extends INoCopy {
/**
* \brief Set the maximum number of threads.
+ *
* @param maxThreads The maximum number of threads that can be used by the builder.
+ *
* @return True if successful, false otherwise.
*
* The default value is 1 and includes the current thread.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java
index 785b6833109..09793b00c82 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IBuilderConfig.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -17,7 +17,7 @@
import static org.bytedeco.cuda.global.nvrtc.*;
import static org.bytedeco.tensorrt.global.nvinfer.*;
- // namespace impl
+
/**
* \class IBuilderConfig
@@ -45,44 +45,6 @@ public class IBuilderConfig extends INoCopy {
}
- /**
- * \brief Set the number of minimization iterations used when timing layers.
- *
- * When timing layers, the builder minimizes over a set of average times for layer execution. This parameter
- * controls the number of iterations used in minimization. The builder may sometimes run layers for more
- * iterations to improve timing accuracy if this parameter is set to a small value and the runtime of the
- * layer is short.
- *
- * @see getMinTimingIterations()
- *
- * @deprecated Deprecated in TensorRT 8.4. Superseded by setAvgTimingIterations().
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setMinTimingIterations(int minTiming);
-
- /**
- * \brief Query the number of minimization iterations.
- *
- * By default the minimum number of iterations is 1.
- *
- * @see setMinTimingIterations()
- *
- * @deprecated Deprecated in TensorRT 8.4. Superseded by getAvgTimingIterations().
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getMinTimingIterations();
-
/**
* \brief Set the number of averaging iterations used when timing layers.
*
@@ -161,54 +123,13 @@ public class IBuilderConfig extends INoCopy {
* */
- //!
- //!
- //!
- //!
- //!
- public native @NoException(true) IInt8Calibrator getInt8Calibrator();
-
- /**
- * \brief Set the maximum workspace size.
- *
- * @param workspaceSize The maximum GPU temporary memory which the engine can use at execution time.
- *
- * @see getMaxWorkspaceSize()
- *
- * @deprecated Deprecated in TensorRT 8.3. Superseded by IBuilderConfig::setMemoryPoolLimit() with
- * MemoryPoolType::kWORKSPACE.
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setMaxWorkspaceSize(@Cast("std::size_t") long workspaceSize);
-
- /**
- * \brief Get the maximum workspace size.
- *
- * By default the workspace size is the size of total global memory in the device.
- *
- * @return The maximum workspace size.
- *
- * @see setMaxWorkspaceSize()
- *
- * @deprecated Deprecated in TensorRT 8.3. Superseded by IBuilderConfig::getMemoryPoolLimit() with
- * MemoryPoolType::kWORKSPACE.
- * */
-
-
//!
//!
//!
//!
//!
//!
- public native @Cast("std::size_t") @Deprecated @NoException(true) long getMaxWorkspaceSize();
+ public native @NoException(true) IInt8Calibrator getInt8Calibrator();
/**
* \brief Set the build mode flags to turn on builder options for this network.
@@ -290,22 +211,25 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
+ //!
public native @Cast("bool") @NoException(true) boolean getFlag(BuilderFlag builderFlag);
public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::BuilderFlag") int builderFlag);
/**
* \brief Set the device that this layer must execute on.
+ *
* @param layer which layer to execute.
* @param deviceType that this layer must execute on.
* If DeviceType is not set or is reset, TensorRT will use the default DeviceType set in the builder.
*
* \note The device type for a layer must be compatible with the safety flow (if specified).
- * For example a layer cannot be marked for DLA execution while the builder is configured for kSAFE_GPU.
+ * For example a layer cannot be marked for DLA execution while the builder is configured for kSAFETY.
*
* @see getDeviceType()
* */
+ //!
//!
//!
public native @NoException(true) void setDeviceType(@Const ILayer layer, DeviceType deviceType);
@@ -313,17 +237,22 @@ public class IBuilderConfig extends INoCopy {
/**
* \brief Get the device that this layer executes on.
+ *
* @return Returns DeviceType of the layer.
* */
+ //!
+ //!
//!
//!
public native @NoException(true) DeviceType getDeviceType(@Const ILayer layer);
/**
* \brief whether the DeviceType has been explicitly set for this layer
+ *
* @return true if device type is not default
+ *
* @see setDeviceType() getDeviceType() resetDeviceType()
* */
@@ -340,12 +269,14 @@ public class IBuilderConfig extends INoCopy {
* */
+ //!
//!
//!
public native @NoException(true) void resetDeviceType(@Const ILayer layer);
/**
* \brief Checks if a layer can run on DLA.
+ *
* @return status true if the layer can on DLA else returns false.
* */
@@ -355,10 +286,12 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
+ //!
public native @Cast("bool") @NoException(true) boolean canRunOnDLA(@Const ILayer layer);
/**
* \brief Sets the DLA core used by the network. Defaults to -1.
+ *
* @param dlaCore The DLA core to execute the engine on, in the range [0,getNbDlaCores()).
*
* This function is used to specify which DLA core to use via indexing, if multiple DLA cores are available.
@@ -369,16 +302,19 @@ public class IBuilderConfig extends INoCopy {
* */
+ //!
//!
//!
public native @NoException(true) void setDLACore(int dlaCore);
/**
* \brief Get the DLA core that the engine executes on.
+ *
* @return assigned DLA core or -1 for DLA not present or unset.
* */
+ //!
//!
//!
public native @NoException(true) int getDLACore();
@@ -386,6 +322,7 @@ public class IBuilderConfig extends INoCopy {
/**
* \brief Sets the default DeviceType to be used by the builder. It ensures that all the layers that can run on
* this device will run on it, unless setDeviceType is used to override the default DeviceType for a layer.
+ *
* @see getDefaultDeviceType()
* */
@@ -419,26 +356,8 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) void reset();
- /**
- * \brief Delete this IBuilderConfig.
- *
- * De-allocates any internally allocated memory.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void destroy();
-
/**
* \brief Set the cuda stream that is used to profile this network.
*
@@ -467,6 +386,7 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) CUstream_st getProfileStream();
/**
@@ -477,6 +397,7 @@ public class IBuilderConfig extends INoCopy {
* a single optimization profile are not supported for refittable engines.
*
* @param profile The new optimization profile, which must satisfy profile->isValid() == true
+ *
* @return The index of the optimization profile (starting from 0) if the input is valid, or -1 if the input is
* not valid.
* */
@@ -553,6 +474,7 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) IAlgorithmSelector getAlgorithmSelector();
/**
@@ -563,6 +485,7 @@ public class IBuilderConfig extends INoCopy {
*
* @param profile The new calibration profile, which must satisfy profile->isValid() == true or be nullptr.
* MIN and MAX values will be overwritten by kOPT.
+ *
* @return True if the calibration profile was set correctly.
* */
@@ -890,6 +813,8 @@ public class IBuilderConfig extends INoCopy {
//!
//!
//!
+ //!
+ //!
public native @Cast("bool") @NoException(true) boolean getPreviewFeature(PreviewFeature feature);
public native @Cast("bool") @NoException(true) boolean getPreviewFeature(@Cast("nvinfer1::PreviewFeature") int feature);
@@ -904,6 +829,19 @@ public class IBuilderConfig extends INoCopy {
* which is currently 5. Setting it to greater than the maximum level results in behavior identical to the
* maximum level.
*
+ * Below are the descriptions about each builder optimization level:
+ *
+ * - Level 0: This enables the fastest compilation by disabling dynamic kernel generation and selecting the first
+ * tactic that succeeds in execution. This will also not respect a timing cache.
+ * - Level 1: Available tactics are sorted by heuristics, but only the top are tested to select the best. If a
+ * dynamic kernel is generated its compile optimization is low.
+ * - Level 2: Available tactics are sorted by heuristics, but only the fastest tactics are tested to select the
+ * best.
+ * - Level 3: Apply heuristics to see if a static precompiled kernel is applicable or if a new one has to be
+ * compiled dynamically.
+ * - Level 4: Always compiles a dynamic kernel.
+ * - Level 5: Always compiles a dynamic kernel and compares it to static kernels.
+ *
* @param level The optimization level to set to. Must be non-negative.
*
* @see getBuilderOptimizationLevel
@@ -924,13 +862,16 @@ public class IBuilderConfig extends INoCopy {
* @see setBuilderOptimizationLevel
* */
+
+ //!
//!
//!
//!
//!
public native @NoException(true) int getBuilderOptimizationLevel();
- /** \brief Set the hardware compatibility level.
+ /**
+ * \brief Set the hardware compatibility level.
*
* Hardware compatibility allows an engine to run on GPU
* architectures other than that of the GPU where the engine was
@@ -1054,5 +995,37 @@ public class IBuilderConfig extends INoCopy {
*
* @see setMaxAuxStreams()
* */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
public native @NoException(true) int getMaxAuxStreams();
+
+ /**
+ * \brief Sets the progress monitor for building a network.
+ *
+ * @param monitor The progress monitor to assign to the IBuilderConfig.
+ *
+ * The progress monitor signals to the application when different phases of
+ * the compiler are being executed. Setting to nullptr unsets the monitor so
+ * that the application is not signaled.
+ *
+ * @see IBuilderConfig::getProgressMonitor
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @NoException(true) void setProgressMonitor(IProgressMonitor monitor);
+
+ /**
+ * @return The progress monitor set by the application or nullptr.
+ *
+ * @see IBuilderConfig::setProgressMonitor
+ * */
+ public native @NoException(true) IProgressMonitor getProgressMonitor();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java
index 2511d27f8cd..48df20fabd2 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICastLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -33,9 +33,14 @@ public class ICastLayer extends ILayer {
/**
* \brief Set cast layer output type.
+ *
+ * @param toType The DataType of the output tensor.
+ *
+ * Set the output type of the cast layer.
* */
+ //!
//!
//!
public native @NoException(true) void setToType(DataType toType);
@@ -43,6 +48,9 @@ public class ICastLayer extends ILayer {
/**
* \brief Return cast layer output type.
+ *
+ * @return toType parameter set during layer creation or by setToType().
+ * The return value is the output type of the cast layer.
* */
public native @NoException(true) DataType getToType();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java
index 9c360dfbce9..886633afef0 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConcatenationLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -42,7 +42,6 @@ public class IConcatenationLayer extends ILayer {
*
* The default axis is the number of tensor dimensions minus three, or zero if the tensor has fewer than three
* dimensions. For example, for a tensor with dimensions NCHW, it is C.
- * For implicit batch mode, the number of tensor dimensions does NOT include the implicit batch dimension.
*
* When running this layer on the DLA, the concatenation axis must be the third to last axis, e.g. C if tensor
* dimensions are NCHW.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java
index 8b937eeef9a..b6932232e0f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConditionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -20,7 +20,9 @@
/**
- * This layer represents a condition input to an IIfConditional.
+ * \class IConditionLayer
+ *
+ * \brief This layer represents a condition input to an IIfConditional.
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class IConditionLayer extends IIfConditionalBoundaryLayer {
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java
index cbf4bc0aac4..126fd50d14f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConstantLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -36,9 +36,8 @@ public class IConstantLayer extends ILayer {
/**
* \brief Set the weights for the layer.
*
- * If weights.type is DataType::kINT32, the output is a tensor of 32-bit indices.
- * Otherwise the output is a tensor of real values and the output type will be
- * follow TensorRT's normal precision rules.
+ * The output type is weights.type. If the network is weakly typed and the weights have a real type,
+ * the output type might be different per TensorRT's type conversion rules.
*
* @see getWeights()
* */
@@ -75,7 +74,7 @@ public class IConstantLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
+ public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
/**
* \brief Get the dimensions for the layer.
@@ -84,5 +83,5 @@ public class IConstantLayer extends ILayer {
*
* @see getDimensions
* */
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java
index 0ba0ebd05bd..c13fb3e7337 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IConvolutionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -37,38 +37,6 @@ public class IConvolutionLayer extends ILayer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IConvolutionLayer(Pointer p) { super(p); }
- /**
- * \brief Set the HW kernel size of the convolution.
- *
- * If executing this layer on DLA, both height and width of kernel size must be in the range [1,32].
- *
- * @see getKernelSize()
- *
- * @deprecated Superseded by setKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize);
-
- /**
- * \brief Get the HW kernel size of the convolution.
- *
- * @see setKernelSize()
- *
- * @deprecated Superseded by getKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getKernelSize();
-
/**
* \brief Set the number of output maps for the convolution.
*
@@ -81,7 +49,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setNbOutputMaps(int nbOutputMaps);
+ public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps);
/**
* \brief Get the number of output maps for the convolution.
@@ -94,84 +62,10 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- //!
- //!
- public native @NoException(true) int getNbOutputMaps();
-
- /**
- * \brief Get the stride of the convolution.
- *
- * Default: (1,1)
- *
- * If executing this layer on DLA, both height and width of stride must be in the range [1,8].
- *
- * @see getStride()
- *
- * @deprecated Superseded by setStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride);
-
- /**
- * \brief Get the stride of the convolution.
- *
- * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getStride();
-
- /**
- * \brief Set the padding of the convolution.
- *
- * The input will be zero-padded by this number of elements in the height and width directions.
- * Padding is symmetric.
- *
- * Default: (0,0)
- *
- * If executing this layer on DLA, both height and width of padding must be in the range [0,31],
- * and the padding size must be less than the kernel size.
- *
- * @see getPadding()
- *
- * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding);
-
- /**
- * \brief Get the padding of the convolution. If the padding is asymmetric, the pre-padding is returned.
- *
- * @see setPadding()
- *
- * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
//!
//!
//!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getPadding();
+ public native @Cast("int64_t") @NoException(true) long getNbOutputMaps();
/**
* \brief Set the number of groups for a convolution.
@@ -193,7 +87,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setNbGroups(int nbGroups);
+ public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups);
/**
* \brief Get the number of groups of the convolution.
@@ -206,7 +100,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) int getNbGroups();
+ public native @Cast("int64_t") @NoException(true) long getNbGroups();
/**
* \brief Set the kernel weights for the convolution.
@@ -270,42 +164,6 @@ public class IConvolutionLayer extends ILayer {
//!
public native @ByVal @NoException(true) Weights getBiasWeights();
- /**
- * \brief Set the dilation for a convolution.
- *
- * Default: (1,1)
- *
- * If executing this layer on DLA, both height and width must be in the range [1,32].
- *
- * @see getDilation()
- *
- * @deprecated Superseded by setDilationNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setDilation(@ByVal DimsHW dilation);
-
- /**
- * \brief Get the dilation for a convolution.
- *
- * @see setDilation()
- *
- * @deprecated Superseded by getDilationNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getDilation();
-
/**
* \brief Set the multi-dimension pre-padding of the convolution.
*
@@ -323,7 +181,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the pre-padding.
@@ -338,7 +196,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
/**
* \brief Set the multi-dimension post-padding of the convolution.
@@ -357,7 +215,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the post-padding.
@@ -371,7 +229,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
/**
* \brief Set the padding mode.
@@ -419,7 +277,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize);
+ public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize);
/**
* \brief Get the multi-dimension kernel size of the convolution.
@@ -433,7 +291,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd();
/**
* \brief Set the multi-dimension stride of the convolution.
@@ -443,14 +301,14 @@ public class IConvolutionLayer extends ILayer {
* If executing this layer on DLA, only support 2D stride, both height and width of stride must be in the range
* [1,8].
*
- * @see getStrideNd() setStride() getStride()
+ * @see getStrideNd()
* */
//!
//!
//!
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
/**
* \brief Get the multi-dimension stride of the convolution.
@@ -465,7 +323,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
/**
* \brief Set the multi-dimension padding of the convolution.
@@ -486,7 +344,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the multi-dimension padding of the convolution.
@@ -502,7 +360,7 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
/**
* \brief Set the multi-dimension dilation of the convolution.
@@ -511,19 +369,19 @@ public class IConvolutionLayer extends ILayer {
*
* If executing this layer on DLA, only support 2D padding, both height and width must be in the range [1,32].
*
- * @see getDilation()
+ * @see getDilationNd()
* */
//!
//!
//!
- public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation);
+ public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation);
/**
* \brief Get the multi-dimension dilation of the convolution.
*
- * @see setDilation()
+ * @see setDilationNd()
* */
@@ -532,7 +390,8 @@ public class IConvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd();
+ //!
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd();
/**
* \brief Append or replace an input of this layer with a specific tensor
@@ -545,6 +404,7 @@ public class IConvolutionLayer extends ILayer {
* Input 0 is the input activation tensor.
* Input 1 is the kernel tensor. If used, the kernel weights parameter must be set to empty weights.
* Input 2 is the bias tensor. If used, the bias parameter must be set to empty weights.
+ *
* @see getKernelWeights(), setKernelWeights(), getBiasWeights(), setBiasWeights()
* */
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java
index ff1d64f7f1a..cb5c0ad5869 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ICudaEngine.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -45,145 +45,6 @@ public class ICudaEngine extends INoCopy {
}
- /**
- * \brief Get the number of binding indices.
- *
- * There are separate binding indices for each optimization profile.
- * This method returns the total over all profiles.
- * If the engine has been built for K profiles, the first getNbBindings() / K bindings are used by profile
- * number 0, the following getNbBindings() / K bindings are used by profile number 1 etc.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getNbIOTensors.
- *
- * @see getBindingIndex()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getNbBindings();
-
- /**
- * \brief Retrieve the binding index for a named tensor.
- *
- * IExecutionContext::enqueueV2() and IExecutionContext::executeV2() require an array of buffers.
- *
- * Engine bindings map from tensor names to indices in this array.
- * Binding indices are assigned at engine build time, and take values in the range [0 ... n-1] where n is the total
- * number of inputs and outputs.
- *
- * To get the binding index of the name in an optimization profile with index k > 0,
- * mangle the name by appending " [profile k]", as described for method getBindingName().
- *
- * @param name The tensor name.
- * @return The binding index for the named tensor, or -1 if the provided name does not map to an input or output
- * tensor.
- *
- * \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by name-based methods. Use them instead of binding-index
- * based methods.
- *
- * @see getNbBindings() getBindingName()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getBindingIndex(String name);
- public native @Deprecated @NoException(true) int getBindingIndex(@Cast("const char*") BytePointer name);
-
- /**
- * \brief Retrieve the name corresponding to a binding index.
- *
- * This is the reverse mapping to that provided by getBindingIndex().
- *
- * For optimization profiles with an index k > 0, the name is mangled by appending
- * " [profile k]", with k written in decimal. For example, if the tensor in the
- * INetworkDefinition had the name "foo", and bindingIndex refers to that tensor in the
- * optimization profile with index 3, getBindingName returns "foo [profile 3]".
- *
- * @param bindingIndex The binding index.
- * @return The name corresponding to the index, or nullptr if the index is out of range.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by name-based methods. Use them instead of binding-index
- * based methods.
- *
- * @see getBindingIndex()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) String getBindingName(int bindingIndex);
-
- /**
- * \brief Determine whether a binding is an input binding.
- *
- * @param bindingIndex The binding index.
- * @return True if the index corresponds to an input binding and the index is in range.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorIOMode().
- *
- * @see getTensorIOMode()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Cast("bool") @Deprecated @NoException(true) boolean bindingIsInput(int bindingIndex);
-
- /**
- * \brief Get the dimensions of a binding.
- *
- * @param bindingIndex The binding index.
- * @return The dimensions of the binding if the index is in range, otherwise Dims().
- * Has -1 for any dimension that varies within the optimization profile.
- *
- * For example, suppose an INetworkDefinition has an input with shape [-1,-1]
- * that becomes a binding b in the engine. If the associated optimization profile
- * specifies that b has minimum dimensions as [6,9] and maximum dimensions [7,9],
- * getBindingDimensions(b) returns [-1,9], despite the second dimension being
- * dynamic in the INetworkDefinition.
- *
- * Because each optimization profile has separate bindings, the returned value can
- * differ across profiles. Consider another binding b' for the same network input,
- * but for another optimization profile. If that other profile specifies minimum
- * dimensions [5,8] and maximum dimensions [5,9], getBindingDimensions(b') returns [5,-1].
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorShape().
- *
- * @see getTensorShape()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex);
-
/**
* \brief Get shape of an input or output tensor.
*
@@ -201,27 +62,8 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName);
-
- /**
- * \brief Determine the required data type for a buffer from its binding index.
- *
- * @param bindingIndex The binding index.
- * @return The type of the data in the buffer.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorDataType().
- *
- * @see getTensorDataType()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) DataType getBindingDataType(int bindingIndex);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName);
/**
* \brief Determine the required data type for a buffer from its tensor name.
@@ -239,29 +81,9 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) DataType getTensorDataType(String tensorName);
public native @NoException(true) @Cast("nvinfer1::DataType") int getTensorDataType(@Cast("const char*") BytePointer tensorName);
- /**
- * \brief Get the maximum batch size which can be used for inference. Should only be called if the engine is built
- * from an INetworkDefinition with implicit batch dimension mode.
- *
- * @return The maximum batch size for this engine.
- *
- * \warning For an engine built from an INetworkDefinition with explicit batch dimension mode, this will always
- * return 1.
- *
- * @deprecated Deprecated in TensorRT 8.4.
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getMaxBatchSize();
-
/**
* \brief Get the number of layers in the network.
*
@@ -298,31 +120,16 @@ public class ICudaEngine extends INoCopy {
public native @NoException(true) IHostMemory serialize();
/**
- * \brief Create an execution context.
- *
- * The execution context created will call setOptimizationProfile(0) implicitly if there are
- * no other execution contexts assigned to optimization profile 0. This functionality is
- * deprecated in TensorRT 8.6 and will instead default all optimization profiles to 0 starting
- * in TensorRT 9.0.
- * If an error recorder has been set for the engine, it will also be passed to the execution context.
- *
- * @see IExecutionContext.
- * @see IExecutionContext::setOptimizationProfile()
- * */
-
-
- //!
- //!
- //!
- //!
- public native @NoException(true) IExecutionContext createExecutionContext();
-
- /**
- * \brief Destroy this object;
+ * \brief Create an execution context and specify the strategy for allocating internal activation memory.
*
- * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}.
+ * The default value for the allocation strategy is ExecutionContextAllocationStrategy::kSTATIC, which means the
+ * context will pre-allocate a block of device memory that is sufficient for all profiles. The newly created
+ * execution context will be assigned optimization profile 0. If an error recorder has been set for the engine, it
+ * will also be passed to the execution context.
*
- * \warning Calling destroy on a managed pointer will result in a double-free error.
+ * @see IExecutionContext
+ * @see IExecutionContext::setOptimizationProfileAsync()
+ * @see ExecutionContextAllocationStrategy
* */
@@ -332,30 +139,11 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- public native @Deprecated @NoException(true) void destroy();
-
- /**
- * \brief Get location of binding
- *
- * This lets you know whether the binding should be a pointer to device or host memory.
- *
- * @param bindingIndex The binding index.
- * @return The location of the bound tensor with given index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorLocation().
- *
- * @see ITensor::setLocation() ITensor::getLocation()
- * @see getTensorLocation()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) TensorLocation getLocation(int bindingIndex);
+ public native @NoException(true) IExecutionContext createExecutionContext(
+ ExecutionContextAllocationStrategy strategy/*=nvinfer1::ExecutionContextAllocationStrategy::kSTATIC*/);
+ public native @NoException(true) IExecutionContext createExecutionContext();
+ public native @NoException(true) IExecutionContext createExecutionContext(
+ @Cast("nvinfer1::ExecutionContextAllocationStrategy") int strategy/*=nvinfer1::ExecutionContextAllocationStrategy::kSTATIC*/);
/**
* \brief Get whether an input or output tensor must be on GPU or CPU.
@@ -415,24 +203,30 @@ public class ICudaEngine extends INoCopy {
* \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator.
* */
+
+ //!
+ //!
//!
//!
public native @NoException(true) TensorIOMode getTensorIOMode(String tensorName);
public native @NoException(true) @Cast("nvinfer1::TensorIOMode") int getTensorIOMode(@Cast("const char*") BytePointer tensorName);
- /** \brief create an execution context without any device memory allocated
+ /**
+ * \brief create an execution context without any device memory allocated
*
* The memory for execution of this device context must be supplied by the application.
+ *
+ * @deprecated Deprecated in TensorRT 10.0. Superseded by createExecutionContext() with parameter.
* */
//!
//!
//!
- public native @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory();
+ public native @Deprecated @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory();
/**
- * \brief Return the amount of device memory required by an execution context.
+ * \brief Return the maximum device memory required by the context over all profiles.
*
* @see IExecutionContext::setDeviceMemory()
* */
@@ -444,31 +238,21 @@ public class ICudaEngine extends INoCopy {
public native @Cast("size_t") @NoException(true) long getDeviceMemorySize();
/**
- * \brief Return true if an engine can be refit.
+ * \brief Return the maximum device memory required by the context for a profile.
*
- * @see nvinfer1::createInferRefitter()
+ * @see IExecutionContext::setDeviceMemory()
* */
//!
//!
//!
- //!
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean isRefittable();
+ public native @Cast("size_t") @NoException(true) long getDeviceMemorySizeForProfile(int profileIndex);
/**
- * \brief Return the number of bytes per component of an element.
- *
- * The vector component size is returned if getBindingVectorizedDim() != -1.
- *
- * @param bindingIndex The binding Index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorBytesPerComponent().
+ * \brief Return true if an engine can be refit.
*
- * @see getBindingVectorizedDim()
- * @see getTensorBytesPerComponent()
+ * @see nvinfer1::createInferRefitter()
* */
@@ -478,7 +262,7 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- public native @Deprecated @NoException(true) int getBindingBytesPerComponent(int bindingIndex);
+ public native @Cast("bool") @NoException(true) boolean isRefittable();
/**
* \brief Return the number of bytes per component of an element, or -1 if the provided name does not map to an
@@ -531,27 +315,6 @@ public class ICudaEngine extends INoCopy {
public native @NoException(true) int getTensorBytesPerComponent(String tensorName, int profileIndex);
public native @NoException(true) int getTensorBytesPerComponent(@Cast("const char*") BytePointer tensorName, int profileIndex);
- /**
- * \brief Return the number of components included in one element.
- *
- * The number of elements in the vectors is returned if getBindingVectorizedDim() != -1.
- *
- * @param bindingIndex The binding Index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorComponentsPerElement().
- *
- * @see getBindingVectorizedDim()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getBindingComponentsPerElement(int bindingIndex);
-
/**
* \brief Return the number of components included in one element, or -1 if the provided name does not map to an
* input or output tensor.
@@ -598,27 +361,9 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) int getTensorComponentsPerElement(String tensorName, int profileIndex);
public native @NoException(true) int getTensorComponentsPerElement(@Cast("const char*") BytePointer tensorName, int profileIndex);
- /**
- * \brief Return the binding format.
- *
- * @param bindingIndex The binding Index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorFormat().
- *
- * @see getTensorFormat()
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) TensorFormat getBindingFormat(int bindingIndex);
-
/**
* \brief Return the tensor format, or TensorFormat::kLINEAR if the provided name does not map to an input or
* output tensor.
@@ -654,38 +399,9 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) TensorFormat getTensorFormat(String tensorName, int profileIndex);
public native @NoException(true) @Cast("nvinfer1::TensorFormat") int getTensorFormat(@Cast("const char*") BytePointer tensorName, int profileIndex);
- /**
- * \brief Return the human readable description of the tensor format, or nullptr if the provided name does not
- * map to an input or output tensor.
- *
- * The description includes the order, vectorization, data type, and strides.
- * Examples are shown as follows:
- * Example 1: kCHW + FP32
- * "Row major linear FP32 format"
- * Example 2: kCHW2 + FP16
- * "Two wide channel vectorized row major FP16 format"
- * Example 3: kHWC8 + FP16 + Line Stride = 32
- * "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0"
- *
- * @param bindingIndex The binding Index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorFormatDesc().
- *
- * @see getTensorFormatDesc()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) String getBindingFormatDesc(int bindingIndex);
-
/**
* \brief Return the human readable description of the tensor format, or empty string if the provided name does not
* map to an input or output tensor.
@@ -693,9 +409,9 @@ public class ICudaEngine extends INoCopy {
* The description includes the order, vectorization, data type, and strides.
* Examples are shown as follows:
* Example 1: kCHW + FP32
- * "Row major linear FP32 format"
+ * "Row-major linear FP32 format"
* Example 2: kCHW2 + FP16
- * "Two wide channel vectorized row major FP16 format"
+ * "Two-wide channel vectorized row-major FP16 format"
* Example 3: kHWC8 + FP16 + Line Stride = 32
* "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0"
*
@@ -722,9 +438,9 @@ public class ICudaEngine extends INoCopy {
* The description includes the order, vectorization, data type, and strides.
* Examples are shown as follows:
* Example 1: kCHW + FP32
- * "Row major linear FP32 format"
+ * "Row-major linear FP32 format"
* Example 2: kCHW2 + FP16
- * "Two wide channel vectorized row major FP16 format"
+ * "Two-wide channel vectorized row-major FP16 format"
* Example 3: kHWC8 + FP16 + Line Stride = 32
* "Channel major FP16 format where C % 8 == 0 and H Stride % 32 == 0"
*
@@ -740,30 +456,9 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) String getTensorFormatDesc(String tensorName, int profileIndex);
public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDesc(@Cast("const char*") BytePointer tensorName, int profileIndex);
- /**
- * \brief Return the dimension index that the buffer is vectorized, or -1 is the name is not found.
- *
- * Specifically -1 is returned if scalars per vector is 1.
- *
- * @param bindingIndex The binding Index.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getTensorVectorizedDim().
- *
- * @see getTensorVectorizedDim()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) int getBindingVectorizedDim(int bindingIndex);
-
/**
* \brief Return the dimension index that the buffer is vectorized, or -1 if the provided name does not
* map to an input or output tensor.
@@ -829,7 +524,7 @@ public class ICudaEngine extends INoCopy {
*
* @return Number of optimization profiles. It is always at least 1.
*
- * @see IExecutionContext::setOptimizationProfile() */
+ * @see IExecutionContext::setOptimizationProfileAsync() */
//!
@@ -839,53 +534,8 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
- //!
- //!
- //!
public native @NoException(true) int getNbOptimizationProfiles();
- /**
- * \brief Get the minimum / optimum / maximum dimensions for a particular input binding under an optimization
- * profile.
- *
- * @param bindingIndex The input binding index, which must belong to the given profile,
- * or be between 0 and bindingsPerProfile-1 as described below.
- *
- * @param profileIndex The profile index, which must be between 0 and getNbOptimizationProfiles()-1.
- *
- * @param select Whether to query the minimum, optimum, or maximum dimensions for this binding.
- *
- * @return The minimum / optimum / maximum dimensions for this binding in this profile.
- * If the profileIndex or bindingIndex are invalid, return Dims with nbDims=-1.
- *
- * For backwards compatibility with earlier versions of TensorRT, if the bindingIndex
- * does not belong to the current optimization profile, but is between 0 and bindingsPerProfile-1,
- * where bindingsPerProfile = getNbBindings()/getNbOptimizationProfiles,
- * then a corrected bindingIndex is used instead, computed by:
- *
- * profileIndex * bindingsPerProfile + bindingIndex % bindingsPerProfile
- *
- * Otherwise the bindingIndex is considered invalid.
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getProfileShape().
- *
- * @see getProfileShape()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions(
- int bindingIndex, int profileIndex, OptProfileSelector select);
- public native @Deprecated @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions(
- int bindingIndex, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
-
/**
* \brief Get the minimum / optimum / maximum dimensions for an input tensor given its name under an optimization
* profile.
@@ -910,107 +560,24 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
- //!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(String tensorName, int profileIndex, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(String tensorName, int profileIndex, OptProfileSelector select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
/**
- * \brief Get minimum / optimum / maximum values for an input shape binding under an optimization profile.
- *
- * @param profileIndex The profile index (must be between 0 and getNbOptimizationProfiles()-1)
+ * \brief Get the minimum / optimum / maximum values (not dimensions) for an input tensor given
+ * its name under an optimization profile. These correspond to the values set using
+ * IOptimizationProfile::setShapeValues when the engine was built.
*
- * @param inputIndex The input index (must be between 0 and getNbBindings() - 1)
- *
- * @param select Whether to query the minimum, optimum, or maximum shape values for this binding.
- *
- * @return If the binding is an input shape binding, return a pointer to an array that has
- * the same number of elements as the corresponding tensor, i.e. 1 if dims.nbDims == 0, or dims.d[0]
- * if dims.nbDims == 1, where dims = getBindingDimensions(inputIndex). The array contains
- * the elementwise minimum / optimum / maximum values for this shape binding under the profile.
- * If either of the indices is out of range, or if the binding is not an input shape binding, return
- * nullptr.
- *
- * For backwards compatibility with earlier versions of TensorRT, a bindingIndex that does not belong
- * to the profile is corrected as described for getProfileDimensions().
- *
- * @deprecated Deprecated in TensorRT 8.5. Superseded by getShapeValues(). Difference between Execution and shape
- * tensor is superficial since TensorRT 8.5.
- *
- * @see getProfileDimensions() getShapeValues()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Const @Deprecated @NoException(true) IntPointer getProfileShapeValues(
- int profileIndex, int inputIndex, OptProfileSelector select);
- public native @Const @Deprecated @NoException(true) IntBuffer getProfileShapeValues(
- int profileIndex, int inputIndex, @Cast("nvinfer1::OptProfileSelector") int select);
-
- /**
- * \brief True if tensor is required as input for shape calculations or output from them.
- *
- * TensorRT evaluates a network in two phases:
- *
- * 1. Compute shape information required to determine memory allocation requirements
- * and validate that runtime sizes make sense.
- *
- * 2. Process tensors on the device.
- *
- * Some tensors are required in phase 1. These tensors are called "shape tensors", and always
- * have type Int32 and no more than one dimension. These tensors are not always shapes
- * themselves, but might be used to calculate tensor shapes for phase 2.
- *
- * isShapeBinding(i) returns true if the tensor is a required input or an output computed in phase 1.
- * isExecutionBinding(i) returns true if the tensor is a required input or an output computed in phase 2.
- *
- * For example, if a network uses an input tensor with binding i as an addend
- * to an IElementWiseLayer that computes the "reshape dimensions" for IShuffleLayer,
- * then isShapeBinding(i) == true.
- *
- * It's possible to have a tensor be required by both phases. For instance, a tensor
- * can be used for the "reshape dimensions" and as the indices for an IGatherLayer
- * collecting floating-point data.
- *
- * It's also possible to have a tensor be required by neither phase, but nonetheless
- * shows up in the engine's inputs. For example, if an input tensor is used only
- * as an input to IShapeLayer, only its shape matters and its values are irrelevant.
- *
- * @deprecated Use name-based isShapeInferenceIO() instead to know whether a tensor is a shape tensor.
+ * @param tensorName The name of an input tensor.
*
- * @see isExecutionBinding() isShapeInferenceIO()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Cast("bool") @Deprecated @NoException(true) boolean isShapeBinding(int bindingIndex);
-
- /**
- * \brief True if pointer to tensor data is required for execution phase, false if nullptr can be supplied.
+ * @param profileIndex The profile index, which must be between 0 and getNbOptimizationProfiles()-1.
*
- * For example, if a network uses an input tensor with binding i ONLY as the "reshape dimensions"
- * input of IShuffleLayer, then isExecutionBinding(i) is false, and a nullptr can be
- * supplied for it when calling IExecutionContext::execute or IExecutionContext::enqueue.
+ * @param select Whether to query the minimum, optimum, or maximum values for this input tensor.
*
- * @deprecated No name-based equivalent replacement. Use getTensorLocation() instead to know the location of tensor
- * data. Distinction between execution binding and shape binding is superficial since TensorRT 8.5.
+ * @return The minimum / optimum / maximum values for an input tensor in this profile.
+ * If the profileIndex is invalid or the provided name does not map to an input tensor, return nullptr.
*
- * @see isShapeBinding() getTensorLocation()
+ * \warning The string tensorName must be null-terminated, and be at most 4096 bytes including the terminator.
* */
@@ -1018,26 +585,31 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- public native @Cast("bool") @Deprecated @NoException(true) boolean isExecutionBinding(int bindingIndex);
+ public native @Const @NoException(true) IntPointer getProfileTensorValues(String tensorName, int profileIndex, OptProfileSelector select);
+ public native @Const @NoException(true) IntBuffer getProfileTensorValues(@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
/**
* \brief Determine what execution capability this engine has.
*
* If the engine has EngineCapability::kSTANDARD, then all engine functionality is valid.
* If the engine has EngineCapability::kSAFETY, then only the functionality in safe engine is valid.
- * If the engine has EngineCapability::kDLA_STANDALONE, then only serialize, destroy, and const-accessor functions are
- * valid.
+ * If the engine has EngineCapability::kDLA_STANDALONE, then only serialize, destroy, and const-accessor functions
+ * are valid.
*
* @return The EngineCapability flag that the engine was built for.
* */
+
+ //!
+ //!
//!
//!
//!
//!
public native @NoException(true) EngineCapability getEngineCapability();
- /** \brief Set the ErrorRecorder for this interface
+ /**
+ * \brief Set the ErrorRecorder for this interface
*
* Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution.
* This function will call incRefCount of the registered ErrorRecorder at least once. Setting
@@ -1046,10 +618,10 @@ public class ICudaEngine extends INoCopy {
*
* If an error recorder is not set, messages will be sent to the global log stream.
*
- * @param recorder The error recorder to register with this interface. */
- //
- /** @see getErrorRecorder()
- /** */
+ * @param recorder The error recorder to register with this interface.
+ *
+ * @see getErrorRecorder()
+ * */
//!
@@ -1076,30 +648,27 @@ public class ICudaEngine extends INoCopy {
//!
//!
//!
- //!
public native @NoException(true) IErrorRecorder getErrorRecorder();
/**
* \brief Query whether the engine was built with an implicit batch dimension.
*
- * @return True if tensors have implicit batch dimension, false otherwise.
- *
- * This is an engine-wide property. Either all tensors in the engine
- * have an implicit batch dimension or none of them do.
- *
- * hasImplicitBatchDimension() is true if and only if the INetworkDefinition
- * from which this engine was built was created with createNetworkV2() without
- * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag.
+ * @return Always false since TensorRT 10.0 does not support an implicit batch dimension.
*
* @see createNetworkV2
+ *
+     * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0.
* */
+
+ //!
//!
//!
//!
- public native @Cast("bool") @NoException(true) boolean hasImplicitBatchDimension();
+ public native @Cast("bool") @Deprecated @NoException(true) boolean hasImplicitBatchDimension();
- /** \brief return the tactic sources required by this engine.
+ /**
+ * \brief return the tactic sources required by this engine.
*
* The value returned is equal to zero or more tactics sources set
* at build time via setTacticSources() in IBuilderConfig. Sources
@@ -1110,12 +679,15 @@ public class ICudaEngine extends INoCopy {
* @see IBuilderConfig::setTacticSources()
* */
+
+ //!
//!
//!
//!
public native @Cast("nvinfer1::TacticSources") @NoException(true) int getTacticSources();
- /** \brief Return the \ref ProfilingVerbosity the builder config was set to when the engine was built.
+ /**
+ * \brief Return the \ref ProfilingVerbosity the builder config was set to when the engine was built.
*
* @return the profiling verbosity the builder config was set to when the engine was built.
*
@@ -1165,12 +737,15 @@ public class ICudaEngine extends INoCopy {
* @see getNbIOTensors()
* */
+
+ //!
//!
//!
//!
public native @NoException(true) String getIOTensorName(int index);
- /** \brief Return the hardware compatibility level of this engine.
+ /**
+ * \brief Return the hardware compatibility level of this engine.
*
* @return hardwareCompatibilityLevel The level of hardware
* compatibility.
@@ -1196,5 +771,180 @@ public class ICudaEngine extends INoCopy {
*
* @see IBuilderConfig::setMaxAuxStreams(), IExecutionContext::setAuxStreams()
* */
+
+
+ //!
+ //!
+ //!
public native @NoException(true) int getNbAuxStreams();
+
+ /**
+ * \brief Create a serialization configuration object.
+ *
+ * @see ISerializationConfig
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @NoException(true) ISerializationConfig createSerializationConfig();
+
+ /**
+ * \brief Serialize the network to a stream with the provided SerializationConfig.
+ *
+ * @return An IHostMemory object that contains the serialized engine.
+ *
+ * The network may be deserialized with IRuntime::deserializeCudaEngine().
+ *
+ * @see IRuntime::deserializeCudaEngine()
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @NoException(true) IHostMemory serializeWithConfig(@ByRef ISerializationConfig config);
+
+ /**
+ * \brief Limit the maximum amount of GPU memory usable for network weights
+ * in bytes.
+ *
+ * @param gpuMemoryBudget This parameter may take on 3 types of values:
+ * -1: Allows TensorRT to choose the budget according to the streamable weights size.
+ * Free CUDA memory will be queried at ::createExecutionContext and accordingly:
+ * * If streamable weights all fit: weight streaming is not required and disabled.
+ * * Otherwise: Budget is set to getMinimumWeightStreamingBudget
+ * 0: (default) Disables weight streaming. The execution may fail if the network is too large for GPU memory.
+ * >0: The maximum bytes of GPU memory that weights can occupy. It must be bounded by
+ * [getMinimumWeightStreamingBudget, min(getStreamableWeightsSize - 1, free GPU memory)].
+ *
+ * By setting a weight limit, users can expect a GPU memory usage reduction
+ * of |network weights| - gpuMemoryBudget bytes. Maximum memory savings occur
+ * when gpuMemoryBudget is set to getMinimumWeightStreamingBudget.
+ *
+ * Streaming larger amounts of memory will likely result in lower performance
+ * except in some boundary cases where streaming weights allows the user to
+ * run larger batch sizes. The higher throughput offsets the increased
+ * latency in these cases. Tuning the value of the memory limit is
+ * recommended for best performance.
+ *
+ * \warning If weight streaming is active, then multiple concurrent IExecutionContexts will be forced to run serially.
+ *
+ * \warning GPU memory for the weights is allocated upon the first IExecutionContext's creation
+ * and deallocated upon the last one's destruction.
+ *
+ * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building.
+ *
+ * @return true if the memory limit is valid and the call was successful
+ * otherwise false.
+ *
+ * @see BuilderFlag::kWEIGHT_STREAMING,
+ * ICudaEngine::getWeightStreamingBudget
+ * ICudaEngine::getMinimumWeightStreamingBudget,
+ * ICudaEngine::getStreamableWeightsSize
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean setWeightStreamingBudget(@Cast("int64_t") long gpuMemoryBudget);
+
+ /**
+ * \brief Returns the current weight streaming device memory budget in bytes.
+ *
+ * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building.
+ *
+ * @return The weight streaming budget in bytes. Please see ::setWeightStreamingBudget for the possible
+ * values.
+ *
+ * @see BuilderFlag::kWEIGHT_STREAMING,
+ * ICudaEngine::setWeightStreamingBudget,
+ * ICudaEngine::getMinimumWeightStreamingBudget,
+ * ICudaEngine::getStreamableWeightsSize
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("int64_t") @NoException(true) long getWeightStreamingBudget();
+
+ /**
+ * \brief The minimum number of bytes of GPU memory required by network
+ * weights for successful weight streaming.
+ *
+ * This is a positive integer for engines with streamable weights because a
+ * staging buffer on the GPU is required to temporarily hold the streamed
+ * weights. The size of the staging buffer is determined by TensorRT and must
+ * be at least as large as the size of the largest streamable weight in the
+ * network.
+ *
+ * \warning BuilderFlag::kWEIGHT_STREAMING must be set during engine building.
+ *
+ *
+ * @return The minimum number of bytes of GPU memory required for streaming.
+ *
+ * @see ICudaEngine::setWeightStreamingBudget
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("int64_t") @NoException(true) long getMinimumWeightStreamingBudget();
+
+ /**
+ * \brief Get the total size in bytes of all streamable weights.
+ *
+ * The set of streamable weights is a subset of all network weights. The
+ * total size may exceed free GPU memory.
+ *
+ * Returns 0 if BuilderFlag::kWEIGHT_STREAMING is unset during engine building.
+ *
+ *
+ * @return The total size in bytes of all streamable weights.
+ *
+ * @see ICudaEngine::setWeightStreamingBudget
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("int64_t") @NoException(true) long getStreamableWeightsSize();
+
+ /**
+ * \brief Check if a tensor is marked as a debug tensor.
+ *
+ * Determine whether the given name corresponds to a debug tensor.
+ *
+ * @return True if tensor is a debug tensor, false otherwise.
+ *
+ * @see INetworkDefinition::markDebug
+ * */
+ public native @Cast("bool") @NoException(true) boolean isDebugTensor(String name);
+ public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Cast("const char*") BytePointer name);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java
new file mode 100644
index 00000000000..958d98cedf3
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDebugListener.java
@@ -0,0 +1,55 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+
+@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class IDebugListener extends IVersionedInterface {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public IDebugListener(Pointer p) { super(p); }
+
+ /**
+ * \brief Return version information associated with this interface. Applications must not override this method.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo();
+
+ /**
+ * \brief Callback function that is called when a debug tensor’s value is updated and the debug state of the tensor
+ * is set to true. Content in the given address is only guaranteed to be valid for the duration of the callback.
+ *
+ * @param location TensorLocation of the tensor.
+ * @param addr pointer to buffer.
+ * @param type data Type of the tensor.
+ * @param shape shape of the tensor.
+ * @param name name of the tensor.
+ * @param stream Cuda stream object.
+ *
+ * @return True on success, false otherwise.
+ * */
+ public native @Cast("bool") boolean processDebugTensor(@Const Pointer addr, TensorLocation location, DataType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 shape,
+ String name, CUstream_st stream);
+ public native @Cast("bool") boolean processDebugTensor(@Const Pointer addr, @Cast("nvinfer1::TensorLocation") int location, @Cast("nvinfer1::DataType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 shape,
+ @Cast("const char*") BytePointer name, CUstream_st stream);
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java
index 494de1a0f75..061a13992fd 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDeconvolutionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -32,40 +32,6 @@ public class IDeconvolutionLayer extends ILayer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IDeconvolutionLayer(Pointer p) { super(p); }
- /**
- * \brief Set the HW kernel size of the convolution.
- *
- * If executing this layer on DLA, both height and width of kernel size must be in the range [1,32], or the
- * combinations of [64, 96, 128] in one dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid,
- * but not [64x64].
- *
- * @see getKernelSize()
- *
- * @deprecated Superseded by setKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize);
-
- /**
- * \brief Get the HW kernel size of the deconvolution.
- *
- * @see setKernelSize()
- *
- * @deprecated Superseded by getKernelSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getKernelSize();
-
/**
* \brief Set the number of output feature maps for the deconvolution.
*
@@ -78,7 +44,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setNbOutputMaps(int nbOutputMaps);
+ public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps);
/**
* \brief Get the number of output feature maps for the deconvolution.
@@ -91,89 +57,10 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- //!
- public native @NoException(true) int getNbOutputMaps();
-
- /**
- * \brief Set the stride of the deconvolution.
- *
- * If executing this layer on DLA, there is one restriction:
- * 1) Stride height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one
- * dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64].
- *
- * @see getStride()
- *
- * @deprecated Superseded by setStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
//!
//!
//!
- //!
- public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride);
-
- /**
- * \brief Get the stride of the deconvolution.
- *
- * Default: (1,1)
- *
- * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getStride();
-
- /**
- * \brief Set the padding of the deconvolution.
- *
- * The output will be trimmed by this number of elements on each side in the height and width directions.
- * In other words, it resembles the inverse of a convolution layer with this padding size.
- * Padding is symmetric, and negative padding is not supported.
- *
- * Default: (0,0)
- *
- * If executing this layer on DLA, both height and width of padding must be 0.
- *
- * @see getPadding()
- *
- * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding);
-
- /**
- * \brief Get the padding of the deconvolution.
- *
- * Default: (0, 0)
- *
- * @see setPadding()
- *
- * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getPadding();
+ public native @Cast("int64_t") @NoException(true) long getNbOutputMaps();
/**
* \brief Set the number of groups for a deconvolution.
@@ -195,7 +82,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setNbGroups(int nbGroups);
+ public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups);
/**
* \brief Get the number of groups for a deconvolution.
@@ -208,7 +95,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) int getNbGroups();
+ public native @Cast("int64_t") @NoException(true) long getNbGroups();
/**
* \brief Set the kernel weights for the deconvolution.
@@ -290,7 +177,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the pre-padding.
@@ -305,7 +192,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
/**
* \brief Set the multi-dimension post-padding of the deconvolution.
@@ -325,7 +212,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the padding.
@@ -339,7 +226,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
/**
* \brief Set the padding mode.
@@ -382,14 +269,14 @@ public class IDeconvolutionLayer extends ILayer {
* 2) Kernel height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one
* dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64].
*
- * @see getKernelSizeNd() setKernelSize() getKernelSize()
+ * @see getKernelSizeNd()
* */
//!
//!
//!
- public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize);
+ public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize);
/**
* \brief Get the multi-dimension kernel size of the deconvolution.
@@ -403,7 +290,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd();
/**
* \brief Set the multi-dimension stride of the deconvolution.
@@ -415,14 +302,14 @@ public class IDeconvolutionLayer extends ILayer {
* 2) Stride height and width must be in the range [1,32] or the combinations of [64, 96, 128] in one
* dimension and 1 in the other dimensions, i.e. [1x64] or [64x1] are valid, but not [64x64].
*
- * @see getStrideNd() setStride() getStride()
+ * @see getStrideNd()
* */
//!
//!
//!
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
/**
* \brief Get the multi-dimension stride of the deconvolution.
@@ -437,7 +324,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
/**
* \brief Set the multi-dimension padding of the deconvolution.
@@ -458,7 +345,7 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the multi-dimension padding of the deconvolution.
@@ -473,7 +360,8 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
+ //!
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
/**
* \brief Append or replace an input of this layer with a specific tensor
@@ -484,14 +372,18 @@ public class IDeconvolutionLayer extends ILayer {
* Input 0 is the input activation tensor.
* Input 1 is the kernel tensor. If used, the kernel weights parameter must be set to empty weights.
* Input 2 is the bias tensor. If used, the bias parameter must be set to empty weights.
+ *
* @see getKernelWeights(), setKernelWeights(), getBiasWeights(), setBiasWeights()
* */
+
+ //!
//!
//!
//!
- /** \brief Set the multi-dimension dilation of the deconvolution.
+ /**
+ * \brief Set the multi-dimension dilation of the deconvolution.
*
* Default: (1, 1, ..., 1)
*
@@ -502,12 +394,12 @@ public class IDeconvolutionLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation);
+ public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation);
/**
* \brief Get the multi-dimension dilation of the deconvolution.
*
* @see setDilationNd()
* */
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java
index c2497fbb5d3..736b5219365 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDequantizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -24,29 +24,35 @@
*
* \brief A Dequantize layer in a network definition.
*
- * This layer accepts a signed 8-bit integer input tensor, and uses the configured scale and zeroPt inputs to
+ * This layer accepts a quantized type input tensor, and uses the configured scale and zeroPt inputs to
* dequantize the input according to:
* \p output = (\p input - \p zeroPt) * \p scale
*
* The first input (index 0) is the tensor to be quantized.
* The second (index 1) and third (index 2) are the scale and zero point respectively.
- * Each of \p scale and \p zeroPt must be either a scalar, or a 1D tensor.
+ * \p scale and \p zeroPt should have identical dimensions, and rank lower or equal to 2.
*
- * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be
- * DataType::kINT8. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is
+ * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be identical to
+ * the input's data type. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is
* supported.
- * The \p scale value must be either a scalar for per-tensor quantization, or a 1D tensor for per-channel
- * quantization. All \p scale coefficients must have positive values. The size of the 1-D \p scale tensor must match
- * the size of the quantization axis. The size of the \p scale must match the size of the \p zeroPt.
+ * The \p scale value must be either a scalar for per-tensor quantization, a 1-D tensor for per-channel quantization,
+ * or a 2-D tensor for block quantization (supported for DataType::kINT4 only). All \p scale coefficients must have
+ * positive values. The size of the 1-D \p scale tensor must match the size of the quantization axis. For block
+ * quantization, the shape of \p scale tensor must match the shape of the input, except for one dimension in which
+ * blocking occurs. The size of \p zeroPt must match the size of \p scale.
*
* The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply
* to the \p zeroPt.
- * The output type, if constrained, must be constrained to DataType::kFLOAT or DataType::kHALF. The input type, if
- * constrained, must be constrained to DataType::kINT8. The output size is the same as the input size. The quantization
- * axis is in reference to the input tensor's dimensions.
+ * The output type, if constrained, must be constrained to DataType::kFLOAT, DataType::kHALF, or DataType::kBF16. The
+ * input type, if constrained, must be constrained to DataType::kINT8, DataType::kFP8 or DataType::kINT4. The output
+ * size is the same as the input size. The quantization axis is in reference to the input tensor's dimensions.
*
- * IDequantizeLayer only supports DataType::kINT8 precision and will default to this precision during instantiation.
- * IDequantizeLayer only supports DataType::kFLOAT or DataType::kHALF output.
+ * IDequantizeLayer supports DataType::kINT8, DataType::kFP8 or DataType::kINT4 precision and will default to
+ * DataType::kINT8 precision during instantiation. For strongly typed networks, \p input data type must be same as
+ * \p zeroPt data type.
+ *
+ * IDequantizeLayer supports DataType::kFLOAT, DataType::kHALF, or DataType::kBF16 output. For strongly typed
+ * networks, \p output data type is inferred from \p scale data type.
*
* As an example of the operation of this layer, imagine a 4D NCHW activation input which can be quantized using a
* single scale coefficient (referred to as per-tensor quantization):
@@ -66,11 +72,21 @@
* For each s in S:
* output[k,c,r,s] = (\p input[k,c,r,s] - \p zeroPt[k]) * \p scale[k]
*
+ * Block dequantization is supported only for 2-D input tensors with DataType::kINT4 that are rooted at an
+ * IConstantLayer (i.e. weights). As an example of blocked operation, imagine a 2-D RS weights input with R
+ * (dimension 0) as the blocking axis and B as the block size. The scale is a 2-D array of coefficients, with
+ * dimensions (R//B, S).
+ * For each r in R:
+ * For each s in S:
+ * output[r,s] = (\p input[r,s] - \p zeroPt[r//B, s]) * \p scale[r//B, s]
+ *
* \note Only symmetric quantization is supported.
* \note Currently the only allowed build-time constant \p scale and \p zeroPt subgraphs are:
* 1. Constant -> Quantize
* 2. Constant -> Cast -> Quantize
*
+ * \note The input tensor for this layer must not be a scalar.
+ *
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
@@ -99,5 +115,40 @@ public class IDequantizeLayer extends ILayer {
* The axis must be a valid axis if the scale tensor has more than one coefficient.
* The axis value will be ignored if the scale tensor has exactly one coefficient (per-tensor quantization).
* */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
public native @NoException(true) void setAxis(int axis);
+
+ /**
+ * \brief Set the Dequantize layer output type.
+ *
+ * @param toType The DataType of the output tensor.
+ *
+ * Set the output type of the dequantize layer. Valid values are DataType::kFLOAT and DataType::kHALF.
+ * If the network is strongly typed, setToType must be used to set the output type, and use of setOutputType
+ * is an error. Otherwise, types passed to setOutputType and setToType must be the same.
+ *
+ * @see NetworkDefinitionCreationFlag::kSTRONGLY_TYPED
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @NoException(true) void setToType(DataType toType);
+ public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType);
+
+ /**
+ * \brief Return the Dequantize layer output type.
+ *
+ * @return toType parameter set during layer creation or by setToType().
+ * The return value is the output type of the dequantize layer.
+ * The default value is DataType::kFLOAT.
+ * */
+ public native @NoException(true) DataType getToType();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java
index 25c168a2ae6..e96b8a5d0b9 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IDimensionExpr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -22,14 +22,14 @@
/**
* \class IDimensionExpr
*
- * An IDimensionExpr represents an integer expression constructed from constants,
+ * \brief An IDimensionExpr represents an integer expression constructed from constants,
* input dimensions, and binary operations. These expressions are can be used
- * in overrides of IPluginV2DynamicExt::getOutputDimensions to define output
+ * in overrides of IPluginV2DynamicExt::getOutputDimensions or IPluginV3OneBuild::getOutputShapes() to define output
* dimensions in terms of input dimensions.
*
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
*
- * @see DimensionOperation, IPluginV2DynamicExt::getOutputDimensions
+ * @see DimensionOperation, IPluginV2DynamicExt::getOutputDimensions, IPluginV3OneBuild::getOutputShapes()
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class IDimensionExpr extends INoCopy {
@@ -37,10 +37,27 @@ public class IDimensionExpr extends INoCopy {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IDimensionExpr(Pointer p) { super(p); }
- /** Return true if expression is a build-time constant. */
+ /**
+ * \brief Return true if expression is a build-time constant.
+ * */
+
+
+ //!
+ //!
+ //!
public native @Cast("bool") @NoException(true) boolean isConstant();
- /** If isConstant(), returns value of the constant.
- * If !isConstant(), return std::numeric_limits
+ * \note To serialize arbitrary plugin data, use a PluginField of
+ * PluginFieldType::kUNKNOWN, with the length of the PluginField set to the correct number of bytes.
+ * */
+ public native @Const @NoException(true) PluginFieldCollection getFieldsToSerialize();
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java
index d36ceacc6ca..0a8466648d4 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IPoolingLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -62,116 +62,8 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- //!
public native @NoException(true) PoolingType getPoolingType();
- /**
- * \brief Set the window size for pooling.
- *
- * If executing this layer on DLA, both height and width of window size must be in the range [1,8].
- *
- * @see getWindowSize()
- *
- * @deprecated Superseded by setWindowSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setWindowSize(@ByVal DimsHW windowSize);
-
- /**
- * \brief Get the window size for pooling.
- *
- * @see setWindowSize()
- *
- * @deprecated Superseded by getWindowSizeNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getWindowSize();
-
- /**
- * \brief Set the stride for pooling.
- *
- * Default: 1
- *
- * If executing this layer on DLA, both height and width of stride must be in the range [1,16].
- *
- * @see getStride()
- *
- * @deprecated Superseded by setStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setStride(@ByVal DimsHW stride);
-
- /**
- * \brief Get the stride for pooling.
- *
- * @see setStride()
- *
- * @deprecated Superseded by getStrideNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getStride();
-
- /**
- * \brief Set the padding for pooling.
- *
- * Default: 0
- *
- * If executing this layer on DLA, both height and width of padding must be in the range [0,7].
- *
- * @see getPadding()
- *
- * @deprecated Superseded by setPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setPadding(@ByVal DimsHW padding);
-
- /**
- * \brief Get the padding for pooling.
- *
- * Default: 0
- *
- * @see setPadding()
- *
- * @deprecated Superseded by getPaddingNd. Deprecated prior to TensorRT 8.0 and will be removed in 9.0
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @ByVal @NoException(true) DimsHW getPadding();
-
/**
* \brief Set the blending factor for the max_average_blend mode:
* max_average_blendPool = (1-blendFactor)*maxPool + blendFactor*avgPool
@@ -203,7 +95,6 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- //!
public native @NoException(true) float getBlendFactor();
/**
@@ -213,9 +104,6 @@ public class IPoolingLayer extends ILayer {
*
* Default: true
*
- * \note On Xavier, DLA supports only inclusive padding and this must be explicitly
- * set to false.
- *
* @see getAverageCountExcludesPadding()
* */
@@ -259,7 +147,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the pre-padding.
@@ -274,7 +162,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
/**
* \brief Set the multi-dimension post-padding for pooling.
@@ -294,7 +182,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the padding.
@@ -307,7 +195,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
/**
* \brief Set the padding mode.
@@ -352,7 +240,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setWindowSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize);
+ public native @NoException(true) void setWindowSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize);
/**
* \brief Get the multi-dimension window size for pooling.
@@ -366,7 +254,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getWindowSizeNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getWindowSizeNd();
/**
* \brief Set the multi-dimension stride for pooling.
@@ -376,14 +264,14 @@ public class IPoolingLayer extends ILayer {
* If executing this layer on DLA, only support 2D stride, both height and width of stride must be in the range
* [1,16].
*
- * @see getStrideNd() setStride() getStride()
+ * @see getStrideNd()
* */
//!
//!
//!
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
/**
* \brief Get the multi-dimension stride for pooling.
@@ -398,7 +286,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
/**
* \brief Set the multi-dimension padding for pooling.
@@ -420,7 +308,7 @@ public class IPoolingLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
/**
* \brief Get the multi-dimension padding for pooling.
@@ -429,5 +317,5 @@ public class IPoolingLayer extends ILayer {
*
* @see setPaddingNd()
* */
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java
index 9b6dc222c8d..14b7239ea25 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProfiler.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -18,35 +18,11 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
-
-/**
- * \class IProfiler
- *
- * \brief Application-implemented interface for profiling.
- *
- * When this class is added to an execution context, the profiler will be called once per layer for each invocation of
- * executeV2()/enqueueV2()/enqueueV3().
- *
- * It is not recommended to run inference with profiler enabled when the inference execution time is critical since the
- * profiler may affect execution time negatively.
- * */
-@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class IProfiler extends Pointer {
static { Loader.load(); }
- /** Default native constructor. */
- public IProfiler() { super((Pointer)null); allocate(); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public IProfiler(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public IProfiler(Pointer p) { super(p); }
- private native void allocate();
- private native void allocateArray(long size);
- @Override public IProfiler position(long position) {
- return (IProfiler)super.position(position);
- }
- @Override public IProfiler getPointer(long i) {
- return new IProfiler((Pointer)this).offsetAddress(i);
- }
/**
* \brief Layer time reporting callback.
@@ -55,5 +31,6 @@ public class IProfiler extends Pointer {
* with profiling verbosity set to kNONE, the layerName is the decimal index of the layer.
* @param ms The time in milliseconds to execute the layer.
* */
- @Virtual(true) public native @NoException(true) void reportLayerTime(String layerName, float ms);
+ public native @NoException(true) void reportLayerTime(String layerName, float ms);
+ public native @NoException(true) void reportLayerTime(@Cast("const char*") BytePointer layerName, float ms);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java
new file mode 100644
index 00000000000..e331660e000
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IProgressMonitor.java
@@ -0,0 +1,106 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+ // namespace impl
+@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class IProgressMonitor extends IVersionedInterface {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public IProgressMonitor(Pointer p) { super(p); }
+
+
+ /**
+ * \brief Return version information associated with this interface. Applications must not override this method.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo();
+
+ /**
+ * \brief Signal that a phase of the optimizer has started.
+ *
+ * @param phaseName The name of this phase for tracking purposes.
+ * @param parentPhase The parent phase that this phase belongs to, or nullptr if there is no parent.
+ * @param nbSteps The number of steps that are involved in this phase.
+ *
+ * The phaseStart function signals to the application that the current phase is beginning, and that it has a
+ * certain number of steps to perform. If \p phaseParent is nullptr, then the phaseStart is beginning an
+ * independent phase, and if \p phaseParent is specified, then the current phase, specified by \p phaseName, is
+ * within the scope of the parent phase. \p nbSteps will always be a positive number. The phaseStart function
+ * implies that the first step is being executed. TensorRT will signal when each step is complete.
+ *
+ * Phase names are human readable English strings which are unique within a single phase hierarchy but which can be
+ * reused once the previous instance has completed. Phase names and their hierarchies may change between versions
+ * of TensorRT.
+ *
+ * @see phaseFinish
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @NoException(true) void phaseStart(String phaseName, String parentPhase, int nbSteps);
+ public native @NoException(true) void phaseStart(@Cast("const char*") BytePointer phaseName, @Cast("const char*") BytePointer parentPhase, int nbSteps);
+
+ /**
+ * \brief Signal that a step of an optimizer phase has finished.
+ *
+ * @param phaseName The name of the innermost phase being executed.
+ * @param step The step number that was completed.
+ *
+ * The stepComplete function signals to the application that TensorRT has finished the current \p step for the
+ * phase \p phaseName, and will move onto the next step if there is one. The application can return false for
+ * TensorRT to exit the build early. The step value will increase on subsequent calls in the range [0, nbSteps).
+ *
+ * @return true to continue to the next step or false to stop the build.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean stepComplete(String phaseName, int step);
+ public native @Cast("bool") @NoException(true) boolean stepComplete(@Cast("const char*") BytePointer phaseName, int step);
+
+ /**
+ * \brief Signal that a phase of the optimizer has finished.
+ *
+ * @param phaseName The name of the phase that has finished.
+ *
+ * The phaseFinish function signals to the application that the phase is complete. This function may be called
+ * before all steps in the range [0, nbSteps) have been reported to stepComplete. This scenario can be triggered by
+ * error handling, internal optimizations, or when stepComplete returns false to request cancellation of the build.
+ *
+ * @see phaseStart
+ * */
+ public native @NoException(true) void phaseFinish(String phaseName);
+ public native @NoException(true) void phaseFinish(@Cast("const char*") BytePointer phaseName);
+
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java
index 144d4107728..bd93c7ca2db 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IQuantizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,32 +25,39 @@
* \brief A Quantize layer in a network definition.
*
* This layer accepts a floating-point data input tensor, and uses the scale and zeroPt inputs to
- * quantize the data to an 8-bit signed integer according to:
+ * quantize the data according to:
* \p output = clamp(round(\p input / \p scale) + \p zeroPt)
*
* Rounding type is rounding-to-nearest ties-to-even (https://en.wikipedia.org/wiki/Rounding#Round_half_to_even).
- * Clamping is in the range [-128, 127].
+ * Clamping range according to data type:
+ * - FP8: [-448, 448]
+ * - INT4: [-8, 7]
+ * - INT8: [-128, 127]
*
* The first input (index 0) is the tensor to be quantized.
* The second (index 1) and third (index 2) are the scale and zero point respectively.
- * Each of \p scale and \p zeroPt must be either a scalar, or a 1D tensor.
+ * \p scale and \p zeroPt should have identical dimensions, and rank lower or equal to 2.
*
- * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must be
- * DataType::kINT8. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is
+ * The \p zeroPt tensor is optional, and if not set, will be assumed to be zero. Its data type must match the
+ * output data type. \p zeroPt must only contain zero-valued coefficients, because only symmetric quantization is
* supported.
- * The \p scale value must be either a scalar for per-tensor quantization, or a 1D tensor for per-channel
- * quantization. All \p scale coefficients must have positive values. The size of the 1-D \p scale tensor must match
- * the size of the quantization axis. The size of the \p scale must match the size of the \p zeroPt.
+ * The \p scale value must be a scalar for per-tensor quantization, a 1-D tensor for per-channel quantization, or a
+ * 2-D tensor for block quantization (supported for DataType::kINT4 only). All \p scale coefficients must have
+ * positive values. The size of the 1-D \p scale tensor must match the size of the quantization axis. For block
+ * quantization, the shape of \p scale tensor must match the shape of the input, except for one dimension in which
+ * blocking occurs. The size of \p zeroPt must match the size of \p scale.
*
- * The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply
+ * The subgraph which terminates with the \p scale tensor must be a build-time constant. The same restrictions apply
* to the \p zeroPt.
- * The output type, if constrained, must be constrained to DataType::kINT8. The input type, if constrained, must be
- * constrained to DataType::kFLOAT or DataType::kHALF.
- * The output size is the same as the input size. The quantization axis is in reference to the input tensor's
- * dimensions.
+ * The output type, if constrained, must be constrained to DataType::kINT8, DataType::kFP8 or DataType::kINT4. The
+ * input type, if constrained, must be constrained to DataType::kFLOAT, DataType::kHALF, or DataType::kBF16. The
+ * output size is the same as the input size. The quantization axis is in reference to the input tensor's dimensions.
*
- * IQuantizeLayer only supports DataType::kFLOAT precision and will default to this precision during instantiation.
- * IQuantizeLayer only supports DataType::kINT8 output.
+ * IQuantizeLayer supports DataType::kFLOAT, DataType::kHALF, or DataType::kBF16 precision and will default to
+ * DataType::kFLOAT precision during instantiation. For strongly typed networks, \p input data type must match the
+ * \p scale data type.
+ *
+ * IQuantizeLayer supports DataType::kINT8, DataType::kFP8, or DataType::kINT4 output.
*
* As an example of the operation of this layer, imagine a 4D NCHW activation input which can be quantized using a
* single scale coefficient (referred to as per-tensor quantization):
@@ -69,11 +76,20 @@
* For each s in S:
* output[k,c,r,s] = clamp(round(\p input[k,c,r,s] / \p scale[k]) + \p zeroPt[k])
*
+ * Block quantization is supported only for 2-D weight inputs of DataType::kINT4. As an example of blocked
+ * operation, imagine a 2-D RS weights input, R (dimension 0) as the blocking axis and B as the block size.
+ * The scale is a 2D array of coefficients, with dimensions (R//B, S).
+ * For each r in R:
+ * For each s in S:
+ * output[r,s] = clamp(round(\p input[r,s] / \p scale[r//B, s]) + \p zeroPt[r//B, s])
+ *
* \note Only symmetric quantization is supported.
* \note Currently the only allowed build-time constant \p scale and \p zeroPt subgraphs are:
* 1. Constant -> Quantize
* 2. Constant -> Cast -> Quantize
*
+ * \note The input tensor for this layer must not be a scalar.
+ *
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
@@ -102,5 +118,40 @@ public class IQuantizeLayer extends ILayer {
* The axis must be a valid axis if the scale tensor has more than one coefficient.
* The axis value will be ignored if the scale tensor has exactly one coefficient (per-tensor quantization).
* */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
public native @NoException(true) void setAxis(int axis);
+
+ /**
+ * \brief Set the Quantize layer output type.
+ *
+ * @param toType The DataType of the output tensor.
+ *
+ * Set the output type of the quantize layer. Valid values are DataType::kINT8 and DataType::kFP8.
+ * If the network is strongly typed, setToType must be used to set the output type, and use of setOutputType
+ * is an error. Otherwise, types passed to setOutputType and setToType must be the same.
+ *
+ * @see NetworkDefinitionCreationFlag::kSTRONGLY_TYPED
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @NoException(true) void setToType(DataType toType);
+ public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType);
+
+ /**
+ * \brief Return the Quantize layer output type.
+ *
+ * @return toType parameter set during layer creation or by setToType().
+ * The return value is the output type of the quantize layer.
+ * The default value is DataType::kINT8.
+ * */
+ public native @NoException(true) DataType getToType();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java
deleted file mode 100644
index 529b7a09bbf..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRNNv2Layer.java
+++ /dev/null
@@ -1,358 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvinfer;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-
-
-/**
- * \class IRNNv2Layer
- *
- * \brief An RNN layer in a network definition, version 2.
- *
- * This layer supersedes IRNNLayer.
- *
- * @deprecated Deprecated prior to TensorRT 8.0 and will be removed in 9.0. Superseded by
- * INetworkDefinition::addLoop().
- *
- * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
- * */
-@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class IRNNv2Layer extends ILayer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public IRNNv2Layer(Pointer p) { super(p); }
-
- /** Get the layer count of the RNN. */
- public native @NoException(true) int getLayerCount();
- /** Get the hidden size of the RNN. */
- public native @NoException(true) int getHiddenSize();
- /** Get the maximum sequence length of the RNN. */
- public native @NoException(true) int getMaxSeqLength();
- /** Get the embedding length of the RNN.
-
- //!
- //!
- //!
- //!
- //!
- //! */
- public native @NoException(true) int getDataLength();
-
- /**
- * \brief Specify individual sequence lengths in the batch with the ITensor pointed to by
- * \p seqLengths.
- *
- * The \p seqLengths ITensor should be a {N1, ..., Np} tensor, where N1..Np are the index dimensions
- * of the input tensor to the RNN.
- *
- * If this is not specified, then the RNN layer assumes all sequences are size getMaxSeqLength().
- *
- * All sequence lengths in \p seqLengths should be in the range [1, getMaxSeqLength()]. Zero-length
- * sequences are not supported.
- *
- * This tensor must be of type DataType::kINT32.
- * */
-
-
- //!
- //!
- //!
- //!
- public native @NoException(true) void setSequenceLengths(@ByRef ITensor seqLengths);
-
- /**
- * \brief Get the sequence lengths specified for the RNN.
- *
- * @return nullptr if no sequence lengths were specified, the sequence length data otherwise.
- *
- * @see setSequenceLengths()
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) ITensor getSequenceLengths();
-
- /**
- * \brief Set the operation of the RNN layer.
- *
- * @see getOperation(), RNNOperation
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setOperation(RNNOperation op);
- public native @NoException(true) void setOperation(@Cast("nvinfer1::RNNOperation") int op);
-
- /**
- * \brief Get the operation of the RNN layer.
- *
- * @see setOperation(), RNNOperation
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) RNNOperation getOperation();
-
- /**
- * \brief Set the input mode of the RNN layer.
- *
- * @see getInputMode(), RNNInputMode
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setInputMode(RNNInputMode op);
- public native @NoException(true) void setInputMode(@Cast("nvinfer1::RNNInputMode") int op);
-
- /**
- * \brief Get the input mode of the RNN layer.
- *
- * @see setInputMode(), RNNInputMode
- * */
-
-
- //!
- //!
- //!
- //!
- public native @NoException(true) RNNInputMode getInputMode();
-
- /**
- * \brief Set the direction of the RNN layer.
- *
- * The direction determines if the RNN is run as a unidirectional(left to right) or
- * bidirectional(left to right and right to left).
- * In the RNNDirection::kBIDIRECTION case the output is concatenated together, resulting
- * in output size of 2x getHiddenSize().
- *
- * @see getDirection(), RNNDirection
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setDirection(RNNDirection op);
- public native @NoException(true) void setDirection(@Cast("nvinfer1::RNNDirection") int op);
-
- /**
- * \brief Get the direction of the RNN layer.
- *
- * @see setDirection(), RNNDirection
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- //!
- public native @NoException(true) RNNDirection getDirection();
-
- /**
- * \brief Set the weight parameters for an individual gate in the RNN.
- *
- * The DataType for this structure must be DataType::kFLOAT or DataType::kHALF, and must be the same
- * datatype as the input tensor.
- *
- * Each parameter matrix is row-major in memory, and has the following dimensions:
- *
- * ~~~
- * Let K := { ::kUNIDIRECTION => 1
- * { ::kBIDIRECTION => 2
- * l := layer index (as described above)
- * H := getHiddenSize()
- * E := getDataLength() (the embedding length)
- * isW := true if the matrix is an input (W) matrix, and false if
- * the matrix is a recurrent input (R) matrix.
- *
- * if isW:
- * if l < K and ::kSKIP:
- * (numRows, numCols) := (0, 0) # input matrix is skipped
- * elif l < K and ::kLINEAR:
- * (numRows, numCols) := (H, E) # input matrix acts on input data size E
- * elif l >= K:
- * (numRows, numCols) := (H, K * H) # input matrix acts on previous hidden state
- * else: # not isW
- * (numRows, numCols) := (H, H)
- * ~~~
- *
- * In other words, the input weights of the first layer of the RNN (if
- * not skipped) transform a {@code getDataLength()}-size column
- * vector into a {@code getHiddenSize()}-size column vector. The input
- * weights of subsequent layers transform a {@code K*getHiddenSize()}-size
- * column vector into a {@code getHiddenSize()}-size column vector. {@code K=2} in
- * the bidirectional case to account for the full hidden state being
- * the concatenation of the forward and backward RNN hidden states.
- *
- * The recurrent weight matrices for all layers all have shape {@code (H, H)},
- * both in the unidirectional and bidirectional cases. (In the
- * bidirectional case, each recurrent weight matrix for the (forward or
- * backward) RNN cell operates on the previous (forward or
- * backward) RNN cell's hidden state, which is size {@code H}).
- *
- * @param layerIndex The index of the layer that contains this gate.
- * @param gate The name of the gate within the RNN layer. The gate name must correspond
- * to one of the gates used by this layer's #RNNOperation.
- * @param isW True if the weight parameters are for the input matrix W[g]
- * and false if they are for the recurrent input matrix R[g]. See
- * #RNNOperation for equations showing how these matrices are used
- * in the RNN gate.
- * @param weights The weight structure holding the weight parameters, which are stored
- * as a row-major 2D matrix. See See \ref setWeightsForGate() for documentation on the expected
- * dimensions of this matrix.
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights weights);
- public native @NoException(true) void setWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights weights);
-
- /**
- * \brief Get the weight parameters for an individual gate in the RNN.
- *
- * @see setWeightsForGate()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW);
- public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW);
-
- /**
- * \brief Set the bias parameters for an individual gate in the RNN.
- *
- * The DataType for this structure must be DataType::kFLOAT or DataType::kHALF, and must be the same
- * datatype as the input tensor.
- *
- * Each bias vector has a fixed size, getHiddenSize().
- *
- * @param layerIndex The index of the layer that contains this gate. See \ref setWeightsForGate()
- * for a description of the layer index.
- * @param gate The name of the gate within the RNN layer. The gate name must correspond
- * to one of the gates used by this layer's #RNNOperation.
- * @param isW True if the bias parameters are for the input bias Wb[g]
- * and false if they are for the recurrent input bias Rb[g]. See
- * #RNNOperation for equations showing how these bias vectors are used
- * in the RNN gate.
- * @param bias The weight structure holding the bias parameters, which should be an
- * array of size getHiddenSize().
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights bias);
- public native @NoException(true) void setBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights bias);
-
- /**
- * \brief Get the bias parameters for an individual gate in the RNN.
- *
- * @see setBiasForGate()
- * */
-
-
- //!
- //!
- //!
- //!
- public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW);
- public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW);
-
- /**
- * \brief Set the initial hidden state of the RNN with the provided \p hidden ITensor.
- *
- * The \p hidden ITensor should have the dimensions {@code {N1, ..., Np, L, H}}, where:
- *
- * - {@code N1..Np} are the index dimensions specified by the input tensor
- * - {@code L} is the number of layers in the RNN, equal to getLayerCount() if getDirection is
- * RNNDirection::kUNIDIRECTION,
- * and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional
- * case, layer {@code l}'s final forward hidden state is stored in {@code L = 2*l}, and
- * final backward hidden state is stored in {@code L= 2*l + 1}.
- * - {@code H} is the hidden state for each layer, equal to getHiddenSize().
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setHiddenState(@ByRef ITensor hidden);
-
- /**
- * \brief Get the initial hidden state of the RNN.
- *
- * @see setHiddenState()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @NoException(true) ITensor getHiddenState();
-
- /**
- * \brief Set the initial cell state of the LSTM with the provided \p cell ITensor.
- *
- * The \p cell ITensor should have the dimensions {@code {N1, ..., Np, L, H}}, where:
- *
- * - {@code N1..Np} are the index dimensions specified by the input tensor
- * - {@code L} is the number of layers in the RNN, equal to getLayerCount() if getDirection is
- * RNNDirection::kUNIDIRECTION,
- * and 2x getLayerCount() if getDirection is RNNDirection::kBIDIRECTION. In the bi-directional
- * case, layer {@code l}'s final forward hidden state is stored in {@code L = 2*l}, and
- * final backward hidden state is stored in {@code L= 2*l + 1}.
- * - {@code H} is the hidden state for each layer, equal to getHiddenSize().
- *
- * It is an error to call setCellState() on an RNN layer that is not configured with RNNOperation::kLSTM.
- * */
-
-
- //!
- //!
- //!
- public native @NoException(true) void setCellState(@ByRef ITensor cell);
-
- /**
- * \brief Get the initial cell state of the RNN.
- *
- * @see setCellState()
- * */
- public native @NoException(true) ITensor getCellState();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java
index 104a2b68c41..fb4213576b9 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRaggedSoftMaxLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java
index 998a7be0619..d0b8fb5e8b3 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRecurrenceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,7 +19,13 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
-
+/**
+ * \class IRecurrenceLayer
+ *
+ * \brief A recurrence layer in a network definition.
+ *
+ * The recurrence layer allows a loop iteration to compute a result from a value computed in the previous iteration.
+ * */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class IRecurrenceLayer extends ILoopBoundaryLayer {
static { Loader.load(); }
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java
index 015a91a418a..dd431628f86 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReduceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java
index e589ac8ddeb..fe27d73aee5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRefitter.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -52,9 +52,11 @@ public class IRefitter extends INoCopy {
*
* * There is no such layer by that name.
* * The layer does not have weights with the specified role.
- * * The number of weights is inconsistent with the layer’s original specification.
+ * * The count of weights is inconsistent with the layer’s original specification.
+ * * The type of weights is inconsistent with the layer’s original specification.
*
- * Modifying the weights before method refit() completes will result in undefined behavior.
+ * Modifying the weights before method refitCudaEngine or refitCudaEngineAsync returns will result in undefined
+ * behavior.
*
* \warning The string layerName must be null-terminated, and be at most 4096 bytes including the terminator.
* */
@@ -69,14 +71,16 @@ public class IRefitter extends INoCopy {
public native @Cast("bool") @NoException(true) boolean setWeights(@Cast("const char*") BytePointer layerName, @Cast("nvinfer1::WeightsRole") int role, @ByVal Weights weights);
/**
- * \brief Updates associated engine. Return true if successful.
+ * \brief Refits associated engine.
*
- * Failure occurs if getMissing() != 0 before the call.
+ * @return True on success, or false if new weights validation fails or getMissingWeights() != 0 before the call.
+ * If false is returned, a subset of weights may have been refitted.
*
* The behavior is undefined if the engine has pending enqueued work.
+ * Provided weights on CPU or GPU can be unset and released, or updated after refitCudaEngine returns.
*
- * Extant IExecutionContexts associated with the engine should not be used afterwards.
- * Instead, create new IExecutionContexts after refitting.
+ * IExecutionContexts associated with the engine remain valid for use afterwards. There is no need to set the same
+ * weights repeatedly for multiple refit calls as the weights memory can be updated directly instead.
* */
@@ -130,6 +134,9 @@ public class IRefitter extends INoCopy {
* */
+ //!
+ //!
+ //!
//!
//!
//!
@@ -138,21 +145,6 @@ public class IRefitter extends INoCopy {
public native @NoException(true) int getAll(int size, @Cast("const char**") @ByPtrPtr ByteBuffer layerNames, @Cast("nvinfer1::WeightsRole*") IntBuffer roles);
public native @NoException(true) int getAll(int size, @Cast("const char**") @ByPtrPtr byte[] layerNames, @Cast("nvinfer1::WeightsRole*") int[] roles);
- /**
- * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void destroy();
-
/**
* Update dynamic range for a tensor.
*
@@ -293,9 +285,13 @@ public class IRefitter extends INoCopy {
* Possible reasons for rejection are:
*
* * The name of weights is nullptr or does not correspond to any refittable weights.
- * * The number of weights is inconsistent with the original specification.
+ * * The count of the weights is inconsistent with the count returned from calling getWeightsPrototype() with the
+ * same name.
+ * * The type of the weights is inconsistent with the type returned from calling getWeightsPrototype() with the
+ * same name.
*
- * Modifying the weights before method refitCudaEngine() completes will result in undefined behavior.
+ * Modifying the weights before method refitCudaEngine or refitCudaEngineAsync returns will result in undefined
+ * behavior.
*
* \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator.
* */
@@ -365,6 +361,8 @@ public class IRefitter extends INoCopy {
* */
+ //!
+ //!
//!
//!
//!
@@ -372,7 +370,9 @@ public class IRefitter extends INoCopy {
/**
* \brief Set the maximum number of threads.
+ *
* @param maxThreads The maximum number of threads that can be used by the refitter.
+ *
* @return True if successful, false otherwise.
*
* The default value is 1 and includes the current thread.
@@ -397,5 +397,184 @@ public class IRefitter extends INoCopy {
*
* @see setMaxThreads()
* */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
public native @NoException(true) int getMaxThreads();
+
+ /**
+ * \brief Specify new weights on a specified device of given name.
+ *
+ * @param name The name of the weights to be refitted.
+ * @param weights The new weights on the specified device.
+ * @param location The location (host vs. device) of the new weights.
+ *
+ * @return True on success, or false if new weights are rejected.
+ * Possible reasons for rejection are:
+ *
+ * * The name of the weights is nullptr or does not correspond to any refittable weights.
+ * * The count of the weights is inconsistent with the count returned from calling getWeightsPrototype() with the
+ * same name.
+ * * The type of the weights is inconsistent with the type returned from calling getWeightsPrototype() with the
+ * same name.
+ *
+ * It is allowed to provide some weights on CPU and others on GPU.
+ * Modifying the weights before the method refitCudaEngine() or refitCudaEngineAsync() completes will result in
+ * undefined behavior.
+ *
+ * \warning The string name must be null-terminated, and be at most 4096 bytes including the terminator.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean setNamedWeights(String name, @ByVal Weights weights, TensorLocation location);
+ public native @Cast("bool") @NoException(true) boolean setNamedWeights(@Cast("const char*") BytePointer name, @ByVal Weights weights, @Cast("nvinfer1::TensorLocation") int location);
+
+ /**
+ * \brief Get weights associated with the given name.
+ *
+ * @param weightsName The name of the weights to be refitted.
+ *
+ * @return Weights associated with the given name.
+ *
+ * If the weights were never set, returns null weights and reports an error to the refitter errorRecorder.
+ *
+ * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @ByVal @NoException(true) Weights getNamedWeights(String weightsName);
+ public native @ByVal @NoException(true) Weights getNamedWeights(@Cast("const char*") BytePointer weightsName);
+
+ /**
+ * \brief Get location for the weights associated with the given name.
+ *
+ * @param weightsName The name of the weights to be refitted.
+ *
+ * @return Location for the weights associated with the given name.
+ *
+ * If the weights were never set, returns TensorLocation::kHOST and reports an error to the refitter errorRecorder.
+ *
+ * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @NoException(true) TensorLocation getWeightsLocation(String weightsName);
+ public native @NoException(true) @Cast("nvinfer1::TensorLocation") int getWeightsLocation(@Cast("const char*") BytePointer weightsName);
+
+ /**
+ * \brief Unset weights associated with the given name.
+ *
+ * @param weightsName The name of the weights to be refitted.
+ *
+ * @return False if the weights were never set, returns true otherwise.
+ *
+ * Unset weights before releasing them.
+ *
+ * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(String weightsName);
+ public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(@Cast("const char*") BytePointer weightsName);
+
+ /**
+ * \brief Set whether to validate weights during refitting.
+ *
+ * @param weightsValidation Indicate whether to validate weights during refitting.
+ *
+ * When set to true, TensorRT will validate weights during FP32 to FP16/BF16 weights conversions or
+ * sparsifying weights in the refit call. If provided weights are not proper for some weights transformations,
+ * TensorRT will issue a warning and continue the transformation for minor issues (such as overflow during
+ * narrowing conversion), or issue an error and stop the refitting process for severe issues (such as sparsifying
+ * dense weights). By default the flag is true. Set the flag to false for faster refitting performance.
+ * */
+
+
+ //!
+ //!
+ public native @NoException(true) void setWeightsValidation(@Cast("bool") boolean weightsValidation);
+
+ /**
+ * \brief Get whether to validate weights values during refitting.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean getWeightsValidation();
+
+ /**
+ * \brief Enqueue weights refitting of the associated engine on the given stream.
+ *
+ * @param stream The stream to enqueue the weights updating task.
+ *
+ * @return True on success, or false if new weights validation fails or getMissingWeights() != 0 before the call.
+ * If false is returned, a subset of weights may have been refitted.
+ *
+ * The behavior is undefined if the engine has pending enqueued work on a different stream from the provided one.
+ * Provided weights on CPU can be unset and released, or updated after refitCudaEngineAsync returns.
+ * Freeing or updating of the provided weights on GPU can be enqueued on the same stream after refitCudaEngineAsync
+ * returns.
+ *
+ * IExecutionContexts associated with the engine remain valid for use afterwards. There is no need to set the same
+ * weights repeatedly for multiple refit calls as the weights memory can be updated directly instead. The weights
+ * updating task should use the same stream as the one used for the refit call.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean refitCudaEngineAsync(CUstream_st stream);
+
+ /**
+ * \brief Get the Weights prototype associated with the given name.
+ *
+ * @param weightsName The name of the weights to be refitted.
+ *
+ * @return Weights prototype associated with the given name.
+ *
+ * The type and count of weights prototype is the same as weights used for engine building. The values property
+ * is nullptr for weights prototypes. The count of the weights prototype is -1 when the name of the weights is
+ * nullptr or does not correspond to any refittable weights.
+ *
+ * \warning The string weightsName must be null-terminated, and be at most 4096 bytes including the terminator.
+ * */
+ public native @ByVal @NoException(true) Weights getWeightsPrototype(String weightsName);
+ public native @ByVal @NoException(true) Weights getWeightsPrototype(@Cast("const char*") BytePointer weightsName);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java
index 41bc2125d1b..1ac757091df 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IResizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -26,13 +26,13 @@
* Resize layer can be used for resizing a N-D tensor.
*
* Resize layer currently supports the following configurations:
- * - ResizeMode::kNEAREST - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(8, N) and N > 0
- * - ResizeMode::kLINEAR - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(3, N) and N > 0
+ * - InterpolationMode::kNEAREST - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(8, N) and N > 0
+ * - InterpolationMode::kLINEAR - resizes innermost {@code m} dimensions of N-D, where 0 < m <= min(3, N) and N > 0
*
- * Default resize mode is ResizeMode::kNEAREST.
+ * Default resize mode is InterpolationMode::kNEAREST.
*
* The coordinates in the output tensor are mapped to coordinates in the input tensor using a function set by calling
- * setCoordinateTransformation(). The default for all ResizeMode settings (nearest, linear, bilinear, etc.) is
+ * setCoordinateTransformation(). The default for all InterpolationMode settings (nearest, linear, bilinear, etc.) is
* ResizeCoordinateTransformation::kASYMMETRIC.
*
* The resize layer provides two ways to resize tensor dimensions.
@@ -84,7 +84,7 @@ public class IResizeLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setOutputDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
+ public native @NoException(true) void setOutputDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
/**
* \brief Get the output dimensions.
@@ -101,7 +101,7 @@ public class IResizeLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getOutputDimensions();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getOutputDimensions();
/**
* \brief Set the resize scales.
@@ -169,15 +169,15 @@ public class IResizeLayer extends ILayer {
*
* Supported resize modes are Nearest Neighbor and Linear.
*
- * @see ResizeMode
+ * @see InterpolationMode
* */
//!
//!
//!
- public native @NoException(true) void setResizeMode(InterpolationMode resizeMode);
- public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int resizeMode);
+ public native @NoException(true) void setResizeMode(InterpolationMode interpolationMode);
+ public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int interpolationMode);
/**
* \brief Get resize mode for an input tensor.
@@ -191,44 +191,9 @@ public class IResizeLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) InterpolationMode getResizeMode();
-
- /**
- * \brief Set whether to align corners while resizing.
- *
- * If true, the centers of the 4 corner pixels of both input and output
- * tensors are aligned i.e. preserves the values of corner
- * pixels.
- *
- * Default: false.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by IResizeLayer::setCoordinateTransformation().
- * */
-
-
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void setAlignCorners(@Cast("bool") boolean alignCorners);
-
- /**
- * \brief True if align corners has been set.
- *
- * @return True if align corners has been set, false otherwise.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by IResizeLayer::getCoordinateTransformation().
- * */
-
-
//!
//!
- //!
- //!
- //!
- //!
- //!
- public native @Cast("bool") @Deprecated @NoException(true) boolean getAlignCorners();
+ public native @NoException(true) InterpolationMode getResizeMode();
/**
* \brief Append or replace an input of this layer with a specific tensor
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java
index 76cf0df8854..66da1642d78 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IReverseSequenceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,7 +19,8 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
// class INMSLayer
-/** \class IReverseSequenceLayer
+/**
+ * \class IReverseSequenceLayer
*
* \brief A ReverseSequence layer in a network definition.
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java
index ceca09131c1..bfb8cd472a5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IRuntime.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -45,33 +45,9 @@ public class IRuntime extends INoCopy {
}
- /**
- * \brief Deserialize an engine from a stream.
- *
- * If an error recorder has been set for the runtime, it will also be passed to the engine.
- *
- * @param blob The memory that holds the serialized engine.
- * @param size The size of the memory in bytes.
- * @param pluginFactory The plugin factory, if any plugins are used by the network, otherwise nullptr.
- *
- * @return The engine, or nullptr if it could not be deserialized.
- *
- * @deprecated Deprecated in TensorRT 8.0.
- *
- * \warning IPluginFactory is no longer supported, therefore pluginFactory must be a nullptr.
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Deprecated @NoException(true) ICudaEngine deserializeCudaEngine(
- @Const Pointer blob, @Cast("std::size_t") long size, IPluginFactory pluginFactory);
-
/**
* \brief Sets the DLA core used by the network. Defaults to -1.
+ *
* @param dlaCore The DLA core to execute the engine on, in the range [0,getNbDlaCores()).
*
* This function is used to specify which DLA core to use via indexing, if multiple DLA cores are available.
@@ -82,12 +58,14 @@ public class IRuntime extends INoCopy {
* */
+ //!
//!
//!
public native @NoException(true) void setDLACore(int dlaCore);
/**
* \brief Get the DLA core that the engine executes on.
+ *
* @return assigned DLA core or -1 for DLA not present or unset.
* */
@@ -103,27 +81,14 @@ public class IRuntime extends INoCopy {
//!
//!
- //!
- //!
- public native @NoException(true) int getNbDLACores();
-
- /**
- * \brief Destroy this object.
- *
- * @deprecated Deprecated in TRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
-
-
//!
//!
//!
- //!
- public native @Deprecated @NoException(true) void destroy();
+ public native @NoException(true) int getNbDLACores();
/**
* \brief Set the GPU allocator.
+ *
* @param allocator Set the GPU allocator to be used by the runtime. All GPU memory acquired will use this
* allocator. If NULL is passed, the default allocator will be used.
*
@@ -183,7 +148,7 @@ public class IRuntime extends INoCopy {
public native @NoException(true) IErrorRecorder getErrorRecorder();
/**
- * \brief Deserialize an engine from a stream.
+ * \brief Deserialize an engine from host memory.
*
* If an error recorder has been set for the runtime, it will also be passed to the engine.
*
@@ -194,11 +159,34 @@ public class IRuntime extends INoCopy {
* */
+ //!
+ //!
+ //!
//!
//!
//!
public native @NoException(true) ICudaEngine deserializeCudaEngine(@Const Pointer blob, @Cast("std::size_t") long size);
+ /**
+ * \brief Deserialize an engine from a stream.
+ *
+ * If an error recorder has been set for the runtime, it will also be passed to the
+ * engine.
+ *
+ * This deserialization path will reduce host memory usage when weight streaming is enabled.
+ *
+ * @param streamReader a read-only stream from which TensorRT will deserialize a
+ * previously serialized engine.
+ *
+ * @return The engine, or nullptr if it could not be deserialized.
+ * */
+
+
+ //!
+ //!
+ //!
+ public native ICudaEngine deserializeCudaEngine(@ByRef IStreamReader streamReader);
+
/**
* \brief get the logger with which the runtime was created
*
@@ -209,10 +197,12 @@ public class IRuntime extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) ILogger getLogger();
/**
* \brief Set the maximum number of threads.
+ *
* @param maxThreads The maximum number of threads that can be used by the runtime.
* @return True if successful, false otherwise.
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java
index e3061409adb..511e076ec49 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScaleLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -35,8 +35,7 @@
*
* The output size is the same as the input size.
*
- * \note The input tensor for this layer is required to have a minimum of 3 dimensions in implicit batch mode
- * and a minimum of 4 dimensions in explicit batch mode.
+ * \note The input tensor is required to have at least 4 dimensions.
*
* A scale layer may be used as an INT8 quantization node in a graph, if the output is constrained to INT8 and
* the input to FP32. Quantization rounds ties to even, and clamps to [-128, 127].
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java
index 57abf942ce7..30ae4bd5e22 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IScatterLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -38,7 +38,7 @@
* Scattermode::kELEMENT: s = q = r
* * Output is a tensor with the same dimensions as Data that stores the resulting values of the
* transformation. It must not be a shape tensor.
- * The types of Data, Update, and Output shall be the same, and Indices shall be DataType::kINT32.
+ * The types of Data, Update, and Output shall be the same, and Indices shall be DataType::kINT32 or DataType::kINT64.
*
* The output is computed by copying the data, and then updating elements of it based on indices.
* How Indices are interpreted depends upon the ScatterMode.
@@ -69,7 +69,7 @@
* for c in [0,n)
* for h in [0,n)
* for w in [0,n)
- * output[n,c,indices[n,c,h,w],w] = updates[n,c,h,w]]
+ * output[n,c,indices[n,c,h,w],w] = updates[n,c,h,w]
*
* Writes to the same output element cause undefined behavior.
*
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java
index b08a3d0b273..711da35038e 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISelectLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -20,6 +20,15 @@
/**
+ * \class ISelectLayer
+ *
+ * \brief Select elements from two data tensors based on a condition tensor.
+ *
+ * The select layer makes elementwise selections from two data tensors based on a condition tensor,
+ * behaving similarly to the numpy.where function with three parameters.
+ * The three input tensors must share the same rank. Multidirectional broadcasting is supported.
+ * The output tensor has the dimensions of the inputs AFTER applying the broadcast rule.
+ *
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
* */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java
new file mode 100644
index 00000000000..f53961f88ba
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISerializationConfig.java
@@ -0,0 +1,123 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+
+
+/**
+ * \class ISerializationConfig
+ *
+ * \brief Holds properties for configuring an engine to serialize the binary.
+ *
+ * @see SerializationFlag
+ * */
+@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class ISerializationConfig extends INoCopy {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public ISerializationConfig() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public ISerializationConfig(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ISerializationConfig(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public ISerializationConfig position(long position) {
+ return (ISerializationConfig)super.position(position);
+ }
+ @Override public ISerializationConfig getPointer(long i) {
+ return new ISerializationConfig((Pointer)this).offsetAddress(i);
+ }
+
+
+ /**
+ * \brief Set the serialization flags to turn on for this config.
+ *
+ * The flags are listed in the SerializationFlag enum.
+ *
+ * @param serializationFlags The serialization flags for an engine.
+ *
+ * \note This function will override the previous set flags, rather than bitwise ORing the new flag.
+ *
+ * @see getFlags()
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean setFlags(@Cast("nvinfer1::SerializationFlags") int serializationFlags);
+
+ /**
+ * \brief Get the serialization flags for this config.
+ *
+ * @return The serialization flags as a bitmask.
+ *
+ * @see setFlags()
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("nvinfer1::SerializationFlags") @NoException(true) int getFlags();
+
+ /**
+ * \brief clear a serialization flag.
+ *
+ * clears the serialization flag from the config.
+ *
+ * @see setFlags()
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean clearFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean clearFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+
+ /**
+ * \brief Set a serialization flag.
+ *
+ * Add the input serialization flag to the already enabled flags.
+ *
+ * @see setFlags()
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean setFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean setFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+
+ /**
+ * \brief Returns true if the serialization flag is set
+ *
+ * @see getFlags()
+ *
+ * @return True if flag is set, false if unset.
+ * */
+ public native @Cast("bool") @NoException(true) boolean getFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java
index e152ea02596..95794f3b13a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShapeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -23,10 +23,10 @@
*
* \brief Layer type for getting shape of a tensor.
*
- * This layer sets the output to a 1D tensor of type Int32 with the dimensions of the input tensor.
+ * This layer sets the output to a 1D tensor of type Int64 with the dimensions of the input tensor.
*
* For example, if the input is a four-dimensional tensor (of any type) with
- * dimensions [2,3,5,7], the output tensor is a one-dimensional Int32 tensor
+ * dimensions [2,3,5,7], the output tensor is a one-dimensional Int64 tensor
* of length 4 containing the sequence 2, 3, 5, 7.
*
* \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java
index 6c70b038a1d..3b3f5624078 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IShuffleLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -99,7 +99,7 @@ public class IShuffleLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setReshapeDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
+ public native @NoException(true) void setReshapeDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
/**
* \brief Get the reshaped dimensions.
@@ -119,7 +119,7 @@ public class IShuffleLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getReshapeDimensions();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getReshapeDimensions();
/**
* \brief Append or replace an input of this layer with a specific tensor
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java
index f52cb2c9e62..ec94379f9cd 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISliceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -42,7 +42,7 @@
* stride = {1, 2}
* output = {{1, 5}}
*
- * When the sliceMode is kCLAMP or kREFLECT, for each input dimension, if its size is 0 then the corresponding output
+ * When the sampleMode is kCLAMP or kREFLECT, for each input dimension, if its size is 0 then the corresponding output
* dimension must be 0 too.
*
* A slice layer can produce a shape tensor if the following conditions are met:
@@ -54,7 +54,7 @@
*
* The following constraints must be satisfied to execute this layer on DLA:
* * start, size, and stride are build time constants, either as static Dims or as constant input tensors.
- * * sliceMode is kDEFAULT.
+ * * sampleMode is kSTRICT_BOUNDS.
* * Strides are 1 for all dimensions.
* * Slicing is not performed on the first dimension
* * The input tensor has four dimensions
@@ -83,7 +83,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setStart(@ByVal @Cast("nvinfer1::Dims*") Dims32 start);
+ public native @NoException(true) void setStart(@Cast("const nvinfer1::Dims*") @ByRef Dims64 start);
/**
* \brief Get the start offset for the slice layer.
@@ -102,7 +102,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStart();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStart();
/**
* \brief Set the dimensions of the output slice.
@@ -120,7 +120,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setSize(@ByVal @Cast("nvinfer1::Dims*") Dims32 size);
+ public native @NoException(true) void setSize(@Cast("const nvinfer1::Dims*") @ByRef Dims64 size);
/**
* \brief Get dimensions of the output slice.
@@ -139,7 +139,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getSize();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getSize();
/**
* \brief Set the stride for computing the output slice data.
@@ -157,7 +157,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @NoException(true) void setStride(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
+ public native @NoException(true) void setStride(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
/**
* \brief Get the stride for the output slice.
@@ -174,7 +174,7 @@ public class ISliceLayer extends ILayer {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStride();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStride();
/**
* \brief Set the slice mode.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java
index 2ad9f2d4604..5bdd53ea5cd 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ISoftMaxLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -28,8 +28,7 @@
*
* The output size is the same as the input size.
*
- * On Xavier, this layer is not supported on DLA.
- * Otherwise, the following constraints must be satisfied to execute this layer on DLA:
+ * The following constraints must be satisfied to execute this layer on DLA:
* * Axis must be one of the channel or spatial dimensions.
* * There are two classes of supported input sizes:
* 1. Non-axis, non-batch dimensions are all 1 and the axis dimension is at most 8192.
@@ -50,17 +49,8 @@ public class ISoftMaxLayer extends ILayer {
* \brief Set the axis along which softmax is computed. Currently, only one axis can be set.
*
* The axis is specified by setting the bit corresponding to the axis to 1.
- * For example, consider an NCHW tensor as input (three non-batch dimensions).
+ * For example, consider an NCHW tensor as input.
*
- * In implicit mode :
- * Bit 0 corresponds to the C dimension boolean.
- * Bit 1 corresponds to the H dimension boolean.
- * Bit 2 corresponds to the W dimension boolean.
- * By default, softmax is performed on the axis which is the number of axes minus three. It is 0 if
- * there are fewer than 3 non-batch axes. For example, if the input is NCHW, the default axis is C. If the input
- * is NHW, then the default axis is H.
- *
- * In explicit mode :
* Bit 0 corresponds to the N dimension boolean.
* Bit 1 corresponds to the C dimension boolean.
* Bit 2 corresponds to the H dimension boolean.
@@ -69,8 +59,7 @@ public class ISoftMaxLayer extends ILayer {
* there are fewer than 3 axes. For example, if the input is NCHW, the default axis is C. If the input
* is NHW, then the default axis is N.
*
- * For example, to perform softmax on axis R of a NPQRCHW input, set bit 2 with implicit batch mode,
- * set bit 3 with explicit batch mode.
+ * For example, to perform softmax on axis R of a NPQRCHW input, set bit 3.
*
* @param axes The axis along which softmax is computed.
* Here axes is a bitmap. For example, when doing softmax along axis 0, bit 0 is set to 1, axes = 1 << axis
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java
new file mode 100644
index 00000000000..23845c3d313
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IStreamReader.java
@@ -0,0 +1,52 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+
+@Namespace("nvinfer1::v_1_0") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class IStreamReader extends IVersionedInterface {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public IStreamReader(Pointer p) { super(p); }
+
+ /**
+ * TensorRT never calls the destructor for an IStreamReader defined by the
+ * application.
+ * */
+
+ /**
+ * \brief Return version information associated with this interface. Applications must not override this method.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ public native @ByVal @NoException(true) InterfaceInfo getInterfaceInfo();
+
+ /**
+ * \brief Read the next number of bytes in the stream.
+ *
+ * @param destination The memory to write to
+ * @param nbBytes The number of bytes to read
+ *
+ * @return The number of bytes read. Negative values will be considered an automatic error.
+ * */
+ public native @Cast("int64_t") long read(Pointer destination, @Cast("int64_t") long nbBytes);
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java
index fd493572497..e1cf2a68024 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITensor.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -101,7 +101,8 @@ public class ITensor extends INoCopy {
//!
//!
//!
- public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
+ //!
+ public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
/**
* \brief Get the dimensions of a tensor.
@@ -109,6 +110,7 @@ public class ITensor extends INoCopy {
* @return The dimensions of the tensor.
*
* \warning getDimensions() returns a -1 for dimensions that are derived from a wildcard dimension.
+ *
* @see setDimensions()
* */
@@ -118,7 +120,7 @@ public class ITensor extends INoCopy {
//!
//!
//!
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions();
/**
* \brief Set the data type of a tensor.
@@ -191,25 +193,19 @@ public class ITensor extends INoCopy {
//!
//!
//!
- //!
public native @Cast("bool") @NoException(true) boolean isNetworkOutput();
/**
- * \brief Set whether to enable broadcast of tensor across the batch.
- *
- * When a tensor is broadcast across a batch, it has the same value for every member in the batch.
- * Memory is only allocated once for the single member.
- *
- * This method is only valid for network input tensors, since the flags of layer output tensors are inferred based
- * on layer inputs and parameters.
- * If this state is modified for a tensor in the network, the states of all dependent tensors will be recomputed.
- * If the tensor is for an explicit batch network, then this function does nothing.
+ * \brief Set whether to enable broadcast of tensor across the implicit batch dimension.
*
- * \warning The broadcast flag is ignored when using explicit batch network mode.
+ * \warning This method has no effect other than issuing a warning.
*
- * @param broadcastAcrossBatch Whether to enable broadcast of tensor across the batch.
+ * @param broadcastAcrossBatch Whether to broadcast the tensor across the implicit
+ * batch dimension that was a feature of TensorRT 9.x and prior.
*
* @see getBroadcastAcrossBatch()
+ *
+ * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0.
* */
@@ -218,28 +214,30 @@ public class ITensor extends INoCopy {
//!
//!
//!
- public native @NoException(true) void setBroadcastAcrossBatch(@Cast("bool") boolean broadcastAcrossBatch);
+ public native @Deprecated @NoException(true) void setBroadcastAcrossBatch(@Cast("bool") boolean broadcastAcrossBatch);
/**
- * \brief Check if tensor is broadcast across the batch.
- *
- * When a tensor is broadcast across a batch, it has the same value for every member in the batch.
- * Memory is only allocated once for the single member. If the network is in explicit batch mode,
- * this function returns true if the leading dimension is 1.
+ * \brief Check if tensor is broadcast across the implicit batch dimension.
*
- * @return True if tensor is broadcast across the batch, false otherwise.
+ * @return Always false since TensorRT 10.0 does not support an implicit batch dimension.
*
* @see setBroadcastAcrossBatch()
+ *
+ * @deprecated Deprecated in TensorRT 10.0. Implicit batch is not supported since TensorRT 10.0.
* */
//!
//!
- public native @Cast("bool") @NoException(true) boolean getBroadcastAcrossBatch();
+ //!
+ //!
+ public native @Cast("bool") @Deprecated @NoException(true) boolean getBroadcastAcrossBatch();
/**
* \brief Get the storage location of a tensor.
+ *
* @return The location of tensor data.
+ *
* @see setLocation()
* */
@@ -248,10 +246,13 @@ public class ITensor extends INoCopy {
//!
//!
//!
+ //!
+ //!
public native @NoException(true) TensorLocation getLocation();
/**
* \brief Set the storage location of a tensor
+ *
* @param location the location of tensor data
*
* Only network input tensors for storing sequence lengths for RNNv2 are supported.
@@ -259,14 +260,17 @@ public class ITensor extends INoCopy {
* errors at build time.
*
* @see getLocation()
+ *
+ * @deprecated Deprecated in TensorRT 10.0. RNNv2 is not supported and the location must
+ * always be TensorLocation::kDEVICE since TensorRT 10.0.
* */
//!
//!
//!
- public native @NoException(true) void setLocation(TensorLocation location);
- public native @NoException(true) void setLocation(@Cast("nvinfer1::TensorLocation") int location);
+ public native @Deprecated @NoException(true) void setLocation(TensorLocation location);
+ public native @Deprecated @NoException(true) void setLocation(@Cast("nvinfer1::TensorLocation") int location);
/**
* \brief Query whether dynamic range is set.
@@ -313,11 +317,12 @@ public class ITensor extends INoCopy {
//!
//!
//!
+ //!
public native @NoException(true) float getDynamicRangeMax();
/**
* \brief Set allowed formats for this tensor. By default all formats are allowed.
- * Shape tensors (for which isShapeTensor() returns true) may only have row major linear format.
+ * Shape tensors (for which isShapeTensor() returns true) may only have row-major linear format.
*
* When running network on DLA and the build option kGPU_FALLBACK is not specified, if DLA format(kCHW4 with Int8,
* kCHW4 with FP16, kCHW16 with FP16, kCHW32 with Int8) is set, the input format is treated as native DLA format with
@@ -327,6 +332,7 @@ public class ITensor extends INoCopy {
* @param formats A bitmask of TensorFormat values that are supported for this tensor.
*
* @see ITensor::getAllowedFormats()
+ *
* @see TensorFormats
* */
@@ -339,7 +345,7 @@ public class ITensor extends INoCopy {
/**
* \brief Get a bitmask of TensorFormat values that the tensor supports.
- * For a shape tensor, only row major linear format is allowed.
+ * For a shape tensor, only row-major linear format is allowed.
*
* @return The value specified by setAllowedFormats or all possible formats.
*
@@ -355,14 +361,13 @@ public class ITensor extends INoCopy {
//!
//!
//!
- //!
public native @Cast("nvinfer1::TensorFormats") @NoException(true) int getAllowedFormats();
/**
* \brief Whether the tensor is a shape tensor.
*
* A shape tensor is a tensor that is related to shape calculations.
- * It must have type Int32, Bool, or Float, and its shape must be determinable at build time.
+ * It must have type Int32, Int64, Bool, or Float, and its shape must be determinable at build time.
* Furthermore, it must be needed as a shape tensor, either marked as a network shape
* output via markOutputForShapes(), or as a layer input that is required to be a shape
* tensor, such as the second input to IShuffleLayer. Some layers are "polymorphic" in
@@ -378,15 +383,11 @@ public class ITensor extends INoCopy {
* cause all three tensors to be shape tensors, because IShuffleLayer requires that its
* second optional input be a shape tensor, and IElementWiseLayer is "polymorphic".
*
- * If a tensor is a shape tensor and becomes an engine input or output,
- * then ICudaEngine::isShapeBinding will be true for that tensor.
- * Such a shape tensor must have type Int32.
- *
* It is possible for a tensor to be both a shape tensor and an execution tensor.
*
* @return True if tensor is a shape tensor, false otherwise.
*
- * @see INetworkDefinition::markOutputForShapes(), ICudaEngine::isShapeBinding()
+ * @see INetworkDefinition::markOutputForShapes()
* */
@@ -408,8 +409,6 @@ public class ITensor extends INoCopy {
* For example, if a partially built network has no path from a tensor to a network output,
* isExecutionTensor() returns false. Completing the path would cause it to become true.
*
- * If a tensor is an execution tensor and becomes an engine input or output,
- * then ICudaEngine::isExecutionBinding will be true for that tensor.
*
* A tensor with isShapeTensor() == false and isExecutionTensor() == false
* can still show up as an input to the engine if its dimensions are required.
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java
index df22348afce..78ce566d2cb 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITimingCache.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java
index d1fcd47b849..11efb849e52 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITopKLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java
index 42d4e36beab..96854efd916 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/ITripLimitLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,11 +19,25 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
+/**
+ * \class ITripLimitLayer
+ *
+ * \brief A layer that represents a trip-count limiter.
+ *
+ * The trip limit layer sets the execution condition for loops, using kCOUNT to define the number of iterations or
+ * kWHILE for a conditional loop. A loop can have one of each kind of limit, in which case the loop exits when
+ * the trip count is reached or the condition becomes false.
+ *
+ * See INetworkDefinition::addTripLimit().
+ * */
@Namespace("nvinfer1") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class ITripLimitLayer extends ILoopBoundaryLayer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ITripLimitLayer(Pointer p) { super(p); }
+ /**
+ * \brief Get a trip limiter type.
+ * */
public native @NoException(true) TripLimit getTripLimit();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java
index 6514e636bda..d291a85ddbf 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IUnaryLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java
new file mode 100644
index 00000000000..ab01a89a9d2
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/IVersionedInterface.java
@@ -0,0 +1,48 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+ // namespace impl
+
+/**
+ * \class IVersionedInterface
+ *
+ * \brief An Interface class for version control.
+ * */
+@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class IVersionedInterface extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public IVersionedInterface(Pointer p) { super(p); }
+
+ /**
+ * \brief The language used to build the implementation of this Interface.
+ *
+ * Applications must not override this method.
+ * */
+
+
+ //!
+ //!
+ @Virtual public native @NoException(true) @Const({false, false, true}) APILanguage getAPILanguage();
+
+ /**
+ * \brief Return version information associated with this interface. Applications must not override this method.
+ * */
+ @Virtual(true) public native @ByVal @NoException(true) @Const({false, false, true}) InterfaceInfo getInterfaceInfo();
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java
new file mode 100644
index 00000000000..38f2a766942
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/InterfaceInfo.java
@@ -0,0 +1,48 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+
+
+/**
+ * \class InterfaceInfo
+ *
+ * \brief Version information associated with a TRT interface
+ * */
+@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class InterfaceInfo extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public InterfaceInfo() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public InterfaceInfo(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public InterfaceInfo(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public InterfaceInfo position(long position) {
+ return (InterfaceInfo)super.position(position);
+ }
+ @Override public InterfaceInfo getPointer(long i) {
+ return new InterfaceInfo((Pointer)this).offsetAddress(i);
+ }
+
+ public native String kind(); public native InterfaceInfo kind(String setter);
+ public native int major(); public native InterfaceInfo major(int setter);
+ public native int minor(); public native InterfaceInfo minor(int setter);
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java
index 70fb96de771..efb35343508 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Permutation.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,6 +19,11 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
+/**
+ * \struct Permutation
+ *
+ * \brief Represents a permutation of dimensions.
+ * */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class Permutation extends Pointer {
static { Loader.load(); }
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java
index 4b1ab6f3514..7553116c71e 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginField.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -41,31 +41,13 @@ public class PluginField extends Pointer {
return new PluginField((Pointer)this).offsetAddress(i);
}
- /**
- * \brief Plugin field attribute name
- * */
-
- //!
- //!
+ /** Plugin field attribute name */
public native String name(); public native PluginField name(String setter);
- /**
- * \brief Plugin field attribute data
- * */
-
- //!
- //!
+ /** Plugin field attribute data */
public native @Const Pointer data(); public native PluginField data(Pointer setter);
- /**
- * \brief Plugin field attribute type
- * @see PluginFieldType
- * */
-
- //!
- //!
+ /** Plugin field attribute type */
public native PluginFieldType type(); public native PluginField type(PluginFieldType setter);
- /**
- * \brief Number of data entries in the Plugin attribute
- * */
+ /** Number of data entries in the Plugin attribute */
public native int length(); public native PluginField length(int setter);
public PluginField(String name_/*=nullptr*/, @Const Pointer data_/*=nullptr*/,
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java
index 04b8cae7d96..7110df39297 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginFieldCollection.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,7 +19,11 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
-/** Plugin field collection struct. */
+/**
+ * \struct PluginFieldCollection
+ *
+ * \brief Plugin field collection struct.
+ * */
@Namespace("nvinfer1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class PluginFieldCollection extends Pointer {
static { Loader.load(); }
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java
index 5931be6747e..03bc6f60b88 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/PluginTensorDesc.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -19,12 +19,13 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
-/** \struct PluginTensorDesc
+/**
+ * \struct PluginTensorDesc
*
* \brief Fields that a plugin might see for an input or output.
*
* Scale is only valid when data type is DataType::kINT8. TensorRT will set
- * the value to -1.0f if it is invalid.
+ * the value to -1.0F if it is invalid.
*
* @see IPluginV2IOExt::supportsFormatCombination
* @see IPluginV2IOExt::configurePlugin
@@ -48,7 +49,7 @@ public class PluginTensorDesc extends Pointer {
}
/** Dimensions. */
- public native @ByRef @Cast("nvinfer1::Dims*") Dims32 dims(); public native PluginTensorDesc dims(Dims32 setter);
+ public native @ByRef @Cast("nvinfer1::Dims*") Dims64 dims(); public native PluginTensorDesc dims(Dims64 setter);
/** \warning DataType:kBOOL and DataType::kUINT8 are not supported. */
public native DataType type(); public native PluginTensorDesc type(DataType setter);
/** Tensor format. */
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java
index 7786fa59f58..84f67117f8a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/SafeIPluginRegistry.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java
index aa1abbcadad..078d5bceb01 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VActivationLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java
index e76ca396a4f..07fae30bd89 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithm.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,7 +25,6 @@ public class VAlgorithm extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VAlgorithm(Pointer p) { super(p); }
- public native @Const @ByRef @NoException(true) IAlgorithmIOInfo getAlgorithmIOInfo(int index);
public native @Const @ByRef @NoException(true) IAlgorithmVariant getAlgorithmVariant();
public native @NoException(true) float getTimingMSec();
public native @Cast("std::size_t") @NoException(true) long getWorkspaceSize();
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java
index 7ec610d8542..529f5eaa107 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmContext.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -26,8 +26,8 @@ public class VAlgorithmContext extends VRoot {
public VAlgorithmContext(Pointer p) { super(p); }
public native @NoException(true) String getName();
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, OptProfileSelector select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(int index, @Cast("nvinfer1::OptProfileSelector") int select);
public native @NoException(true) int getNbInputs();
public native @NoException(true) int getNbOutputs();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java
index 831821a5f37..1a514b72dfa 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmIOInfo.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,9 +25,8 @@ public class VAlgorithmIOInfo extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VAlgorithmIOInfo(Pointer p) { super(p); }
- public native @NoException(true) TensorFormat getTensorFormat();
public native @NoException(true) DataType getDataType();
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides();
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrides();
public native @Cast("int64_t") @NoException(true) long getVectorizedDim();
public native @Cast("int64_t") @NoException(true) long getComponentsPerElement();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java
index d3917764604..38174713f91 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAlgorithmVariant.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java
index f311d9f8cf9..39d5960824d 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VAssertionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java
index 7eeadeb18c2..5d6d4177490 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilder.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,15 +25,12 @@ public class VBuilder extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VBuilder(Pointer p) { super(p); }
- public native @NoException(true) void setMaxBatchSize(int batchSize);
- public native @NoException(true) int getMaxBatchSize();
public native @Cast("bool") @NoException(true) boolean platformHasFastFp16();
public native @Cast("bool") @NoException(true) boolean platformHasFastInt8();
public native @NoException(true) int getMaxDLABatchSize();
public native @NoException(true) int getNbDLACores();
public native @NoException(true) void setGpuAllocator(IGpuAllocator allocator);
public native @NoException(true) IBuilderConfig createBuilderConfig();
- public native @NoException(true) ICudaEngine buildEngineWithConfig(@ByRef INetworkDefinition network, @ByRef IBuilderConfig config);
public native @NoException(true) INetworkDefinition createNetworkV2(@Cast("nvinfer1::NetworkDefinitionCreationFlags") int flags);
public native @NoException(true) IOptimizationProfile createOptimizationProfile();
public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java
index 90f35592ffe..7f6fdd089ba 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VBuilderConfig.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,8 +25,6 @@ public class VBuilderConfig extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VBuilderConfig(Pointer p) { super(p); }
- public native @NoException(true) void setMinTimingIterations(int minTiming);
- public native @NoException(true) int getMinTimingIterations();
public native @NoException(true) void setAvgTimingIterations(int avgTiming);
public native @NoException(true) int getAvgTimingIterations();
public native @NoException(true) void setEngineCapability(EngineCapability capability);
@@ -34,8 +32,6 @@ public class VBuilderConfig extends VRoot {
public native @NoException(true) EngineCapability getEngineCapability();
public native @NoException(true) void setInt8Calibrator(IInt8Calibrator calibrator);
public native @NoException(true) IInt8Calibrator getInt8Calibrator();
- public native @NoException(true) void setMaxWorkspaceSize(@Cast("std::size_t") long workspaceSize);
- public native @Cast("std::size_t") @NoException(true) long getMaxWorkspaceSize();
public native @NoException(true) void setFlags(@Cast("nvinfer1::BuilderFlags") int builderFlags);
public native @Cast("nvinfer1::BuilderFlags") @NoException(true) int getFlags();
public native @NoException(true) void clearFlag(BuilderFlag builderFlag);
@@ -101,4 +97,6 @@ public class VBuilderConfig extends VRoot {
public native @NoException(true) int getNbPluginsToSerialize();
public native @NoException(true) void setMaxAuxStreams(int nbStreams);
public native @NoException(true) int getMaxAuxStreams();
+ public native @NoException(true) void setProgressMonitor(IProgressMonitor monitor);
+ public native @NoException(true) IProgressMonitor getProgressMonitor();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java
index a14208e81a9..fa548842c17 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCastLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java
index 0f33506174e..78a20653dea 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConcatenationLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java
index 86946195d28..2b0a5e46715 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java
index 47bfedb9dc9..9991889f5a1 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalBoundaryLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java
index 20cca99ce3b..87e3ba0e56e 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalInputLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java
index 0f4c77030b0..ec7e7de8e5b 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConditionalOutputLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java
index 644e2f2a1f2..c2ab52420e7 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConstantLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -27,6 +27,6 @@ public class VConstantLayer extends VRoot {
public native @NoException(true) void setWeights(@ByVal Weights weights);
public native @ByVal @NoException(true) Weights getWeights();
- public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions();
+ public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java
index 043a9fec301..c8b92c0fd8c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VConvolutionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,35 +25,27 @@ public class VConvolutionLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VConvolutionLayer(Pointer p) { super(p); }
- public native @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize);
- public native @ByVal @NoException(true) DimsHW getKernelSize();
- public native @NoException(true) void setNbOutputMaps(int nbOutputMaps);
- public native @NoException(true) int getNbOutputMaps();
- public native @NoException(true) void setStride(@ByVal DimsHW stride);
- public native @ByVal @NoException(true) DimsHW getStride();
- public native @NoException(true) void setPadding(@ByVal DimsHW padding);
- public native @ByVal @NoException(true) DimsHW getPadding();
- public native @NoException(true) void setNbGroups(int nbGroups);
- public native @NoException(true) int getNbGroups();
+ public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps);
+ public native @Cast("int64_t") @NoException(true) long getNbOutputMaps();
+ public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups);
+ public native @Cast("int64_t") @NoException(true) long getNbGroups();
public native @NoException(true) void setKernelWeights(@ByVal Weights weights);
public native @ByVal @NoException(true) Weights getKernelWeights();
public native @NoException(true) void setBiasWeights(@ByVal Weights weights);
public native @ByVal @NoException(true) Weights getBiasWeights();
- public native @NoException(true) void setDilation(@ByVal DimsHW dilation);
- public native @ByVal @NoException(true) DimsHW getDilation();
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
public native @NoException(true) void setPaddingMode(PaddingMode paddingMode);
public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode);
public native @NoException(true) PaddingMode getPaddingMode();
- public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd();
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
- public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd();
+ public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd();
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
+ public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java
index 51e4225e53a..463bcdd9a0d 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VCudaEngine.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,38 +25,20 @@ public class VCudaEngine extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VCudaEngine(Pointer p) { super(p); }
- public native @NoException(true) int getNbBindings();
- public native @NoException(true) int getBindingIndex(String name);
- public native @NoException(true) int getBindingIndex(@Cast("const char*") BytePointer name);
- public native @NoException(true) String getBindingName(int bindingIndex);
- public native @Cast("bool") @NoException(true) boolean bindingIsInput(int bindingIndex);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex);
- public native @NoException(true) DataType getBindingDataType(int bindingIndex);
- public native @NoException(true) int getMaxBatchSize();
+ public native @NoException(true) ICudaEngine getPImpl();
public native @NoException(true) int getNbLayers();
public native @NoException(true) IHostMemory serialize();
- public native @NoException(true) IExecutionContext createExecutionContext();
- public native @NoException(true) TensorLocation getLocation(int bindingIndex);
+ public native @NoException(true) IExecutionContext createExecutionContext(ExecutionContextAllocationStrategy strategy);
+ public native @NoException(true) IExecutionContext createExecutionContext(@Cast("nvinfer1::ExecutionContextAllocationStrategy") int strategy);
public native @NoException(true) IExecutionContext createExecutionContextWithoutDeviceMemory();
public native @Cast("size_t") @NoException(true) long getDeviceMemorySize();
public native @Cast("bool") @NoException(true) boolean isRefittable();
- public native @NoException(true) int getBindingBytesPerComponent(int bindingIndex);
- public native @NoException(true) int getBindingComponentsPerElement(int bindingIndex);
- public native @NoException(true) TensorFormat getBindingFormat(int bindingIndex);
- public native @NoException(true) String getBindingFormatDesc(int bindingIndex);
- public native @NoException(true) int getBindingVectorizedDim(int bindingIndex);
public native @NoException(true) String getName();
public native @NoException(true) int getNbOptimizationProfiles();
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions(
- int bindingIndex, int profileIndex, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileDimensions(
- int bindingIndex, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
- public native @Const @NoException(true) IntPointer getProfileShapeValues(
- int profileIndex, int inputIndex, OptProfileSelector select);
- public native @Const @NoException(true) IntBuffer getProfileShapeValues(
- int profileIndex, int inputIndex, @Cast("nvinfer1::OptProfileSelector") int select);
- public native @Cast("bool") @NoException(true) boolean isShapeBinding(int bindingIndex);
- public native @Cast("bool") @NoException(true) boolean isExecutionBinding(int bindingIndex);
+ public native @Const @NoException(true) IntPointer getProfileTensorValues(
+ String tensorName, int profileIndex, OptProfileSelector select);
+ public native @Const @NoException(true) IntBuffer getProfileTensorValues(
+ @Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
public native @NoException(true) EngineCapability getEngineCapability();
public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
public native @NoException(true) IErrorRecorder getErrorRecorder();
@@ -64,8 +46,8 @@ public class VCudaEngine extends VRoot {
public native @Cast("nvinfer1::TacticSources") @NoException(true) int getTacticSources();
public native @NoException(true) ProfilingVerbosity getProfilingVerbosity();
public native @NoException(true) IEngineInspector createEngineInspector();
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName);
public native @NoException(true) DataType getTensorDataType(String tensorName);
public native @NoException(true) @Cast("nvinfer1::DataType") int getTensorDataType(@Cast("const char*") BytePointer tensorName);
public native @NoException(true) TensorLocation getTensorLocation(String tensorName);
@@ -84,14 +66,13 @@ public class VCudaEngine extends VRoot {
public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDesc(@Cast("const char*") BytePointer tensorName);
public native @NoException(true) int getTensorVectorizedDim(String tensorName);
public native @NoException(true) int getTensorVectorizedDim(@Cast("const char*") BytePointer tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(
String tensorName, int profileIndex, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getProfileShape(
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getProfileShape(
@Cast("const char*") BytePointer tensorName, int profileIndex, @Cast("nvinfer1::OptProfileSelector") int select);
public native @NoException(true) int getNbIOTensors();
public native @NoException(true) String getIOTensorName(int index);
public native @NoException(true) HardwareCompatibilityLevel getHardwareCompatibilityLevel();
- public native @NoException(true) ICudaEngine getPImpl();
public native @NoException(true) int getNbAuxStreams();
public native @NoException(true) int getTensorBytesPerComponentV2(String tensorName, int profileIndex);
@@ -104,4 +85,18 @@ public class VCudaEngine extends VRoot {
public native @NoException(true) @Cast("const char*") BytePointer getTensorFormatDescV2(@Cast("const char*") BytePointer tensorName, int profileIndex);
public native @NoException(true) int getTensorVectorizedDimV2(String tensorName, int profileIndex);
public native @NoException(true) int getTensorVectorizedDimV2(@Cast("const char*") BytePointer tensorName, int profileIndex);
+
+ public native @NoException(true) ISerializationConfig createSerializationConfig();
+ public native @NoException(true) IHostMemory serializeWithConfig(@ByRef ISerializationConfig config);
+
+ public native @Cast("size_t") @NoException(true) long getDeviceMemorySizeForProfile(int profileIndex);
+ public native @NoException(true) IRefitter createRefitter(@ByRef ILogger logger);
+
+ public native @Cast("bool") @NoException(true) boolean setWeightStreamingBudget(@Cast("int64_t") long gpuMemoryBudget);
+ public native @Cast("int64_t") @NoException(true) long getWeightStreamingBudget();
+ public native @Cast("int64_t") @NoException(true) long getMinimumWeightStreamingBudget();
+ public native @Cast("int64_t") @NoException(true) long getStreamableWeightsSize();
+
+ public native @Cast("bool") @NoException(true) boolean isDebugTensor(String name);
+ public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Cast("const char*") BytePointer name);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java
index c76ca6716ae..93ccaa7621a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDeconvolutionLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,33 +25,27 @@ public class VDeconvolutionLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VDeconvolutionLayer(Pointer p) { super(p); }
- public native @NoException(true) void setKernelSize(@ByVal DimsHW kernelSize);
- public native @ByVal @NoException(true) DimsHW getKernelSize();
- public native @NoException(true) void setNbOutputMaps(int nbOutputMaps);
- public native @NoException(true) int getNbOutputMaps();
- public native @NoException(true) void setStride(@ByVal DimsHW stride);
- public native @ByVal @NoException(true) DimsHW getStride();
- public native @NoException(true) void setPadding(@ByVal DimsHW padding);
- public native @ByVal @NoException(true) DimsHW getPadding();
- public native @NoException(true) void setNbGroups(int nbGroups);
- public native @NoException(true) int getNbGroups();
+ public native @NoException(true) void setNbOutputMaps(@Cast("int64_t") long nbOutputMaps);
+ public native @Cast("int64_t") @NoException(true) long getNbOutputMaps();
+ public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups);
+ public native @Cast("int64_t") @NoException(true) long getNbGroups();
public native @NoException(true) void setKernelWeights(@ByVal Weights weights);
public native @ByVal @NoException(true) Weights getKernelWeights();
public native @NoException(true) void setBiasWeights(@ByVal Weights weights);
public native @ByVal @NoException(true) Weights getBiasWeights();
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
public native @NoException(true) void setPaddingMode(PaddingMode paddingMode);
public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode);
public native @NoException(true) PaddingMode getPaddingMode();
- public native @NoException(true) void setKernelSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getKernelSizeNd();
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
- public native @NoException(true) void setDilationNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 dilation);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDilationNd();
+ public native @NoException(true) void setKernelSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getKernelSizeNd();
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
+ public native @NoException(true) void setDilationNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dilation);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDilationNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java
index de71528ff90..2303d02fa38 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDequantizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -27,4 +27,7 @@ public class VDequantizeLayer extends VRoot {
public native @NoException(true) int getAxis();
public native @NoException(true) void setAxis(int axis);
+ public native @NoException(true) DataType getToType();
+ public native @NoException(true) void setToType(DataType toType);
+ public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java
index 5efaa49e8da..117b48b7d60 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VDimensionExpr.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -26,5 +26,6 @@ public class VDimensionExpr extends VRoot {
public VDimensionExpr(Pointer p) { super(p); }
public native @Cast("bool") boolean isConstant();
- public native int getConstantValue();
+ public native @Cast("int64_t") long getConstantValue();
+ public native @Cast("bool") boolean isSizeTensor();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java
index 322f158d9cc..1abe6059df9 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEinsumLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java
index 171488f4909..beba6204e6c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VElementWiseLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java
index 95654b852f8..08d714c6504 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VEngineInspector.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,6 +25,7 @@ public class VEngineInspector extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VEngineInspector(Pointer p) { super(p); }
+ public native @NoException(true) IEngineInspector getPImpl();
public native @Cast("bool") @NoException(true) boolean setExecutionContext(@Const IExecutionContext context);
public native @Const @NoException(true) IExecutionContext getExecutionContext();
public native @NoException(true) String getLayerInformation(int layerIndex, LayerInformationFormat format);
@@ -33,5 +34,4 @@ public class VEngineInspector extends VRoot {
public native @NoException(true) @Cast("const char*") BytePointer getEngineInformation(@Cast("nvinfer1::LayerInformationFormat") int format);
public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
public native @NoException(true) IErrorRecorder getErrorRecorder();
- public native @NoException(true) IEngineInspector getPImpl();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java
index 4b2db0b596c..7b1c4b8c353 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExecutionContext.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,12 +25,7 @@ public class VExecutionContext extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VExecutionContext(Pointer p) { super(p); }
- public native @Cast("bool") @NoException(true) boolean execute(int batchSize, @Cast("void*const*") PointerPointer bindings);
- public native @Cast("bool") @NoException(true) boolean execute(int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings);
- public native @Cast("bool") @NoException(true) boolean enqueue(
- int batchSize, @Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed);
- public native @Cast("bool") @NoException(true) boolean enqueue(
- int batchSize, @Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed);
+ public native @NoException(true) IExecutionContext getPImpl();
public native @NoException(true) void setDebugSync(@Cast("bool") boolean sync);
public native @Cast("bool") @NoException(true) boolean getDebugSync();
public native @NoException(true) void setProfiler(IProfiler arg0);
@@ -40,41 +35,31 @@ public class VExecutionContext extends VRoot {
public native @NoException(true) void setName(@Cast("const char*") BytePointer name);
public native @NoException(true) String getName();
public native @NoException(true) void setDeviceMemory(Pointer memory);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrides(int bindingIndex);
- public native @Cast("bool") @NoException(true) boolean setOptimizationProfile(int profileIndex);
public native @NoException(true) int getOptimizationProfile();
- public native @Cast("bool") @NoException(true) boolean setBindingDimensions(int bindingIndex, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getBindingDimensions(int bindingIndex);
- public native @Cast("bool") @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const IntPointer data);
- public native @Cast("bool") @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const IntBuffer data);
- public native @Cast("bool") @NoException(true) boolean setInputShapeBinding(int bindingIndex, @Const int[] data);
- public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, IntPointer data);
- public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, IntBuffer data);
- public native @Cast("bool") @NoException(true) boolean getShapeBinding(int bindingIndex, int[] data);
public native @Cast("bool") @NoException(true) boolean allInputDimensionsSpecified();
public native @Cast("bool") @NoException(true) boolean allInputShapesSpecified();
public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
public native @NoException(true) IErrorRecorder getErrorRecorder();
public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") PointerPointer bindings);
public native @Cast("bool") @NoException(true) boolean executeV2(@Cast("void*const*") @ByPtrPtr Pointer bindings);
- public native @Cast("bool") @NoException(true) boolean enqueueV2(@Cast("void*const*") PointerPointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed);
- public native @Cast("bool") @NoException(true) boolean enqueueV2(@Cast("void*const*") @ByPtrPtr Pointer bindings, CUstream_st stream, @ByPtrPtr CUevent_st inputConsumed);
public native @Cast("bool") @NoException(true) boolean setOptimizationProfileAsync(int profileIndex, CUstream_st stream);
public native @NoException(true) void setEnqueueEmitsProfile(@Cast("bool") boolean enqueueEmitsProfile);
public native @Cast("bool") @NoException(true) boolean getEnqueueEmitsProfile();
public native @Cast("bool") @NoException(true) boolean reportToProfiler();
- public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims);
- public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims32 dims);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(String tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorShape(@Cast("const char*") BytePointer tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(String tensorName);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getTensorStrides(@Cast("const char*") BytePointer tensorName);
+ public native @Cast("bool") @NoException(true) boolean setInputShape(String tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims);
+ public native @Cast("bool") @NoException(true) boolean setInputShape(@Cast("const char*") BytePointer tensorName, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(String tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorShape(@Cast("const char*") BytePointer tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(String tensorName);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getTensorStrides(@Cast("const char*") BytePointer tensorName);
public native @Cast("bool") @NoException(true) boolean setTensorAddress(String tensorName, Pointer data);
public native @Cast("bool") @NoException(true) boolean setTensorAddress(@Cast("const char*") BytePointer tensorName, Pointer data);
public native @Const @NoException(true) Pointer getTensorAddress(String tensorName);
public native @Const @NoException(true) Pointer getTensorAddress(@Cast("const char*") BytePointer tensorName);
public native @Cast("bool") @NoException(true) boolean setInputTensorAddress(String tensorName, @Const Pointer data);
public native @Cast("bool") @NoException(true) boolean setInputTensorAddress(@Cast("const char*") BytePointer tensorName, @Const Pointer data);
+ public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(String tensorName, Pointer data);
+ public native @Cast("bool") @NoException(true) boolean setOutputTensorAddress(@Cast("const char*") BytePointer tensorName, Pointer data);
public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") PointerPointer tensorNames);
public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr BytePointer tensorNames);
public native @NoException(true) int inferShapes(int nbMaxNames, @Cast("const char**") @ByPtrPtr ByteBuffer tensorNames);
@@ -97,6 +82,13 @@ public class VExecutionContext extends VRoot {
public native @Cast("bool") @NoException(true) boolean setNvtxVerbosity(ProfilingVerbosity verbosity);
public native @Cast("bool") @NoException(true) boolean setNvtxVerbosity(@Cast("nvinfer1::ProfilingVerbosity") int verbosity);
public native @NoException(true) ProfilingVerbosity getNvtxVerbosity();
- public native @NoException(true) IExecutionContext getPImpl();
public native @NoException(true) void setAuxStreams(@ByPtrPtr CUstream_st auxStreams, int nbStreams);
+ public native @Cast("bool") @NoException(true) boolean setDebugListener(IDebugListener listener);
+ public native @NoException(true) IDebugListener getDebugListener();
+ public native @Cast("bool") @NoException(true) boolean setTensorDebugState(String name, @Cast("bool") boolean flag);
+ public native @Cast("bool") @NoException(true) boolean setTensorDebugState(@Cast("const char*") BytePointer name, @Cast("bool") boolean flag);
+ public native @Cast("bool") @NoException(true) boolean getDebugState(String name);
+ public native @Cast("bool") @NoException(true) boolean getDebugState(@Cast("const char*") BytePointer name);
+ public native @Cast("bool") @NoException(true) boolean setAllTensorsDebugState(@Cast("bool") boolean flag);
+ public native @Cast("size_t") @NoException(true) long updateDeviceMemorySizeForShapes();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java
index fa5194f32ef..7113f628ead 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VExprBuilder.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,9 +25,11 @@ public class VExprBuilder extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VExprBuilder(Pointer p) { super(p); }
- public native @Const IDimensionExpr constant(int value);
+ public native @Const IDimensionExpr constant(@Cast("int64_t") long value);
public native @Const IDimensionExpr operation(
DimensionOperation op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second);
public native @Const IDimensionExpr operation(
@Cast("nvinfer1::DimensionOperation") int op, @Const @ByRef IDimensionExpr first, @Const @ByRef IDimensionExpr second);
+ public native @Const IDimensionExpr declareSizeTensor(
+ int outputIndex, @Const @ByRef IDimensionExpr opt, @Const @ByRef IDimensionExpr upper);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java
index 08cb919fcfc..f764d46185a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFillLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,8 +25,8 @@ public class VFillLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VFillLayer(Pointer p) { super(p); }
- public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions();
+ public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions();
public native @NoException(true) void setOperation(FillOperation op);
public native @NoException(true) void setOperation(@Cast("nvinfer1::FillOperation") int op);
public native @NoException(true) FillOperation getOperation();
@@ -34,4 +34,12 @@ public class VFillLayer extends VRoot {
public native @NoException(true) double getAlpha();
public native @NoException(true) void setBeta(double beta);
public native @NoException(true) double getBeta();
+ public native @NoException(true) void setAlphaInt64(@Cast("int64_t") long alpha);
+ public native @Cast("int64_t") @NoException(true) long getAlphaInt64();
+ public native @NoException(true) void setBetaInt64(@Cast("int64_t") long beta);
+ public native @Cast("int64_t") @NoException(true) long getBetaInt64();
+ public native @Cast("bool") @NoException(true) boolean isAlphaBetaInt64();
+ public native @NoException(true) DataType getToType();
+ public native @NoException(true) void setToType(DataType toType);
+ public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java
index b7edd5a86ca..f648f8e1427 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGatherLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java
index 5732b9b10b8..9d299a3631f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VGridSampleLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java
index 37b1b519308..bbcaeb66f65 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VHostMemory.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java
index 6654ba3e52b..e6f9107809c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIdentityLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java
index 859fa0cc397..6fe180b929f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIfConditional.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java
index d4003bf2dda..8717e14948a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VIteratorLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java
index 3d5b3b5375c..d7e4bb232aa 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLRNLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,8 +25,8 @@ public class VLRNLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VLRNLayer(Pointer p) { super(p); }
- public native @NoException(true) void setWindowSize(int windowSize);
- public native @NoException(true) int getWindowSize();
+ public native @NoException(true) void setWindowSize(@Cast("int64_t") long windowSize);
+ public native @Cast("int64_t") @NoException(true) long getWindowSize();
public native @NoException(true) void setAlpha(float alpha);
public native @NoException(true) float getAlpha();
public native @NoException(true) void setBeta(float beta);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java
index cdf47ece051..e3f8409acd6 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java
index f827c6bc663..ca7c5dfc0a4 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoop.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java
index 56106d38e85..0abb8ae8f30 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopBoundaryLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java
index 03aaf11d24b..f2660e811e7 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VLoopOutputLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java
index 0d74832ae1d..238a009f9ea 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VMatrixMultiplyLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java
index b55fcce5221..5d129d454a6 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNMSLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java
index d2c56f2168c..0b613e85553 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNetworkDefinition.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,30 +25,23 @@ public class VNetworkDefinition extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VNetworkDefinition(Pointer p) { super(p); }
- public native @NoException(true) ITensor addInput(String name, DataType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
+ public native @NoException(true) ITensor addInput(String name, DataType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @NoException(true) ITensor addInput(@Cast("const char*") BytePointer name, @Cast("nvinfer1::DataType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
public native @NoException(true) void markOutput(@ByRef ITensor tensor);
- public native @NoException(true) IConvolutionLayer addConvolution(@ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize,
- @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
- public native @NoException(true) IFullyConnectedLayer addFullyConnected(
- @ByRef ITensor input, int nbOutputs, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, ActivationType type);
public native @NoException(true) IActivationLayer addActivation(@ByRef ITensor input, @Cast("nvinfer1::ActivationType") int type);
- public native @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, PoolingType type, @ByVal DimsHW windowSize);
- public native @NoException(true) IPoolingLayer addPooling(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @ByVal DimsHW windowSize);
- public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, int window, float alpha, float beta, float k);
- public native @NoException(true) IScaleLayer addScale(@ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power);
- public native @NoException(true) IScaleLayer addScale(@ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power);
+ public native @NoException(true) ILRNLayer addLRN(@ByRef ITensor input, @Cast("int64_t") long window, float alpha, float beta, float k);
+ public native @NoException(true) IScaleLayer addScale(
+ @ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power);
+ public native @NoException(true) IScaleLayer addScale(
+ @ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power);
public native @NoException(true) ISoftMaxLayer addSoftMax(@ByRef ITensor input);
public native @NoException(true) IConcatenationLayer addConcatenation(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs);
public native @NoException(true) IConcatenationLayer addConcatenation(@ByPtrPtr ITensor inputs, int nbInputs);
- public native @NoException(true) IDeconvolutionLayer addDeconvolution(
- @ByRef ITensor input, int nbOutputMaps, @ByVal DimsHW kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
public native @NoException(true) IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, ElementWiseOperation op);
public native @NoException(true) IElementWiseLayer addElementWise(@ByRef ITensor input1, @ByRef ITensor input2, @Cast("nvinfer1::ElementWiseOperation") int op);
public native @NoException(true) IUnaryLayer addUnary(@ByRef ITensor input, UnaryOperation operation);
public native @NoException(true) IUnaryLayer addUnary(@ByRef ITensor input, @Cast("nvinfer1::UnaryOperation") int operation);
- public native @NoException(true) IPaddingLayer addPadding(@ByRef ITensor input, @ByVal DimsHW prePadding, @ByVal DimsHW postPadding);
public native @NoException(true) IShuffleLayer addShuffle(@ByRef ITensor input);
public native @NoException(true) int getNbLayers();
public native @NoException(true) ILayer getLayer(int index);
@@ -68,17 +61,17 @@ public class VNetworkDefinition extends VRoot {
@ByRef ITensor input0, MatrixOperation op0, @ByRef ITensor input1, MatrixOperation op1);
public native @NoException(true) IMatrixMultiplyLayer addMatrixMultiply(
@ByRef ITensor input0, @Cast("nvinfer1::MatrixOperation") int op0, @ByRef ITensor input1, @Cast("nvinfer1::MatrixOperation") int op1);
- public native @NoException(true) IConstantLayer addConstant(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @ByVal Weights weights);
- public native @NoException(true) IRNNv2Layer addRNNv2(
- @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, RNNOperation op);
- public native @NoException(true) IRNNv2Layer addRNNv2(
- @ByRef ITensor input, int layerCount, int hiddenSize, int maxSeqLen, @Cast("nvinfer1::RNNOperation") int op);
+ public native @NoException(true) IConstantLayer addConstant(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @ByVal Weights weights);
public native @NoException(true) IIdentityLayer addIdentity(@ByRef ITensor input);
public native @NoException(true) void removeTensor(@ByRef ITensor tensor);
public native @NoException(true) void unmarkOutput(@ByRef ITensor tensor);
public native @NoException(true) IPluginV2Layer addPluginV2(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @ByRef IPluginV2 plugin);
public native @NoException(true) IPluginV2Layer addPluginV2(@ByPtrPtr ITensor inputs, int nbInputs, @ByRef IPluginV2 plugin);
- public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @ByVal @Cast("nvinfer1::Dims*") Dims32 start, @ByVal @Cast("nvinfer1::Dims*") Dims32 size, @ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
+ public native @NoException(true) IPluginV3Layer addPluginV3(@Cast("nvinfer1::ITensor*const*") PointerPointer inputs, int nbInputs, @Cast("nvinfer1::ITensor*const*") PointerPointer shapeInputs,
+ int nbShapeInputs, @ByRef IPluginV3 plugin);
+ public native @NoException(true) IPluginV3Layer addPluginV3(@ByPtrPtr ITensor inputs, int nbInputs, @ByPtrPtr ITensor shapeInputs,
+ int nbShapeInputs, @ByRef IPluginV3 plugin);
+ public native @NoException(true) ISliceLayer addSlice(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 start, @Cast("const nvinfer1::Dims*") @ByRef Dims64 size, @Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
public native @NoException(true) void setName(String name);
public native @NoException(true) void setName(@Cast("const char*") BytePointer name);
public native @NoException(true) String getName();
@@ -88,22 +81,21 @@ public class VNetworkDefinition extends VRoot {
public native @Cast("bool") @NoException(true) boolean unmarkOutputForShapes(@ByRef ITensor tensor);
public native @NoException(true) IParametricReLULayer addParametricReLU(@ByRef ITensor input, @ByRef ITensor slope);
public native @NoException(true) IConvolutionLayer addConvolutionNd(
- @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
- public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize);
- public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize);
+ @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
+ public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, PoolingType type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize);
+ public native @NoException(true) IPoolingLayer addPoolingNd(@ByRef ITensor input, @Cast("nvinfer1::PoolingType") int type, @Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize);
public native @NoException(true) IDeconvolutionLayer addDeconvolutionNd(
- @ByRef ITensor input, int nbOutputMaps, @ByVal @Cast("nvinfer1::Dims*") Dims32 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
+ @ByRef ITensor input, @Cast("int64_t") long nbOutputMaps, @Cast("const nvinfer1::Dims*") @ByRef Dims64 kernelSize, @ByVal Weights kernelWeights, @ByVal Weights biasWeights);
public native @NoException(true) IScaleLayer addScaleNd(
@ByRef ITensor input, ScaleMode mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis);
public native @NoException(true) IScaleLayer addScaleNd(
@ByRef ITensor input, @Cast("nvinfer1::ScaleMode") int mode, @ByVal Weights shift, @ByVal Weights scale, @ByVal Weights power, int channelAxis);
public native @NoException(true) IResizeLayer addResize(@ByRef ITensor input);
- public native @Cast("bool") @NoException(true) boolean hasExplicitPrecision();
public native @NoException(true) ILoop addLoop();
public native @NoException(true) ISelectLayer addSelect(@ByRef ITensor condition, @ByRef ITensor thenInput, @ByRef ITensor elseInput);
- public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, FillOperation op);
- public native @NoException(true) IFillLayer addFill(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions, @Cast("nvinfer1::FillOperation") int op);
- public native @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @ByVal @Cast("nvinfer1::Dims*") Dims32 prePadding, @ByVal @Cast("nvinfer1::Dims*") Dims32 postPadding);
+ public native @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op);
+ public native @NoException(true) IFillLayer addFill(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op);
+ public native @NoException(true) IPaddingLayer addPaddingNd(@ByRef ITensor input, @Cast("const nvinfer1::Dims*") @ByRef Dims64 prePadding, @Cast("const nvinfer1::Dims*") @ByRef Dims64 postPadding);
public native @Cast("bool") @NoException(true) boolean setWeightsName(@ByVal Weights weights, String name);
public native @Cast("bool") @NoException(true) boolean setWeightsName(@ByVal Weights weights, @Cast("const char*") BytePointer name);
public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
@@ -130,4 +122,16 @@ public class VNetworkDefinition extends VRoot {
public native @NoException(true) ICastLayer addCast(@ByRef ITensor input, DataType toType);
public native @NoException(true) ICastLayer addCast(@ByRef ITensor input, @Cast("nvinfer1::DataType") int toType);
public native @ByRef @NoException(true) IBuilder getBuilder();
+ public native @Cast("nvinfer1::NetworkDefinitionCreationFlags") @NoException(true) int getFlags();
+ public native @Cast("bool") @NoException(true) boolean getFlag(NetworkDefinitionCreationFlag networkDefinitionCreationFlag);
+ public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::NetworkDefinitionCreationFlag") int networkDefinitionCreationFlag);
+ public native @NoException(true) IQuantizeLayer addQuantizeV2(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType);
+ public native @NoException(true) IQuantizeLayer addQuantizeV2(@ByRef ITensor input, @ByRef ITensor scale, @Cast("nvinfer1::DataType") int outputType);
+ public native @NoException(true) IDequantizeLayer addDequantizeV2(@ByRef ITensor input, @ByRef ITensor scale, DataType outputType);
+ public native @NoException(true) IDequantizeLayer addDequantizeV2(@ByRef ITensor input, @ByRef ITensor scale, @Cast("nvinfer1::DataType") int outputType);
+ public native @NoException(true) IFillLayer addFillV2(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, FillOperation op, DataType outputType);
+ public native @NoException(true) IFillLayer addFillV2(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions, @Cast("nvinfer1::FillOperation") int op, @Cast("nvinfer1::DataType") int outputType);
+ public native @Cast("bool") @NoException(true) boolean markDebug(@ByRef ITensor tensor);
+ public native @Cast("bool") @NoException(true) boolean unmarkDebug(@ByRef ITensor tensor);
+ public native @Cast("bool") @NoException(true) boolean isDebugTensor(@Const @ByRef ITensor tensor);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java
index 9ba91611db6..158352603bb 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNonZeroLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java
index 0b447211df1..7735c39cdd6 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VNormalizationLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -29,8 +29,8 @@ public class VNormalizationLayer extends VRoot {
public native @NoException(true) float getEpsilon();
public native @NoException(true) void setAxes(@Cast("uint32_t") int axesMask);
public native @Cast("uint32_t") @NoException(true) int getAxes();
- public native @NoException(true) void setNbGroups(int nbGroups);
- public native @NoException(true) int getNbGroups();
+ public native @NoException(true) void setNbGroups(@Cast("int64_t") long nbGroups);
+ public native @Cast("int64_t") @NoException(true) long getNbGroups();
public native @NoException(true) void setComputePrecision(DataType type);
public native @NoException(true) void setComputePrecision(@Cast("nvinfer1::DataType") int type);
public native @NoException(true) DataType getComputePrecision();
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java
index a5ce0c38743..5b28db15e2c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOneHotLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java
index d88fb92bbfb..aa10afb4f11 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VOptimizationProfile.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,10 +25,10 @@ public class VOptimizationProfile extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VOptimizationProfile(Pointer p) { super(p); }
- public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims);
- public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @ByVal @Cast("nvinfer1::Dims*") Dims32 dims);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(String inputName, OptProfileSelector select);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select);
+ public native @Cast("bool") @NoException(true) boolean setDimensions(String inputName, OptProfileSelector select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims);
+ public native @Cast("bool") @NoException(true) boolean setDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select, @Cast("const nvinfer1::Dims*") @ByRef Dims64 dims);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(String inputName, OptProfileSelector select);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions(@Cast("const char*") BytePointer inputName, @Cast("nvinfer1::OptProfileSelector") int select);
public native @Cast("bool") @NoException(true) boolean setShapeValues(
String inputName, OptProfileSelector select, @Const IntPointer values, int nbValues);
public native @Cast("bool") @NoException(true) boolean setShapeValues(
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java
index ae6f35f03b5..75e6bed3783 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPaddingLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,12 +25,8 @@ public class VPaddingLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VPaddingLayer(Pointer p) { super(p); }
- public native @NoException(true) void setPrePadding(@ByVal DimsHW padding);
- public native @ByVal @NoException(true) DimsHW getPrePadding();
- public native @NoException(true) void setPostPadding(@ByVal DimsHW padding);
- public native @ByVal @NoException(true) DimsHW getPostPadding();
- public native @NoException(true) void setPrePaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePaddingNd();
- public native @NoException(true) void setPostPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPaddingNd();
+ public native @NoException(true) void setPrePaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePaddingNd();
+ public native @NoException(true) void setPostPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPaddingNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java
index a12b4ff502c..6d30fcf197f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VParametricReLULayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java
index b3688bc5d65..e2e3df8b8c4 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java
index 28619f5ec0e..71a28d647fc 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV2Layer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java
similarity index 55%
rename from tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java
rename to tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java
index caecea44990..a6a8de82d67 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VFullyConnectedLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPluginV3Layer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -20,15 +20,10 @@
@Namespace("nvinfer1::apiv") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class VFullyConnectedLayer extends VRoot {
+public class VPluginV3Layer extends VRoot {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public VFullyConnectedLayer(Pointer p) { super(p); }
+ public VPluginV3Layer(Pointer p) { super(p); }
- public native @NoException(true) void setNbOutputChannels(int nbOutputs);
- public native @NoException(true) int getNbOutputChannels();
- public native @NoException(true) void setKernelWeights(@ByVal Weights weights);
- public native @ByVal @NoException(true) Weights getKernelWeights();
- public native @NoException(true) void setBiasWeights(@ByVal Weights weights);
- public native @ByVal @NoException(true) Weights getBiasWeights();
+ public native @ByRef @NoException(true) IPluginV3 getPlugin();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java
index b3a3a6751bc..94690b71774 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VPoolingLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -28,27 +28,21 @@ public class VPoolingLayer extends VRoot {
public native @NoException(true) void setPoolingType(PoolingType type);
public native @NoException(true) void setPoolingType(@Cast("nvinfer1::PoolingType") int type);
public native @NoException(true) PoolingType getPoolingType();
- public native @NoException(true) void setWindowSize(@ByVal DimsHW windowSize);
- public native @ByVal @NoException(true) DimsHW getWindowSize();
- public native @NoException(true) void setStride(@ByVal DimsHW stride);
- public native @ByVal @NoException(true) DimsHW getStride();
- public native @NoException(true) void setPadding(@ByVal DimsHW padding);
- public native @ByVal @NoException(true) DimsHW getPadding();
public native @NoException(true) void setBlendFactor(float blendFactor);
public native @NoException(true) float getBlendFactor();
public native @NoException(true) void setAverageCountExcludesPadding(@Cast("bool") boolean exclusive);
public native @Cast("bool") @NoException(true) boolean getAverageCountExcludesPadding();
- public native @NoException(true) void setPrePadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPrePadding();
- public native @NoException(true) void setPostPadding(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPostPadding();
+ public native @NoException(true) void setPrePadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPrePadding();
+ public native @NoException(true) void setPostPadding(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPostPadding();
public native @NoException(true) void setPaddingMode(PaddingMode paddingMode);
public native @NoException(true) void setPaddingMode(@Cast("nvinfer1::PaddingMode") int paddingMode);
public native @NoException(true) PaddingMode getPaddingMode();
- public native @NoException(true) void setWindowSizeNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 windowSize);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getWindowSizeNd();
- public native @NoException(true) void setStrideNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStrideNd();
- public native @NoException(true) void setPaddingNd(@ByVal @Cast("nvinfer1::Dims*") Dims32 padding);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getPaddingNd();
+ public native @NoException(true) void setWindowSizeNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 windowSize);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getWindowSizeNd();
+ public native @NoException(true) void setStrideNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStrideNd();
+ public native @NoException(true) void setPaddingNd(@Cast("const nvinfer1::Dims*") @ByRef Dims64 padding);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getPaddingNd();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java
index 6f8a3996909..2bb6ae90f21 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VQuantizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -27,4 +27,7 @@ public class VQuantizeLayer extends VRoot {
public native @NoException(true) int getAxis();
public native @NoException(true) void setAxis(int axis);
+ public native @NoException(true) DataType getToType();
+ public native @NoException(true) void setToType(DataType toType);
+ public native @NoException(true) void setToType(@Cast("nvinfer1::DataType") int toType);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java
deleted file mode 100644
index 94d18c82b23..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRNNv2Layer.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvinfer;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-
-
-@Namespace("nvinfer1::apiv") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
-public class VRNNv2Layer extends VRoot {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public VRNNv2Layer(Pointer p) { super(p); }
-
- public native @NoException(true) int getLayerCount();
- public native @NoException(true) int getHiddenSize();
- public native @NoException(true) int getMaxSeqLength();
- public native @NoException(true) int getDataLength();
- public native @NoException(true) void setSequenceLengths(@ByRef ITensor seqLengths);
- public native @NoException(true) ITensor getSequenceLengths();
- public native @NoException(true) void setOperation(RNNOperation op);
- public native @NoException(true) void setOperation(@Cast("nvinfer1::RNNOperation") int op);
- public native @NoException(true) RNNOperation getOperation();
- public native @NoException(true) void setInputMode(RNNInputMode op);
- public native @NoException(true) void setInputMode(@Cast("nvinfer1::RNNInputMode") int op);
- public native @NoException(true) RNNInputMode getInputMode();
- public native @NoException(true) void setDirection(RNNDirection op);
- public native @NoException(true) void setDirection(@Cast("nvinfer1::RNNDirection") int op);
- public native @NoException(true) RNNDirection getDirection();
- public native @NoException(true) void setWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights weights);
- public native @NoException(true) void setWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights weights);
- public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW);
- public native @ByVal @NoException(true) Weights getWeightsForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW);
- public native @NoException(true) void setBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW, @ByVal Weights bias);
- public native @NoException(true) void setBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW, @ByVal Weights bias);
- public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, RNNGateType gate, @Cast("bool") boolean isW);
- public native @ByVal @NoException(true) Weights getBiasForGate(int layerIndex, @Cast("nvinfer1::RNNGateType") int gate, @Cast("bool") boolean isW);
- public native @NoException(true) void setHiddenState(@ByRef ITensor hidden);
- public native @NoException(true) ITensor getHiddenState();
- public native @NoException(true) void setCellState(@ByRef ITensor cell);
- public native @NoException(true) ITensor getCellState();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java
index b8004313c57..8ca6ee9d34c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRaggedSoftMaxLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java
index 0da9b90d605..86b044e0469 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRecurrenceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java
index f44e1304ab7..699191ad037 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReduceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java
index c662fd4c9bd..a771c189a64 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRefitter.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,6 +25,7 @@ public class VRefitter extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VRefitter(Pointer p) { super(p); }
+ public native @NoException(true) IRefitter getPImpl();
public native @Cast("bool") @NoException(true) boolean setWeights(String layerName, WeightsRole role, @Const @ByVal Weights weights);
public native @Cast("bool") @NoException(true) boolean setWeights(@Cast("const char*") BytePointer layerName, @Cast("nvinfer1::WeightsRole") int role, @Const @ByVal Weights weights);
public native @Cast("bool") @NoException(true) boolean refitCudaEngine();
@@ -61,4 +62,17 @@ public class VRefitter extends VRoot {
public native @NoException(true) ILogger getLogger();
public native @Cast("bool") @NoException(true) boolean setMaxThreads(int maxThreads);
public native @NoException(true) int getMaxThreads();
+ public native @Cast("bool") @NoException(true) boolean setNamedWeightsWithLocation(String name, @ByVal Weights weights, TensorLocation location);
+ public native @Cast("bool") @NoException(true) boolean setNamedWeightsWithLocation(@Cast("const char*") BytePointer name, @ByVal Weights weights, @Cast("nvinfer1::TensorLocation") int location);
+ public native @ByVal @NoException(true) Weights getNamedWeights(String weightsName);
+ public native @ByVal @NoException(true) Weights getNamedWeights(@Cast("const char*") BytePointer weightsName);
+ public native @NoException(true) TensorLocation getWeightsLocation(String weightsName);
+ public native @NoException(true) @Cast("nvinfer1::TensorLocation") int getWeightsLocation(@Cast("const char*") BytePointer weightsName);
+ public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(String weightsName);
+ public native @Cast("bool") @NoException(true) boolean unsetNamedWeights(@Cast("const char*") BytePointer weightsName);
+ public native @NoException(true) void setWeightsValidation(@Cast("bool") boolean weightsValidation);
+ public native @Cast("bool") @NoException(true) boolean getWeightsValidation();
+ public native @Cast("bool") @NoException(true) boolean refitCudaEngineAsync(CUstream_st stream);
+ public native @ByVal @NoException(true) Weights getWeightsPrototype(String weightsName);
+ public native @ByVal @NoException(true) Weights getWeightsPrototype(@Cast("const char*") BytePointer weightsName);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java
index da319812af2..e793b2952e4 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VResizeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,19 +25,17 @@ public class VResizeLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VResizeLayer(Pointer p) { super(p); }
- public native @NoException(true) void setOutputDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getOutputDimensions();
+ public native @NoException(true) void setOutputDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getOutputDimensions();
public native @NoException(true) void setScales(@Const FloatPointer scales, int nbScales);
public native @NoException(true) void setScales(@Const FloatBuffer scales, int nbScales);
public native @NoException(true) void setScales(@Const float[] scales, int nbScales);
public native @NoException(true) int getScales(int size, FloatPointer scales);
public native @NoException(true) int getScales(int size, FloatBuffer scales);
public native @NoException(true) int getScales(int size, float[] scales);
- public native @NoException(true) void setResizeMode(InterpolationMode resizeMode);
- public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int resizeMode);
+ public native @NoException(true) void setResizeMode(InterpolationMode interpolationMode);
+ public native @NoException(true) void setResizeMode(@Cast("nvinfer1::InterpolationMode") int interpolationMode);
public native @NoException(true) InterpolationMode getResizeMode();
- public native @NoException(true) void setAlignCorners(@Cast("bool") boolean alignCorners);
- public native @Cast("bool") @NoException(true) boolean getAlignCorners();
public native @NoException(true) void setCoordinateTransformation(ResizeCoordinateTransformation coordTransform);
public native @NoException(true) void setCoordinateTransformation(@Cast("nvinfer1::ResizeCoordinateTransformation") int coordTransform);
public native @NoException(true) ResizeCoordinateTransformation getCoordinateTransformation();
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java
index a8553e7fcec..6df98e8bd93 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VReverseSequenceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java
index 5b2ed9eb745..fcd30ae2a5b 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRoot.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java
index 1b422afe130..207a5363fe3 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VRuntime.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,8 +25,9 @@ public class VRuntime extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VRuntime(Pointer p) { super(p); }
- public native @NoException(true) ICudaEngine deserializeCudaEngine(
- @Const Pointer blob, @Cast("std::size_t") long size, IPluginFactory pluginFactory);
+ public native @NoException(true) IRuntime getPImpl();
+ public native @NoException(true) ICudaEngine deserializeCudaEngine(@Const Pointer blob, @Cast("std::size_t") long size);
+ public native @NoException(true) ICudaEngine deserializeCudaEngine(@ByRef IStreamReader streamReader);
public native @NoException(true) void setDLACore(int dlaCore);
public native @NoException(true) int getDLACore();
public native @NoException(true) int getNbDLACores();
@@ -41,7 +42,6 @@ public class VRuntime extends VRoot {
public native @NoException(true) String getTemporaryDirectory();
public native @NoException(true) void setTempfileControlFlags(@Cast("nvinfer1::TempfileControlFlags") int arg0);
public native @Cast("nvinfer1::TempfileControlFlags") @NoException(true) int getTempfileControlFlags();
- public native @NoException(true) IRuntime getPImpl();
public native @ByRef @NoException(true) IPluginRegistry getPluginRegistry();
public native @NoException(true) void setPluginRegistryParent(IPluginRegistry parent);
public native @NoException(true) IRuntime loadRuntime(String path);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java
index 308a7749620..59dc6bad167 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScaleLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java
index 3b329ba34cb..8e58010f05a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VScatterLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java
index ed4f3a1008c..5df8094cee5 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSelectLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java
new file mode 100644
index 00000000000..22d4f4a1ca6
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSerializationConfig.java
@@ -0,0 +1,36 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvinfer;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+
+
+@Namespace("nvinfer1::apiv") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
+public class VSerializationConfig extends VRoot {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public VSerializationConfig(Pointer p) { super(p); }
+
+ public native @Cast("bool") @NoException(true) boolean setFlags(@Cast("nvinfer1::SerializationFlags") int serializationFlags);
+ public native @Cast("nvinfer1::SerializationFlags") @NoException(true) int getFlags();
+ public native @Cast("bool") @NoException(true) boolean clearFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean clearFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean setFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean setFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean getFlag(SerializationFlag serializationFlag);
+ public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvinfer1::SerializationFlag") int serializationFlag);
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java
index 33b4a3fac44..20243776aa1 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShapeLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java
index 8594cfccf03..83183bdf4bd 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VShuffleLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -27,8 +27,8 @@ public class VShuffleLayer extends VRoot {
public native @NoException(true) void setFirstTranspose(@Const @ByRef Permutation permutation);
public native @Const @ByRef @NoException(true) Permutation getFirstTranspose();
- public native @NoException(true) void setReshapeDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getReshapeDimensions();
+ public native @NoException(true) void setReshapeDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getReshapeDimensions();
public native @NoException(true) void setSecondTranspose(@Const @ByRef Permutation permutation);
public native @Const @ByRef @NoException(true) Permutation getSecondTranspose();
public native @NoException(true) void setZeroIsPlaceholder(@Cast("bool") boolean zeroIsPlaceholder);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java
index 716458b9e67..be345b9571b 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSliceLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -25,12 +25,12 @@ public class VSliceLayer extends VRoot {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public VSliceLayer(Pointer p) { super(p); }
- public native @NoException(true) void setStart(@ByVal @Cast("nvinfer1::Dims*") Dims32 start);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStart();
- public native @NoException(true) void setSize(@ByVal @Cast("nvinfer1::Dims*") Dims32 size);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getSize();
- public native @NoException(true) void setStride(@ByVal @Cast("nvinfer1::Dims*") Dims32 stride);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getStride();
+ public native @NoException(true) void setStart(@Cast("const nvinfer1::Dims*") @ByRef Dims64 start);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStart();
+ public native @NoException(true) void setSize(@Cast("const nvinfer1::Dims*") @ByRef Dims64 size);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getSize();
+ public native @NoException(true) void setStride(@Cast("const nvinfer1::Dims*") @ByRef Dims64 stride);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getStride();
public native @NoException(true) void setMode(SampleMode mode);
public native @NoException(true) void setMode(@Cast("nvinfer1::SampleMode") int mode);
public native @NoException(true) SampleMode getMode();
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java
index 35d2bd38569..b9ee7e76ee0 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VSoftMaxLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java
index a3f1e096983..a61095f42d7 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTensor.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -28,8 +28,8 @@ public class VTensor extends VRoot {
public native @NoException(true) void setName(String name);
public native @NoException(true) void setName(@Cast("const char*") BytePointer name);
public native @NoException(true) String getName();
- public native @NoException(true) void setDimensions(@ByVal @Cast("nvinfer1::Dims*") Dims32 dimensions);
- public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims32 getDimensions();
+ public native @NoException(true) void setDimensions(@Cast("const nvinfer1::Dims*") @ByRef Dims64 dimensions);
+ public native @ByVal @Cast("nvinfer1::Dims*") @NoException(true) Dims64 getDimensions();
public native @NoException(true) void setType(DataType type);
public native @NoException(true) void setType(@Cast("nvinfer1::DataType") int type);
public native @NoException(true) DataType getType();
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java
index 7a5b6171598..3d1edd8709b 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTimingCache.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java
index 63e6c1c9d1a..0fe298bbb2f 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTopKLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java
index d9dd298ad16..84b7b42d0f4 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VTripLimitLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java
index 4c11890d060..b8717de5bf7 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/VUnaryLayer.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java
index f37c89937a0..1dedbbfbd55 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/Weights.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java
index 8329d57c519..3645248cef1 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cublasContext.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -18,7 +18,7 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
- /** Forward declaration of cublasContext to use in other interfaces */
+ /** Forward declaration of cublasContext to use in other interfaces. */
@Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class cublasContext extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java
index 3fed7c9742a..c70fa7e1ee9 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer/cudnnContext.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer;
@@ -18,7 +18,7 @@
import static org.bytedeco.tensorrt.global.nvinfer.*;
- /** Forward declaration of cudnnContext to use in other interfaces */
+ /** Forward declaration of cudnnContext to use in other interfaces. */
@Opaque @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer.class)
public class cudnnContext extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java
index f79c3a19985..a4fd22acb63 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/DetectionOutputParameters.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -22,23 +22,16 @@
/**
- * \brief The DetectionOutput plugin layer generates the detection output based on location and confidence predictions by doing non maximum suppression.
- * This plugin first decodes the bounding boxes based on the anchors generated. It then performs non_max_suppression on the decoded bounding boxes.
+ * \struct DetectionOutputParameters
+ *
+ * \brief The DetectionOutput plugin layer generates the detection output
+ * based on location and confidence predictions by doing non maximum suppression.
+ *
+ * This plugin first decodes the bounding boxes based on the anchors generated.
+ * It then performs non_max_suppression on the decoded bounding boxes.
* DetectionOutputParameters defines a set of parameters for creating the DetectionOutput plugin layer.
- * It contains:
- * @param shareLocation If true, bounding box are shared among different classes.
- * @param varianceEncodedInTarget If true, variance is encoded in target. Otherwise we need to adjust the predicted offset accordingly.
- * @param backgroundLabelId Background label ID. If there is no background class, set it as -1.
- * @param numClasses Number of classes to be predicted.
- * @param topK Number of boxes per image with top confidence scores that are fed into the NMS algorithm.
- * @param keepTopK Number of total bounding boxes to be kept per image after NMS step.
- * @param confidenceThreshold Only consider detections whose confidences are larger than a threshold.
- * @param nmsThreshold Threshold to be used in NMS.
- * @param codeType Type of coding method for bbox.
- * @param inputOrder Specifies the order of inputs {loc_data, conf_data, priorbox_data}.
- * @param confSigmoid Set to true to calculate sigmoid of confidence scores.
- * @param isNormalized Set to true if bounding box data is normalized by the network.
- * @param isBatchAgnostic Defaults to true. Set to false if prior boxes are unique per batch
+ *
+ * @deprecated Deprecated in TensorRT 10.0. DetectionOutput plugin is deprecated.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class DetectionOutputParameters extends Pointer {
@@ -58,18 +51,33 @@ public class DetectionOutputParameters extends Pointer {
return new DetectionOutputParameters((Pointer)this).offsetAddress(i);
}
+ /** If true, bounding box are shared among different classes. */
public native @Cast("bool") boolean shareLocation(); public native DetectionOutputParameters shareLocation(boolean setter);
+ /** If true, variance is encoded in target.
+ * Otherwise we need to adjust the predicted offset accordingly. */
public native @Cast("bool") boolean varianceEncodedInTarget(); public native DetectionOutputParameters varianceEncodedInTarget(boolean setter);
+ /** Background label ID. If there is no background class, set it as -1. */
public native int backgroundLabelId(); public native DetectionOutputParameters backgroundLabelId(int setter);
+ /** Number of classes to be predicted. */
public native int numClasses(); public native DetectionOutputParameters numClasses(int setter);
+ /** Number of boxes per image with top confidence scores that are fed
+ * into the NMS algorithm. */
public native int topK(); public native DetectionOutputParameters topK(int setter);
+ /** Number of total bounding boxes to be kept per image after NMS step. */
public native int keepTopK(); public native DetectionOutputParameters keepTopK(int setter);
+ /** Only consider detections whose confidences are larger than a threshold. */
public native float confidenceThreshold(); public native DetectionOutputParameters confidenceThreshold(float setter);
+ /** Threshold to be used in NMS. */
public native float nmsThreshold(); public native DetectionOutputParameters nmsThreshold(float setter);
+ /** Type of coding method for bbox. */
public native CodeTypeSSD codeType(); public native DetectionOutputParameters codeType(CodeTypeSSD setter);
+ /** Specifies the order of inputs {loc_data, conf_data, priorbox_data}. */
public native int inputOrder(int i); public native DetectionOutputParameters inputOrder(int i, int setter);
@MemberGetter public native IntPointer inputOrder();
+ /** Set to true to calculate sigmoid of confidence scores. */
public native @Cast("bool") boolean confSigmoid(); public native DetectionOutputParameters confSigmoid(boolean setter);
+ /** Set to true if bounding box data is normalized by the network. */
public native @Cast("bool") boolean isNormalized(); public native DetectionOutputParameters isNormalized(boolean setter);
+ /** Defaults to true. Set to false if prior boxes are unique per batch. */
public native @Cast("bool") boolean isBatchAgnostic(); public native DetectionOutputParameters isBatchAgnostic(boolean setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java
index 0f432b13fa4..18499c8bd73 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/GridAnchorParameters.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -21,18 +21,11 @@
import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
/**
+ * \struct GridAnchorParameters
+ *
* \brief The Anchor Generator plugin layer generates the prior boxes of designated sizes and aspect ratios across all dimensions (H x W).
* GridAnchorParameters defines a set of parameters for creating the plugin layer for all feature maps.
- * It contains:
- * @param minScale Scale of anchors corresponding to finest resolution.
- * @param maxScale Scale of anchors corresponding to coarsest resolution.
- * @param aspectRatios List of aspect ratios to place on each grid point.
- * @param numAspectRatios Number of elements in aspectRatios.
- * @param H Height of feature map to generate anchors for.
- * @param W Width of feature map to generate anchors for.
- * @param variance Variance for adjusting the prior boxes.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class GridAnchorParameters extends Pointer {
@@ -52,12 +45,19 @@ public class GridAnchorParameters extends Pointer {
return new GridAnchorParameters((Pointer)this).offsetAddress(i);
}
+ /** Scale of anchors corresponding to finest resolution. */
public native float minSize(); public native GridAnchorParameters minSize(float setter);
+ /** Scale of anchors corresponding to coarsest resolution. */
public native float maxSize(); public native GridAnchorParameters maxSize(float setter);
+ /** List of aspect ratios to place on each grid point. */
public native FloatPointer aspectRatios(); public native GridAnchorParameters aspectRatios(FloatPointer setter);
+ /** Number of elements in aspectRatios. */
public native int numAspectRatios(); public native GridAnchorParameters numAspectRatios(int setter);
+ /** Height of feature map to generate anchors for. */
public native int H(); public native GridAnchorParameters H(int setter);
+ /** Width of feature map to generate anchors for. */
public native int W(); public native GridAnchorParameters W(int setter);
+ /** Variance for adjusting the prior boxes. */
public native float variance(int i); public native GridAnchorParameters variance(int i, float setter);
@MemberGetter public native FloatPointer variance();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java
index bd01ef451e3..6cbb65786f6 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/NMSParameters.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -24,20 +24,9 @@
/**
* \brief The NMSParameters are used by the BatchedNMSPlugin for performing
* the non_max_suppression operation over boxes for object detection networks.
- * @param shareLocation If set to true, the boxes inputs are shared across all
- * classes. If set to false, the boxes input should account for per class box data.
- * @param backgroundLabelId Label ID for the background class. If there is no background class, set it as -1
- * @param numClasses Number of classes in the network.
- * @param topK Number of bounding boxes to be fed into the NMS step.
- * @param keepTopK Number of total bounding boxes to be kept per image after NMS step.
- * Should be less than or equal to the topK value.
- * @param scoreThreshold Scalar threshold for score (low scoring boxes are removed).
- * @param iouThreshold scalar threshold for IOU (new boxes that have high IOU overlap
- * with previously selected boxes are removed).
- * @param isNormalized Set to false, if the box coordinates are not
- * normalized, i.e. not in the range [0,1]. Defaults to false.
+ *
+ * @deprecated Deprecated in TensorRT 10.0. BatchedNMSPlugin plugin is deprecated.
* */
-
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class NMSParameters extends Pointer {
static { Loader.load(); }
@@ -56,12 +45,25 @@ public class NMSParameters extends Pointer {
return new NMSParameters((Pointer)this).offsetAddress(i);
}
+ /** If set to true, the boxes inputs are shared across all classes.
+ * If set to false, the boxes input should account for per class box data. */
public native @Cast("bool") boolean shareLocation(); public native NMSParameters shareLocation(boolean setter);
+ /** Label ID for the background class.
+ * If there is no background class, set it as -1 */
public native int backgroundLabelId(); public native NMSParameters backgroundLabelId(int setter);
+ /** Number of classes in the network. */
public native int numClasses(); public native NMSParameters numClasses(int setter);
+ /** Number of bounding boxes to be fed into the NMS step. */
public native int topK(); public native NMSParameters topK(int setter);
+ /** Number of total bounding boxes to be kept per image after NMS step.
+ * Should be less than or equal to the topK value. */
public native int keepTopK(); public native NMSParameters keepTopK(int setter);
+ /** Scalar threshold for score (low scoring boxes are removed). */
public native float scoreThreshold(); public native NMSParameters scoreThreshold(float setter);
+ /** A scalar threshold for IOU (new boxes that have high IOU overlap
+ * with previously selected boxes are removed). */
public native float iouThreshold(); public native NMSParameters iouThreshold(float setter);
+ /** Set to false, if the box coordinates are not normalized,
+ * i.e. not in the range [0,1]. Defaults to false. */
public native @Cast("bool") boolean isNormalized(); public native NMSParameters isNormalized(boolean setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java
index 22912f85287..2ff1f84eeae 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/PriorBoxParameters.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -22,24 +22,12 @@
/**
+ * \struct PriorBoxParameters
+ *
* \brief The PriorBox plugin layer generates the prior boxes of designated sizes and aspect ratios across all
- * dimensions (H x W). PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer. It
- * contains:
- * @param minSize Minimum box size in pixels. Can not be nullptr.
- * @param maxSize Maximum box size in pixels. Can be nullptr.
- * @param aspectRatios Aspect ratios of the boxes. Can be nullptr.
- * @param numMinSize Number of elements in minSize. Must be larger than 0.
- * @param numMaxSize Number of elements in maxSize. Can be 0 or same as numMinSize.
- * @param numAspectRatios Number of elements in aspectRatios. Can be 0.
- * @param flip If true, will flip each aspect ratio. For example, if there is an aspect ratio "r", the aspect ratio
- * "1.0/r" will be generated as well.
- * @param clip If true, will clip the prior so that it is within [0,1].
- * @param variance Variance for adjusting the prior boxes.
- * @param imgH Image height. If 0, then the H dimension of the data tensor will be used.
- * @param imgW Image width. If 0, then the W dimension of the data tensor will be used.
- * @param stepH Step in H. If 0, then (float)imgH/h will be used where h is the H dimension of the 1st input tensor.
- * @param stepW Step in W. If 0, then (float)imgW/w will be used where w is the W dimension of the 1st input tensor.
- * @param offset Offset to the top left corner of each cell.
+ * dimensions (H x W).
+ *
+ * PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class PriorBoxParameters extends Pointer {
@@ -59,19 +47,34 @@ public class PriorBoxParameters extends Pointer {
return new PriorBoxParameters((Pointer)this).offsetAddress(i);
}
+ /** Minimum box size in pixels. Can not be nullptr. */
public native FloatPointer minSize(); public native PriorBoxParameters minSize(FloatPointer setter);
+ /** Maximum box size in pixels. Can be nullptr. */
public native FloatPointer maxSize(); public native PriorBoxParameters maxSize(FloatPointer setter);
+ /** Aspect ratios of the boxes. Can be nullptr. */
public native FloatPointer aspectRatios(); public native PriorBoxParameters aspectRatios(FloatPointer setter);
+ /** Number of elements in minSize. Must be larger than 0. */
public native int numMinSize(); public native PriorBoxParameters numMinSize(int setter);
+ /** Number of elements in maxSize. Can be 0 or same as numMinSize. */
public native int numMaxSize(); public native PriorBoxParameters numMaxSize(int setter);
+ /** Number of elements in aspectRatios. Can be 0. */
public native int numAspectRatios(); public native PriorBoxParameters numAspectRatios(int setter);
+ /** If true, will flip each aspect ratio. For example,
+ * if there is an aspect ratio "r", the aspect ratio "1.0/r" will be generated as well. */
public native @Cast("bool") boolean flip(); public native PriorBoxParameters flip(boolean setter);
+ /** If true, will clip the prior so that it is within [0,1]. */
public native @Cast("bool") boolean clip(); public native PriorBoxParameters clip(boolean setter);
+ /** Variance for adjusting the prior boxes. */
public native float variance(int i); public native PriorBoxParameters variance(int i, float setter);
@MemberGetter public native FloatPointer variance();
+ /** Image height. If 0, then the H dimension of the data tensor will be used. */
public native int imgH(); public native PriorBoxParameters imgH(int setter);
+ /** Image width. If 0, then the W dimension of the data tensor will be used. */
public native int imgW(); public native PriorBoxParameters imgW(int setter);
+ /** Step in H. If 0, then (float)imgH/h will be used where h is the H dimension of the 1st input tensor. */
public native float stepH(); public native PriorBoxParameters stepH(float setter);
+ /** Step in W. If 0, then (float)imgW/w will be used where w is the W dimension of the 1st input tensor. */
public native float stepW(); public native PriorBoxParameters stepW(float setter);
+ /** Offset to the top left corner of each cell. */
public native float offset(); public native PriorBoxParameters offset(float setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java
deleted file mode 100644
index 8f0c5c066df..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/Quadruple.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvinfer_plugin;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-
-/**
- * \brief The Permute plugin layer permutes the input tensor by changing the memory order of the data.
- * Quadruple defines a structure that contains an array of 4 integers. They can represent the permute orders or the
- * strides in each dimension.
- * */
-@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
-public class Quadruple extends Pointer {
- static { Loader.load(); }
- /** Default native constructor. */
- public Quadruple() { super((Pointer)null); allocate(); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public Quadruple(long size) { super((Pointer)null); allocateArray(size); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public Quadruple(Pointer p) { super(p); }
- private native void allocate();
- private native void allocateArray(long size);
- @Override public Quadruple position(long position) {
- return (Quadruple)super.position(position);
- }
- @Override public Quadruple getPointer(long i) {
- return new Quadruple((Pointer)this).offsetAddress(i);
- }
-
- public native int data(int i); public native Quadruple data(int i, int setter);
- @MemberGetter public native IntPointer data();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java
index 5f8d90b862a..dd96e607aef 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RPROIParams.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -22,19 +22,9 @@
/**
+ * \struct RPROIParams
+ *
* \brief RPROIParams is used to create the RPROIPlugin instance.
- * It contains:
- * @param poolingH Height of the output in pixels after ROI pooling on feature map.
- * @param poolingW Width of the output in pixels after ROI pooling on feature map.
- * @param featureStride Feature stride; ratio of input image size to feature map size. Assuming that max pooling layers
- * in the neural network use square filters.
- * @param preNmsTop Number of proposals to keep before applying NMS.
- * @param nmsMaxOut Number of remaining proposals after applying NMS.
- * @param anchorsRatioCount Number of anchor box ratios.
- * @param anchorsScaleCount Number of anchor box scales.
- * @param iouThreshold IoU (Intersection over Union) threshold used for the NMS step.
- * @param minBoxSize Minimum allowed bounding box size before scaling, used for anchor box calculation.
- * @param spatialScale Spatial scale between the input image and the last feature map.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class RPROIParams extends Pointer {
@@ -54,14 +44,25 @@ public class RPROIParams extends Pointer {
return new RPROIParams((Pointer)this).offsetAddress(i);
}
+ /** Height of the output in pixels after ROI pooling on feature map. */
public native int poolingH(); public native RPROIParams poolingH(int setter);
+ /** Width of the output in pixels after ROI pooling on feature map. */
public native int poolingW(); public native RPROIParams poolingW(int setter);
+ /** Feature stride; ratio of input image size to feature map size.
+ * Assuming that max pooling layers in the neural network use square filters. */
public native int featureStride(); public native RPROIParams featureStride(int setter);
+ /** Number of proposals to keep before applying NMS. */
public native int preNmsTop(); public native RPROIParams preNmsTop(int setter);
+ /** Number of remaining proposals after applying NMS. */
public native int nmsMaxOut(); public native RPROIParams nmsMaxOut(int setter);
+ /** Number of anchor box ratios. */
public native int anchorsRatioCount(); public native RPROIParams anchorsRatioCount(int setter);
+ /** Number of anchor box scales. */
public native int anchorsScaleCount(); public native RPROIParams anchorsScaleCount(int setter);
+ /** IoU (Intersection over Union) threshold used for the NMS step. */
public native float iouThreshold(); public native RPROIParams iouThreshold(float setter);
+ /** Minimum allowed bounding box size before scaling, used for anchor box calculation. */
public native float minBoxSize(); public native RPROIParams minBoxSize(float setter);
+ /** Spatial scale between the input image and the last feature map. */
public native float spatialScale(); public native RPROIParams spatialScale(float setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java
index 83eecd3e543..dc55215dc69 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/RegionParameters.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -22,14 +22,12 @@
/**
- * \brief The Region plugin layer performs region proposal calculation: generate 5 bounding boxes per cell (for yolo9000, generate 3 bounding boxes per cell).
- * For each box, calculating its probablities of objects detections from 80 pre-defined classifications (yolo9000 has 9418 pre-defined classifications,
- * and these 9418 items are organized as work-tree structure).
+ * \brief The Region plugin layer performs region proposal calculation.
+ *
+ * Generate 5 bounding boxes per cell (for yolo9000, generate 3 bounding boxes per cell).
+ * For each box, calculating its probabilities of objects detections from 80 pre-defined classifications
+ * (yolo9000 has 9418 pre-defined classifications, and these 9418 items are organized as work-tree structure).
* RegionParameters defines a set of parameters for creating the Region plugin layer.
- * @param num Number of predicted bounding box for each grid cell.
- * @param coords Number of coordinates for a bounding box.
- * @param classes Number of classifications to be predicted.
- * @param smTree Helping structure to do softmax on confidence scores.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class RegionParameters extends Pointer {
@@ -49,8 +47,12 @@ public class RegionParameters extends Pointer {
return new RegionParameters((Pointer)this).offsetAddress(i);
}
+ /** Number of predicted bounding boxes for each grid cell. */
public native int num(); public native RegionParameters num(int setter);
+ /** Number of coordinates for a bounding box. */
public native int coords(); public native RegionParameters coords(int setter);
+ /** Number of classifications to be predicted. */
public native int classes(); public native RegionParameters classes(int setter);
+ /** Helping structure to do softmax on confidence scores. */
public native softmaxTree smTree(); public native RegionParameters smTree(softmaxTree setter);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java
index 5a9bce4fbfd..3408cd7a0f2 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvinfer_plugin/softmaxTree.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvinfer_plugin;
@@ -22,7 +22,8 @@
/**
- * \brief When performing yolo9000, softmaxTree is helping to do softmax on confidence scores, for element to get the precise classification through word-tree structured classification definition.
+ * \brief When performing yolo9000, softmaxTree is helping to do softmax on confidence scores,
+ * for element to get the precise classification through word-tree structured classification definition.
* */
@Namespace("nvinfer1::plugin") @Properties(inherit = org.bytedeco.tensorrt.presets.nvinfer_plugin.class)
public class softmaxTree extends Pointer {
@@ -49,7 +50,6 @@ public class softmaxTree extends Pointer {
public native IntPointer group(); public native softmaxTree group(IntPointer setter);
public native @Cast("char*") BytePointer name(int i); public native softmaxTree name(int i, BytePointer setter);
public native @Cast("char**") PointerPointer name(); public native softmaxTree name(PointerPointer setter);
-
public native int groups(); public native softmaxTree groups(int setter);
public native IntPointer groupSize(); public native softmaxTree groupSize(IntPointer setter);
public native IntPointer groupOffset(); public native softmaxTree groupOffset(IntPointer setter);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java
index d19d5ba7e5b..948673a72f6 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParser.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvonnxparser;
@@ -139,18 +139,6 @@ public class IParser extends Pointer {
public native @Cast("bool") boolean supportsOperator(String op_name);
public native @Cast("bool") boolean supportsOperator(@Cast("const char*") BytePointer op_name);
- /**
- * \brief destroy this object
- *
- * \warning deprecated and planned on being removed in TensorRT 10.0
- * */
-
-
- //!
- //!
- //!
- public native @Deprecated void destroy();
-
/**
* \brief Get the number of errors that occurred during prior calls to
* \p parse
@@ -282,6 +270,28 @@ public class IParser extends Pointer {
*
* @return True if flag is set, false if unset.
* */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
public native @Cast("bool") @NoException(true) boolean getFlag(OnnxParserFlag onnxParserFlag);
public native @Cast("bool") @NoException(true) boolean getFlag(@Cast("nvonnxparser::OnnxParserFlag") int onnxParserFlag);
+
+ /**
+ * \brief Return the i-th output ITensor object for the ONNX layer "name".
+ *
+ * Return the i-th output ITensor object for the ONNX layer "name".
+ * If "name" is not found or i is out of range, return nullptr.
+ * In the case of multiple nodes sharing the same name this function will return
+ * the output tensors of the first instance of the node in the ONNX graph.
+ *
+ * @param name The name of the ONNX layer.
+ *
+ * @param i The index of the output. i must be in range [0, layer.num_outputs).
+ * */
+ public native @Const ITensor getLayerOutputTensor(String name, @Cast("int64_t") long i);
+ public native @Const ITensor getLayerOutputTensor(@Cast("const char*") BytePointer name, @Cast("int64_t") long i);
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java
index 33dd653bf0c..d445ae7143a 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserError.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvonnxparser;
@@ -35,42 +35,74 @@ public class IParserError extends Pointer {
public IParserError(Pointer p) { super(p); }
/**
- * \brief the error code
+ * \brief the error code.
* */
//!
//!
public native org.bytedeco.tensorrt.global.nvonnxparser.ErrorCode code();
/**
- * \brief description of the error
+ * \brief description of the error.
* */
//!
//!
public native String desc();
/**
- * \brief source file in which the error occurred
+ * \brief source file in which the error occurred.
* */
//!
//!
public native String file();
/**
- * \brief source line at which the error occurred
+ * \brief source line at which the error occurred.
* */
//!
//!
public native int line();
/**
- * \brief source function in which the error occurred
+ * \brief source function in which the error occurred.
* */
//!
//!
public native String func();
/**
- * \brief index of the ONNX model node in which the error occurred
+ * \brief index of the ONNX model node in which the error occurred.
* */
+
+ //!
+ //!
public native int node();
+ /**
+ * \brief name of the node in which the error occurred.
+ * */
+
+ //!
+ //!
+ public native String nodeName();
+ /**
+ * \brief name of the node operation in which the error occurred.
+ * */
+
+ //!
+ //!
+ public native String nodeOperator();
+ /**
+ * \brief A list of the local function names, from the top level down, constituting the current
+ * stack trace in which the error occurred. A top-level node that is not inside any
+ * local function would return a nullptr.
+ * */
+
+ //!
+ //!
+ public native @Cast("const char*const*") PointerPointer localFunctionStack();
+ /**
+ * \brief The size of the stack of local functions at the point where the error occurred.
+ * A top-level node that is not inside any local function would correspond to
+ * a stack size of 0.
+ * */
+ public native int localFunctionStackSize();
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java
new file mode 100644
index 00000000000..2a5444965b6
--- /dev/null
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/IParserRefitter.java
@@ -0,0 +1,111 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.tensorrt.nvonnxparser;
+
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import org.bytedeco.cuda.cudart.*;
+import static org.bytedeco.cuda.global.cudart.*;
+import org.bytedeco.cuda.cublas.*;
+import static org.bytedeco.cuda.global.cublas.*;
+import org.bytedeco.cuda.cudnn.*;
+import static org.bytedeco.cuda.global.cudnn.*;
+import org.bytedeco.cuda.nvrtc.*;
+import static org.bytedeco.cuda.global.nvrtc.*;
+import org.bytedeco.tensorrt.nvinfer.*;
+import static org.bytedeco.tensorrt.global.nvinfer.*;
+import org.bytedeco.tensorrt.nvinfer_plugin.*;
+import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
+
+import static org.bytedeco.tensorrt.global.nvonnxparser.*;
+
+
+/**
+ * \class IParserRefitter
+ *
+ * \brief An interface designed to refit weights from an ONNX model.
+ * */
+@Namespace("nvonnxparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvonnxparser.class)
+public class IParserRefitter extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public IParserRefitter(Pointer p) { super(p); }
+
+ /**
+ * \brief Load a serialized ONNX model from memory and perform weight refit.
+ *
+ * @param serializedOnnxModel Pointer to the serialized ONNX model
+ * @param serializedOnnxModelSize Size of the serialized ONNX model
+ * in bytes
+ * @param modelPath Absolute path to the model file for loading external weights if required
+ * @return true if all the weights in the engine were refit successfully.
+ *
+ * The serialized ONNX model must be identical to the one used to generate the engine
+ * that will be refit.
+ * */
+
+
+ //!
+ //!
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean refitFromBytes(
+ @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize, String modelPath/*=nullptr*/);
+ public native @Cast("bool") @NoException(true) boolean refitFromBytes(
+ @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize);
+ public native @Cast("bool") @NoException(true) boolean refitFromBytes(
+ @Const Pointer serializedOnnxModel, @Cast("size_t") long serializedOnnxModelSize, @Cast("const char*") BytePointer modelPath/*=nullptr*/);
+
+ /**
+ * \brief Load and parse a ONNX model from disk and perform weight refit.
+ *
+ * @param onnxModelFile Path to the ONNX model to load from disk.
+ *
+ * @return true if the model was loaded successfully, and if all the weights in the engine were refit successfully.
+ *
+ * The provided ONNX model must be identical to the one used to generate the engine
+ * that will be refit.
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @Cast("bool") @NoException(true) boolean refitFromFile(String onnxModelFile);
+ public native @Cast("bool") @NoException(true) boolean refitFromFile(@Cast("const char*") BytePointer onnxModelFile);
+
+ /**
+ * \brief Get the number of errors that occurred during prior calls to \p refitFromBytes or \p refitFromFile
+ *
+ * @see getError() IParserError
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @NoException(true) int getNbErrors();
+
+ /**
+ * \brief Get an error that occurred during prior calls to \p refitFromBytes or \p refitFromFile
+ *
+ * @see getNbErrors() IParserError
+ * */
+
+
+ //!
+ //!
+ //!
+ public native @Const @NoException(true) IParserError getError(int index);
+
+ /**
+ * \brief Clear errors from prior calls to \p refitFromBytes or \p refitFromFile
+ *
+ * @see getNbErrors() getError() IParserError
+ * */
+ public native void clearErrors();
+}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java
index 97a429f73bc..d1abb0984ef 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraphCollection_t.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvonnxparser;
@@ -40,6 +40,8 @@ public class SubGraphCollection_t extends Pointer {
public void clear() { resize(0); }
public native void resize(@Cast("size_t") long n);
+ public SubGraph_t front() { return get(0); }
+ public SubGraph_t back() { return get(size() - 1); }
@Index(function = "at") public native @ByRef SubGraph_t get(@Cast("size_t") long i);
public native SubGraphCollection_t put(@Cast("size_t") long i, SubGraph_t value);
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java
index 8c7d8962468..cadbb523f6c 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvonnxparser/SubGraph_t.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.nvonnxparser;
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java
deleted file mode 100644
index 0c7b36671d6..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldCollection.java
+++ /dev/null
@@ -1,46 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-@Namespace("nvuffparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class FieldCollection extends Pointer {
- static { Loader.load(); }
- /** Default native constructor. */
- public FieldCollection() { super((Pointer)null); allocate(); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public FieldCollection(long size) { super((Pointer)null); allocateArray(size); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public FieldCollection(Pointer p) { super(p); }
- private native void allocate();
- private native void allocateArray(long size);
- @Override public FieldCollection position(long position) {
- return (FieldCollection)super.position(position);
- }
- @Override public FieldCollection getPointer(long i) {
- return new FieldCollection((Pointer)this).offsetAddress(i);
- }
-
- public native int nbFields(); public native FieldCollection nbFields(int setter);
- public native @Const FieldMap fields(); public native FieldCollection fields(FieldMap setter);
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java
deleted file mode 100644
index 179854566a3..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/FieldMap.java
+++ /dev/null
@@ -1,69 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-/**
- * \class FieldMap
- *
- * \brief An array of field params used as a layer parameter for plugin layers.
- *
- * The node fields are passed by the parser to the API through the plugin
- * constructor. The implementation of the plugin should parse the contents of
- * the fieldMap as part of the plugin constructor
- * */
-@Namespace("nvuffparser") @NoOffset @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class FieldMap extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public FieldMap(Pointer p) { super(p); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public FieldMap(long size) { super((Pointer)null); allocateArray(size); }
- private native void allocateArray(long size);
- @Override public FieldMap position(long position) {
- return (FieldMap)super.position(position);
- }
- @Override public FieldMap getPointer(long i) {
- return new FieldMap((Pointer)this).offsetAddress(i);
- }
-
- public native String name(); public native FieldMap name(String setter);
- public native @Const Pointer data(); public native FieldMap data(Pointer setter);
- public native FieldType type(); public native FieldMap type(FieldType setter);
- public native int length(); public native FieldMap length(int setter);
-
- /** @deprecated Legacy constructor, retained for ABI compatibility. Deprecated in TensorRT 8.6.
- * Use the default constructor instead. */
- public FieldMap(String name, @Const Pointer data, FieldType type, int length/*=1*/) { super((Pointer)null); allocate(name, data, type, length); }
- @Deprecated private native void allocate(String name, @Const Pointer data, FieldType type, int length/*=1*/);
- public FieldMap(String name, @Const Pointer data, FieldType type) { super((Pointer)null); allocate(name, data, type); }
- @Deprecated private native void allocate(String name, @Const Pointer data, FieldType type);
- public FieldMap(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type, int length/*=1*/) { super((Pointer)null); allocate(name, data, type, length); }
- @Deprecated private native void allocate(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type, int length/*=1*/);
- public FieldMap(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type) { super((Pointer)null); allocate(name, data, type); }
- @Deprecated private native void allocate(@Cast("const char*") BytePointer name, @Const Pointer data, @Cast("nvuffparser::FieldType") int type);
-
- /** Default constructor */
- public FieldMap() { super((Pointer)null); allocate(); }
- private native void allocate();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java
deleted file mode 100644
index 289d2a322f8..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBinaryProtoBlob.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-/**
- * \class IBinaryProtoBlob
- *
- * \brief Object used to store and query data extracted from a binaryproto file using the ICaffeParser.
- *
- * @see nvcaffeparser1::ICaffeParser
- *
- * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
- * */
-@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class IBinaryProtoBlob extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public IBinaryProtoBlob(Pointer p) { super(p); }
-
- public native @Const @NoException(true) Pointer getData();
- public native @ByVal @NoException(true) Dims4 getDimensions();
-
- //!
- //!
- //!
- public native @NoException(true) DataType getDataType();
- /**
- * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
- public native @Deprecated @NoException(true) void destroy();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java
deleted file mode 100644
index f3388e4b7e6..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IBlobNameToTensor.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-/**
- * \class IBlobNameToTensor
- *
- * \brief Object used to store and query Tensors after they have been extracted from a Caffe model using the ICaffeParser.
- *
- * \note The lifetime of IBlobNameToTensor is the same as the lifetime of its parent ICaffeParser.
- *
- * @see nvcaffeparser1::ICaffeParser
- *
- * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
- * */
-@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class IBlobNameToTensor extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public IBlobNameToTensor(Pointer p) { super(p); }
-
- /** \brief Given a blob name, returns a pointer to a ITensor object.
- *
- * @param name Caffe blob name for which the user wants the corresponding ITensor.
- *
- * @return ITensor* corresponding to the queried name. If no such ITensor exists, then nullptr is returned.
- * */
- public native @NoException(true) ITensor find(String name);
- public native @NoException(true) ITensor find(@Cast("const char*") BytePointer name);
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java
deleted file mode 100644
index 5fe9f2d4eb9..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/ICaffeParser.java
+++ /dev/null
@@ -1,207 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-/**
- * \class ICaffeParser
- *
- * \brief Class used for parsing Caffe models.
- *
- * Allows users to export models trained using Caffe to TRT.
- *
- * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
- * */
-@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class ICaffeParser extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public ICaffeParser(Pointer p) { super(p); }
-
- /**
- * \brief Parse a prototxt file and a binaryproto Caffe model to extract
- * network definition and weights associated with the network, respectively.
- *
- * @param deploy The plain text, prototxt file used to define the network definition.
- * @param model The binaryproto Caffe model that contains the weights associated with the network.
- * @param network Network in which the CaffeParser will fill the layers.
- * @param weightType The type to which the weights will transformed.
- *
- * @return A pointer to an IBlobNameToTensor object that contains the extracted data.
- *
- * @see nvcaffeparser1::IBlobNameToTensor
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @Const @NoException(true) IBlobNameToTensor parse(String deploy, String model, @ByRef INetworkDefinition network,
- DataType weightType);
- public native @Const @NoException(true) IBlobNameToTensor parse(@Cast("const char*") BytePointer deploy, @Cast("const char*") BytePointer model, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightType);
-
- /**
- * \brief Parse a deploy prototxt and a binaryproto Caffe model from memory buffers to extract
- * network definition and weights associated with the network, respectively.
- *
- * @param deployBuffer The plain text deploy prototxt used to define the network definition.
- * @param deployLength The length of the deploy buffer.
- * @param modelBuffer The binaryproto Caffe memory buffer that contains the weights associated with the network.
- * @param modelLength The length of the model buffer.
- * @param network Network in which the CaffeParser will fill the layers.
- * @param weightType The type to which the weights will transformed.
- *
- * @return A pointer to an IBlobNameToTensor object that contains the extracted data.
- *
- * @see nvcaffeparser1::IBlobNameToTensor
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- //!
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") BytePointer deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") BytePointer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- DataType weightType);
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") ByteBuffer deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") ByteBuffer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightType);
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") byte[] deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") byte[] modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- DataType weightType);
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") BytePointer deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") BytePointer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightType);
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") ByteBuffer deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") ByteBuffer modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- DataType weightType);
- public native @Const @NoException(true) IBlobNameToTensor parseBuffers(@Cast("const uint8_t*") byte[] deployBuffer, @Cast("std::size_t") long deployLength,
- @Cast("const uint8_t*") byte[] modelBuffer, @Cast("std::size_t") long modelLength, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightType);
-
- /**
- * \brief Parse and extract data stored in binaryproto file.
- *
- * The binaryproto file contains data stored in a binary blob. parseBinaryProto() converts it
- * to an IBinaryProtoBlob object which gives the user access to the data and meta-data about data.
- *
- * @param fileName Path to file containing binary proto.
- *
- * @return A pointer to an IBinaryProtoBlob object that contains the extracted data.
- *
- * @see nvcaffeparser1::IBinaryProtoBlob
- * */
-
-
- //!
- //!
- //!
- //!
- public native @NoException(true) IBinaryProtoBlob parseBinaryProto(String fileName);
- public native @NoException(true) IBinaryProtoBlob parseBinaryProto(@Cast("const char*") BytePointer fileName);
-
- /**
- * \brief Set buffer size for the parsing and storage of the learned model.
- *
- * @param size The size of the buffer specified as the number of bytes.
- *
- * \note Default size is 2^30 bytes.
- * */
-
-
- //!
- //!
- //!
- //!
- public native @NoException(true) void setProtobufBufferSize(@Cast("size_t") long size);
-
- /**
- * \brief Destroy this ICaffeParser object.
- *
- * @deprecated Deprecated in TensorRT 8.0. Superseded by {@code delete}.
- *
- * \warning Calling destroy on a managed pointer will result in a double-free error.
- * */
-
-
- //!
- //!
- //!
- public native @Deprecated @NoException(true) void destroy();
-
- /**
- * \brief Set the IPluginFactoryV2 used to create the user defined pluginV2 objects.
- *
- * @param factory Pointer to an instance of the user implementation of IPluginFactoryV2.
- * */
-
-
- //!
- //!
- public native @NoException(true) void setPluginFactoryV2(IPluginFactoryV2 factory);
-
- /**
- * \brief Set the namespace used to lookup and create plugins in the network.
- * */
- public native @NoException(true) void setPluginNamespace(String libNamespace);
- public native @NoException(true) void setPluginNamespace(@Cast("const char*") BytePointer libNamespace);
- /**
- * \brief Set the ErrorRecorder for this interface
- *
- * Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution.
- * This function will call incRefCount of the registered ErrorRecorder at least once. Setting
- * recorder to nullptr unregisters the recorder with the interface, resulting in a call to decRefCount if
- * a recorder has been registered.
- *
- * If an error recorder is not set, messages will be sent to the global log stream.
- *
- * @param recorder The error recorder to register with this interface.
- *
- * @see getErrorRecorder()
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
-
- /**
- * \brief get the ErrorRecorder assigned to this interface.
- *
- * Retrieves the assigned error recorder object for the given class. A
- * nullptr will be returned if setErrorRecorder has not been called.
- *
- * @return A pointer to the IErrorRecorder object that has been registered.
- *
- * @see setErrorRecorder()
- * */
- public native @NoException(true) IErrorRecorder getErrorRecorder();
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java
deleted file mode 100644
index 1233e593f9d..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IPluginFactoryV2.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-/**
- * \class IPluginFactoryV2
- *
- * \brief Plugin factory used to configure plugins.
- * */
-@Namespace("nvcaffeparser1") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class IPluginFactoryV2 extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public IPluginFactoryV2(Pointer p) { super(p); }
-
- /**
- * \brief A user implemented function that determines if a layer configuration is provided by an IPluginV2.
- *
- * @param layerName Name of the layer which the user wishes to validate.
- * */
-
-
- //!
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean isPluginV2(String layerName);
- public native @Cast("bool") @NoException(true) boolean isPluginV2(@Cast("const char*") BytePointer layerName);
-
- /**
- * \brief Creates a plugin.
- *
- * @param layerName Name of layer associated with the plugin.
- * @param weights Weights used for the layer.
- * @param nbWeights Number of weights.
- * @param libNamespace Library Namespace associated with the plugin object
- * */
- public native @NoException(true) IPluginV2 createPlugin(String layerName, @Const Weights weights,
- int nbWeights, String libNamespace/*=""*/);
- public native @NoException(true) IPluginV2 createPlugin(String layerName, @Const Weights weights,
- int nbWeights);
- public native @NoException(true) IPluginV2 createPlugin(@Cast("const char*") BytePointer layerName, @Const Weights weights,
- int nbWeights, @Cast("const char*") BytePointer libNamespace/*=""*/);
- public native @NoException(true) IPluginV2 createPlugin(@Cast("const char*") BytePointer layerName, @Const Weights weights,
- int nbWeights);
-}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java
deleted file mode 100644
index 5a9d9ba669e..00000000000
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/nvparsers/IUffParser.java
+++ /dev/null
@@ -1,180 +0,0 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
-
-package org.bytedeco.tensorrt.nvparsers;
-
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import org.bytedeco.cuda.cudart.*;
-import static org.bytedeco.cuda.global.cudart.*;
-import org.bytedeco.cuda.cublas.*;
-import static org.bytedeco.cuda.global.cublas.*;
-import org.bytedeco.cuda.cudnn.*;
-import static org.bytedeco.cuda.global.cudnn.*;
-import org.bytedeco.cuda.nvrtc.*;
-import static org.bytedeco.cuda.global.nvrtc.*;
-import org.bytedeco.tensorrt.nvinfer.*;
-import static org.bytedeco.tensorrt.global.nvinfer.*;
-import org.bytedeco.tensorrt.nvinfer_plugin.*;
-import static org.bytedeco.tensorrt.global.nvinfer_plugin.*;
-
-import static org.bytedeco.tensorrt.global.nvparsers.*;
-
-
-/**
- * \class IUffParser
- *
- * \brief Class used for parsing models described using the UFF format.
- *
- * \warning Do not inherit from this class, as doing so will break forward-compatibility of the API and ABI.
- * */
-@Namespace("nvuffparser") @Properties(inherit = org.bytedeco.tensorrt.presets.nvparsers.class)
-public class IUffParser extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public IUffParser(Pointer p) { super(p); }
-
- /**
- * \brief Register an input name of a UFF network with the associated Dimensions.
- *
- * @param inputName Input name.
- * @param inputDims Input dimensions.
- * @param inputOrder Input order on which the framework input was originally.
- * */
-
-
- //!
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean registerInput(String inputName, @ByVal @Cast("nvinfer1::Dims*") Dims32 inputDims, UffInputOrder inputOrder);
- public native @Cast("bool") @NoException(true) boolean registerInput(@Cast("const char*") BytePointer inputName, @ByVal @Cast("nvinfer1::Dims*") Dims32 inputDims, @Cast("nvuffparser::UffInputOrder") int inputOrder);
-
- /**
- * \brief Register an output name of a UFF network.
- *
- * @param outputName Output name.
- * */
-
-
- //!
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean registerOutput(String outputName);
- public native @Cast("bool") @NoException(true) boolean registerOutput(@Cast("const char*") BytePointer outputName);
-
- /**
- * \brief Parse a UFF file.
- *
- * @param file File name of the UFF file.
- * @param network Network in which the UFFParser will fill the layers.
- * @param weightsType The type on which the weights will transformed in.
- * */
-
-
- //!
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean parse(String file, @ByRef INetworkDefinition network,
- DataType weightsType/*=nvinfer1::DataType::kFLOAT*/);
- public native @Cast("bool") @NoException(true) boolean parse(String file, @ByRef INetworkDefinition network);
- public native @Cast("bool") @NoException(true) boolean parse(@Cast("const char*") BytePointer file, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightsType/*=nvinfer1::DataType::kFLOAT*/);
- public native @Cast("bool") @NoException(true) boolean parse(@Cast("const char*") BytePointer file, @ByRef INetworkDefinition network);
-
- /**
- * \brief Parse a UFF buffer, useful if the file already live in memory.
- *
- * @param buffer Buffer of the UFF file.
- * @param size Size of buffer of the UFF file.
- * @param network Network in which the UFFParser will fill the layers.
- * @param weightsType The type on which the weights will transformed in.
- * */
-
-
- //!
- //!
- public native @Cast("bool") @NoException(true) boolean parseBuffer(String buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network,
- DataType weightsType/*=nvinfer1::DataType::kFLOAT*/);
- public native @Cast("bool") @NoException(true) boolean parseBuffer(String buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network);
- public native @Cast("bool") @NoException(true) boolean parseBuffer(@Cast("const char*") BytePointer buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network,
- @Cast("nvinfer1::DataType") int weightsType/*=nvinfer1::DataType::kFLOAT*/);
- public native @Cast("bool") @NoException(true) boolean parseBuffer(@Cast("const char*") BytePointer buffer, @Cast("std::size_t") long size, @ByRef INetworkDefinition network);
-
- /**
- * @deprecated Use {@code delete} instead. Deprecated in TRT 8.0.
- * */
-
-
- //!
- //!
- public native @Deprecated @NoException(true) void destroy();
-
- /**
- * \brief Return Version Major of the UFF.
- * */
-
-
- //!
- //!
- public native @NoException(true) int getUffRequiredVersionMajor();
-
- /**
- * \brief Return Version Minor of the UFF.
- * */
-
-
- //!
- //!
- public native @NoException(true) int getUffRequiredVersionMinor();
-
- /**
- * \brief Return Patch Version of the UFF.
- * */
-
-
- //!
- //!
- public native @NoException(true) int getUffRequiredVersionPatch();
-
- /**
- * \brief Set the namespace used to lookup and create plugins in the network.
- * */
- public native @NoException(true) void setPluginNamespace(String libNamespace);
- public native @NoException(true) void setPluginNamespace(@Cast("const char*") BytePointer libNamespace);
- /**
- * \brief Set the ErrorRecorder for this interface
- *
- * Assigns the ErrorRecorder to this interface. The ErrorRecorder will track all errors during execution.
- * This function will call incRefCount of the registered ErrorRecorder at least once. Setting
- * recorder to nullptr unregisters the recorder with the interface, resulting in a call to decRefCount if
- * a recorder has been registered.
- *
- * If an error recorder is not set, messages will be sent to the global log stream.
- *
- * @param recorder The error recorder to register with this interface. */
- //
- /** @see getErrorRecorder()
- /** */
-
-
- //!
- //!
- //!
- //!
- //!
- public native @NoException(true) void setErrorRecorder(IErrorRecorder recorder);
-
- /**
- * \brief get the ErrorRecorder assigned to this interface.
- *
- * Retrieves the assigned error recorder object for the given class. A
- * nullptr will be returned if setErrorRecorder has not been called.
- *
- * @return A pointer to the IErrorRecorder object that has been registered.
- *
- * @see setErrorRecorder()
- * */
- public native @NoException(true) IErrorRecorder getErrorRecorder();
-}
diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java
index 037c418d2b8..9043f0d247d 100644
--- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java
+++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018-2023 Samuel Audet
+ * Copyright (C) 2018-2024 Samuel Audet
*
* Licensed either under the Apache License, Version 2.0, or (at your option)
* under the terms of the GNU General Public License as published by
@@ -48,9 +48,10 @@
value = {"linux-arm64", "linux-ppc64le", "linux-x86_64", "windows-x86_64"},
compiler = "cpp11",
include = {"NvInferVersion.h", "NvInferRuntimeBase.h", "NvInferRuntimePlugin.h", "NvInferRuntimeCommon.h",
- "NvInferLegacyDims.h", "NvInferRuntime.h", "NvInfer.h", "NvInferImpl.h", "NvUtils.h"},
- link = "nvinfer@.8",
- preload = "nvinfer_builder_resource@.8.6.1"
+ "NvInferLegacyDims.h", "NvInferRuntime.h", "NvInfer.h", "NvInferImpl.h"/*, "NvUtils.h"*/},
+ exclude = "NvInferRuntimeBase.h",
+ link = "nvinfer@.10",
+ preload = "nvinfer_builder_resource@.10.0.1"
),
@Platform(
value = "linux-arm64",
@@ -123,10 +124,12 @@ public void map(InfoMap infoMap) {
.put(new Info("std::size_t").cast().valueTypes("long").pointerTypes("LongPointer", "LongBuffer", "long[]"))
.put(new Info("const char", "nvinfer1::AsciiChar").pointerTypes("String", "@Cast(\"const char*\") BytePointer"))
- .put(new Info("nvinfer1::IErrorRecorder::ErrorDesc").valueTypes("String", "@Cast(\"const char*\") BytePointer"))
+ .put(new Info("nvinfer1::IErrorRecorder::ErrorDesc", "nvinfer1::InterfaceKind",
+ "nvinfer1::v_1_0::IErrorRecorder::ErrorDesc").valueTypes("String", "@Cast(\"const char*\") BytePointer"))
+ .put(new Info("nvinfer1::NetworkDefinitionCreationFlags").cast().valueTypes("int"))
.put(new Info("nvinfer1::PluginFormat").cast().valueTypes("TensorFormat", "int").pointerTypes("IntPointer", "IntBuffer", "int[]"))
.put(new Info("nvinfer1::safe::IPluginRegistry").pointerTypes("SafeIPluginRegistry"))
- .put(new Info("nvinfer1::EnumMax", "nvinfer1::EnumMaxImpl").skip())
+ .put(new Info("nvinfer1::EnumMax", "nvinfer1::EnumMaxImpl", "nvinfer1::v_1_0::IPluginResource::operator =").skip())
.put(new Info("nvinfer1::Weights::values").javaText("public native @Const Pointer values(); public native Weights values(Pointer values);"))
.put(new Info("nvinfer1::IDimensionExpr", "nvinfer1::IExprBuilder", "nvinfer1::IOptimizationProfile", "nvinfer1::ITensor", "nvinfer1::ILayer",
"nvinfer1::IConvolutionLayer", "nvinfer1::IFullyConnectedLayer", "nvinfer1::IActivationLayer", "nvinfer1::IPoolingLayer",
@@ -140,12 +143,15 @@ public void map(InfoMap infoMap) {
"nvinfer1::IAssertionLayer", "nvinfer1::IConditionLayer", "nvinfer1::IEinsumLayer", "nvinfer1::IIfConditional",
"nvinfer1::IIfConditionalBoundaryLayer", "nvinfer1::IIfConditionalInputLayer", "nvinfer1::IIfConditionalOutputLayer", "nvinfer1::IScatterLayer",
"nvinfer1::IAlgorithmIOInfo", "nvinfer1::IAlgorithmVariant", "nvinfer1::IAlgorithmContext", "nvinfer1::IAlgorithm", "nvinfer1::ICastLayer",
- "nvinfer1::IGridSampleLayer", "nvinfer1::INMSLayer", "nvinfer1::INonZeroLayer", "nvinfer1::INormalizationLayer", "nvinfer1::IReverseSequenceLayer").purify())
+ "nvinfer1::IGridSampleLayer", "nvinfer1::INMSLayer", "nvinfer1::INonZeroLayer", "nvinfer1::INormalizationLayer", "nvinfer1::IReverseSequenceLayer",
+ "nvinfer1::IPluginV3Layer").purify())
.put(new Info("nvinfer1::IGpuAllocator::free").javaNames("_free"))
.put(new Info("nvinfer1::IGpuAllocator", "nvinfer1::IProfiler", "nvinfer1::ILogger", "nvinfer1::IInt8Calibrator", "nvinfer1::IInt8EntropyCalibrator",
- "nvinfer1::IInt8EntropyCalibrator2", "nvinfer1::IInt8MinMaxCalibrator", "nvinfer1::IInt8LegacyCalibrator").virtualize())
+ "nvinfer1::IInt8EntropyCalibrator2", "nvinfer1::IInt8MinMaxCalibrator", "nvinfer1::IInt8LegacyCalibrator", "nvinfer1::IVersionedInterface").virtualize())
.put(new Info("nvinfer1::IPluginRegistry::getPluginCreatorList").javaText(
"public native @Cast(\"nvinfer1::IPluginCreator*const*\") PointerPointer getPluginCreatorList(IntPointer numCreators);"))
+ .put(new Info("nvinfer1::IPluginRegistry::getAllCreators").javaText(
+ "public native @Cast(\"nvinfer1::IPluginCreatorInterface*const*\") @NoException(true) PointerPointer getAllCreators(IntPointer numCreators);"))
;
}
}
diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java
index bd450875e6b..f6dc6f08530 100644
--- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java
+++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvinfer_plugin.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018-2022 Samuel Audet
+ * Copyright (C) 2018-2024 Samuel Audet
*
* Licensed either under the Apache License, Version 2.0, or (at your option)
* under the terms of the GNU General Public License as published by
@@ -36,7 +36,7 @@
inherit = nvinfer.class,
value = @Platform(
include = {"NvInferPlugin.h", "NvInferPluginUtils.h"},
- link = "nvinfer_plugin@.8"),
+ link = "nvinfer_plugin@.10"),
target = "org.bytedeco.tensorrt.nvinfer_plugin",
global = "org.bytedeco.tensorrt.global.nvinfer_plugin")
public class nvinfer_plugin implements InfoMapper {
diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java
index 3fc901ee904..aede025be26 100644
--- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java
+++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvonnxparser.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2019-2023 Samuel Audet
+ * Copyright (C) 2019-2024 Samuel Audet
*
* Licensed either under the Apache License, Version 2.0, or (at your option)
* under the terms of the GNU General Public License as published by
@@ -36,7 +36,8 @@
inherit = nvinfer_plugin.class,
value = @Platform(
include = "NvOnnxParser.h",
- link = "nvonnxparser@.8"),
+ link = "nvonnxparser@.10",
+ preload = "nvinfer_vc_plugin@.10"),
target = "org.bytedeco.tensorrt.nvonnxparser",
global = "org.bytedeco.tensorrt.global.nvonnxparser")
public class nvonnxparser implements InfoMapper {
diff --git a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java
index b8a9a586f7f..46a1e39fd28 100644
--- a/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java
+++ b/tensorrt/src/main/java/org/bytedeco/tensorrt/presets/nvparsers.java
@@ -32,13 +32,13 @@
*
* @author Samuel Audet
*/
-@Properties(
- inherit = nvinfer_plugin.class,
- value = @Platform(
- include = {"NvCaffeParser.h", "NvUffParser.h"},
- link = "nvparsers@.8"),
- target = "org.bytedeco.tensorrt.nvparsers",
- global = "org.bytedeco.tensorrt.global.nvparsers")
+//@Properties(
+// inherit = nvinfer_plugin.class,
+// value = @Platform(
+// include = {"NvCaffeParser.h", "NvUffParser.h"},
+// link = "nvparsers@.8"),
+// target = "org.bytedeco.tensorrt.nvparsers",
+// global = "org.bytedeco.tensorrt.global.nvparsers")
public class nvparsers implements InfoMapper {
public void map(InfoMap infoMap) {
infoMap.put(new Info("nvuffparser::IPluginFactory").pointerTypes("IUffPluginFactory"))
diff --git a/tritonserver/README.md b/tritonserver/README.md
index 2964ad4a30c..a8c5df099db 100644
--- a/tritonserver/README.md
+++ b/tritonserver/README.md
@@ -23,7 +23,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:
- * Triton Inference Server 2.41.0 https://github.com/triton-inference-server/server
+ * Triton Inference Server 2.44.0 https://github.com/triton-inference-server/server
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
@@ -51,9 +51,9 @@ This sample intends to show how to call the Java-mapped C API of Triton to execu
1. Get the source code of Triton Inference Server to prepare the model repository:
```bash
- $ wget https://github.com/triton-inference-server/server/archive/refs/tags/v2.41.0.tar.gz
- $ tar zxvf v2.41.0.tar.gz
- $ cd server-2.41.0/docs/examples/model_repository
+ $ wget https://github.com/triton-inference-server/server/archive/refs/tags/v2.44.0.tar.gz
+ $ tar zxvf v2.44.0.tar.gz
+ $ cd server-2.44.0/docs/examples/model_repository
$ mkdir models
$ cd models; cp -a ../simple .
```
@@ -61,7 +61,7 @@ Now, this `models` directory will be our model repository.
2. Start the Docker container to run the sample (assuming we are under the `models` directory created above):
```bash
- $ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/tritonserver:23.12-py3 bash
+ $ docker run -it --gpus=all -v $(pwd):/workspace nvcr.io/nvidia/tritonserver:24.03-py3 bash
$ apt update
$ apt install -y openjdk-11-jdk
$ wget https://archive.apache.org/dist/maven/maven-3/3.8.4/binaries/apache-maven-3.8.4-bin.tar.gz
diff --git a/tritonserver/cppbuild.sh b/tritonserver/cppbuild.sh
index 02e9a5fa949..3a3315554df 100755
--- a/tritonserver/cppbuild.sh
+++ b/tritonserver/cppbuild.sh
@@ -11,9 +11,9 @@ INCLUDE_DEVELOPER_TOOLS_SERVER=${INCLUDE_DEVELOPER_TOOLS_SERVER:=1}
if [[ ! -f "/opt/tritonserver/include/triton/developer_tools/generic_server_wrapper.h" ]] && [[ ! -f "/opt/tritonserver/lib/libtritondevelopertoolsserver.so" ]] && [[ ${INCLUDE_DEVELOPER_TOOLS_SERVER} -ne 0 ]]; then
TOOLS_BRANCH=${TOOLS_BRANCH:="https://github.com/triton-inference-server/developer_tools.git"}
- TOOLS_BRANCH_TAG=${TOOLS_BRANCH_TAG:="r23.12"}
+ TOOLS_BRANCH_TAG=${TOOLS_BRANCH_TAG:="r24.03"}
TRITON_CORE_REPO=${TRITON_CORE_REPO:="https://github.com/triton-inference-server/core.git"}
- TRITON_CORE_REPO_TAG=${TRITON_CORE_REPO_TAG="r23.12"}
+ TRITON_CORE_REPO_TAG=${TRITON_CORE_REPO_TAG="r24.03"}
TRITON_HOME="/opt/tritonserver"
BUILD_HOME="$PWD"/tritonbuild
mkdir -p ${BUILD_HOME} && cd ${BUILD_HOME}
diff --git a/tritonserver/platform/pom.xml b/tritonserver/platform/pom.xml
index 1778ae8c87c..4c6b5bf447b 100644
--- a/tritonserver/platform/pom.xml
+++ b/tritonserver/platform/pom.xml
@@ -12,7 +12,7 @@
{@code
@@ -1649,18 +2193,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
*
{@code
* O = floor((M - DK) / S) + 1
* }
- * - CAFFE_ROUND_DOWN:
- * {@code
- * O = floor((I + B * 2 - DK) / S) + 1
- * }
* - EXPLICIT_ROUND_UP:
* {@code
* O = ceil((M - DK) / S) + 1
* }
- * - CAFFE_ROUND_UP:
- * {@code
- * O = ceil((I + B * 2 - DK) / S) + 1
- * }
* - SAME_UPPER:
* {@code
* O = ceil(I / S)
@@ -1678,9 +2214,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
*
* Formulas for Deconvolution:
* - EXPLICIT_ROUND_DOWN:
- * - CAFFE_ROUND_DOWN:
* - EXPLICIT_ROUND_UP:
- * - CAFFE_ROUND_UP:
*
- * - CAFFE_ROUND_DOWN:
- * {@code
* O = (I - 1) * S + DK - (B + A)
* }
@@ -1722,14 +2256,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* A = floor(P / 2)
* B = P - A
* }{@code
- * EXPLICIT_ROUND_DOWN - ((EXPLICIT_ROUND_DOWN - 1) * S >= I + B)
- * }
- * - CAFFE_ROUND_UP:
- * {@code
- * EXPLICIT_ROUND_UP - ((EXPLICIT_ROUND_UP - 1) * S >= I + B)
- * }
*
* Pooling Example 1:
* {@code
@@ -1793,54 +2319,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
*
{@code
* Given I = {6, 6}, B = {3, 3}, A = {3, 3}, S = {2, 2}, F = {3, 3}. What is O?
* }
- *
- * - CAFFE_ROUND_DOWN:
- * {@code
- * Computation:
- * M = {6, 6} + {3, 3} + {3, 3} ==> {12, 12}
- * EXPLICIT_ROUND_DOWN ==> floor((M - F) / S) + 1
- * ==> floor(({12, 12} - {3, 3}) / {2, 2}) + {1, 1}
- * ==> {5, 5}
- * DIFF = (((EXPLICIT_ROUND_DOWN - 1) * S >= I + B) ? {1, 1} : {0, 0})
- * ==> ({5, 5} - {1, 1}) * {2, 2} >= {6, 6} + {3, 3} ? {1, 1} : {0,0}
- * ==> {0, 0}
- * O ==> EXPLICIT_ROUND_DOWN - DIFF
- * ==> {5, 5} - {0, 0}
- * ==> {5, 5}
- * }
- * - CAFFE_ROUND_UP:
- * {@code
- * Computation:
- * M = {6, 6} + {3, 3} + {3, 3} ==> {12, 12}
- * EXPLICIT_ROUND_UP ==> ceil((M - F) / S) + 1
- * ==> ceil(({12, 12} - {3, 3}) / {2, 2}) + {1, 1}
- * ==> {6, 6}
- * DIFF = (((EXPLICIT_ROUND_UP - 1) * S >= I + B) ? {1, 1} : {0, 0})
- * ==> ({6, 6} - {1, 1}) * {2, 2} >= {6, 6} + {3, 3} ? {1, 1} : {0,0}
- * ==> {1, 1}
- * O ==> EXPLICIT_ROUND_UP - DIFF
- * ==> {6, 6} - {1, 1}
- * ==> {5, 5}
- * }
- *
- * The sample points are {0, 2, 4, 6, 8} in each dimension.
- * CAFFE_ROUND_DOWN and CAFFE_ROUND_UP have two restrictions each on usage with pooling operations.
- * This will cause getDimensions to return an empty dimension and also to reject the network
- * at validation time.
- * For more information on original reference code, see
- * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cpp
- *
- * - Restriction 1:
- * {@code
- * CAFFE_ROUND_DOWN: B >= F is an error if (B - S) < F
- * CAFFE_ROUND_UP: (B + S) >= (F + 1) is an error if B < (F + 1)
- * }
- *
- * - Restriction 2:
- * {@code
- * CAFFE_ROUND_DOWN: (B - S) >= F is an error if B >= F
- * CAFFE_ROUND_UP: B >= (F + 1) is an error if (B + S) >= (F + 1)
- * }
* */
@Namespace("nvinfer1") public enum PaddingMode {
/** Use explicit padding, rounding output size down. */
@@ -1850,11 +2328,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
/** Use SAME padding, with prePadding <= postPadding. */
kSAME_UPPER(2),
/** Use SAME padding, with prePadding >= postPadding. */
- kSAME_LOWER(3),
- /** Use CAFFE padding, rounding output size down, uses prePadding value. */
- kCAFFE_ROUND_DOWN(4),
- /** Use CAFFE padding, rounding output size up, uses prePadding value. */
- kCAFFE_ROUND_UP(5);
+ kSAME_LOWER(3);
public final int value;
private PaddingMode(int v) { this.value = v; }
@@ -1871,9 +2345,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/IConvolutionLayer.java
-// Targeting ../nvinfer/IFullyConnectedLayer.java
-
-
// Targeting ../nvinfer/IActivationLayer.java
@@ -1884,9 +2355,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* \brief The type of pooling to perform in a pooling layer.
* */
@Namespace("nvinfer1") public enum PoolingType {
- kMAX(0), // Maximum over elements
- kAVERAGE(1), // Average over elements. If the tensor is padded, the count includes the padding
- kMAX_AVERAGE_BLEND(2);// Blending between max and average pooling: (1-blendFactor)*maxPool + blendFactor*avgPool
+ /** Maximum over elements */
+ kMAX(0),
+ /** Average over elements. If the tensor is padded, the count includes the padding */
+ kAVERAGE(1),
+ /** Blending between max and average pooling: (1-blendFactor)*maxPool + blendFactor*avgPool */
+ kMAX_AVERAGE_BLEND(2);
public final int value;
private PoolingType(int v) { this.value = v; }
@@ -1953,9 +2427,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
*
* Operations kAND, kOR, and kXOR must have inputs of DataType::kBOOL.
*
- * Operation kPOW must have inputs of DataType::kFLOAT, DataType::kHALF, or DataType::kINT8.
+ * Operation kPOW must have inputs of floating-point type or DataType::kINT8.
*
- * All other operations must have inputs of DataType::kFLOAT, DataType::kHALF, DataType::kINT8, or DataType::kINT32.
+ * All other operations must have inputs of floating-point type, DataType::kINT8, DataType::kINT32, or
+ * DataType::kINT64.
*
* @see IElementWiseLayer
* */
@@ -2034,212 +2509,10 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/IGatherLayer.java
-
-/**
- * \enum RNNOperation
- *
- * \brief Enumerates the RNN operations that may be performed by an RNN layer.
- *
- * __Equation definitions__
- *
- * The equations below have the following naming convention:
- *
- * ~~~
- * t := current time step
- *
- * i := input gate
- * o := output gate
- * f := forget gate
- * z := update gate
- * r := reset gate
- * c := cell gate
- * h := hidden gate
- *
- * g[t] denotes the output of gate g at timestep t, e.g.
- * f[t] is the output of the forget gate f.
- *
- * X[t] := input tensor for timestep t
- * C[t] := cell state for timestep t
- * H[t] := hidden state for timestep t
- *
- * W[g] := W (input) parameter weight matrix for gate g
- * R[g] := U (recurrent) parameter weight matrix for gate g
- * Wb[g] := W (input) parameter bias vector for gate g
- * Rb[g] := U (recurrent) parameter bias vector for gate g
- *
- * Unless otherwise specified, all operations apply pointwise
- * to elements of each operand tensor.
- *
- * ReLU(X) := max(X, 0)
- * tanh(X) := hyperbolic tangent of X
- * sigmoid(X) := 1 / (1 + exp(-X))
- * exp(X) := e^X
- *
- * A.B denotes matrix multiplication of A and B.
- * A*B denotes pointwise multiplication of A and B.
- * ~~~
- *
- * __Equations__
- *
- * Depending on the value of RNNOperation chosen, each sub-layer of the RNN
- * layer will perform one of the following operations:
- *
- * ~~~
- * ::kRELU
- *
- * H[t] := ReLU(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i])
- *
- * ::kTANH
- *
- * H[t] := tanh(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i])
- *
- * ::kLSTM
- *
- * i[t] := sigmoid(W[i].X[t] + R[i].H[t-1] + Wb[i] + Rb[i])
- * f[t] := sigmoid(W[f].X[t] + R[f].H[t-1] + Wb[f] + Rb[f])
- * o[t] := sigmoid(W[o].X[t] + R[o].H[t-1] + Wb[o] + Rb[o])
- * c[t] := tanh(W[c].X[t] + R[c].H[t-1] + Wb[c] + Rb[c])
- *
- * C[t] := f[t]*C[t-1] + i[t]*c[t]
- * H[t] := o[t]*tanh(C[t])
- *
- * ::kGRU
- *
- * z[t] := sigmoid(W[z].X[t] + R[z].H[t-1] + Wb[z] + Rb[z])
- * r[t] := sigmoid(W[r].X[t] + R[r].H[t-1] + Wb[r] + Rb[r])
- * h[t] := tanh(W[h].X[t] + r[t]*(R[h].H[t-1] + Rb[h]) + Wb[h])
- *
- * H[t] := (1 - z[t])*h[t] + z[t]*H[t-1]
- * ~~~
- *
- * @see IRNNv2Layer
- * */
-@Namespace("nvinfer1") public enum RNNOperation {
- /** Single gate RNN w/ ReLU activation function. */
- kRELU(0),
- /** Single gate RNN w/ TANH activation function. */
- kTANH(1),
- /** Four-gate LSTM network w/o peephole connections. */
- kLSTM(2),
- /** Three-gate network consisting of Gated Recurrent Units. */
- kGRU(3);
-
- public final int value;
- private RNNOperation(int v) { this.value = v; }
- private RNNOperation(RNNOperation e) { this.value = e.value; }
- public RNNOperation intern() { for (RNNOperation e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/**
- * Maximum number of elements in RNNOperation enum.
- *
- * @see RNNOperation
- * */
-
-
-/**
- * \enum RNNDirection
- *
- * \brief Enumerates the RNN direction that may be performed by an RNN layer.
- *
- * @see IRNNv2Layer
- * */
-@Namespace("nvinfer1") public enum RNNDirection {
- /** Network iterations from first input to last input. */
- kUNIDIRECTION(0),
- /** Network iterates from first to last and vice versa and outputs concatenated. */
- kBIDIRECTION(1);
-
- public final int value;
- private RNNDirection(int v) { this.value = v; }
- private RNNDirection(RNNDirection e) { this.value = e.value; }
- public RNNDirection intern() { for (RNNDirection e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/**
- * Maximum number of elements in RNNDirection enum.
- *
- * @see RNNDirection
- * */
-
-
-/**
- * \enum RNNInputMode
- *
- * \brief Enumerates the RNN input modes that may occur with an RNN layer.
- *
- * If the RNN is configured with RNNInputMode::kLINEAR, then for each gate {@code g} in the first layer of the RNN,
- * the input vector {@code X[t]} (length {@code E}) is left-multiplied by the gate's corresponding weight matrix {@code W[g]}
- * (dimensions {@code HxE}) as usual, before being used to compute the gate output as described by \ref RNNOperation.
- *
- * If the RNN is configured with RNNInputMode::kSKIP, then this initial matrix multiplication is "skipped"
- * and {@code W[g]} is conceptually an identity matrix. In this case, the input vector {@code X[t]} must have length {@code H}
- * (the size of the hidden state).
- *
- * @see IRNNv2Layer
- * */
-@Namespace("nvinfer1") public enum RNNInputMode {
- /** Perform the normal matrix multiplication in the first recurrent layer. */
- kLINEAR(0),
- /** No operation is performed on the first recurrent layer. */
- kSKIP(1);
-
- public final int value;
- private RNNInputMode(int v) { this.value = v; }
- private RNNInputMode(RNNInputMode e) { this.value = e.value; }
- public RNNInputMode intern() { for (RNNInputMode e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/**
- * Maximum number of elements in RNNInputMode enum.
- *
- * @see RNNInputMode
- * */
-
-
-/**
- * \enum RNNGateType
- *
- * \brief Identifies an individual gate within an RNN cell.
- *
- * @see RNNOperation
- * */
-@Namespace("nvinfer1") public enum RNNGateType {
- /** Input gate (i). */
- kINPUT(0),
- /** Output gate (o). */
- kOUTPUT(1),
- /** Forget gate (f). */
- kFORGET(2),
- /** Update gate (z). */
- kUPDATE(3),
- /** Reset gate (r). */
- kRESET(4),
- /** Cell gate (c). */
- kCELL(5),
- /** Hidden gate (h). */
- kHIDDEN(6);
-
- public final int value;
- private RNNGateType(int v) { this.value = v; }
- private RNNGateType(RNNGateType e) { this.value = e.value; }
- public RNNGateType intern() { for (RNNGateType e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/**
- * Maximum number of elements in RNNGateType enum.
- *
- * @see RNNGateType
- * */
-
-// Targeting ../nvinfer/IRNNv2Layer.java
+// Targeting ../nvinfer/IPluginV2Layer.java
-// Targeting ../nvinfer/IPluginV2Layer.java
+// Targeting ../nvinfer/IPluginV3Layer.java
@@ -2250,13 +2523,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
*
* Operations kNOT must have inputs of DataType::kBOOL.
*
- * Operation kSIGN must have inputs of DataType::kFLOAT, DataType::kHALF, DataType::kINT8, or DataType::kINT32.
+ * Operation kSIGN and kABS must have inputs of floating-point type, DataType::kINT8, DataType::kINT32 or
+ * DataType::kINT64.
*
- * Operation kISINF must have inputs of DataType::kFLOAT or DataType::kHALF.
+ * Operation kISINF must have inputs of floating-point type.
*
- * All other operations must have inputs of DataType::kFLOAT, DataType::kHALF, or DataType::kINT8.
- *
- * Operations kSIGN and kROUND are not supported in implicit batch mode.
+ * All other operations must have inputs of floating-point type.
*
* @see IUnaryLayer
* */
@@ -2386,7 +2658,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
@Namespace("nvinfer1") public enum SampleMode {
/** Fail with error when the coordinates are out of bounds. */
kSTRICT_BOUNDS(0),
- kDEFAULT(kSTRICT_BOUNDS.value), /** @deprecated Use kSTRICT_BOUNDS. */
/** Coordinates wrap around periodically. */
kWRAP(1),
/** Out of bounds indices are clamped to bounds. */
@@ -2402,15 +2673,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
private SampleMode(int v) { this.value = v; }
private SampleMode(SampleMode e) { this.value = e.value; }
public SampleMode intern() { for (SampleMode e : values()) if (e.value == value) return e; return this; }
- @Override public String toString() { return intern().name(); }
-}
-
-/** @deprecated Deprecated in TensorRT 8.5. Superseded by SampleMode. */
-
-
-//!
-//!
-//!
+ @Override public String toString() { return intern().name(); }
+}
/**
* Maximum number of elements in SampleMode enum.
@@ -2537,8 +2801,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
public InterpolationMode intern() { for (InterpolationMode e : values()) if (e.value == value) return e; return this; }
@Override public String toString() { return intern().name(); }
}
-
-/** @deprecated Deprecated in TensorRT 8.5. Superseded by InterpolationMode. */
/**
* Maximum number of elements in InterpolationMode enum.
*
@@ -2666,7 +2928,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
-/** Enum that describes kinds of loop outputs. */
+/**
+ * \enum Enum that describes kinds of loop outputs.
+ * */
@Namespace("nvinfer1") public enum LoopOutput {
/** Output value is value of tensor for last iteration. */
kLAST_VALUE(0),
@@ -2691,10 +2955,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* */
-/** Enum that describes kinds of trip limits. */
+/**
+ * \enum Enum that describes kinds of trip limits.
+ * */
@Namespace("nvinfer1") public enum TripLimit {
- /** Tensor is scalar of type kINT32 that contains the trip count. */
+ /** Tensor is a scalar of type kINT32 or kINT64 that contains the trip count. */
kCOUNT(0),
/** Tensor is a scalar of type kBOOL. Loop terminates when value is false. */
kWHILE(1);
@@ -2760,11 +3026,27 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* @see IFillLayer
* */
@Namespace("nvinfer1") public enum FillOperation {
- /** Generate evenly spaced numbers over a specified interval. */
+ /** Compute each value via an affine function of its indices.
+ * For example, suppose the parameters for the IFillLayer are:
+ *
+ * * Dimensions = [3,4]
+ * * Alpha = 1
+ * * Beta = [100,10]
+ *
+ * Element [i,j] of the output is Alpha + Beta[0]*i + Beta[1]*j.
+ * Thus the output matrix is:
+ *
+ * 1 11 21 31
+ * 101 111 121 131
+ * 201 211 221 231
+ *
+ * A static beta b is implicitly a 1D tensor, i.e. Beta = [b]. */
kLINSPACE(0),
- /** Generate a tensor with random values drawn from a uniform distribution. */
+
+ /** Randomly draw values from a uniform distribution. */
kRANDOM_UNIFORM(1),
- /** Generate a tensor with random values drawn from a normal distribution. */
+
+ /** Randomly draw values from a normal distribution. */
kRANDOM_NORMAL(2);
public final int value;
@@ -2794,6 +3076,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
/**
+ * \enum ScatterMode
+ *
* \brief Control form of IScatterLayer
*
* @see IScatterLayer
@@ -2828,6 +3112,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// class IGridSampleLayer
/**
+ * \enum BoundingBoxFormat
+ *
* \brief Representation of bounding box data used for the Boxes input tensor in INMSLayer
*
* @see INMSLayer
@@ -2865,14 +3151,18 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
/**
- * enum CalibrationAlgoType
+ * \enum CalibrationAlgoType
*
* \brief Version of calibration algorithm to use.
* */
@Namespace("nvinfer1") public enum CalibrationAlgoType {
+ /** Legacy calibration */
kLEGACY_CALIBRATION(0),
+ /** Legacy entropy calibration */
kENTROPY_CALIBRATION(1),
+ /** Entropy calibration */
kENTROPY_CALIBRATION_2(2),
+ /** Minmax calibration */
kMINMAX_CALIBRATION(3);
public final int value;
@@ -2894,15 +3184,72 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/IInt8EntropyCalibrator.java
+ // namespace v_1_0
+
+/**
+ * \class IInt8EntropyCalibrator
+ *
+ * \brief Entropy calibrator.
+ *
+ * This is the Legacy Entropy calibrator. It is less complicated than the legacy calibrator and
+ * produces better results.
+ *
+ * \note To ensure compatibility of source code with future versions of TensorRT, use IEntropyCalibrator, not
+ * v_1_0::IEntropyCalibrator
+ * */
// Targeting ../nvinfer/IInt8EntropyCalibrator2.java
+ // namespace v_1_0
+
+/**
+ * \class IInt8EntropyCalibrator2
+ *
+ * \brief Entropy calibrator 2.
+ *
+ * This is the preferred calibrator. This is the required calibrator for DLA, as it supports per
+ * activation tensor scaling.
+ *
+ * \note To ensure compatibility of source code with future versions of TensorRT, use IEntropyCalibrator2, not
+ * v_1_0::IEntropyCalibrator2
+ * */
// Targeting ../nvinfer/IInt8MinMaxCalibrator.java
+ // namespace v_1_0
+
+/**
+ * \class IInt8MinMaxCalibrator
+ *
+ * \brief MinMax Calibrator.
+ *
+ * It supports per activation tensor scaling.
+ *
+ * \note To ensure compatibility of source code with future versions of TensorRT, use IMinMaxCalibrator>, not
+ * v_1_0::IMinMaxCalibrator
+ * */
// Targeting ../nvinfer/IInt8LegacyCalibrator.java
+ // namespace v_1_0
+
+/**
+ * \class IInt8LegacyCalibrator
+ *
+ * \brief Legacy calibrator.
+ *
+ * This calibrator requires user parameterization,
+ * and is provided as a fallback option if the other calibrators yield poor results.
+ *
+ * \note To ensure compatibility of source code with future versions of TensorRT, use ILegacyCalibrator, not
+ * v_1_0::ILegacyCalibrator
+ * */
+
+
+//!
+//!
+//!
+//!
// Targeting ../nvinfer/IAlgorithmIOInfo.java
@@ -2918,6 +3265,23 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/IAlgorithmSelector.java
+ // namespace v_1_0
+
+/**
+ * \class IAlgorithmSelector
+ *
+ * \brief Interface implemented by application for selecting and reporting algorithms of a layer provided by the
+ * builder.
+ * \note A layer in context of algorithm selection may be different from ILayer in INetworkDefiniton.
+ * For example, an algorithm might be implementing a conglomeration of multiple ILayers in INetworkDefinition.
+ * \note To ensure compatibility of source code with future versions of TensorRT, use IAlgorithmSelector, not
+ * v_1_0::IAlgorithmSelector
+ * */
+
+
+//!
+//!
+//!
/**
* \brief Represents one or more QuantizationFlag values using binary OR
@@ -2982,44 +3346,31 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
@Namespace("nvinfer1") public enum BuilderFlag {
/** Enable FP16 layer selection, with FP32 fallback. */
kFP16(0),
+
/** Enable Int8 layer selection, with FP32 fallback with FP16 fallback if kFP16 also specified. */
kINT8(1),
+
/** Enable debugging of layers via synchronizing after every layer. */
kDEBUG(2),
- /** Enable layers marked to execute on GPU if layer cannot execute on DLA.
-//!
-//!
-//!
-//! */
- kGPU_FALLBACK(3),
- /** Legacy flag with effect similar to setting all of these three flags:
- *
- * * kPREFER_PRECISION_CONSTRAINTS
- * * kDIRECT_IO
- * * kREJECT_EMPTY_ALGORITHMS
- *
- * except that if the direct I/O requirement cannot be met and kDIRECT_IO was not explicitly set,
- * instead of the build failing, the build falls back as if kDIRECT_IO was not set.
- *
- * @deprecated Deprecated in TensorRT 8.2.
- * */
- kSTRICT_TYPES(4),
+ /** Enable layers marked to execute on GPU if layer cannot execute on DLA. */
+ kGPU_FALLBACK(3),
/** Enable building a refittable engine. */
- kREFIT(5),
+ kREFIT(4),
+
/** Disable reuse of timing information across identical layers. */
- kDISABLE_TIMING_CACHE(6),
+ kDISABLE_TIMING_CACHE(5),
/** Allow (but not require) computations on tensors of type DataType::kFLOAT to use TF32.
* TF32 computes inner products by rounding the inputs to 10-bit mantissas before
* multiplying, but accumulates the sum using 23-bit mantissas. Enabled by default. */
- kTF32(7),
+ kTF32(6),
/** Allow the builder to examine weights and use optimized functions when weights have suitable sparsity. */
//!
- kSPARSE_WEIGHTS(8),
+ kSPARSE_WEIGHTS(7),
/** Change the allowed parameters in the EngineCapability::kSTANDARD flow to
* match the restrictions that EngineCapability::kSAFETY check against for DeviceType::kGPU
@@ -3027,59 +3378,114 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* is forced to true if EngineCapability::kSAFETY at build time if it is unset.
*
* This flag is only supported in NVIDIA Drive(R) products. */
- kSAFETY_SCOPE(9),
+ kSAFETY_SCOPE(8),
/** Require that layers execute in specified precisions. Build fails otherwise. */
- kOBEY_PRECISION_CONSTRAINTS(10),
+ kOBEY_PRECISION_CONSTRAINTS(9),
/** Prefer that layers execute in specified precisions.
* Fall back (with warning) to another precision if build would otherwise fail. */
- kPREFER_PRECISION_CONSTRAINTS(11),
+ kPREFER_PRECISION_CONSTRAINTS(10),
/** Require that no reformats be inserted between a layer and a network I/O tensor
* for which ITensor::setAllowedFormats was called.
* Build fails if a reformat is required for functional correctness. */
- kDIRECT_IO(12),
+ kDIRECT_IO(11),
/** Fail if IAlgorithmSelector::selectAlgorithms returns an empty set of algorithms. */
//!
- kREJECT_EMPTY_ALGORITHMS(13),
-
- /** Enable heuristic-based tactic selection for shorter engine generation time. The engine may not
- * be as performant as when built with a profiling-based builder.
- *
- * This flag is only supported by NVIDIA Ampere and later GPUs.
- * @deprecated Superseded by builder optimization level 2. Deprecated in TensorRT 8.6 */
-
-//!
- kENABLE_TACTIC_HEURISTIC(14),
+ kREJECT_EMPTY_ALGORITHMS(12),
/** Restrict to lean runtime operators to provide version forward compatibility
* for the plan.
*
- * Using this flag with ICudaEngine::serialize() and BuilderFlag::kREFIT would result in error.
* This flag is only supported by NVIDIA Volta and later GPUs.
- * This flag is not supported in NVIDIA Drive(R) products.
- * This flag is not supported with implicit batch mode. Network must be created with
- * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH. */
+ * This flag is not supported in NVIDIA Drive(R) products. */
//!
-//!
- kVERSION_COMPATIBLE(15),
+ kVERSION_COMPATIBLE(13),
/** Exclude lean runtime from the plan when version forward compatability is enabled.
* By default, this flag is unset, so the lean runtime will be included in the plan.
*
- * If BuilderFlag::kVERSION_COMPATIBLE is not set then the value of this flag will be ignored.
- *
- * This flag is not supported with implicit batch mode. Network must be created with
- * NetworkDefinitionCreationFlag::kEXPLICIT_BATCH. */
- kEXCLUDE_LEAN_RUNTIME(16),
+ * If BuilderFlag::kVERSION_COMPATIBLE is not set then the value of this flag will be ignored. */
+
+//!
+//!
+ kEXCLUDE_LEAN_RUNTIME(14),
/** Enable FP8 layer selection, with FP32 fallback.
- * \warning kFP8 is not supported yet and will result in an error or undefined behavior. */
- kFP8(17);
+ *
+ * This flag is not supported with hardware-compatibility mode.
+ *
+ * @see HardwareCompatibilityLevel */
+ kFP8(15),
+
+ /** Emit error when a tactic being timed is not present in the timing cache.
+ * This flag has an effect only when IBuilderConfig has an associated ITimingCache. */
+ kERROR_ON_TIMING_CACHE_MISS(16),
+
+ /** Enable DataType::kBF16 layer selection, with FP32 fallback.
+ * This flag is only supported by NVIDIA Ampere and later GPUs. */
+ kBF16(17),
+
+ /** Disable caching of JIT-compilation results during engine build.
+ * By default, JIT-compiled code will be serialized as part of the timing cache, which may significantly increase
+ * the cache size. Setting this flag prevents the code from being serialized. This flag has an effect only when
+ * BuilderFlag::DISABLE_TIMING_CACHE is not set. */
+ kDISABLE_COMPILATION_CACHE(18),
+
+ /** Strip the refittable weights from the engine plan file. */
+ kSTRIP_PLAN(19),
+
+ /** @deprecated Deprecated in TensorRT 10.0. Superseded by kSTRIP_PLAN. */
+ kWEIGHTLESS(kSTRIP_PLAN.value),
+
+ /** Create a refittable engine under the assumption that the refit weights will be identical to those provided at
+ * build time. The resulting engine will have the same performance as a non-refittable one. All refittable weights
+ * can be refitted through the refit API, but if the refit weights are not identical to the build-time weights,
+ * behavior is undefined. When used alongside 'kSTRIP_PLAN', this flag will result in a small plan file for which
+ * weights are later supplied via refitting. This enables use of a single set of weights with different inference
+ * backends, or with TensorRT plans for multiple GPU architectures. */
+
+
+//!
+//!
+//!
+//!
+//!
+//!
+//!
+//!
+ kREFIT_IDENTICAL(20),
+
+ /**
+ * \brief Enable weight streaming for the current engine.
+ *
+ * Weight streaming from the host enables execution of models that do not fit
+ * in GPU memory by allowing TensorRT to intelligently stream network weights
+ * from the CPU DRAM. Please see ICudaEngine::getMinimumWeightStreamingBudget
+ * for the default memory budget when this flag is enabled.
+ *
+ * Enabling this feature changes the behavior of
+ * IRuntime::deserializeCudaEngine to allocate the entire network’s weights
+ * on the CPU DRAM instead of GPU memory. Then,
+ * ICudaEngine::createExecutionContext will determine the optimal split of
+ * weights between the CPU and GPU and place weights accordingly.
+ *
+ * Future TensorRT versions may enable this flag by default.
+ *
+ * \warning Enabling this flag may marginally increase build time.
+ *
+ * \warning Enabling this feature will significantly increase the latency of
+ * ICudaEngine::createExecutionContext.
+ *
+ * @see IRuntime::deserializeCudaEngine,
+ * ICudaEngine::getMinimumWeightStreamingBudget,
+ * ICudaEngine::setWeightStreamingBudget
+ * */
+ kWEIGHT_STREAMING(21);
public final int value;
private BuilderFlag(int v) { this.value = v; }
@@ -3108,7 +3514,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
@Namespace("nvinfer1") public enum MemoryPoolType {
/**
* kWORKSPACE is used by TensorRT to store intermediate buffers within an operation.
- * This is equivalent to the deprecated IBuilderConfig::setMaxWorkspaceSize and overrides that value.
* This defaults to max device memory. Set to a smaller value to restrict tactics that use over the
* threshold en masse. For more targeted removal of tactics use the IAlgorithmSelector
* interface.
@@ -3123,7 +3528,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* kDLA_MANAGED_SRAM is a fast software managed RAM used by DLA to communicate within a layer.
* The size of this pool must be at least 4 KiB and must be a power of 2.
* This defaults to 1 MiB.
- * Orin has capacity of 1 MiB per core, and Xavier shares 4 MiB across all of its accelerator cores.
+ * Orin has capacity of 1 MiB per core.
* */
@@ -3160,7 +3565,27 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* This defaults to 75% of totalGlobalMem as reported by cudaGetDeviceProperties when
* cudaGetDeviceProperties.embedded is true, and 100% otherwise.
* */
- kTACTIC_DRAM(4);
+
+
+//!
+//!
+//!
+//!
+ kTACTIC_DRAM(4),
+
+ /**
+ * kTACTIC_SHARED_MEMORY defines the maximum sum of shared memory reserved by the driver and
+ * used for executing CUDA kernels. Adjust this value to restrict tactics that exceed the
+ * specified threshold en masse. The default value is device max capability. This value must
+ * be less than 1GiB.
+ *
+ * The driver reserved shared memory can be queried from cuDeviceGetAttribute(&reservedShmem,
+ * CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK).
+ *
+ * Updating this flag will override the shared memory limit set by \ref HardwareCompatibilityLevel,
+ * which defaults to 48KiB - reservedShmem.
+ * */
+ kTACTIC_SHARED_MEMORY(5);
public final int value;
private MemoryPoolType(int v) { this.value = v; }
@@ -3185,52 +3610,12 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* They are provided as opt-in features for at least one release.
* */
@Namespace("nvinfer1") public enum PreviewFeature {
- /**
- * Optimize runtime dimensions with TensorRT's DL Compiler.
- * Potentially reduces run time and decreases device memory usage and engine size.
- * Models most likely to benefit from enabling kFASTER_DYNAMIC_SHAPES_0805 are transformer-based models,
- * and models containing dynamic control flows.
- *
- * The default value for this flag is on.
- *
- * @deprecated Turning it off is deprecated in TensorRT 8.6. The flag kFASTER_DYNAMIC_SHAPES_0805 will be removed in 9.0.
- * */
-
-
-//!
-//!
-//!
-//!
-//!
-//!
- kFASTER_DYNAMIC_SHAPES_0805(0),
-
- /**
- * Disable usage of cuDNN/cuBLAS/cuBLASLt tactics in the TensorRT core library.
- *
- * When the flag is enabled, TensorRT core will not use these tactics even if they are specified in
- * \ref IBuilderConfig::setTacticSources(), but cudnnContext and cublasContext handles will still be passed to
- * plugins via IPluginV2Ext::attachToContext() if the appropriate tactic sources are set.
- *
- * This allows users to experiment with disabling external library tactics without having to modify their
- * application's plugins to support nullptr handles.
- *
- * The default value for this flag is on.
- *
- * @see TacticSource
- * */
-
-
-//!
-//!
- kDISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805(1),
-
/**
* Allows optimization profiles to be shared across execution contexts.
- * This flag defaults to false and will become the default behavior in TensorRT 9.0.
- * At that point this flag will do nothing.
+ *
+ * @deprecated Deprecated in TensorRT 10.0. The default value for this flag is on and can not be changed.
* */
- kPROFILE_SHARING_0806(2);
+ kPROFILE_SHARING_0806(0);
public final int value;
private PreviewFeature(int v) { this.value = v; }
@@ -3245,18 +3630,33 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* */
// namespace impl
-/** Describes requirements of compatibility with GPU architectures other than that of the GPU on which the engine was
- * built. Levels except kNONE are only supported for engines built on NVIDIA Ampere and later GPUs.
- * Note that compatibility with future hardware depends on CUDA forward compatibility support. */
+/**
+ * \enum HardwareCompatibilityLevel
+ *
+ * \brief Describes requirements of compatibility with GPU architectures other than that of the GPU on which the engine was
+ * built.
+ *
+ * Levels except kNONE are only supported for engines built on NVIDIA Ampere and later GPUs.
+ *
+ * \warning Note that compatibility with future hardware depends on CUDA forward compatibility support.
+ * */
@Namespace("nvinfer1") public enum HardwareCompatibilityLevel {
/** Do not require hardware compatibility with GPU architectures other than that of the GPU on which the engine was
* built. */
+
+//!
+//!
kNONE(0),
- /** Require that the engine is compatible with Ampere and newer GPUs. This will limit the max shared memory usage to
- * 48KiB, may reduce the number of available tactics for each layer, and may prevent some fusions from occurring.
- * Thus this can decrease the performance, especially for tf32 models.
- * This option will disable cuDNN, cuBLAS, and cuBLAS LT as tactic sources. */
+ /** Require that the engine is compatible with Ampere and newer GPUs. This will limit the combined usage of driver
+ * reserved and backend kernel max shared memory to 48KiB, may reduce the number of available tactics for each
+ * layer, and may prevent some fusions from occurring. Thus this can decrease the performance, especially for tf32
+ * models.
+ * This option will disable cuDNN, cuBLAS, and cuBLAS LT as tactic sources.
+ *
+ * The driver reserved shared memory can be queried from cuDeviceGetAttribute(&reservedShmem,
+ * CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK).
+ * */
kAMPERE_PLUS(1);
public final int value;
@@ -3271,42 +3671,73 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
* @see HardwareCompatibilityLevel
* */
+// Targeting ../nvinfer/IProgressMonitor.java
+
+ // class IProgressMonitor
+ // namespace v_1_0
+
+/**
+ * \class IProgressMonitor
+ *
+ * \brief Application-implemented progress reporting interface for TensorRT.
+ *
+ * The IProgressMonitor is a user-defined object that TensorRT uses to report back when an internal algorithm has
+ * started or finished a phase to help provide feedback on the progress of the optimizer.
+ *
+ * The IProgressMonitor will trigger its start function when a phase is entered and will trigger its finish function
+ * when that phase is exited. Each phase consists of one or more steps. When each step is completed, the stepComplete
+ * function is triggered. This will allow an application using the builder to communicate progress relative to when the
+ * optimization step is expected to complete.
+ *
+ * The implementation of IProgressMonitor must be thread-safe so that it can be called from multiple internal threads.
+ * The lifetime of the IProgressMonitor must exceed the lifetime of all TensorRT objects that use it.
+ *
+ * \note To ensure compatibility of source code with future versions of TensorRT, use IProgressMonitor, not
+ * v_1_0::IProgressMonitor
+ * */
+
+
+//!
+//!
+//!
+//!
// Targeting ../nvinfer/IBuilderConfig.java
-/** \brief Represents one or more NetworkDefinitionCreationFlag flags
+/**
+ * \brief Represents one or more NetworkDefinitionCreationFlag flags
* using binary OR operations.
- * e.g., 1U << NetworkDefinitionCreationFlag::kEXPLICIT_BATCH
+ * e.g., 1U << NetworkDefinitionCreationFlag::kSTRONGLY_TYPED
*
* @see IBuilder::createNetworkV2
* */
+
+//!
//!
//!
//!
-/** \enum NetworkDefinitionCreationFlag
+/**
+ * \enum NetworkDefinitionCreationFlag
*
* \brief List of immutable network properties expressed at network creation time.
* NetworkDefinitionCreationFlag is used with createNetworkV2() to specify immutable properties of the network.
- * Creating a network without NetworkDefinitionCreationFlag::kEXPLICIT_BATCH flag has been deprecated.
*
* @see IBuilder::createNetworkV2
* */
@Namespace("nvinfer1") public enum NetworkDefinitionCreationFlag {
- /** Mark the network to be an explicit batch network.
- * Dynamic shape support requires that the kEXPLICIT_BATCH flag is set.
- * With dynamic shapes, any of the input dimensions can vary at run-time,
- * and there are no implicit dimensions in the network specification.
- * Varying dimensions are specified by using the wildcard dimension value -1. */
-
-//!
+ /** Ignored because networks are always "explicit batch" in TensorRT 10.0.
+ *
+ * @deprecated Deprecated in TensorRT 10.0. */
kEXPLICIT_BATCH(0),
- /** Deprecated. This flag has no effect now, but is only kept for backward compatability.
- * */
- kEXPLICIT_PRECISION(1);
+ /** Mark the network to be strongly typed.
+ * Every tensor in the network has a data type defined in the network following only type inference rules and the
+ * inputs/operator annotations. Setting layer precision and layer output types is not allowed, and the network
+ * output types will be inferred based on the input types and the type inference rules. */
+ kSTRONGLY_TYPED(1);
public final int value;
private NetworkDefinitionCreationFlag(int v) { this.value = v; }
@@ -3385,15 +3816,20 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Parsed from NvInferImpl.h
/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
*
- * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
- * property and proprietary rights in and to this material, related
- * documentation and any modifications thereto. Any use, reproduction,
- * disclosure or distribution of this material and related documentation
- * without an express license agreement from NVIDIA CORPORATION or
- * its affiliates is strictly prohibited.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
// #ifndef NV_INFER_IMPL_H
@@ -3401,6 +3837,13 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// #include "NvInferLegacyDims.h"
// #include "NvInferRuntimeCommon.h"
+
+// @cond SuppressDoxyWarnings
+
+
+
+
+
// Targeting ../nvinfer/IPlugin.java
@@ -3410,6 +3853,7 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/IPluginLayer.java
+ // namespace v_1_0
/** enum class nvinfer1::ActivationType */
;
@@ -3463,20 +3907,14 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
;
/** enum class nvinfer1::ResizeSelector */
;
-/** enum class nvinfer1::RNNDirection */
-;
-/** enum class nvinfer1::RNNGateType */
-;
-/** enum class nvinfer1::RNNInputMode */
-;
-/** enum class nvinfer1::RNNOperation */
-;
/** enum class nvinfer1::ScaleMode */
;
/** enum class nvinfer1::ScatterMode */
;
/** enum class nvinfer1::SampleMode */
;
+/** enum class nvinfer1::SerializationFlag */
+;
/** enum class nvinfer1::TensorIOMode */
;
/** enum class nvinfer1::TensorLocation */
@@ -3493,6 +3931,8 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
;
/** enum class nvinfer1::HardwareCompatibilityLevel */
;
+/** enum class nvinfer1::ExecutionContextAllocationStrategy */
+;
//!
@@ -3537,9 +3977,6 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/VConvolutionLayer.java
-// Targeting ../nvinfer/VFullyConnectedLayer.java
-
-
// Targeting ../nvinfer/VActivationLayer.java
@@ -3567,15 +4004,15 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/VGatherLayer.java
-// Targeting ../nvinfer/VRNNv2Layer.java
-
-
// Targeting ../nvinfer/VPluginLayer.java
// Targeting ../nvinfer/VPluginV2Layer.java
+// Targeting ../nvinfer/VPluginV3Layer.java
+
+
// Targeting ../nvinfer/VUnaryLayer.java
@@ -3711,6 +4148,9 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// Targeting ../nvinfer/VBuilderConfig.java
+// Targeting ../nvinfer/VSerializationConfig.java
+
+
// Targeting ../nvinfer/VBuilder.java
@@ -3723,185 +4163,4 @@ public class nvinfer extends org.bytedeco.tensorrt.presets.nvinfer {
// #endif // NV_INFER_RUNTIME_IMPL_H
-// Parsed from NvUtils.h
-
-/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
- *
- * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
- * property and proprietary rights in and to this material, related
- * documentation and any modifications thereto. Any use, reproduction,
- * disclosure or distribution of this material and related documentation
- * without an express license agreement from NVIDIA CORPORATION or
- * its affiliates is strictly prohibited.
- */
-
-// #ifndef NV_UTILS_H
-// #define NV_UTILS_H
-
-
-
-//!
-//!
-//!
-// #include "NvInfer.h"
-
-/**
- * \file NvUtils.h
- *
- * This file includes various utility functions
- * */
-
-/**
- * @param input The input weights to reshape.
- * @param shape The shape of the weights.
- * @param shapeOrder The order of the dimensions to process for the output.
- * @param data The location where the output data is placed.
- * @param nbDims The number of dimensions to process.
- *
- * \brief Reformat the input weights of the given shape based on the new
- * order of dimensions.
- *
- * Take the weights specified by \p input with the dimensions specified by
- * \p shape and re-order the weights based on the new dimensions specified
- * by \p shapeOrder. The size of each dimension and the input data is not
- * modified. The output volume pointed to by \p data must be the same as
- * he \p input volume.
- *
- * Example usage:
- * float *out = new float[N*C*H*W];
- * Weights input{DataType::kFLOAT, {0 ... N*C*H*W-1}, N*C*H*W size};
- * int32_t order[4]{1, 0, 3, 2};
- * int32_t shape[4]{C, N, W, H};
- * reshapeWeights(input, shape, order, out, 4);
- * Weights reshaped{input.type, out, input.count};
- *
- * Input Matrix{3, 2, 3, 2}:
- * { 0 1}, { 2 3}, { 4 5} <-- {0, 0, *, *}
- * { 6 7}, { 8 9}, {10 11} <-- {0, 1, *, *}
- * {12 13}, {14 15}, {16 17} <-- {1, 0, *, *}
- * {18 19}, {20 21}, {22 23} <-- {1, 1, *, *}
- * {24 25}, {26 27}, {28 29} <-- {2, 0, *, *}
- * {30 31}, {32 33}, {34 35} <-- {2, 1, *, *}
- *
- * Output Matrix{2, 3, 2, 3}:
- * { 0 2 4}, { 1 3 5} <-- {0, 0, *, *}
- * {12 14 16}, {13 15 17} <-- {0, 1, *, *}
- * {24 26 28}, {25 27 29} <-- {0, 2, *, *}
- * { 6 8 10}, { 7 9 11} <-- {1, 0, *, *}
- * {18 20 22}, {19 21 23} <-- {1, 1, *, *}
- * {30 32 34}, {31 33 35} <-- {1, 2, *, *}
- *
- * @return True on success, false on failure.
- *
- * @deprecated Deprecated in TensorRT 8.0.
- *
- * \warning This file will be removed in TensorRT 10.0.
- * */
-
-
-//!
-//!
-//!
-//!
-//!
-//!
-//!
-//!
-//!
-//!
-//!
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights(
- @Const @ByRef Weights input, @Const IntPointer shape, @Const IntPointer shapeOrder, Pointer data, int nbDims);
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights(
- @Const @ByRef Weights input, @Const IntBuffer shape, @Const IntBuffer shapeOrder, Pointer data, int nbDims);
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reshapeWeights(
- @Const @ByRef Weights input, @Const int[] shape, @Const int[] shapeOrder, Pointer data, int nbDims);
-
-/**
- * @param input The input data to re-order.
- * @param order The new order of the data sub-buffers.
- * @param num The number of data sub-buffers to re-order.
- * @param size The size of each data sub-buffer in bytes.
- *
- * \brief Takes an input stream and re-orders \p num chunks of the data
- * given the \p size and \p order.
- *
- * In some frameworks, the ordering of the sub-buffers within a dimension
- * is different than the way that TensorRT expects them.
- * TensorRT expects the gate/bias sub-buffers for LSTM's to be in fico order.
- * TensorFlow however formats the sub-buffers in icfo order.
- * This helper function solves this in a generic fashion.
- *
- * Example usage output of reshapeWeights above:
- * int32_t indir[1]{1, 0}
- * int32_t stride = W*H;
- * for (int32_t x = 0, y = N*C; x < y; ++x)
- * reorderSubBuffers(out + x * stride, indir, H, W);
- *
- * Input Matrix{2, 3, 2, 3}:
- * { 0 2 4}, { 1 3 5} <-- {0, 0, *, *}
- * {12 14 16}, {13 15 17} <-- {0, 1, *, *}
- * {24 26 28}, {25 27 29} <-- {0, 2, *, *}
- * { 6 8 10}, { 7 9 11} <-- {1, 0, *, *}
- * {18 20 22}, {19 21 23} <-- {1, 1, *, *}
- * {30 32 34}, {31 33 35} <-- {1, 2, *, *}
- *
- * Output Matrix{2, 3, 2, 3}:
- * { 1 3 5}, { 0 2 4} <-- {0, 0, *, *}
- * {13 15 17}, {12 14 16} <-- {0, 1, *, *}
- * {25 27 29}, {24 26 28} <-- {0, 2, *, *}
- * { 7 9 11}, { 6 8 10} <-- {1, 0, *, *}
- * {19 21 23}, {18 20 22} <-- {1, 1, *, *}
- * {31 33 35}, {30 32 34} <-- {1, 2, *, *}
- *
- * @return True on success, false on failure.
- *
- * @see reshapeWeights()
- *
- * @deprecated Deprecated in TensorRT 8.0.
- *
- * \warning This file will be removed in TensorRT 10.0.
- * */
-
-
-//!
-//!
-//!
-//!
-//!
-//!
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers(
- Pointer input, @Const IntPointer order, int num, int size);
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers(
- Pointer input, @Const IntBuffer order, int num, int size);
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean reorderSubBuffers(
- Pointer input, @Const int[] order, int num, int size);
-
-/**
- * @param input The input data to transpose.
- * @param type The type of the data to transpose.
- * @param num The number of data sub-buffers to transpose.
- * @param height The size of the height dimension to transpose.
- * @param width The size of the width dimension to transpose.
- *
- * \brief Transpose \p num sub-buffers of \p height * \p width.
- *
- * @return True on success, false on failure.
- *
- * @deprecated Deprecated in TensorRT 8.0.
- *
- * \warning This file will be removed in TensorRT 10.0.
- * */
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean transposeSubBuffers(
- Pointer input, DataType type, int num, int height, int width);
-@Namespace("nvinfer1::utils") public static native @Cast("bool") @Deprecated @NoException(true) boolean transposeSubBuffers(
- Pointer input, @Cast("nvinfer1::DataType") int type, int num, int height, int width);
-
- // namespace utils
- // namespace nvinfer1
-// #endif // NV_UTILS_H
-
-
}
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java
index aa9b88e3758..c84012c5256 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvinfer_plugin.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.global;
@@ -26,15 +26,20 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin
// Parsed from NvInferPlugin.h
/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
*
- * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
- * property and proprietary rights in and to this material, related
- * documentation and any modifications thereto. Any use, reproduction,
- * disclosure or distribution of this material and related documentation
- * without an express license agreement from NVIDIA CORPORATION or
- * its affiliates is strictly prohibited.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
// #ifndef NV_INFER_PLUGIN_H
@@ -51,214 +56,6 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin
*
* This is the API for the Nvidia provided TensorRT plugins.
* */
- /**
- * \brief Create a plugin layer that fuses the RPN and ROI pooling using user-defined parameters.
- * Registered plugin type "RPROI_TRT". Registered plugin version "1".
- * @param featureStride Feature stride.
- * @param preNmsTop Number of proposals to keep before applying NMS.
- * @param nmsMaxOut Number of remaining proposals after applying NMS.
- * @param iouThreshold IoU threshold.
- * @param minBoxSize Minimum allowed bounding box size before scaling.
- * @param spatialScale Spatial scale between the input image and the last feature map.
- * @param pooling Spatial dimensions of pooled ROIs.
- * @param anchorRatios Aspect ratios for generating anchor windows.
- * @param anchorScales Scales for generating anchor windows.
- *
- * @return Returns a FasterRCNN fused RPN+ROI pooling plugin. Returns nullptr on invalid inputs.
- *
- * @deprecated Deprecated in TensorRT 8.5. Use RPROIPluginCreator::createPlugin() to create an instance of
- * "RPROI_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createRPNROIPlugin(int featureStride, int preNmsTop, int nmsMaxOut,
- float iouThreshold, float minBoxSize, float spatialScale, @ByVal DimsHW pooling,
- @ByVal Weights anchorRatios, @ByVal Weights anchorScales);
-
- /**
- * \brief The Normalize plugin layer normalizes the input to have L2 norm of 1 with scale learnable.
- * Registered plugin type "Normalize_TRT". Registered plugin version "1".
- * @param scales Scale weights that are applied to the output tensor.
- * @param acrossSpatial Whether to compute the norm over adjacent channels (acrossSpatial is true) or nearby
- * spatial locations (within channel in which case acrossSpatial is false).
- * @param channelShared Whether the scale weight(s) is shared across channels.
- * @param eps Epsilon for not dividing by zero.
- *
- * @deprecated Deprecated in TensorRT 8.5. Use NormalizePluginCreator::createPlugin() to create an instance of
- * "Normalize_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createNormalizePlugin(
- @Const Weights scales, @Cast("bool") boolean acrossSpatial, @Cast("bool") boolean channelShared, float eps);
-
- /**
- * \brief The PriorBox plugin layer generates the prior boxes of designated sizes and aspect ratios across all
- * dimensions (H x W). PriorBoxParameters defines a set of parameters for creating the PriorBox plugin layer.
- * Registered plugin type "PriorBox_TRT". Registered plugin version "1".
- *
- * @deprecated Deprecated in TensorRT 8.5. Use PriorBoxPluginCreator::createPlugin() to create an instance of
- * "PriorBox_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createPriorBoxPlugin(@ByVal PriorBoxParameters param);
-
- /**
- * \brief The Grid Anchor Generator plugin layer generates the prior boxes of
- * designated sizes and aspect ratios across all dimensions (H x W) for all feature maps.
- * GridAnchorParameters defines a set of parameters for creating the GridAnchorGenerator plugin layer.
- * Registered plugin type "GridAnchor_TRT". Registered plugin version "1".
- *
- * @deprecated Deprecated in TensorRT 8.5. Use GridAnchorPluginCreator::createPlugin() to create an instance of
- * "GridAnchor_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createAnchorGeneratorPlugin(
- GridAnchorParameters param, int numLayers);
-
- /**
- * \brief The DetectionOutput plugin layer generates the detection output based on location and confidence
- * predictions by doing non maximum suppression. DetectionOutputParameters defines a set of parameters for creating
- * the DetectionOutput plugin layer. Registered plugin type "NMS_TRT". Registered plugin version "1".
- *
- * @deprecated Deprecated in TensorRT 8.5. Use NMSPluginCreator::createPlugin() to create an instance of "NMS_TRT"
- * version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createNMSPlugin(@ByVal DetectionOutputParameters param);
-
- /**
- * \brief The Reorg plugin reshapes input of shape CxHxW into a (C*stride*stride)x(H/stride)x(W/stride) shape, used
- * in YOLOv2. It does that by taking 1 x stride x stride slices from tensor and flattening them into
- * (stride x stride) x 1 x 1 shape. Registered plugin type "Reorg_TRT". Registered plugin version "1".
- * @param stride Strides in H and W, it should divide both H and W. Also stride * stride should be less than or
- * equal to C.
- *
- * @deprecated Deprecated in TensorRT 8.5. Use ReorgPluginCreator::createPlugin() to create an instance of
- * "Reorg_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createReorgPlugin(int stride);
-
- /**
- * \brief The Region plugin layer performs region proposal calculation: generate 5 bounding boxes per cell (for
- * yolo9000, generate 3 bounding boxes per cell). For each box, calculating its probablities of objects detections
- * from 80 pre-defined classifications (yolo9000 has 9416 pre-defined classifications, and these 9416 items are
- * organized as work-tree structure). RegionParameters defines a set of parameters for creating the Region plugin
- * layer. Registered plugin type "Region_TRT". Registered plugin version "1".
- *
- * @deprecated Deprecated in TensorRT 8.5. Use RegionPluginCreator::createPlugin() to create an instance of
- * "Region_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createRegionPlugin(@ByVal RegionParameters params);
-
- /**
- * \brief The BatchedNMS Plugin performs non_max_suppression on the input boxes, per batch, across all classes.
- * It greedily selects a subset of bounding boxes in descending order of
- * score. Prunes away boxes that have a high intersection-over-union (IOU)
- * overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2],
- * where (y1, x1) and (y2, x2) are the coordinates of any
- * diagonal pair of box corners and the coordinates can be provided as normalized
- * (i.e., lying in the interval [0, 1]) or absolute.
- * The plugin expects two inputs.
- * Input0 is expected to be 4-D float boxes tensor of shape [batch_size, num_boxes,
- * q, 4], where q can be either 1 (if shareLocation is true) or num_classes.
- * Input1 is expected to be a 3-D float scores tensor of shape [batch_size, num_boxes, num_classes]
- * representing a single score corresponding to each box.
- * The plugin returns four outputs.
- * num_detections : A [batch_size] int32 tensor indicating the number of valid
- * detections per batch item. Can be less than keepTopK. Only the top num_detections[i] entries in
- * nmsed_boxes[i], nmsed_scores[i] and nmsed_classes[i] are valid.
- * nmsed_boxes : A [batch_size, max_detections, 4] float32 tensor containing
- * the co-ordinates of non-max suppressed boxes.
- * nmsed_scores : A [batch_size, max_detections] float32 tensor containing the
- * scores for the boxes.
- * nmsed_classes : A [batch_size, max_detections] float32 tensor containing the
- * classes for the boxes.
- *
- * Registered plugin type "BatchedNMS_TRT". Registered plugin version "1".
- *
- * The batched NMS plugin can require a lot of workspace due to intermediate buffer usage. To get the
- * estimated workspace size for the plugin for a batch size, use the API {@code plugin->getWorkspaceSize(batchSize)}.
- *
- * @deprecated Deprecated in TensorRT 8.5. Use BatchedNMSPluginCreator::createPlugin() to create an instance of
- * "BatchedNMS_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createBatchedNMSPlugin(@ByVal NMSParameters param);
-
- /**
- * \brief The Split Plugin performs a split operation on the input tensor. It
- * splits the input tensor into several output tensors, each of a length corresponding to output_lengths.
- * The split occurs along the axis specified by axis.
- * @param axis The axis to split on.
- * @param output_lengths The lengths of the output tensors.
- * @param noutput The number of output tensors.
- *
- * @deprecated Deprecated in TensorRT 8.5 along with the "Split" plugin. Use INetworkDefinition::addSlice() to add
- * slice layer(s) as necessary to accomplish the required effect.
- * */
-
-
- //!
- //!
- //!
- public static native @Deprecated IPluginV2 createSplitPlugin(int axis, IntPointer output_lengths, int noutput);
- public static native @Deprecated IPluginV2 createSplitPlugin(int axis, IntBuffer output_lengths, int noutput);
- public static native @Deprecated IPluginV2 createSplitPlugin(int axis, int[] output_lengths, int noutput);
-
- /**
- * \brief The Instance Normalization Plugin computes the instance normalization of an input tensor.
- * The instance normalization is calculated as found in the paper https://arxiv.org/abs/1607.08022.
- * The calculation is y = scale * (x - mean) / sqrt(variance + epsilon) + bias where mean and variance
- * are computed per instance per channel.
- * @param epsilon The epsilon value to use to avoid division by zero.
- * @param scale_weights The input 1-dimensional scale weights of size C to scale.
- * @param bias_weights The input 1-dimensional bias weights of size C to offset.
- *
- * @deprecated Deprecated in TensorRT 8.5. Use InstanceNormalizationPluginCreator::createPlugin() to create an
- * instance of "InstanceNormalization_TRT" version 1 plugin.
- * */
-
-
- //!
- //!
- public static native @Deprecated IPluginV2 createInstanceNormalizationPlugin(
- float epsilon, @ByVal Weights scale_weights, @ByVal Weights bias_weights);
-
/**
* \brief Initialize and register all the existing TensorRT plugins to the Plugin Registry with an optional
* namespace. The plugin library author should ensure that this function name is unique to the library. This
@@ -275,15 +72,20 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin
// Parsed from NvInferPluginUtils.h
/*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
- * property and proprietary rights in and to this material, related
- * documentation and any modifications thereto. Any use, reproduction,
- * disclosure or distribution of this material and related documentation
- * without an express license agreement from NVIDIA CORPORATION or
- * its affiliates is strictly prohibited.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
// #ifndef NV_INFER_PLUGIN_UTILS_H
@@ -302,9 +104,6 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin
* This is the API for the Nvidia provided TensorRT plugin utilities.
* It lists all the parameters utilized by the TensorRT plugins.
* */
-// Targeting ../nvinfer_plugin/Quadruple.java
-
-
// Targeting ../nvinfer_plugin/PriorBoxParameters.java
@@ -317,7 +116,10 @@ public class nvinfer_plugin extends org.bytedeco.tensorrt.presets.nvinfer_plugin
/**
* \enum CodeTypeSSD
+ *
* \brief The type of encoding used for decoding the bounding boxes and loc_data.
+ *
+ * @deprecated Deprecated in TensorRT 10.0. DetectionOutput plugin is deprecated.
* */
@Namespace("nvinfer1::plugin") public enum CodeTypeSSD {
/** Use box corners. */
diff --git a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java
index cf5588c39a7..f1da601b313 100644
--- a/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java
+++ b/tensorrt/src/gen/java/org/bytedeco/tensorrt/global/nvonnxparser.java
@@ -1,4 +1,4 @@
-// Targeted by JavaCPP version 1.5.10: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.tensorrt.global;
@@ -60,6 +60,7 @@ public class nvonnxparser extends org.bytedeco.tensorrt.presets.nvonnxparser {
// #include "NvInfer.h"
// #include