Skip to content

Commit

Permalink
Add ONNX LayerNormalization(17) (#12978)
Browse files Browse the repository at this point in the history
**Description**: LayerNormalization is now part of the ONNX spec as of
opset 17.
We had a LayerNormalization contrib op, which (incorrectly) was
registered in the ONNX domain. Use that implementation for the ONNX
operator.

Update skip_layer_norm_fusion.cc. There are other optimizers that use
LayerNormalization that need updates as well.

**Motivation and Context**
#12916
  • Loading branch information
skottmckay authored Sep 22, 2022
1 parent 952c993 commit 394c249
Show file tree
Hide file tree
Showing 12 changed files with 247 additions and 158 deletions.
9 changes: 5 additions & 4 deletions onnxruntime/contrib_ops/cpu/cpu_contrib_kernels.cc
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,9 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomai
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, AveragePool);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, GlobalAveragePool);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, Upsample);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, LayerNormalization);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, LayerNormalization);
// LayerNormalization is now in the ONNX spec. As the contrib op (incorrectly) used kOnnxDomain we need to version it
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 16, float, LayerNormalization);
class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 16, double, LayerNormalization);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, SimplifiedLayerNormalization);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, SimplifiedLayerNormalization);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, float, SkipLayerNormalization);
Expand Down Expand Up @@ -229,8 +230,8 @@ Status RegisterCpuContribKernels(KernelRegistry& kernel_registry) {
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, ScaledTanh)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 9, ThresholdedRelu)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, Scale)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, LayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, LayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 16, float, LayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 16, double, LayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, SimplifiedLayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, SimplifiedLayerNormalization)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, float, SkipLayerNormalization)>,
Expand Down
146 changes: 18 additions & 128 deletions onnxruntime/contrib_ops/cpu/layer_norm.cc
Original file line number Diff line number Diff line change
@@ -1,140 +1,30 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

// LayerNorm was a contrib op but is now part of the ONNX spec
#include "layer_norm.h"

#include "core/common/safeint.h"
#include "core/framework/tensor.h"
#include "core/platform/threadpool.h"
#include "core/providers/common.h"
#include "core/util/math_cpuonly.h"

namespace onnxruntime {
namespace contrib {

// Registers the (pre-opset-17) contrib kernels for LayerNormalization and
// SimplifiedLayerNormalization for element type T. Both were registered in
// kOnnxDomain even though they were contrib ops. All three type constraints
// ('T', 'U', 'V') are tied to the same tensor type T here.
// NOTE: no comments inside the macro body — the '\' continuations would be
// swallowed by a '//' comment.
#define REGISTER_KERNEL_TYPED(T) \
ONNX_OPERATOR_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, T, kCpuExecutionProvider, \
KernelDefBuilder() \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("U", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()), \
LayerNorm<T, false>); \
ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T, kCpuExecutionProvider, \
KernelDefBuilder() \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("U", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()), \
LayerNorm<T, true>);

// Instantiate kernels for the two supported element types.
REGISTER_KERNEL_TYPED(float)
REGISTER_KERNEL_TYPED(double)

// Constructor: reads the required 'axis' and 'epsilon' attributes from the
// node. ORT_ENFORCE throws during kernel creation if either attribute is
// missing, so a constructed kernel always has valid axis_/epsilon_ values.
template <typename T, bool simplified>
LayerNorm<T, simplified>::LayerNorm(const OpKernelInfo& op_kernel_info)
: OpKernel(op_kernel_info) {
ORT_ENFORCE(op_kernel_info.GetAttr("axis", &axis_).IsOK());
ORT_ENFORCE(op_kernel_info.GetAttr<float>("epsilon", &epsilon_).IsOK());
}

// Computes layer normalization over the trailing dimensions starting at
// 'axis'. When 'simplified' is true the mean is not subtracted (RMSNorm-style
// normalization) and no Mean output is produced.
//
// Inputs:  0: X (data), 1: scale, 2: bias (optional; ignored when simplified).
// Outputs: 0: Y (same shape as X),
//          non-simplified: 1: Mean (optional), 2: InvStdDev (optional);
//          simplified:     1: InvStdDev (optional).
template <typename T, bool simplified>
Status LayerNorm<T, simplified>::Compute(OpKernelContext* p_ctx) const {
// Inputs
const Tensor* X = p_ctx->Input<Tensor>(0);
const Tensor* scale = p_ctx->Input<Tensor>(1);
const Tensor* bias = p_ctx->Input<Tensor>(2);
auto X_data = X->Data<T>();
auto scale_data = scale->Data<T>();
// Simplified mode never applies bias, even if one was provided.
auto bias_data = (simplified || nullptr == bias) ? nullptr : bias->Data<T>();

const TensorShape& x_shape = X->Shape();
// 'axis' attribute may be negative; normalize it against the input rank.
const int64_t axis = HandleNegativeAxis(axis_, x_shape.NumDimensions());
// norm_count: number of independent rows to normalize (product of dims before axis).
// norm_size:  elements per row (product of dims from axis onward).
auto norm_count = x_shape.SizeToDimension(axis);
auto norm_size = x_shape.SizeFromDimension(axis);

Tensor* Y = p_ctx->Output(0, x_shape);
auto Y_data = Y->MutableData<T>();

// Mean/InvStdDev output shape: leading dims kept, dims >= axis collapsed to 1,
// so each output holds one statistic per normalized row.
std::vector<int64_t> mean_inv_std_dev_dim;
mean_inv_std_dev_dim.reserve(x_shape.NumDimensions());
for (int i = 0; i < static_cast<int>(x_shape.NumDimensions()); ++i) {
if (i < axis) {
mean_inv_std_dev_dim.emplace_back(x_shape.GetDims()[i]);
} else {
mean_inv_std_dev_dim.emplace_back(1);
}
}

AllocatorPtr alloc;
ORT_RETURN_IF_ERROR(p_ctx->GetTempSpaceAllocator(&alloc));

T* mean_data = nullptr;
BufferUniquePtr mean_data_buf_ptr;

int output_index = 1;

// Non-simplified mode always produces the mean per row; if the caller did not
// request the Mean output, stage it in a temporary buffer instead so the
// compute loop can write unconditionally.
if (!simplified) {
Tensor* mean = p_ctx->Output(output_index++, TensorShape(mean_inv_std_dev_dim));
if (mean != nullptr) {
mean_data = mean->MutableData<T>();
} else {
auto mean_data_buf = alloc->Alloc(SafeInt<size_t>(sizeof(T)) * norm_count);
mean_data_buf_ptr = BufferUniquePtr(mean_data_buf, BufferDeleter(alloc));
mean_data = static_cast<T*>(mean_data_buf_ptr.get());
}
}

T* inv_std_dev_data = nullptr;
BufferUniquePtr inv_std_dev_data_buf_ptr;

// Same staging approach for InvStdDev: the loop stores it unconditionally.
Tensor* inv_std_dev = p_ctx->Output(output_index, TensorShape(mean_inv_std_dev_dim));
if (inv_std_dev != nullptr) {
inv_std_dev_data = inv_std_dev->MutableData<T>();
} else {
auto inv_std_dev_data_buf = alloc->Alloc(SafeInt<size_t>(sizeof(T)) * norm_count);
inv_std_dev_data_buf_ptr = BufferUniquePtr(inv_std_dev_data_buf, BufferDeleter(alloc));
inv_std_dev_data = static_cast<T*>(inv_std_dev_data_buf_ptr.get());
}

// Each task normalizes one row of norm_size elements independently.
concurrency::ThreadPool::TryBatchParallelFor(
p_ctx->GetOperatorThreadPool(), static_cast<int32_t>(norm_count),
[&](ptrdiff_t task_idx) {
const T* p_input = X_data + task_idx * norm_size;
T* p_output = Y_data + task_idx * norm_size;

T mean = 0;
T mean_square = 0;

// Single pass accumulating sum(x) and sum(x^2).
for (int64_t h = 0; h < norm_size; h++) {
mean += p_input[h];
mean_square += p_input[h] * p_input[h];
}

// After this, 'mean_square' actually holds the standard deviation
// (plus epsilon inside the sqrt), not the mean of squares.
// NOTE(review): the single-pass E[x^2] - mean^2 form can lose precision
// (and the difference can go slightly negative) for large-magnitude
// inputs — acceptable here but worth noting.
mean = mean / norm_size;
if (simplified) {
mean_square = sqrt(mean_square / norm_size + epsilon_);
} else {
mean_square = sqrt(mean_square / norm_size - mean * mean + epsilon_);
}

// Normalize, scale, and (optionally) add bias.
for (int64_t h = 0; h < norm_size; h++) {
if (simplified) {
p_output[h] = p_input[h] / mean_square * scale_data[h];
} else if (nullptr == bias) {
p_output[h] = (p_input[h] - mean) / mean_square * scale_data[h];
} else {
p_output[h] = (p_input[h] - mean) / mean_square * scale_data[h] + bias_data[h];
}
}

// mean_data is null only in simplified mode (no Mean output there).
if (mean_data != nullptr) {
mean_data[task_idx] = mean;
}
inv_std_dev_data[task_idx] = 1 / mean_square;
},
0);

return Status::OK();
}
// original LayerNormalization contrib op (incorrectly using onnx domain though)
// LayerNormalization is registered as a VERSIONED kernel capped at opset 16:
// from opset 17 onward the official ONNX-spec kernel takes over.
// SimplifiedLayerNormalization is still contrib-only, so it stays unversioned.
#define REGISTER_CONTRIB_KERNELS(T) \
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX(LayerNormalization, kOnnxDomain, 1, 16, T, kCpuExecutionProvider, \
KernelDefBuilder() \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("U", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()), \
LayerNorm<false>); \
ONNX_OPERATOR_TYPED_KERNEL_EX(SimplifiedLayerNormalization, kOnnxDomain, 1, T, kCpuExecutionProvider, \
KernelDefBuilder() \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("U", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("V", DataTypeImpl::GetTensorType<T>()), \
LayerNorm<true>);

// Contrib kernels handled both float and double.
REGISTER_CONTRIB_KERNELS(float)
REGISTER_CONTRIB_KERNELS(double)

} // namespace contrib
} // namespace onnxruntime
16 changes: 5 additions & 11 deletions onnxruntime/contrib_ops/cpu/layer_norm.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,22 +3,16 @@

#pragma once

#include "core/common/common.h"
#include "core/framework/op_kernel.h"
#include "core/framework/tensor.h"
#include "core/providers/cpu/nn/layer_norm_impl.h"

namespace onnxruntime {
namespace contrib {

template <typename T, bool simplified>
class LayerNorm final : public OpKernel {
template <bool simplified>
class LayerNorm final : public LayerNormImpl {
public:
LayerNorm(const OpKernelInfo& op_kernel_info);
Status Compute(OpKernelContext* p_op_kernel_context) const override;

private:
int64_t axis_;
float epsilon_;
LayerNorm(const OpKernelInfo& op_kernel_info)
: LayerNormImpl(op_kernel_info, simplified) {}
};

} // namespace contrib
Expand Down
8 changes: 4 additions & 4 deletions onnxruntime/core/optimizer/attention_fusion.cc
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ Status AttentionFusion::ApplyImpl(Graph& graph, bool& modified, int graph_level,
ORT_RETURN_IF_ERROR(Recurse(node, modified, graph_level, logger));

if ((node.GetOutputEdgesCount() >= 2 && node.GetOutputEdgesCount() <= 6) && // Add node.GetOutputEdgesCount() == 5/6 for distilbert
graph_utils::IsSupportedOptypeVersionAndDomain(node, "LayerNormalization", {1}, kOnnxDomain) &&
graph_utils::IsSupportedOptypeVersionAndDomain(node, "LayerNormalization", {1, 17}, kOnnxDomain) &&
graph_utils::IsSupportedProvider(node, GetCompatibleExecutionProviders())) {
// Get hidden size from layer norm bias tensor shape.
const NodeArg& layer_norm_bias = *(node.InputDefs()[2]);
Expand Down Expand Up @@ -316,7 +316,7 @@ static bool FuseSubGraphQKImpl(Node& layer_norm,
{0, 0, "Reshape", {5, 13}, kOnnxDomain},
{0, 0, "Add", {7, 13}, kOnnxDomain},
{0, 0, "MatMul", {1, 9, 13}, kOnnxDomain},
{0, 0, "LayerNormalization", {1}, kOnnxDomain}};
{0, 0, "LayerNormalization", {1, 17}, kOnnxDomain}};

if (!graph_utils::FindPath(pivot_nodes[0].get(), true, k_path, edges, logger)) {
DEBUG_LOG("Failed to find path for k");
Expand Down Expand Up @@ -624,7 +624,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer
{0, 0, "Reshape", {5, 13}, kOnnxDomain},
{0, 0, "Add", {7, 13}, kOnnxDomain},
{0, 0, "MatMul", {1, 9, 13}, kOnnxDomain},
{0, 0, "LayerNormalization", {1}, kOnnxDomain}};
{0, 0, "LayerNormalization", {1, 17}, kOnnxDomain}};

std::vector<const Node::EdgeEnd*> edges;
if (!graph_utils::FindPath(add_after_layer_norm, true, parent_path, edges, logger)) {
Expand Down Expand Up @@ -669,7 +669,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer
return false;
}

//store parent path
// store parent path
std::vector<std::reference_wrapper<const Node>> parent_path_nodes{reshape, transpose, qkv_matmul, v_transpose, v_reshape, v_add, v_matmul};

// Find mask nodes: Unsqueeze -> Unsqueeze -> (Cast) -> Sub -> Mul -> Add -> Softmax --> [MatMul]
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/optimizer/embed_layer_norm_fusion.cc
Original file line number Diff line number Diff line change
Expand Up @@ -819,7 +819,7 @@ Status EmbedLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_l

Node& layer_norm_node = *p_layer_norm;
ORT_RETURN_IF_ERROR(Recurse(layer_norm_node, modified, graph_level, logger));
if (!graph_utils::IsSupportedOptypeVersionAndDomain(layer_norm_node, "LayerNormalization", {1}, kOnnxDomain) ||
if (!graph_utils::IsSupportedOptypeVersionAndDomain(layer_norm_node, "LayerNormalization", {1, 17}, kOnnxDomain) ||
!graph_utils::IsSupportedProvider(layer_norm_node, GetCompatibleExecutionProviders())) {
continue;
}
Expand All @@ -843,7 +843,7 @@ Status EmbedLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_l
Node& layer_norm_add_node = *graph.GetNode(edges[0]->GetNode().Index());

if (IsNeighborNodeExpectedTypes(layer_norm_add_node.InputEdgesBegin(), layer_norm_add_node.InputNodesEnd(), {"Gather", "Gather"})) {
//DistilBert
// DistilBert
if (FuseSubGraphDistilBert(graph, layer_norm_add_node, layer_norm_node, logger)) {
modified = true;
}
Expand Down
12 changes: 6 additions & 6 deletions onnxruntime/core/optimizer/skip_layer_norm_fusion.cc
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ Status SkipLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le
Node& ln_node = *p_layernorm;
ORT_RETURN_IF_ERROR(Recurse(ln_node, modified, graph_level, logger));

if (!graph_utils::IsSupportedOptypeVersionAndDomain(ln_node, "LayerNormalization", {1}) ||
if (!graph_utils::IsSupportedOptypeVersionAndDomain(ln_node, "LayerNormalization", {1, 17}) ||
!graph_utils::IsSupportedProvider(ln_node, GetCompatibleExecutionProviders()) ||
!IsSupportedDataType(ln_node)) {
continue;
Expand All @@ -163,8 +163,8 @@ Status SkipLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le

// Format 1
std::vector<graph_utils::EdgeEndToMatch> format1_parent_path{
{0, 0, "Add", {7, 13}, kOnnxDomain},
{0, 0, "Add", {7, 13}, kOnnxDomain}};
{0, 0, "Add", {7, 13, 14}, kOnnxDomain},
{0, 0, "Add", {7, 13, 14}, kOnnxDomain}};

std::vector<const Node::EdgeEnd*> edges;
if (graph_utils::FindPath(ln_node, true, format1_parent_path, edges, logger)) {
Expand All @@ -182,8 +182,8 @@ Status SkipLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le
if (matched_format == Format::None) {
// Format 2
std::vector<graph_utils::EdgeEndToMatch> format2_parent_path{
{0, 0, "Add", {7, 13}, kOnnxDomain},
{0, 1, "Add", {7, 13}, kOnnxDomain}};
{0, 0, "Add", {7, 13, 14}, kOnnxDomain},
{0, 1, "Add", {7, 13, 14}, kOnnxDomain}};

if (graph_utils::FindPath(ln_node, true, format2_parent_path, edges, logger)) {
p_add1 = const_cast<Node*>(&edges[0]->GetNode());
Expand All @@ -201,7 +201,7 @@ Status SkipLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le
if (matched_format == Format::None) {
// Format 3
std::vector<graph_utils::EdgeEndToMatch> format3_parent_path{
{0, 0, "Add", {7, 13}, kOnnxDomain}};
{0, 0, "Add", {7, 13, 14}, kOnnxDomain}};

if (graph_utils::FindPath(ln_node, true, format3_parent_path, edges, logger)) {
p_add1 = const_cast<Node*>(&edges[0]->GetNode());
Expand Down
5 changes: 4 additions & 1 deletion onnxruntime/core/providers/cpu/cpu_execution_provider.cc
Original file line number Diff line number Diff line change
Expand Up @@ -786,6 +786,7 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, Ha
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, HannWindow);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, MelWeightMatrix);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, STFT);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, float, LayerNormalization);

// !!PLEASE READ BELOW!! Following that, add new entries above this comment

Expand Down Expand Up @@ -1989,12 +1990,14 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
LessOrEqual)>,

// Opset 17
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, DFT)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, BlackmanWindow)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, DFT)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, HammingWindow)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, HannWindow)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, MelWeightMatrix)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, STFT)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 17, float,
LayerNormalization)>,
};

for (auto& function_table_entry : function_table) {
Expand Down
22 changes: 22 additions & 0 deletions onnxruntime/core/providers/cpu/nn/layer_norm.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "layer_norm.h"

#include "core/providers/common.h"

namespace onnxruntime {
// official onnx operator registration. originally LayerNormalization was a contrib op.
// Only 2 type constraints (values using 'T' and 'U' in the contrib op all use 'T' in the ONNX spec)
// 'U' covers the optional Mean/InvStdDev stat outputs; here it is pinned to the
// same tensor type as 'T'.
#define REGISTER_ONNX_KERNEL_TYPED(T) \
ONNX_CPU_OPERATOR_TYPED_KERNEL(LayerNormalization, 17, T, \
KernelDefBuilder() \
.TypeConstraint("T", DataTypeImpl::GetTensorType<T>()) \
.TypeConstraint("U", DataTypeImpl::GetTensorType<T>()), \
LayerNorm);

// ONNX LayerNorm doesn't support 'double' for Mean/InvStdDev so we can only register a version with float
// with our current implementation which originally handled 'double' and 'float' for the contrib op.
REGISTER_ONNX_KERNEL_TYPED(float)

} // namespace onnxruntime
16 changes: 16 additions & 0 deletions onnxruntime/core/providers/cpu/nn/layer_norm.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "layer_norm_impl.h"

namespace onnxruntime {

// Kernel for the official ONNX LayerNormalization operator (opset 17).
// A thin adapter over the shared LayerNormImpl, fixed to the full
// (non-simplified) variant, i.e. the mean IS subtracted before normalizing.
class LayerNorm final : public LayerNormImpl {
 public:
  explicit LayerNorm(const OpKernelInfo& info) : LayerNormImpl(info, /*simplified*/ false) {}
};

} // namespace onnxruntime
Loading

0 comments on commit 394c249

Please sign in to comment.