Qualify tensorstore::span use in tensorstore/ and util/
PiperOrigin-RevId: 659678597
Change-Id: I8e060698cc299e51c85bbcb265e3e73bd0e2f06e
laramiel authored and copybara-github committed Aug 5, 2024
1 parent cc9fb66 commit 2401872
Showing 88 changed files with 1,207 additions and 935 deletions.
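The change is mechanical: each unqualified span use becomes tensorstore::span, and the affected BUILD targets gain a //tensorstore/util:span dependency. As a self-contained sketch of the qualified form (the function body is reproduced from the array.cc hunk below; the include list here is an assumption, not copied from the commit):

#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"

namespace tensorstore {
namespace internal_array {

// span is spelled out as tensorstore::span, matching the pattern applied
// throughout this commit.
absl::Status ArrayOriginCastError(tensorstore::span<const Index> shape) {
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "Cannot translate array with shape ", shape, " to have zero origin."));
}

}  // namespace internal_array
}  // namespace tensorstore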
19 changes: 17 additions & 2 deletions tensorstore/BUILD
@@ -85,6 +85,7 @@ tensorstore_cc_test(
"//tensorstore/util:element_pointer",
"//tensorstore/util:iterate",
"//tensorstore/util:result",
"//tensorstore/util:span",
"//tensorstore/util:status_testutil",
"//tensorstore/util:str_cat",
"@com_github_nlohmann_json//:json",
@@ -123,6 +124,7 @@ tensorstore_cc_test(
srcs = ["array_testutil_test.cc"],
deps = [
":array_testutil",
"//tensorstore/util:span",
"@com_google_googletest//:gtest_main",
],
)
@@ -197,12 +199,16 @@ tensorstore_cc_test(
srcs = ["box_test.cc"],
deps = [
":box",
":index",
":index_interval",
":rank",
":static_cast",
"//tensorstore/serialization",
"//tensorstore/serialization:test_util",
"//tensorstore/util:status",
"//tensorstore/util:span",
"//tensorstore/util:status_testutil",
"//tensorstore/util:str_cat",
"@com_google_absl//absl/status",
"@com_google_googletest//:gtest_main",
],
)
@@ -280,6 +286,7 @@ tensorstore_cc_test(
"//tensorstore/serialization:test_util",
"//tensorstore/util:dimension_set",
"//tensorstore/util:division",
"//tensorstore/util:span",
"//tensorstore/util:status",
"//tensorstore/util:status_testutil",
"//tensorstore/util:str_cat",
@@ -763,9 +770,9 @@ tensorstore_cc_test(
":static_cast",
"//tensorstore/util:result",
"//tensorstore/util:span",
"//tensorstore/util:status",
"//tensorstore/util:status_testutil",
"//tensorstore/util:str_cat",
"@com_google_absl//absl/status",
"@com_google_googletest//:gtest_main",
],
)
@@ -802,13 +809,18 @@ tensorstore_cc_test(
size = "small",
srcs = ["strided_layout_test.cc"],
deps = [
":box",
":contiguous_layout",
":index",
":rank",
":static_cast",
":strided_layout",
"//tensorstore/internal:type_traits",
"//tensorstore/util:span",
"//tensorstore/util:status_testutil",
"//tensorstore/util:str_cat",
"@com_google_absl//absl/meta:type_traits",
"@com_google_absl//absl/status",
"@com_google_googletest//:gtest_main",
],
)
@@ -924,12 +936,15 @@ tensorstore_cc_library(
hdrs = ["downsample.h"],
deps = [
":downsample_method",
":index",
":open_mode",
":rank",
":spec",
":tensorstore",
"//tensorstore/driver/downsample",
"//tensorstore/internal:type_traits",
"//tensorstore/util:result",
"//tensorstore/util:span",
],
)

12 changes: 6 additions & 6 deletions tensorstore/array.cc
@@ -157,7 +157,7 @@ std::string DescribeForCast(DataType dtype, DimensionIndex rank) {
StaticCastTraits<DimensionIndex>::Describe(rank));
}

absl::Status ArrayOriginCastError(span<const Index> shape) {
absl::Status ArrayOriginCastError(tensorstore::span<const Index> shape) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot translate array with shape ", shape, " to have zero origin."));
}
@@ -173,7 +173,7 @@ SharedElementPointer<void> AllocateArrayLike(
const auto dimension_order =
internal_iterate::ComputeStridedLayoutDimensionIterationOrder(
constraints, source_layout.shape(),
span({source_layout.byte_strides().data()}));
tensorstore::span({source_layout.byte_strides().data()}));
const DimensionIndex rank = source_layout.rank();
std::fill_n(byte_strides, rank, Index(0));
Index stride = r->size;
@@ -217,7 +217,7 @@ void AppendToString(
} else {
internal_array::PrintArrayDimension(result, array, options, summarize);
}
const span<const Index> origin = array.origin();
const tensorstore::span<const Index> origin = array.origin();
if (std::any_of(origin.begin(), origin.end(),
[](Index x) { return x != 0; })) {
tensorstore::StrAppend(result, " @ ", origin);
@@ -241,9 +241,9 @@ void PrintToOstream(
} // namespace internal_array

namespace internal_array {
void UnbroadcastStridedLayout(StridedLayoutView<> layout,
span<Index> unbroadcast_shape,
span<Index> unbroadcast_byte_strides) {
void UnbroadcastStridedLayout(
StridedLayoutView<> layout, tensorstore::span<Index> unbroadcast_shape,
tensorstore::span<Index> unbroadcast_byte_strides) {
assert(unbroadcast_shape.size() == layout.rank());
assert(unbroadcast_byte_strides.size() == layout.rank());
for (DimensionIndex i = 0; i < layout.rank(); ++i) {
66 changes: 35 additions & 31 deletions tensorstore/array.h
@@ -298,7 +298,7 @@ constexpr inline DimensionIndex SubArrayStaticRank =
RankConstraint::Subtract(Rank, internal::ConstSpanType<Indices>::extent);

/// Returns a reference to the sub-array obtained by subscripting the first
/// `span(indices).size()` dimensions of `array`.
/// `tensorstore::span(indices).size()` dimensions of `array`.
///
/// `SubArray` always returns an array with an unowned data pointer, while
/// `SharedSubArray` returns an array that shares ownership of the data.
@@ -309,9 +309,9 @@ constexpr inline DimensionIndex SubArrayStaticRank =
/// \param indices A `span`-compatible index array. May be specified as a
/// braced list, e.g. ``SubArray(array, {1, 2})`` or
/// ``SharedSubArray(array, {1, 2})``.
/// \dchecks `array.rank() >= span(indices).size()`.
/// \dchecks ``0 <= span(indices)[i] < array.shape()[i]`` for
/// ``0 <= i < span(indices).size()``.
/// \dchecks `array.rank() >= tensorstore::span(indices).size()`.
/// \dchecks ``0 <= tensorstore::span(indices)[i] < array.shape()[i]`` for
/// ``0 <= i < tensorstore::span(indices).size()``.
/// \returns The sub array.
/// \relates Array
template <ContainerKind LayoutCKind = view, typename ElementTag,
@@ -362,31 +362,33 @@ template <ContainerKind LayoutCKind = view, typename ElementTag,
ContainerKind SourceCKind, size_t N>
Array<typename ElementTagTraits<ElementTag>::Element,
SubArrayStaticRank<RankConstraint::FromInlineRank(Rank),
span<const Index, N>>,
tensorstore::span<const Index, N>>,
OriginKind, LayoutCKind>
SubArray(const Array<ElementTag, Rank, OriginKind, SourceCKind>& array,
const Index (&indices)[N]) {
return SubArray<LayoutCKind>(array, span<const Index, N>(indices));
return SubArray<LayoutCKind>(array,
tensorstore::span<const Index, N>(indices));
}

template <ContainerKind LayoutCKind = view, typename Element,
DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind SourceCKind, size_t N>
SharedArray<Element,
SubArrayStaticRank<RankConstraint::FromInlineRank(Rank),
span<const Index, N>>,
tensorstore::span<const Index, N>>,
OriginKind, LayoutCKind>
SharedSubArray(const SharedArray<Element, Rank, OriginKind, SourceCKind>& array,
const Index (&indices)[N]) {
return SharedSubArray<LayoutCKind>(array, span<const Index, N>(indices));
return SharedSubArray<LayoutCKind>(
array, tensorstore::span<const Index, N>(indices));
}

namespace internal_array {
void PrintToOstream(
std::ostream& os,
const ArrayView<const void, dynamic_rank, offset_origin>& array);
std::string DescribeForCast(DataType dtype, DimensionIndex rank);
absl::Status ArrayOriginCastError(span<const Index> shape);
absl::Status ArrayOriginCastError(tensorstore::span<const Index> shape);
} // namespace internal_array

/// Represents a pointer to an in-memory multi-dimensional array with an
@@ -577,8 +579,8 @@ class Array {
Array(SourcePointer element_pointer, const Shape& shape,
ContiguousLayoutOrder order = c_order) {
this->element_pointer() = std::move(element_pointer);
InitializeContiguousLayout(order, this->dtype().size(), span(shape),
&this->layout());
InitializeContiguousLayout(order, this->dtype().size(),
tensorstore::span(shape), &this->layout());
}
template <typename SourcePointer = ElementPointer, DimensionIndex ShapeRank,
std::enable_if_t<
@@ -588,8 +590,8 @@ class Array {
Array(SourcePointer element_pointer, const Index (&shape)[ShapeRank],
ContiguousLayoutOrder order = c_order) {
this->element_pointer() = std::move(element_pointer);
InitializeContiguousLayout(order, this->dtype().size(), span(shape),
&this->layout());
InitializeContiguousLayout(order, this->dtype().size(),
tensorstore::span(shape), &this->layout());
}

/// Constructs an array with a contiguous layout from an implicitly
@@ -749,24 +751,26 @@ class Array {
constexpr RankType rank() const { return storage_.rank(); }

/// Returns the origin vector of size `rank()`.
constexpr span<const Index, static_rank> origin() const {
constexpr tensorstore::span<const Index, static_rank> origin() const {
return storage_.origin();
}
span<MaybeConstOriginIndex, static_rank> origin() {
tensorstore::span<MaybeConstOriginIndex, static_rank> origin() {
return storage_.origin();
}

/// Returns the shape vector of size `rank()`.
constexpr span<const Index, static_rank> shape() const {
constexpr tensorstore::span<const Index, static_rank> shape() const {
return storage_.shape();
}
tensorstore::span<MaybeConstIndex, static_rank> shape() {
return storage_.shape();
}
span<MaybeConstIndex, static_rank> shape() { return storage_.shape(); }

/// Returns the byte strides vector of size `rank()`.
constexpr span<const Index, static_rank> byte_strides() const {
constexpr tensorstore::span<const Index, static_rank> byte_strides() const {
return storage_.byte_strides();
}
span<MaybeConstIndex, static_rank> byte_strides() {
tensorstore::span<MaybeConstIndex, static_rank> byte_strides() {
return storage_.byte_strides();
}

@@ -854,7 +858,7 @@ class Array {
ArrayView<Element, RankConstraint::Subtract(SfinaeR, 1),
array_origin_kind>>
operator[](Index index) const {
return SubArray(*this, span<const Index, 1>(&index, 1));
return SubArray(*this, tensorstore::span<const Index, 1>(&index, 1));
}

/// Returns a reference to the sub-array obtained by subscripting the first
@@ -1297,7 +1301,7 @@ SharedArray<Element, 2> MakeArray(const Element (&array)[N0][N1]) {
/// \id array
template <typename Element, Index N0>
ArrayView<Element, 1, offset_origin> MakeOffsetArrayView(
span<const Index, 1> origin,
tensorstore::span<const Index, 1> origin,
Element (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N0]) {
static constexpr Index shape[] = {N0};
static constexpr Index byte_strides[] = {sizeof(Element)};
@@ -1308,7 +1312,7 @@ ArrayView<Element, 1, offset_origin> MakeOffsetArrayView(
}
template <typename Element, Index N0>
ArrayView<const Element, 1, offset_origin> MakeOffsetArrayView(
span<const Index, 1> origin,
tensorstore::span<const Index, 1> origin,
const Element (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N0]) {
static constexpr Index shape[] = {N0};
static constexpr Index byte_strides[] = {sizeof(Element)};
@@ -1319,7 +1323,7 @@ ArrayView<const Element, 1, offset_origin> MakeOffsetArrayView(
}
template <typename Element, Index N0, Index N1>
ArrayView<Element, 2, offset_origin> MakeOffsetArrayView(
span<const Index, 2> origin,
tensorstore::span<const Index, 2> origin,
Element (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N0][N1]) {
static constexpr Index shape[] = {N0, N1};
static constexpr Index byte_strides[] = {N1 * sizeof(Element),
@@ -1331,7 +1335,7 @@ ArrayView<Element, 2, offset_origin> MakeOffsetArrayView(
}
template <typename Element, Index N0, Index N1>
ArrayView<const Element, 2, offset_origin> MakeOffsetArrayView(
span<const Index, 2> origin,
tensorstore::span<const Index, 2> origin,
const Element (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N0][N1]) {
static constexpr Index shape[] = {N0, N1};
static constexpr Index byte_strides[] = {N1 * sizeof(Element),
@@ -1358,22 +1362,22 @@ ArrayView<const Element, 2, offset_origin> MakeOffsetArrayView(
/// \id array
template <typename Element, Index N0>
SharedArray<Element, 1, offset_origin> MakeOffsetArray(
span<const Index, 1> origin, Element (&array)[N0]) {
tensorstore::span<const Index, 1> origin, Element (&array)[N0]) {
return MakeCopy(MakeOffsetArrayView(origin, array));
}
template <typename Element, Index N0>
SharedArray<Element, 1, offset_origin> MakeOffsetArray(
span<const Index, 1> origin, const Element (&array)[N0]) {
tensorstore::span<const Index, 1> origin, const Element (&array)[N0]) {
return MakeCopy(MakeOffsetArrayView(origin, array));
}
template <typename Element, Index N0, Index N1>
SharedArray<Element, 2, offset_origin> MakeOffsetArray(
span<const Index, 2> origin, Element (&array)[N0][N1]) {
tensorstore::span<const Index, 2> origin, Element (&array)[N0][N1]) {
return MakeCopy(MakeOffsetArrayView(origin, array));
}
template <typename Element, Index N0, Index N1>
SharedArray<Element, 2, offset_origin> MakeOffsetArray(
span<const Index, 2> origin, const Element (&array)[N0][N1]) {
tensorstore::span<const Index, 2> origin, const Element (&array)[N0][N1]) {
return MakeCopy(MakeOffsetArrayView(origin, array));
}

@@ -1552,7 +1556,7 @@ SharedArray<Element, Rank> AllocateArray(
ContiguousLayoutOrder layout_order = ContiguousLayoutOrder::c,
ElementInitialization initialization = default_init,
dtype_t<Element> representation = dtype_v<Element>) {
return AllocateArray<Element, span<const Index, Rank>>(
return AllocateArray<Element, tensorstore::span<const Index, Rank>>(
extents, layout_order, initialization, representation);
}

@@ -1933,11 +1937,11 @@ bool AreArraysIdenticallyEqual(
///
/// EXPECT_THAT(
/// BroadcastArray(MakeArray<int>({1, 2, 3}),
/// span<const Index>({2, 3})),
/// tensorstore::span<const Index>({2, 3})),
/// MakeArray<int>({{1, 2, 3}, {1, 2, 3}}));
///
/// EXPECT_THAT(BroadcastArray(MakeArray<int>({{1}, {2}, {3}}),
/// span<const Index>({3, 2})),
/// tensorstore::span<const Index>({3, 2})),
/// MakeArray<int>({{1, 1}, {2, 2}, {3, 3}}));
///
/// \param source Source array to broadcast.
Expand Down
9 changes: 4 additions & 5 deletions tensorstore/array_nc_test.cc
@@ -21,14 +21,13 @@
namespace {

using ::tensorstore::Index;
using ::tensorstore::span;

void FullIndexing() {
tensorstore::SharedArray<int, 2> x =
tensorstore::MakeArray<int>({{1, 2}, {3, 4}});
static_cast<void>(x(0, 1));
static_cast<void>(x({0, 1}));
static_cast<void>(x(span<const Index, 2>({0, 1})));
static_cast<void>(x(tensorstore::span<const Index, 2>({0, 1})));

EXPECT_NON_COMPILE("double", x({1.1, 2.2}));
EXPECT_NON_COMPILE("no matching function", x());
@@ -38,7 +37,7 @@ void FullIndexing() {
EXPECT_NON_COMPILE("template argument", x({}));
EXPECT_NON_COMPILE("RankConstraint::EqualOrUnspecified", x({1, 2, 3}));
EXPECT_NON_COMPILE("IsCompatibleFullIndexVector",
x(span<const Index, 3>({1, 2, 3})));
x(tensorstore::span<const Index, 3>({1, 2, 3})));
}

void PartialIndexing() {
@@ -47,12 +46,12 @@ void PartialIndexing() {
static_cast<void>(x[0]);
static_cast<void>(x[0][1]);
static_cast<void>(x[{0, 1}]);
static_cast<void>(x[span<const Index, 2>({0, 1})]);
static_cast<void>(x[tensorstore::span<const Index, 2>({0, 1})]);

EXPECT_NON_COMPILE("GreaterOrUnspecified", x[0][0][0]);
EXPECT_NON_COMPILE("no viable overloaded operator\\[\\]", x[{0, 0, 0}]);
EXPECT_NON_COMPILE("no viable overloaded operator\\[\\]",
x[span<const Index, 3>({0, 0, 0})]);
x[tensorstore::span<const Index, 3>({0, 0, 0})]);
}

} // namespace
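The positive cases in this test correspond to ordinary indexing code along the following lines (a hedged sketch using the same array shape as the test; only the negative cases above are compile-failure checks):

#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"

void IndexingExample() {
  tensorstore::SharedArray<int, 2> x =
      tensorstore::MakeArray<int>({{1, 2}, {3, 4}});
  const tensorstore::Index indices[] = {0, 1};
  // Full indexing with an explicitly qualified span: the element at (0, 1).
  int value = x(tensorstore::span<const tensorstore::Index, 2>(indices));
  // Partial indexing: a single index yields a rank-1 view of row 0.
  auto row = x[tensorstore::span<const tensorstore::Index, 1>(indices, 1)];
  static_cast<void>(value);
  static_cast<void>(row);
}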