
# Is device_guard: False valid?

This comes up because we are trying to add caching of the device (see https://github.com/pytorch/pytorch/pull/18751).

Previously, it was valid not to worry about the device and to do something like:

```
x = THCTensor_new(...)
y.set_(x)
```

and this would automatically change the device of `y`. But this doesn't work (without virtual functions) because of the lack of the Variable/Tensor merge.
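To make the caching hazard concrete, here is a minimal, self-contained sketch (plain Python, not PyTorch's actual `TensorImpl`; the names `Storage`, `TensorImpl`, and `set_storage` are illustrative only) of why any op that can swap storages must refresh a cached device:

```python
class Storage:
    """Stand-in for a real storage living on some device."""
    def __init__(self, device):
        self.device = device


class TensorImpl:
    """Stand-in for a tensor impl that caches its storage's device."""
    def __init__(self, storage):
        self.storage = storage
        self._cached_device = storage.device  # the cache being added

    def device(self):
        # Fast path: answer without dereferencing the storage.
        return self._cached_device

    def set_storage(self, storage):
        self.storage = storage
        # Any op that can swap storages (the set_ family below) must
        # refresh the cache, or device() starts returning stale data.
        self._cached_device = storage.device


t = TensorImpl(Storage("cuda:0"))
t.set_storage(Storage("cuda:1"))
assert t.device() == "cuda:1"  # holds only because set_storage refreshes
```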

## Reading properties

| Function | Valid? |
|---|---|
| `cudnn_is_acceptable(Tensor self) -> bool` | Y |
| `is_distributed(Tensor self) -> bool` | Y |
| `is_floating_point(Tensor self) -> bool` | Y |
| `is_complex(Tensor self) -> bool` | Y |
| `is_nonzero(Tensor self) -> bool` | Y |
| `is_same_size(Tensor self, Tensor other) -> bool` | Y |
| `is_signed(Tensor self) -> bool` | Y |
| `size(Tensor self, int dim) -> int` | Y |
| `stride(Tensor self, int dim) -> int` | Y |
| `sparse_dim(Tensor self) -> int` | Y |
| `_dimI(Tensor self) -> int` | Y |
| `dense_dim(Tensor self) -> int` | Y |
| `_dimV(Tensor self) -> int` | Y |
| `_nnz(Tensor self) -> int` | Y |
| `is_coalesced(Tensor self) -> bool` | Y |
| `numel(Tensor self) -> int` | Y |
| `is_set_to(Tensor self, Tensor tensor) -> bool` | Y |
| `data_ptr(Tensor self) -> void*` | Y |
| `_indices(Tensor(a) self) -> Tensor(a)` | Y |
| `_values(Tensor(a) self) -> Tensor(a)` | Y |
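Most entries in this table answer a question from host-side metadata (sizes, strides, dtype, sparse counts) rather than launching work on the tensor's device, which is consistent with every row being marked Y. A quick illustration using the public API:

```python
import torch

# Each of these is answered from host-side metadata; no kernel needs to
# run on the tensor's device, so no device guard is required.
x = torch.zeros(2, 3)
print(x.is_floating_point())  # True
print(x.numel())              # 6
print(x.size(1))              # 3
print(x.stride(0))            # 3
print(x.data_ptr())           # raw address; again, metadata only
```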

## Inplace

| Function | Valid? | Explanation |
|---|---|---|
| `as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!)` | Y | only sets sizes and strides |
| `resize_(Tensor(a!) self, int[] size) -> Tensor(a!)` | | |
| `squeeze_(Tensor(a!) self) -> Tensor(a!)` | Y | only calls `as_strided_` |
| `squeeze_(Tensor(a!) self, int dim) -> Tensor(a!)` | Y | only calls `as_strided_` |
| `unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)` | Y | only calls `as_strided_` |
| `transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)` | Y | only adjusts sizes and strides |
| `set_(Tensor(a!) self, Storage source) -> Tensor(a!)` | Y | we check the device |
| `set_(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!)` | Y | we check the device |
| `set_(Tensor(a!) self, Tensor source) -> Tensor(a!)` | Y | we check the device |
| `set_(Tensor(a!) self) -> Tensor(a!)` | N | |
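The "only sets sizes and strides" rationale can be checked from Python: these in-place ops rewrite view metadata without touching the underlying storage, so the tensor's device cannot change. A small demonstration:

```python
import torch

x = torch.arange(6).reshape(2, 3)
ptr = x.data_ptr()

x.transpose_(0, 1)   # shape (3, 2): swaps size/stride entries
x.unsqueeze_(0)      # shape (1, 3, 2): inserts a size-1 dim
x.squeeze_()         # shape (3, 2): drops the size-1 dim again

# Same storage throughout; only sizes and strides were rewritten.
assert x.data_ptr() == ptr
```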

## Other

| Function | Valid? | Explanation |
|---|---|---|
| `as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a)` | | |
| `broadcast_tensors(Tensor[] tensors) -> Tensor[]` | | |
| `chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[]` | | |
| `empty(int[] size, *, Tensor(a!) out) -> Tensor(a!)` | | |
| `empty_like(Tensor self) -> Tensor` | | |
| `empty_like(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory) -> Tensor` | | |
| `expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a)` | | |
| `expand_as(Tensor self, Tensor other) -> Tensor` | | |
| `isnan(Tensor self) -> Tensor` | | |
| `narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)` | | |
| `reshape(Tensor self, int[] shape) -> Tensor` | | |
| `reshape_as(Tensor self, Tensor other) -> Tensor` | | |
| `select(Tensor(a) self, int dim, int index) -> Tensor(a)` | | |
| `slice(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a)` | | |
| `split(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[]` | | |
| `split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]` | | |
| `squeeze(Tensor(a) self) -> Tensor(a)` | | |
| `squeeze(Tensor(a) self, int dim) -> Tensor(a)` | | |
| `sum_to_size(Tensor self, int[] size) -> Tensor` | | |
| `t(Tensor(a) self) -> Tensor(a)` | | |
| `t_(Tensor(a!) self) -> Tensor(a!)` | | |
| `transpose(Tensor(a) self, int dim0, int dim1) -> Tensor(a)` | | |
| `unsqueeze(Tensor(a) self, int dim) -> Tensor(a)` | | |
| `view_as(Tensor self, Tensor other) -> Tensor` | | |
| `coalesced(Tensor(a!) self, bool coalesced) -> Tensor(a!)` | | |
| `indices(Tensor(a) self) -> Tensor(a)` | | |
| `values(Tensor(a) self) -> Tensor(a)` | | |
| `to(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory, bool non_blocking=False, bool copy=False) -> Tensor` | | |
| `to(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor` | | |
| `to(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor` | | |
| `to(Tensor self, Tensor other, bool non_blocking=False, bool copy=False) -> Tensor` | | |
| `view(Tensor(a) self, int[] size) -> Tensor(a)` | | |
| `unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)` | | |
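The Valid? column above is still unfilled, but many of these entries are view operations whose result aliases `self`'s storage and therefore inherits its device (the `to` overloads are the obvious exception, since they can move data across devices). A quick check of the aliasing behavior from the public API:

```python
import torch

x = torch.arange(12).reshape(3, 4)

v = x.view(4, 3)       # view: shares x's storage
n = x.narrow(0, 1, 2)  # view with a storage offset
s = x.squeeze()        # view: no size-1 dims here, so same shape

assert v.data_ptr() == x.data_ptr()
assert n.device == x.device and s.device == x.device
```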