Scalar native_functions Notes
(Wiki page from a fork of pytorch/pytorch, 1 fork; last edited by gchanan, Aug 15, 2019 · 1 revision.)
Cases:
- scaling factor
- func: addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: s_native_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: _sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
- func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
- func: sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: _addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: _addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
- func: addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
- func: addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
- factory functions
- func: arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
- func: linspace(Scalar start, Scalar end, int steps=100, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: logspace(Scalar start, Scalar end, int steps=100, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- fill value
- func: full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
- func: fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
- func: fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
- func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
- func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
- func: _pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)
- func: constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
- arithmetic ops
- func: add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- func: _sparse_add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: _sparse_dense_add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: _sparse_div_scalar.out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: _sparse_mul_scalar.out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
- func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
- func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
- func: sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- func: sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
- func: rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
- func: rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
- func: mul.Scalar(Tensor self, Scalar other) -> Tensor
- func: div.Scalar(Tensor self, Scalar other) -> Tensor
- comparison ops
- func: ne.Scalar(Tensor self, Scalar other) -> Tensor
- func: eq.Scalar(Tensor self, Scalar other) -> Tensor
- func: ge.Scalar(Tensor self, Scalar other) -> Tensor
- func: le.Scalar(Tensor self, Scalar other) -> Tensor
- func: gt.Scalar(Tensor self, Scalar other) -> Tensor
- func: lt.Scalar(Tensor self, Scalar other) -> Tensor
- func: lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- bitwise ops
- func: and.Scalar(Tensor self, Scalar other) -> Tensor
- func: iand.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- func: or.Scalar(Tensor self, Scalar other) -> Tensor
- func: ior.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- func: xor.Scalar(Tensor self, Scalar other) -> Tensor
- func: ixor.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- func: lshift.Scalar(Tensor self, Scalar other) -> Tensor
- func: ilshift.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- func: rshift.Scalar(Tensor self, Scalar other) -> Tensor
- func: irshift.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- a Scalar overload alongside a Tensor overload
- func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)
- func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor
- func: lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
- func: fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- func: remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
- Scalar-only parameters (no corresponding Tensor overload exists)
- func: clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
- func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
- func: hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
- func: celu(Tensor self, Scalar alpha=1.0) -> Tensor
- func: threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
- func: norm.Scalar(Tensor self, Scalar p=2) -> Tensor
- func: renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
- func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor
- func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor
- func: histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor
- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor