[fbsync] Rename cpu_and_gpu into cpu_and_cuda (#7687)
Reviewed By: vmoens

Differential Revision: D47186571

fbshipit-source-id: ac83e5a5f341883eaea1b42a7cb5958c028ed8be
NicolasHug authored and facebook-github-bot committed Jul 3, 2023
1 parent c609677 commit 7e988b5
Showing 9 changed files with 135 additions and 135 deletions.
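The change itself is mechanical: every use of the old helper name is replaced by the new one across the test suite. A rename of this shape can be scripted; a hedged sketch follows (the directory glob and in-place rewrite are assumptions for illustration, not how this commit was necessarily produced):

import pathlib

# Rewrite every occurrence of the old helper name in the test files.
for path in pathlib.Path("test").glob("*.py"):
    text = path.read_text()
    if "cpu_and_gpu" in text:
        path.write_text(text.replace("cpu_and_gpu", "cpu_and_cuda"))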
2 changes: 1 addition & 1 deletion test/common_utils.py
@@ -122,7 +122,7 @@ def disable_console_output():
yield


-def cpu_and_gpu():
+def cpu_and_cuda():
import pytest # noqa

return ("cpu", pytest.param("cuda", marks=pytest.mark.needs_cuda))
2 changes: 1 addition & 1 deletion test/conftest.py
@@ -33,7 +33,7 @@ def pytest_collection_modifyitems(items):
# The needs_cuda mark will exist if the test was explicitly decorated with
# the @needs_cuda decorator. It will also exist if it was parametrized with a
# parameter that has the mark: for example if a test is parametrized with
-# @pytest.mark.parametrize('device', cpu_and_gpu())
+# @pytest.mark.parametrize('device', cpu_and_cuda())
# the "instances" of the tests where device == 'cuda' will have the 'needs_cuda' mark,
# and the ones with device == 'cpu' won't have the mark.
needs_cuda = item.get_closest_marker("needs_cuda") is not None
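A minimal sketch of what such a collection hook can do with the mark, assuming the common pattern of skipping CUDA-parametrized instances on machines without a GPU (illustrative only; the rest of torchvision's hook is not shown in this diff):

import pytest
import torch

def pytest_collection_modifyitems(items):
    # Illustrative: skip every test instance carrying the needs_cuda mark
    # when CUDA is unavailable on the current machine.
    if torch.cuda.is_available():
        return
    for item in items:
        if item.get_closest_marker("needs_cuda") is not None:
            item.add_marker(pytest.mark.skip(reason="CUDA not available"))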
90 changes: 45 additions & 45 deletions test/test_functional_tensor.py

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions test/test_models.py
@@ -15,7 +15,7 @@
import torch.fx
import torch.nn as nn
from _utils_internal import get_relative_path
-from common_utils import cpu_and_gpu, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
+from common_utils import cpu_and_cuda, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
from PIL import Image
from torchvision import models, transforms
from torchvision.models import get_model_builder, list_models
@@ -676,14 +676,14 @@ def vitc_b_16(**kwargs: Any):


@pytest.mark.parametrize("model_fn", [vitc_b_16])
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_vitc_models(model_fn, dev):
test_classification_model(model_fn, dev)


@disable_tf32() # see: https://github.com/pytorch/vision/issues/7618
@pytest.mark.parametrize("model_fn", list_model_fns(models))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_classification_model(model_fn, dev):
set_rng_seed(0)
defaults = {
@@ -726,7 +726,7 @@ def test_classification_model(model_fn, dev):


@pytest.mark.parametrize("model_fn", list_model_fns(models.segmentation))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_segmentation_model(model_fn, dev):
set_rng_seed(0)
defaults = {
@@ -791,7 +791,7 @@ def check_out(out):


@pytest.mark.parametrize("model_fn", list_model_fns(models.detection))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_detection_model(model_fn, dev):
set_rng_seed(0)
defaults = {
@@ -923,7 +923,7 @@ def test_detection_model_validation(model_fn):


@pytest.mark.parametrize("model_fn", list_model_fns(models.video))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_video_model(model_fn, dev):
set_rng_seed(0)
# the default input shape is
42 changes: 21 additions & 21 deletions test/test_ops.py
@@ -10,7 +10,7 @@
import torch
import torch.fx
import torch.nn.functional as F
-from common_utils import assert_equal, cpu_and_gpu, needs_cuda
+from common_utils import assert_equal, cpu_and_cuda, needs_cuda
from PIL import Image
from torch import nn, Tensor
from torch.autograd import gradcheck
@@ -97,7 +97,7 @@ def forward(self, imgs: Tensor, boxes: List[Tensor]) -> Tensor:
class RoIOpTester(ABC):
dtype = torch.float64

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, deterministic=False, **kwargs):
x_dtype = self.dtype if x_dtype is None else x_dtype
@@ -126,7 +126,7 @@ def test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, determ
tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5
torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol)

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_is_leaf_node(self, device):
op_obj = self.make_obj(wrap=True).to(device=device)
graph_node_names = get_graph_node_names(op_obj)
@@ -135,7 +135,7 @@ def test_is_leaf_node(self, device):
assert len(graph_node_names[0]) == len(graph_node_names[1])
assert len(graph_node_names[0]) == 1 + op_obj.n_inputs

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_torch_fx_trace(self, device, x_dtype=torch.float, rois_dtype=torch.float):
op_obj = self.make_obj().to(device=device)
graph_module = torch.fx.symbolic_trace(op_obj)
@@ -155,7 +155,7 @@ def test_torch_fx_trace(self, device, x_dtype=torch.float, rois_dtype=torch.floa
torch.testing.assert_close(output_gt, output_fx, rtol=tol, atol=tol)

@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
def test_backward(self, seed, device, contiguous, deterministic=False):
torch.random.manual_seed(seed)
@@ -418,7 +418,7 @@ def test_boxes_shape(self):
self._helper_boxes_shape(ops.roi_align)

@pytest.mark.parametrize("aligned", (True, False))
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
@pytest.mark.parametrize("deterministic", (True, False))
def test_forward(self, device, contiguous, deterministic, aligned, x_dtype=None, rois_dtype=None):
@@ -450,7 +450,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
)

@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
@pytest.mark.parametrize("deterministic", (True, False))
def test_backward(self, seed, device, contiguous, deterministic):
@@ -612,7 +612,7 @@ def test_msroialign_repr(self):
)
assert repr(t) == expected_string

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_is_leaf_node(self, device):
op_obj = self.make_obj(wrap=True).to(device=device)
graph_node_names = get_graph_node_names(op_obj)
@@ -885,7 +885,7 @@ def make_obj(self, in_channels=6, out_channels=2, kernel_size=(3, 2), groups=2,
)
return DeformConvModuleWrapper(obj) if wrap else obj

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_is_leaf_node(self, device):
op_obj = self.make_obj(wrap=True).to(device=device)
graph_node_names = get_graph_node_names(op_obj)
@@ -894,7 +894,7 @@ def test_is_leaf_node(self, device):
assert len(graph_node_names[0]) == len(graph_node_names[1])
assert len(graph_node_names[0]) == 1 + op_obj.n_inputs

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
@pytest.mark.parametrize("batch_sz", (0, 33))
def test_forward(self, device, contiguous, batch_sz, dtype=None):
@@ -946,7 +946,7 @@ def test_wrong_sizes(self):
wrong_mask = torch.rand_like(mask[:, :2])
layer(x, offset, wrong_mask)

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("contiguous", (True, False))
@pytest.mark.parametrize("batch_sz", (0, 33))
def test_backward(self, device, contiguous, batch_sz):
@@ -1411,7 +1411,7 @@ def assert_empty_loss(iou_fn, dtype, device):

class TestGeneralizedBoxIouLoss:
# We refer to original test: https://github.com/facebookresearch/fvcore/blob/main/tests/test_giou_loss.py
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_giou_loss(self, dtype, device):
box1, box2, box3, box4, box1s, box2s = get_boxes(dtype, device)
@@ -1439,15 +1439,15 @@ def test_giou_loss(self, dtype, device):
with pytest.raises(ValueError, match="Invalid"):
ops.generalized_box_iou_loss(box1s, box2s, reduction="xyz")

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_empty_inputs(self, dtype, device):
assert_empty_loss(ops.generalized_box_iou_loss, dtype, device)


class TestCompleteBoxIouLoss:
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
def test_ciou_loss(self, dtype, device):
box1, box2, box3, box4, box1s, box2s = get_boxes(dtype, device)

@@ -1461,14 +1461,14 @@ def test_ciou_loss(self, dtype, device):
with pytest.raises(ValueError, match="Invalid"):
ops.complete_box_iou_loss(box1s, box2s, reduction="xyz")

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_empty_inputs(self, dtype, device):
assert_empty_loss(ops.complete_box_iou_loss, dtype, device)


class TestDistanceBoxIouLoss:
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_distance_iou_loss(self, dtype, device):
box1, box2, box3, box4, box1s, box2s = get_boxes(dtype, device)
@@ -1483,7 +1483,7 @@ def test_distance_iou_loss(self, dtype, device):
with pytest.raises(ValueError, match="Invalid"):
ops.distance_box_iou_loss(box1s, box2s, reduction="xyz")

@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_empty_distance_iou_inputs(self, dtype, device):
assert_empty_loss(ops.distance_box_iou_loss, dtype, device)
@@ -1528,7 +1528,7 @@ def generate_tensor_with_range_type(shape, range_type, **kwargs):

@pytest.mark.parametrize("alpha", [-1.0, 0.0, 0.58, 1.0])
@pytest.mark.parametrize("gamma", [0, 2])
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
@pytest.mark.parametrize("seed", [0, 1])
def test_correct_ratio(self, alpha, gamma, device, dtype, seed):
@@ -1557,7 +1557,7 @@ def test_correct_ratio(self, alpha, gamma, device, dtype, seed):
torch.testing.assert_close(correct_ratio, loss_ratio, atol=tol, rtol=tol)

@pytest.mark.parametrize("reduction", ["mean", "sum"])
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
@pytest.mark.parametrize("seed", [2, 3])
def test_equal_ce_loss(self, reduction, device, dtype, seed):
@@ -1584,7 +1584,7 @@ def test_equal_ce_loss(self, reduction, device, dtype, seed):
@pytest.mark.parametrize("alpha", [-1.0, 0.0, 0.58, 1.0])
@pytest.mark.parametrize("gamma", [0, 2])
@pytest.mark.parametrize("reduction", ["none", "mean", "sum"])
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
@pytest.mark.parametrize("seed", [4, 5])
def test_jit(self, alpha, gamma, reduction, device, dtype, seed):
Expand All @@ -1600,7 +1600,7 @@ def test_jit(self, alpha, gamma, reduction, device, dtype, seed):
torch.testing.assert_close(focal_loss, scripted_focal_loss, rtol=tol, atol=tol)

# Raise ValueError for anonymous reduction mode
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("device", cpu_and_cuda())
@pytest.mark.parametrize("dtype", [torch.float32, torch.half])
def test_reduction_mode(self, device, dtype, reduction="xyz"):
if device == "cpu" and dtype is torch.half:
6 changes: 3 additions & 3 deletions test/test_prototype_models.py
@@ -1,13 +1,13 @@
import pytest
import test_models as TM
import torch
-from common_utils import cpu_and_gpu, set_rng_seed
+from common_utils import cpu_and_cuda, set_rng_seed
from torchvision.prototype import models


@pytest.mark.parametrize("model_fn", (models.depth.stereo.raft_stereo_base,))
@pytest.mark.parametrize("model_mode", ("standard", "scripted"))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_raft_stereo(model_fn, model_mode, dev):
# A simple test to make sure the model can do forward pass and jit scriptable
set_rng_seed(0)
@@ -40,7 +40,7 @@ def test_raft_stereo(model_fn, model_mode, dev):

@pytest.mark.parametrize("model_fn", (models.depth.stereo.crestereo_base,))
@pytest.mark.parametrize("model_mode", ("standard", "scripted"))
@pytest.mark.parametrize("dev", cpu_and_gpu())
@pytest.mark.parametrize("dev", cpu_and_cuda())
def test_crestereo(model_fn, model_mode, dev):
set_rng_seed(0)
