Skip to content

Commit

Permalink
New metric classes (#1326)
Browse files Browse the repository at this point in the history
* Create metrics package

* Create metric.py

* Create utils.py

* Create __init__.py

* add tests for metric utils

* add docstrings for metrics utils

* add function to recursively apply other function to collection

* add tests for this function

* update test

* Update pytorch_lightning/metrics/metric.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* update metric name

* remove example docs

* fix tests

* add metric tests

* fix to tensor conversion

* fix apply to collection

* Update CHANGELOG.md

* Update pytorch_lightning/metrics/metric.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* remove tests from init

* add missing type annotations

* rename utils to convertors

* Create metrics.rst

* Update index.rst

* Update index.rst

* Update pytorch_lightning/metrics/convertors.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update pytorch_lightning/metrics/convertors.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update pytorch_lightning/metrics/convertors.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update pytorch_lightning/metrics/metric.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update tests/utilities/test_apply_to_collection.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update tests/utilities/test_apply_to_collection.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Update tests/metrics/convertors.py

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* Apply suggestions from code review

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* add doctest example

* rename file and fix imports

* added parametrized test

* replace lambda with inlined function

* rename apply_to_collection to apply_func

* Separated class description from init args

* Apply suggestions from code review

Co-Authored-By: Jirka Borovec <Borda@users.noreply.github.com>

* adjust random values

* suppress output when seeding

* remove gpu from doctest

* Add requested changes and add ellipsis for doctest

* forgot to push these files...

* add explicit check for dtype to convert to

* fix ddp tests

* remove explicit ddp destruction

Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
  • Loading branch information
justusschock and Borda committed Jun 8, 2020
1 parent 1f2ed9d commit f53c676
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 205 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

## Metrics (will be added to unreleased once the metric branch is finished)
- Add Metric Base Classes ([#1326](https://github.com/PyTorchLightning/pytorch-lightning/pull/1326))

## [unreleased] - YYYY-MM-DD

### Added
Expand Down
205 changes: 0 additions & 205 deletions tests/metrics/__init__.py
Original file line number Diff line number Diff line change
@@ -1,205 +0,0 @@
import numpy as np
import pytest
import torch
import torch.distributed as dist

import tests.base.utils as tutils
from pytorch_lightning.metrics.utils import _apply_to_inputs, _apply_to_outputs, \
_convert_to_tensor, _convert_to_numpy, _numpy_metric_conversion, \
_tensor_metric_conversion, _sync_ddp, tensor_metric, numpy_metric


def test_apply_to_inputs():
    """Check that ``_apply_to_inputs`` transforms both positional and keyword
    arguments before the wrapped function receives them."""
    def apply_fn(inputs, factor):
        # Recursively scale every numeric leaf by ``factor``; returns None for
        # unhandled types (only numbers, dicts, lists and tuples occur here).
        if isinstance(inputs, (float, int)):
            return inputs * factor
        elif isinstance(inputs, dict):
            return {k: apply_fn(v, factor) for k, v in inputs.items()}
        elif isinstance(inputs, (tuple, list)):
            return [apply_fn(x, factor) for x in inputs]

    @_apply_to_inputs(apply_fn, factor=2.)
    def test_fn(*args, **kwargs):
        return args, kwargs

    # fixed: the original second kwargs candidate was the set literal
    # ``{1., 2.}``, which cannot be unpacked with ``**`` — it must be a dict.
    for args in [[], [1., 2.]]:
        for kwargs in [{}, {'a': 1., 'b': 2.}]:
            result_args, result_kwargs = test_fn(*args, **kwargs)
            assert isinstance(result_args, list)
            assert isinstance(result_kwargs, dict)
            assert len(result_args) == len(args)
            assert len(result_kwargs) == len(kwargs)
            assert all([k in result_kwargs for k in kwargs.keys()])
            for arg, result_arg in zip(args, result_args):
                assert arg * 2. == result_arg

            for key in kwargs.keys():
                # fixed: the original had a trailing comma here, which bound a
                # 1-tuple and turned ``arg * 2.`` into tuple repetition.
                arg = kwargs[key]
                result_arg = result_kwargs[key]
                assert arg * 2. == result_arg


def test_apply_to_outputs():
    """Check that ``_apply_to_outputs`` post-processes the wrapped function's
    return value."""
    def suffix_fn(inputs, additional_str):
        # stringify the output and append the configured suffix
        return str(inputs) + additional_str

    @_apply_to_outputs(suffix_fn, additional_str='_str')
    def decorated_fn(*args, **kwargs):
        return 'dummy'

    assert decorated_fn() == 'dummy_str'


def test_convert_to_tensor():
    """``_convert_to_tensor`` must turn plain floats and numpy arrays into
    tensors that preserve the original value."""
    for test_item in [1., np.array([1.])]:
        result = _convert_to_tensor(test_item)
        assert isinstance(result, torch.Tensor)
        # fixed: the original asserted ``test_item.item()``, but a plain Python
        # float has no ``.item()`` method — the converted result must be checked.
        assert result.item() == 1.


def test_convert_to_numpy():
    """``_convert_to_numpy`` must turn floats and tensors into numpy arrays
    that preserve the original value."""
    for value in (1., torch.tensor([1.])):
        converted = _convert_to_numpy(value)
        assert isinstance(converted, np.ndarray)
        assert converted.item() == 1.


def test_numpy_metric_conversion():
    """A ``_numpy_metric_conversion``-wrapped metric must receive numpy arrays
    and have its result converted back to a tensor."""
    @_numpy_metric_conversion
    def numpy_test_metric(*args, **kwargs):
        # every incoming value must already have been converted to numpy
        for value in list(args) + list(kwargs.values()):
            assert isinstance(value, np.ndarray)
        return 5.

    result = numpy_test_metric(torch.tensor([1.]), dummy_kwarg=2.)
    assert isinstance(result, torch.Tensor)
    assert result.item() == 5.


def test_tensor_metric_conversion():
    """A ``_tensor_metric_conversion``-wrapped metric must receive tensors and
    have its result converted to a tensor as well."""
    @_tensor_metric_conversion
    def tensor_test_metric(*args, **kwargs):
        # every incoming value must already have been converted to a tensor
        for value in list(args) + list(kwargs.values()):
            assert isinstance(value, torch.Tensor)
        return 5.

    result = tensor_test_metric(np.array([1.]), dummy_kwarg=2.)
    assert isinstance(result, torch.Tensor)
    assert result.item() == 5.


# fixed: ``skipif`` requires the reason as a keyword argument; passing it
# positionally makes pytest error out instead of skipping.
@pytest.mark.skipif(torch.cuda.device_count() < 2,
                    reason="test requires multi-GPU machine")
def test_sync_reduce_ddp():
    """Make sure sync-reduce works with DDP"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    dist.init_process_group('gloo')

    tensor = torch.tensor([1.], device='cuda:0')

    # with an initialized process group the values are summed across ranks
    reduced_tensor = _sync_ddp(tensor)

    assert reduced_tensor.item() == dist.get_world_size(), \
        'Sync-Reduce does not work properly with DDP and Tensors'

    number = 1.
    reduced_number = _sync_ddp(number)
    assert isinstance(reduced_number, torch.Tensor), 'When reducing a number we should get a tensor out'
    assert reduced_number.item() == dist.get_world_size(), \
        'Sync-Reduce does not work properly with DDP and Numbers'

    dist.destroy_process_group()


def test_sync_reduce_simple():
    """Make sure sync-reduce works without DDP"""
    source = torch.tensor([1.], device='cpu')

    # without an initialized process group the value should pass through unchanged
    reduced = _sync_ddp(source)
    assert torch.allclose(source, reduced), \
        'Sync-Reduce does not work properly without DDP and Tensors'

    plain_number = 1.
    reduced_number = _sync_ddp(plain_number)
    assert isinstance(reduced_number, torch.Tensor), 'When reducing a number we should get a tensor out'
    assert reduced_number.item() == plain_number, \
        'Sync-Reduce does not work properly without DDP and Numbers'


def _test_tensor_metric(is_ddp: bool):
    """Shared check for the ``tensor_metric`` decorator, with or without an
    active DDP process group."""
    @tensor_metric()
    def tensor_test_metric(*args, **kwargs):
        # all inputs must have been converted to tensors by the decorator
        for value in list(args) + list(kwargs.values()):
            assert isinstance(value, torch.Tensor)
        return 5.

    # under DDP the metric result gets sum-reduced across all ranks
    factor = dist.get_world_size() if is_ddp else 1.

    result = tensor_test_metric(np.array([1.]), dummy_kwarg=2.)
    assert isinstance(result, torch.Tensor)
    assert result.item() == 5. * factor


# fixed: ``skipif`` requires the reason as a keyword argument; passing it
# positionally makes pytest error out instead of skipping.
@pytest.mark.skipif(torch.cuda.device_count() < 2,
                    reason="test requires multi-GPU machine")
def test_tensor_metric_ddp():
    """Run the shared tensor-metric check inside a (gloo) process group."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    dist.init_process_group('gloo')
    _test_tensor_metric(True)
    dist.destroy_process_group()


def test_tensor_metric_simple():
    """Tensor-metric check without any distributed setup."""
    _test_tensor_metric(is_ddp=False)


def _test_numpy_metric(is_ddp: bool):
    """Shared check for the ``numpy_metric`` decorator, with or without an
    active DDP process group."""
    @numpy_metric()
    def numpy_test_metric(*args, **kwargs):
        # all inputs must have been converted to numpy arrays by the decorator
        for value in list(args) + list(kwargs.values()):
            assert isinstance(value, np.ndarray)
        return 5.

    # under DDP the metric result gets sum-reduced across all ranks
    factor = dist.get_world_size() if is_ddp else 1.

    result = numpy_test_metric(torch.tensor([1.]), dummy_kwarg=2.)
    assert isinstance(result, torch.Tensor)
    assert result.item() == 5. * factor


# fixed: ``skipif`` requires the reason as a keyword argument; passing it
# positionally makes pytest error out instead of skipping.
@pytest.mark.skipif(torch.cuda.device_count() < 2,
                    reason="test requires multi-GPU machine")
def test_numpy_metric_ddp():
    """Run the shared numpy-metric check inside a (gloo) process group."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    dist.init_process_group('gloo')
    # fixed: copy-paste bug — this test previously called _test_tensor_metric,
    # so the numpy path was never exercised under DDP.
    _test_numpy_metric(True)
    dist.destroy_process_group()


def test_numpy_metric_simple():
    """Numpy-metric check without any distributed setup."""
    # fixed: copy-paste bug — this test previously called _test_tensor_metric,
    # so the numpy path was never exercised.
    _test_numpy_metric(False)

0 comments on commit f53c676

Please sign in to comment.