Commit 7d7cb68: pre-release

Scitator committed Jul 29, 2021 · 1 parent c1cdec1
Showing 7 changed files with 70 additions and 75 deletions.
CHANGELOG.md (19 additions, 0 deletions)

```diff
@@ -8,6 +8,25 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
+-
+
+### Changed
+
+-
+
+### Removed
+
+-
+
+### Fixed
+
+-
+
+
+## [21.07] - 2021-07-29
+
+### Added
+
 - added `pre-commit` hook to run codestyle checker on commit ([#1257](https://github.com/catalyst-team/catalyst/pull/1257))
 - `on publish` github action for docker and docs added ([#1260](https://github.com/catalyst-team/catalyst/pull/1260))
 - MixupCallback and `utils.mixup_batch` ([#1241](https://github.com/catalyst-team/catalyst/pull/1241))
```
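As context for the `utils.mixup_batch` entry above: mixup blends each sample with another randomly chosen sample from the same batch. The sketch below illustrates the general technique only; the actual signature and behavior of `catalyst.utils.mixup_batch` are not shown in this diff, so the helper here is a hypothetical stand-in.

```python
import torch

def mixup_batch(batch: torch.Tensor, alpha: float = 0.2) -> torch.Tensor:
    # hypothetical stand-in for catalyst.utils.mixup_batch (PR #1241):
    # convex-combine each sample with a randomly permuted partner
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    permutation = torch.randperm(batch.size(0))
    return lam * batch + (1 - lam) * batch[permutation]
```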
CONTRIBUTING.md (1 addition, 1 deletion)

```diff
@@ -109,7 +109,7 @@ We are using the Github CI for our test cases validation:
 - [unit tests](https://github.com/catalyst-team/catalyst/blob/master/.github/workflows/dl_cpu.yml#L113)
 - [integrations tests](https://github.com/catalyst-team/catalyst/blob/master/.github/workflows/dl_cpu.yml#L114#L117)
 
-We also have a [colab minimal CI/CD](https://colab.research.google.com/drive/1JCGTVvWlrIsLXMPRRRSWiAstSLic4nbA) as an independent step-by-step handmade tests option.
+We also have a [colab minimal CI/CD](https://colab.research.google.com/github/catalyst-team/catalyst/blob/master/examples/notebooks/colab_ci_cd.ipynb) as an independent step-by-step handmade tests option.
 Please use it as a collaborative platform, if you have any issues during the PR.
 
 ### Codestyle
```
README.md (39 additions, 59 deletions)

```diff
@@ -71,13 +71,11 @@ model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
 criterion = nn.CrossEntropyLoss()
 optimizer = optim.Adam(model.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 runner = dl.SupervisedRunner(
```
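For readers skimming the diff, here is the refactored quickstart assembled into one runnable sketch. The hunk truncates inside `dl.SupervisedRunner(`, so the no-argument runner construction and the `runner.train(...)` keyword arguments below are assumptions based on Catalyst's documented API, not the exact README text; the `transforms` import style is borrowed from the `multi_gpu.py` hunk later in this commit.

```python
import os

from torch import nn, optim
from torch.utils.data import DataLoader

from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import transforms  # import style taken from multi_gpu.py below

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.02)

# the refactored loader setup introduced by this commit
train_data = MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor())
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
loaders = {
    "train": DataLoader(train_data, batch_size=32),
    "valid": DataLoader(valid_data, batch_size=32),
}

runner = dl.SupervisedRunner()
runner.train(
    model=model,
    criterion=criterion,
    optimizer=optimizer,
    loaders=loaders,
    num_epochs=1,     # assumed: the diff truncates before these kwargs
    logdir="./logs",  # assumed
    verbose=True,     # assumed
)
```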
```diff
@@ -220,13 +218,11 @@ from catalyst.contrib.datasets import MNIST
 model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
 optimizer = optim.Adam(model.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 class CustomRunner(dl.Runner):
```
```diff
@@ -626,13 +622,11 @@ model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
 criterion = nn.CrossEntropyLoss()
 optimizer = optim.Adam(model.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 runner = dl.SupervisedRunner()
```
```diff
@@ -688,13 +682,11 @@ model = nn.Sequential(
 criterion = IoULoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 class CustomRunner(dl.SupervisedRunner):
```
```diff
@@ -750,13 +742,11 @@ student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
 criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
 optimizer = optim.Adam(student.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 class DistilRunner(dl.Runner):
```
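The `cls`/`kl` criterion pair above is the usual distillation setup: a cross-entropy term against the labels plus a KL term pulling the student's softened logits toward the teacher's. The `DistilRunner` body is truncated in this hunk, so the function below is a hedged illustration of how those two criteria are typically combined, not the README's actual code.

```python
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, targets, criterion,
                      temperature=1.0, alpha=0.5):
    # hypothetical combination of the two criteria defined above;
    # KLDivLoss(reduction="batchmean") expects log-probs vs. probs
    cls_loss = criterion["cls"](student_logits, targets)
    kl_loss = criterion["kl"](
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
    )
    return alpha * cls_loss + (1 - alpha) * kl_loss
```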
```diff
@@ -934,11 +924,8 @@ optimizer = {
     "generator": torch.optim.Adam(generator.parameters(), lr=0.0003, betas=(0.5, 0.999)),
     "discriminator": torch.optim.Adam(discriminator.parameters(), lr=0.0003, betas=(0.5, 0.999)),
 }
-loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    )
-}
+train_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
+loaders = {"train": DataLoader(train_data, batch_size=32)}
 
 class CustomRunner(dl.Runner):
     def predict_batch(self, batch):
```
```diff
@@ -1099,13 +1086,11 @@ class CustomRunner(dl.IRunner):
         return 3
 
     def get_loaders(self, stage: str):
+        train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+        valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
         loaders = {
-            "train": DataLoader(
-                MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-            ),
-            "valid": DataLoader(
-                MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-            ),
+            "train": DataLoader(train_data, batch_size=32),
+            "valid": DataLoader(valid_data, batch_size=32),
         }
         return loaders
 
```
```diff
@@ -1202,13 +1187,11 @@ class CustomRunner(dl.IRunner):
         return 3
 
     def get_loaders(self, stage: str):
+        train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+        valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
         loaders = {
-            "train": DataLoader(
-                MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-            ),
-            "valid": DataLoader(
-                MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-            ),
+            "train": DataLoader(train_data, batch_size=32),
+            "valid": DataLoader(valid_data, batch_size=32),
         }
         return loaders
 
```
```diff
@@ -1311,13 +1294,11 @@ class CustomRunner(dl.IRunner):
         return 3
 
     def get_loaders(self, stage: str):
+        train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+        valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
         loaders = {
-            "train": DataLoader(
-                MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-            ),
-            "valid": DataLoader(
-                MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-            ),
+            "train": DataLoader(train_data, batch_size=32),
+            "valid": DataLoader(valid_data, batch_size=32),
         }
         return loaders
 
```
```diff
@@ -1409,13 +1390,11 @@ def objective(trial):
     lr = trial.suggest_loguniform("lr", 1e-3, 1e-1)
     num_hidden = int(trial.suggest_loguniform("num_hidden", 32, 128))
 
+    train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+    valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
     loaders = {
-        "train": DataLoader(
-            MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-        ),
-        "valid": DataLoader(
-            MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-        ),
+        "train": DataLoader(train_data, batch_size=32),
+        "valid": DataLoader(valid_data, batch_size=32),
     }
     model = nn.Sequential(
         nn.Flatten(), nn.Linear(784, num_hidden), nn.ReLU(), nn.Linear(num_hidden, 10)
```
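For orientation, the `objective` function above is meant to be passed to an Optuna study; the hunk truncates before that code, so the sketch below assumes standard Optuna usage rather than quoting the README. The direction and trial budget are illustrative.

```python
import optuna

# assumed continuation: the README hunk ends before the study setup
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10, timeout=300)
print(study.best_value, study.best_params)
```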
```diff
@@ -1582,6 +1561,7 @@ best practices for your deep learning research and development.
 
 ### Documentation
 - [master](https://catalyst-team.github.io/catalyst/)
+- [21.07](https://catalyst-team.github.io/catalyst/v21.07/index.html)
 - [21.06](https://catalyst-team.github.io/catalyst/v21.06/index.html)
 - [21.05](https://catalyst-team.github.io/catalyst/v21.05/index.html) ([Catalyst — A PyTorch Framework for Accelerated Deep Learning R&D](https://medium.com/pytorch/catalyst-a-pytorch-framework-for-accelerated-deep-learning-r-d-ad9621e4ca88?source=friends_link&sk=885b4409aecab505db0a63b06f19dcef))
 - [21.04/21.04.1](https://catalyst-team.github.io/catalyst/v21.04/index.html), [21.04.2](https://catalyst-team.github.io/catalyst/v21.04.2/index.html)
```
catalyst/__version__.py (1 addition, 1 deletion)

```diff
@@ -1 +1 @@
-__version__ = "21.06"
+__version__ = "21.07"
```
docs/index.rst (4 additions, 6 deletions)

```diff
@@ -46,13 +46,11 @@ Getting started
 criterion = nn.CrossEntropyLoss()
 optimizer = optim.Adam(model.parameters(), lr=0.02)
 
+train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
+valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
 loaders = {
-    "train": DataLoader(
-        MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
-    ),
-    "valid": DataLoader(
-        MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
-    ),
+    "train": DataLoader(train_data, batch_size=32),
+    "valid": DataLoader(valid_data, batch_size=32),
 }
 
 runner = dl.SupervisedRunner(
```
examples/engines/README.md (1 addition, 1 deletion)

````diff
@@ -2,12 +2,12 @@
 
 Let's check different
 DataParallel and DistributedDataParallel multi-GPU setups with Catalyst Engines.
-> *Please use `pip install git+https://github.com/catalyst-team/catalyst@master --upgrade` before the `v21.06` release.*
 
 
 ## PyTorch
 ```bash
 pip install catalyst
+CUDA_VISIBLE_DEVICES="0" python multi_gpu.py --engine=de
 CUDA_VISIBLE_DEVICES="0,1" python multi_gpu.py --engine=dp
 CUDA_VISIBLE_DEVICES="0,1" python multi_gpu.py --engine=ddp
 ```
````
examples/engines/multi_gpu.py (5 additions, 7 deletions)

```diff
@@ -13,6 +13,7 @@
 from catalyst.data import transforms
 
 E2E = {
+    "de": dl.DeviceEngine,
     "dp": dl.DataParallelEngine,
     "ddp": dl.DistributedDataParallelEngine,
 }
```
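The `E2E` mapping lets the script pick an engine from the `--engine` flag shown in the engines README above; this hunk adds the new `de` (single-device) entry. The diff shows only the mapping, not the CLI code, so the dispatch below is a hedged sketch of how such a flag is typically wired up, not the file's actual argument parsing.

```python
import argparse

from catalyst import dl

# mapping taken from the hunk above
E2E = {
    "de": dl.DeviceEngine,
    "dp": dl.DataParallelEngine,
    "ddp": dl.DistributedDataParallelEngine,
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--engine", choices=list(E2E.keys()), required=True)
    args = parser.parse_args()
    engine = E2E[args.engine]()  # e.g. --engine=ddp -> DistributedDataParallelEngine()
```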
```diff
@@ -97,14 +98,11 @@ def get_loaders(self, stage: str):
         transform = transforms.Compose(
             [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
         )
+        train_data = CIFAR10(os.getcwd(), train=True, download=True, transform=transform)
+        valid_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform)
         return {
-            "train": DataLoader(
-                CIFAR10(os.getcwd(), train=True, download=True, transform=transform), batch_size=32
-            ),
-            "valid": DataLoader(
-                CIFAR10(os.getcwd(), train=False, download=True, transform=transform),
-                batch_size=32,
-            ),
+            "train": DataLoader(train_data, batch_size=32),
+            "valid": DataLoader(valid_data, batch_size=32),
         }
 
     def get_model(self, stage: str):
```
