diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000000..fdc976d48579
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,17 @@
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.Python
+env
+pip-log.txt
+pip-delete-this-directory.txt
+.tox
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+*.log
+.git
diff --git a/Dockerfile b/Dockerfile
index 5d843f8c51cd..d77a8fcd26a0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,3 +1,5 @@
+# syntax=docker/dockerfile:experimental
+
 # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,9 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-ARG FROM_IMAGE_NAME=nvcr.io/nvidia/pytorch:19.11-py3
+ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:20.01-py3

-FROM ${FROM_IMAGE_NAME}
+# build an image that includes only the nemo dependencies, ensures that dependencies
+# are included first for optimal caching, and useful for building a development
+# image (by specifying build target as `nemo-deps`)
+FROM ${BASE_IMAGE} as nemo-deps

 # Ensure apt-get won't prompt for selecting options
 ENV DEBIAN_FRONTEND=noninteractive
@@ -25,27 +30,42 @@ RUN apt-get update && \
         python-dev && \
     rm -rf /var/lib/apt/lists/*

+# install onnx trt open source plugins
 ENV PATH=$PATH:/usr/src/tensorrt/bin
 WORKDIR /tmp/onnx-trt
 COPY scripts/docker/onnx-trt.patch .
-RUN git clone -n https://github.com/onnx/onnx-tensorrt.git && cd onnx-tensorrt && git checkout 8716c9b && git submodule update --init --recursive && patch -f < ../onnx-trt.patch && \
-    mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DGPU_ARCHS="60 70 75" && make -j16 && make install && mv -f /usr/lib/libnvonnx* /usr/lib/x86_64-linux-gnu/ && ldconfig
-
-WORKDIR /workspace/nemo
-ARG NEMO_GIT_BRANCH="master"
+RUN git clone -n https://github.com/onnx/onnx-tensorrt.git && cd onnx-tensorrt && \
+    git checkout 8716c9b && git submodule update --init --recursive && patch -f < ../onnx-trt.patch && \
+    mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DGPU_ARCHS="60 70 75" && \
+    make -j16 && make install && mv -f /usr/lib/libnvonnx* /usr/lib/x86_64-linux-gnu/ && ldconfig && rm -rf /tmp/onnx-tensorrt
+# install nemo dependencies
+WORKDIR /tmp/nemo
 COPY requirements/requirements_docker.txt requirements.txt
-RUN pip install \
-    --disable-pip-version-check --no-cache-dir\
-    --upgrade -r requirements.txt && \
-    pip install --no-cache-dir \
-    git+git://github.com/NVIDIA/NeMo.git@${NEMO_GIT_BRANCH}#egg=project[all]
-
-RUN printf "#!/bin/bash\njupyter lab --no-browser --allow-root --ip=0.0.0.0" >> start-jupyter.sh && \
-    chmod +x start-jupyter.sh
-
+RUN pip install --disable-pip-version-check --no-cache-dir -r requirements.txt
+# copy nemo source into a scratch image
+FROM scratch as nemo-src
+COPY . .
+# start building the final container
+FROM nemo-deps as nemo
+ARG NEMO_VERSION
+ARG BASE_IMAGE
+# Check that NEMO_VERSION is set. Build will fail without this. Expose NEMO and base container
+# version information as runtime environment variable for introspection purposes
+RUN /usr/bin/test -n "$NEMO_VERSION" && \
+    /bin/echo "export NEMO_VERSION=${NEMO_VERSION}" >> /root/.bashrc && \
+    /bin/echo "export BASE_IMAGE=${BASE_IMAGE}" >> /root/.bashrc
+RUN --mount=from=nemo-src,target=/tmp/nemo cd /tmp/nemo && pip install ".[all]"
+# copy scripts/examples/tests into container for end user
+WORKDIR /workspace/nemo
+COPY scripts /workspace/nemo/scripts
+COPY examples /workspace/nemo/examples
+COPY tests /workspace/nemo/tests
+COPY README.rst LICENSE /workspace/nemo/
+RUN printf "#!/bin/bash\njupyter lab --no-browser --allow-root --ip=0.0.0.0" >> start-jupyter.sh && \
+    chmod +x start-jupyter.sh
diff --git a/Jenkinsfile b/Jenkinsfile
index 3bd1946fe5fb..b6ec2d321979 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,12 +1,14 @@
 pipeline {
-  agent any
-  environment {
-      PATH="/home/mrjenkins/anaconda3/envs/py37p1.4.0/bin:$PATH"
+  agent {
+        docker {
+            image 'nvcr.io/nvidia/pytorch:20.01-py3'
+            args '--device=/dev/nvidia0 --gpus all --user 0:128 -v /home:/home --shm-size=8g'
+        }
   }
   options {
     timeout(time: 1, unit: 'HOURS')
     disableConcurrentBuilds()
-   }
+  }

   stages {
     stage('PyTorch version') {
@@ -16,7 +18,7 @@
       }
     }
     stage('Install test requirements') {
       steps {
-        sh 'pip install -r requirements/requirements_test.txt'
+        sh 'apt-get update && apt-get install -y bc && pip install -r requirements/requirements_test.txt'
       }
     }
     stage('Code formatting checks') {
@@ -24,25 +26,9 @@
         sh 'python setup.py style'
       }
     }
-    stage('Unittests general') {
-      steps {
-        sh './reinstall.sh && python -m unittest tests/*.py'
-      }
-    }
-
-    stage('Unittests ASR') {
-      steps {
-        sh 'python -m unittest tests/asr/*.py'
-      }
-    }
-    stage('Unittests NLP') {
+    stage('Unittests ALL') {
       steps {
-        sh 'python -m unittest tests/nlp/*.py'
-      }
-    }
-    stage('Unittests TTS') {
-      steps {
-        sh 'python -m unittest tests/tts/*.py'
+        sh './reinstall.sh && python -m unittest'
       }
     }
@@ -147,8 +133,6 @@
       }
     }

-
-
     stage('NLP-ASR processing') {
       failFast true
       parallel {
diff --git a/docs/docs_zh/sources/source/collections/nemo_asr.rst b/docs/docs_zh/sources/source/collections/nemo_asr.rst
index 19135143a520..83f92615e563 100644
--- a/docs/docs_zh/sources/source/collections/nemo_asr.rst
+++ b/docs/docs_zh/sources/source/collections/nemo_asr.rst
@@ -1,9 +1,9 @@
-NeMo_ASR collection
+NeMo ASR collection
 ===================

 语音数据处理模块
 ------------------------------
-.. automodule:: nemo_asr.data_layer
+.. automodule:: nemo.collections.asr.data_layer
     :members:
     :undoc-members:
     :show-inheritance:
@@ -11,7 +11,7 @@ NeMo_ASR collection

 语音识别模块
 ------------------------------------
-.. automodule:: nemo_asr.jasper
+.. automodule:: nemo.collections.asr.jasper
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/docs_zh/sources/source/collections/nemo_nlp.rst b/docs/docs_zh/sources/source/collections/nemo_nlp.rst
index ccd744efd4a4..223cd71d3b6d 100644
--- a/docs/docs_zh/sources/source/collections/nemo_nlp.rst
+++ b/docs/docs_zh/sources/source/collections/nemo_nlp.rst
@@ -1,48 +1,47 @@
-NeMo_NLP collection
+NeMo NLP collection
 ===================

 NLP 数据处理模块
 ---------------------------
-.. automodule:: nemo_nlp.data_layers
+.. automodule:: nemo.collections.nlp.data.datasets
     :members:
     :undoc-members:
     :show-inheritance:
-    :exclude-members: forward

 NLP 分词器
 --------------
-.. automodule:: nemo_nlp.data.tokenizers.bert_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.bert_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.char_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.char_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.gpt2_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.gpt2_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.spc_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.sentencepiece_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.tokenizer_spec
+.. automodule:: nemo.collections.nlp.data.tokenizers.tokenizer_spec
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.word_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.word_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.yttm_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.youtokentome_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:
@@ -50,27 +49,52 @@ NLP 分词器

 NLP 神经模块
 ------------------
-.. automodule:: nemo_nlp.modules.classifiers
+
+.. automodule:: nemo.collections.nlp.nm.data_layers
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.losses
+.. automodule:: nemo.collections.nlp.nm.losses
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.pytorch_utils
+.. automodule:: nemo.collections.nlp.nm.trainables.common.sequence_classification_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.common.sequence_regression_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.common.token_classification_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.dialogue_state_tracking.state_tracking_trade_nm
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.transformer_nm
+.. automodule:: nemo.collections.nlp.nm.trainables.joint_intent_slot.joint_intent_slot_nm
     :members:
     :undoc-members:
     :show-inheritance:
@@ -79,7 +103,7 @@ NLP 神经模块

 NLP Hugging Face 神经模块
 -------------------------------
-.. automodule:: nemo_nlp.huggingface.bert
+.. automodule:: nemo.collections.nlp.nm.trainables.common.huggingface.bert_nm
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/docs_zh/sources/source/collections/nemo_tts.rst b/docs/docs_zh/sources/source/collections/nemo_tts.rst
index 8b189efd5224..aa852bba56eb 100644
--- a/docs/docs_zh/sources/source/collections/nemo_tts.rst
+++ b/docs/docs_zh/sources/source/collections/nemo_tts.rst
@@ -1,9 +1,9 @@
-NeMo_TTS collection
+NeMo TTS collection
 ===================

 语音数据处理模块
 ------------------------------
-.. automodule:: nemo_tts.data_layers
+.. automodule:: nemo.collections.tts.data_layers
     :members:
     :undoc-members:
     :show-inheritance:
@@ -11,7 +11,7 @@ NeMo_TTS collection

 Tacotron 2 模块
 ------------------
-.. automodule:: nemo_tts.tacotron2_modules
+.. automodule:: nemo.collections.tts.tacotron2_modules
     :members:
     :undoc-members:
     :show-inheritance:
@@ -19,7 +19,7 @@ Tacotron 2 模块

 Waveglow 模块
 ------------------
-.. automodule:: nemo_tts.waveglow_modules
+.. automodule:: nemo.collections.tts.waveglow_modules
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/docs_zh/sources/source/conf.py b/docs/docs_zh/sources/source/conf.py
index faa8502da823..b8220000049e 100644
--- a/docs/docs_zh/sources/source/conf.py
+++ b/docs/docs_zh/sources/source/conf.py
@@ -25,11 +25,6 @@
 sys.path.insert(0, os.path.abspath("."))
 sys.path.insert(0, os.path.abspath("../../../"))
-sys.path.insert(0, os.path.abspath("../../../nemo/nemo"))
-sys.path.insert(0, os.path.abspath("../../../collections"))
-sys.path.insert(0, os.path.abspath("../../../collections/nemo_asr"))
-sys.path.insert(0, os.path.abspath("../../../collections/nemo_nlp"))
-# sys.path.insert(0, os.path.abspath("../../../collections/nemo_lpr"))

 # ---- Mocking up the classes. -----
 MOCK_CLASSES = {'Dataset': 'torch.utils.data', 'Module': 'torch.nn'}
@@ -63,6 +58,7 @@ def __getattr__(cls, name):
     'h5py',
     'kaldi_io',
     'transformers',
+    'transformers.tokenization_bert',
 ]

 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
diff --git a/docs/sources/source/collections/nemo_asr.rst b/docs/sources/source/collections/nemo_asr.rst
index ae1765070e20..95d2e53779a2 100644
--- a/docs/sources/source/collections/nemo_asr.rst
+++ b/docs/sources/source/collections/nemo_asr.rst
@@ -1,9 +1,9 @@
-NeMo_ASR collection
+NeMo ASR collection
 ===================

 Speech data processing modules
 ------------------------------
-.. automodule:: nemo_asr.data_layer
+.. automodule:: nemo.collections.asr.data_layer
     :members:
     :undoc-members:
     :show-inheritance:
@@ -11,7 +11,7 @@ Speech data processing modules

 Automatic Speech Recognition modules
 ------------------------------------
-.. automodule:: nemo_asr.jasper
+.. automodule:: nemo.collections.asr.jasper
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/sources/source/collections/nemo_nlp.rst b/docs/sources/source/collections/nemo_nlp.rst
index b0ddc62c4f8e..8c862d84fb66 100644
--- a/docs/sources/source/collections/nemo_nlp.rst
+++ b/docs/sources/source/collections/nemo_nlp.rst
@@ -1,48 +1,47 @@
-NeMo_NLP collection
+NeMo NLP collection
 ===================

 NLP data processing modules
 ---------------------------
-.. automodule:: nemo_nlp.data_layers
+.. automodule:: nemo.collections.nlp.data.datasets
     :members:
     :undoc-members:
     :show-inheritance:
-    :exclude-members: forward

 NLP Tokenizers
 --------------
-.. automodule:: nemo_nlp.data.tokenizers.bert_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.bert_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.char_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.char_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.gpt2_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.gpt2_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.spc_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.sentencepiece_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.tokenizer_spec
+.. automodule:: nemo.collections.nlp.data.tokenizers.tokenizer_spec
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.word_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.word_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:

-.. automodule:: nemo_nlp.data.tokenizers.yttm_tokenizer
+.. automodule:: nemo.collections.nlp.data.tokenizers.youtokentome_tokenizer
     :members:
     :undoc-members:
     :show-inheritance:
@@ -50,27 +49,51 @@ NLP Tokenizers

 NLP Neural Modules
 ------------------
-.. automodule:: nemo_nlp.modules.classifiers
+.. automodule:: nemo.collections.nlp.nm.data_layers
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.losses
+.. automodule:: nemo.collections.nlp.nm.losses
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.pytorch_utils
+.. automodule:: nemo.collections.nlp.nm.trainables.common.sequence_classification_nm
     :members:
     :undoc-members:
     :show-inheritance:
     :exclude-members: forward

-.. automodule:: nemo_nlp.modules.transformer_nm
+.. automodule:: nemo.collections.nlp.nm.trainables.common.sequence_regression_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.common.token_classification_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.common.transformer.transformer_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.dialogue_state_tracking.state_tracking_trade_nm
+    :members:
+    :undoc-members:
+    :show-inheritance:
+    :exclude-members: forward
+
+.. automodule:: nemo.collections.nlp.nm.trainables.joint_intent_slot.joint_intent_slot_nm
     :members:
     :undoc-members:
     :show-inheritance:
@@ -79,7 +102,7 @@ NLP Neural Modules

 NLP Hugging Face Neural Modules
 -------------------------------
-.. automodule:: nemo_nlp.huggingface.bert
+.. automodule:: nemo.collections.nlp.nm.trainables.common.huggingface.bert_nm
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/sources/source/collections/nemo_tts.rst b/docs/sources/source/collections/nemo_tts.rst
index 958642a2f092..be7a8c7a8d07 100644
--- a/docs/sources/source/collections/nemo_tts.rst
+++ b/docs/sources/source/collections/nemo_tts.rst
@@ -1,9 +1,9 @@
-NeMo_TTS collection
+NeMo TTS collection
 ===================

 Speech data processing modules
 ------------------------------
-.. automodule:: nemo_tts.data_layers
+.. automodule:: nemo.collections.tts.data_layers
     :members:
     :undoc-members:
     :show-inheritance:
@@ -11,7 +11,7 @@ Speech data processing modules

 Tacotron 2 modules
 ------------------
-.. automodule:: nemo_tts.tacotron2_modules
+.. automodule:: nemo.collections.tts.tacotron2_modules
     :members:
     :undoc-members:
     :show-inheritance:
@@ -19,7 +19,7 @@ Tacotron 2 modules

 Waveglow modules
 ------------------
-.. automodule:: nemo_tts.waveglow_modules
+.. automodule:: nemo.collections.tts.waveglow_modules
     :members:
     :undoc-members:
     :show-inheritance:
diff --git a/docs/sources/source/conf.py b/docs/sources/source/conf.py
index 77a048fad9f5..8caeaaede9b5 100644
--- a/docs/sources/source/conf.py
+++ b/docs/sources/source/conf.py
@@ -25,12 +25,6 @@
 sys.path.insert(0, os.path.abspath("."))
 sys.path.insert(0, os.path.abspath("../../../"))
-sys.path.insert(0, os.path.abspath("../../../nemo/nemo"))
-sys.path.insert(0, os.path.abspath("../../../collections"))
-sys.path.insert(0, os.path.abspath("../../../collections/nemo_asr"))
-sys.path.insert(0, os.path.abspath("../../../collections/nemo_nlp"))
-sys.path.insert(0, os.path.abspath("../../../collections/nemo_tts"))
-# sys.path.insert(0, os.path.abspath("../../../collections/nemo_lpr"))

 # ---- Mocking up the classes. -----
 MOCK_CLASSES = {'Dataset': 'torch.utils.data', 'Module': 'torch.nn'}
@@ -66,6 +60,7 @@ def __getattr__(cls, name):
     'h5py',
     'kaldi_io',
     'transformers',
+    'transformers.tokenization_bert',
 ]

 sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
diff --git a/examples/asr/jasper_an4.py b/examples/asr/jasper_an4.py
index 77d81c9c364f..1ed10e0437e5 100644
--- a/examples/asr/jasper_an4.py
+++ b/examples/asr/jasper_an4.py
@@ -128,7 +128,8 @@ def main():
     parser.add_argument("--eval_datasets", type=str, nargs=1, help="validation dataset path")

     # Create new args
-    parser.add_argument("--lm", default="./an4-lm.3gram.binary", type=str)
+    # parser.add_argument("--lm", default="./an4-lm.3gram.binary", type=str)
+    parser.add_argument("--lm", default=None, type=str)
     parser.add_argument("--test_after_training", action='store_true')
     parser.add_argument("--momentum", type=float)
     parser.add_argument("--beta1", default=0.95, type=float)
@@ -198,7 +199,7 @@ def main():
     if args.test_after_training:
         logging.info("Testing greedy and beam search with LM WER.")
         # Create BeamSearch NM
-        if nf.world_size > 1:
+        if nf.world_size > 1 or args.lm is None:
             logging.warning("Skipping beam search WER as it does not " "work if doing distributed training.")
         else:
             beam_search_with_lm = nemo_asr.BeamSearchDecoderWithLM(
@@ -218,7 +219,7 @@ def main():
                 raise ValueError(f"Final eval greedy WER {wer * 100:.2f}% > :" f"than {wer_thr * 100:.2f}%")
         nf.sync_all_processes()

-        if nf.world_size == 1:
+        if nf.world_size == 1 and args.lm is not None:
             beam_hypotheses = []
             # Over mini-batch
             for i in evaluated_tensors[-1]:
diff --git a/examples/image/gan.py b/examples/image/gan.py
index 61aee8c252f7..08c43899ef21 100644
--- a/examples/image/gan.py
+++ b/examples/image/gan.py
@@ -44,7 +44,7 @@
     batch_size=batch_size, shuffle=True, train=True, root=args.train_dataset
 )

-generator = nemo_simple_gan.SimpleGenerator()
+generator = nemo_simple_gan.SimpleGenerator(batch_size)
 discriminator = nemo_simple_gan.SimpleDiscriminator()
 neg_disc_loss = nemo_simple_gan.DiscriminatorLoss(neg=True)
 disc_loss = nemo_simple_gan.DiscriminatorLoss()
diff --git a/examples/image/resnet50.py b/examples/image/resnet50.py
index b6ca608f06b5..92c7e2d4b037 100644
--- a/examples/image/resnet50.py
+++ b/examples/image/resnet50.py
@@ -54,41 +54,37 @@
     optimization_level=nemo.core.Optimization.mxprO0,
 )

-resnet = neural_factory.get_module(
-    name="resnet50", params={"placement": device}, collection="torchvision", pretrained=False,
-)
+resnet = neural_factory.get_module(name="resnet50", params={}, collection="torchvision", pretrained=False)

 dl_train = neural_factory.get_module(
     name="ImageFolderDataLayer",
     collection="torchvision",
     params={
         "batch_size": batch_size,
-        "input_size": resnet.inputs["x"].axis2type[2].dim,
+        "input_size": resnet.input_ports["x"].axis2type[2].dim,
         "shuffle": True,
         "path": args.data_root + "train",
         # "path": "/mnt/D1/Data/ImageNet/ImageFolder/train",
-        "placement": device,
     },
 )

-L_train = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={"placement": device})
+L_train = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={})

 dl_eval = neural_factory.get_module(
     name="ImageFolderDataLayer",
     collection="torchvision",
     params={
         "batch_size": batch_size,
-        "input_size": resnet.inputs["x"].axis2type[2].dim,
+        "input_size": resnet.input_ports["x"].axis2type[2].dim,
         "shuffle": False,
         "is_eval": True,
         "path": args.data_root + "val",
         # "path": "/mnt/D1/Data/ImageNet/ImageFolder/val",
         # "path": "/raid/okuchaiev/Data/ImageNet/ImageFolder/val",
-        "placement": device,
     },
 )

-L_eval = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={"placement": device})
+L_eval = neural_factory.get_module(name="CrossEntropyLoss", collection="toys", params={})

 step_per_epoch = int(len(dl_train) / (batch_size * num_gpus))
diff --git a/nemo/collections/simple_gan/gan.py b/nemo/collections/simple_gan/gan.py
index 4d8f48e6cdbb..dd2028ba769d 100644
--- a/nemo/collections/simple_gan/gan.py
+++ b/nemo/collections/simple_gan/gan.py
@@ -96,8 +96,10 @@ def output_ports(self):
             "image": NeuralType(ChannelType(), ('B', 'C', 'H', 'W'))
         }

-    def __init__(self):
+    def __init__(self, batch_size):
         super().__init__()
+        self._batch_size = batch_size
+
         self.layers = torch.nn.Sequential(
             torch.nn.ConvTranspose2d(64, 128, 3, stride=2),
             torch.nn.ReLU(),
diff --git a/nemo/collections/tts/tacotron2_modules.py b/nemo/collections/tts/tacotron2_modules.py
index dd0f56a18816..9399bfa85d53 100644
--- a/nemo/collections/tts/tacotron2_modules.py
+++ b/nemo/collections/tts/tacotron2_modules.py
@@ -37,7 +37,7 @@ def input_ports(self):
         """Returns definitions of module input ports.
         """
         # return {"char_phone": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)})}
-        return {"char_phone": NeuralType(ChannelType(), ('B', 'T'))}
+        return {"char_phone": NeuralType(LabelsType(), ('B', 'T'))}

     @property
     def output_ports(self):
diff --git a/requirements/requirements_docker.txt b/requirements/requirements_docker.txt
index a7ac755041c9..82b84f9f6ca3 100644
--- a/requirements/requirements_docker.txt
+++ b/requirements/requirements_docker.txt
@@ -21,8 +21,7 @@ ruamel.yaml
 sentencepiece
 six
 sox
-torch
 tqdm
 unidecode
 wget
-youtokentome
\ No newline at end of file
+youtokentome
diff --git a/requirements/requirements_test.txt b/requirements/requirements_test.txt
index 544127fca734..ddd891eac3fe 100644
--- a/requirements/requirements_test.txt
+++ b/requirements/requirements_test.txt
@@ -4,4 +4,5 @@ pytest-runner
 black
 isort[requirements]
 wrapt
+wget
 onnxruntime
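
Note (not part of the patch): a minimal build sketch for the reworked multi-stage Dockerfile above, assuming Docker 18.09+ with BuildKit enabled — the `# syntax=docker/dockerfile:experimental` directive and the `RUN --mount=from=nemo-src,...` step require it. The stage names `nemo-deps` and `nemo` come from the Dockerfile; the image tags and the NEMO_VERSION value are arbitrary example placeholders.

    # Dependencies-only development image: stop the build at the `nemo-deps` stage.
    DOCKER_BUILDKIT=1 docker build --target nemo-deps -t nemo-deps:dev .

    # Full image: NEMO_VERSION must be supplied, otherwise the
    # `RUN /usr/bin/test -n "$NEMO_VERSION"` check fails the build.
    DOCKER_BUILDKIT=1 docker build --build-arg NEMO_VERSION=0.10.0 -t nemo:latest .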