Skip to content
This repository has been archived by the owner on Oct 9, 2023. It is now read-only.

relax latest numpy #1595

Merged
merged 18 commits into from
Jun 19, 2023
Merged
4 changes: 2 additions & 2 deletions requirements/base.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@

packaging <24.0
setuptools <=59.5.0 # Prevent install bug with tensorboard
numpy <1.24 # strict - freeze for using np.long
numpy <1.25
torch >1.7.0, <=2.0.1
torchmetrics >0.7.0, <0.11.0 # strict
pytorch-lightning >1.8.0, <2.0.0 # strict
pyDeprecate >0.1.0
pyDeprecate >0.2.0
pandas >1.1.0, <=1.5.2
jsonargparse[signatures] >4.0.0, <=4.9.0
click >=7.1.2, <=8.1.3
Expand Down
1 change: 1 addition & 0 deletions requirements/datatype_audio.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# NOTE: all version pins for the latest releases exist for CI consistency; pins marked `strict` are additionally enforced in setup

numpy <1.24 # strict - frozen due to unexplained failures when `padding=True` is required
torchaudio <=2.0.2
torchvision <=0.15.2
librosa >=0.8.1, <=0.10.0.post2
Expand Down
16 changes: 8 additions & 8 deletions requirements/serve.txt
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
# NOTE: all version pins for the latest releases exist for CI consistency; pins marked `strict` are additionally enforced in setup

pillow >7.1, <=9.5.0
pyyaml <=6.0
cytoolz <=0.12.1
graphviz <=0.20.1
tqdm <=4.64.1
fastapi >=0.65.2, <=0.96.0
pillow >9.0.0, <=9.5.0
pyyaml >5.4, <=6.0
cytoolz >0.11, <=0.12.1
graphviz >=0.19, <=0.20.1
tqdm >4.60, <=4.64.1
fastapi >0.65, <=0.96.0
pydantic >1.8.1, <=1.10.8
starlette ==0.14.2
uvicorn[standard] >=0.12.0, <=0.20.0
aiofiles <=23.1.0
aiofiles >22.1.0, <=23.1.0
jinja2 >=3.0.0, <3.1.0
torchvision <=0.15.2
torchvision >0.10.0, <=0.15.2
66 changes: 35 additions & 31 deletions src/flash/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,39 +14,43 @@
"""Root package info."""
import os

from flash.__about__ import * # noqa: F401 F403
from flash.core.utilities.imports import _TORCH_AVAILABLE
import numpy

if _TORCH_AVAILABLE:
from flash.core.data.callback import FlashCallback
from flash.core.data.data_module import DataModule
from flash.core.data.io.input import DataKeys, Input
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.io.output import Output
from flash.core.data.io.output_transform import OutputTransform
from flash.core.model import Task
from flash.core.trainer import Trainer
from flash.core.utilities.stages import RunningStage
# adding compatibility for numpy >= 1.24
for tp_name, tp_ins in [("object", object), ("bool", bool), ("float", float)]:
if not hasattr(numpy, tp_name):
setattr(numpy, tp_name, tp_ins)

_PACKAGE_ROOT = os.path.dirname(__file__)
ASSETS_ROOT = os.path.join(_PACKAGE_ROOT, "assets")
PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
_IS_TESTING = os.getenv("FLASH_TESTING", "0") == "1"
from flash.__about__ import * # noqa: F401 E402 F403
from flash.core.data.callback import FlashCallback # noqa: E402
from flash.core.data.data_module import DataModule # noqa: E402
from flash.core.data.io.input import DataKeys, Input # noqa: E402
from flash.core.data.io.input_transform import InputTransform # noqa: E402
from flash.core.data.io.output import Output # noqa: E402
from flash.core.data.io.output_transform import OutputTransform # noqa: E402
from flash.core.model import Task # noqa: E402
from flash.core.trainer import Trainer # noqa: E402
from flash.core.utilities.stages import RunningStage # noqa: E402

if _IS_TESTING:
from pytorch_lightning import seed_everything
_PACKAGE_ROOT = os.path.dirname(__file__)
ASSETS_ROOT = os.path.join(_PACKAGE_ROOT, "assets")
PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
_IS_TESTING = os.getenv("FLASH_TESTING", "0") == "1"

seed_everything(42)
if _IS_TESTING:
from pytorch_lightning import seed_everything

__all__ = [
"DataKeys",
"DataModule",
"FlashCallback",
"Input",
"InputTransform",
"Output",
"OutputTransform",
"RunningStage",
"Task",
"Trainer",
]
seed_everything(42)

__all__ = [
"DataKeys",
"DataModule",
"FlashCallback",
"Input",
"InputTransform",
"Output",
"OutputTransform",
"RunningStage",
"Task",
"Trainer",
]
19 changes: 4 additions & 15 deletions src/flash/audio/speech_recognition/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,9 +119,7 @@ def from_files(
>>> _ = [os.remove(f"predict_speech_{i}.wav") for i in range(1, 4)]
"""

ds_kw = {
"sampling_rate": sampling_rate,
}
ds_kw = {"sampling_rate": sampling_rate}

return cls(
input_cls(RunningStage.TRAINING, train_files, train_targets, **ds_kw),
Expand Down Expand Up @@ -306,10 +304,7 @@ def from_csv(
>>> os.remove("predict_data.tsv")
"""

ds_kw = {
"input_key": input_field,
"sampling_rate": sampling_rate,
}
ds_kw = {"input_key": input_field, "sampling_rate": sampling_rate}

return cls(
input_cls(RunningStage.TRAINING, train_file, target_key=target_field, **ds_kw),
Expand Down Expand Up @@ -430,11 +425,7 @@ def from_json(
>>> os.remove("predict_data.json")
"""

ds_kw = {
"input_key": input_field,
"sampling_rate": sampling_rate,
"field": field,
}
ds_kw = {"input_key": input_field, "sampling_rate": sampling_rate, "field": field}

return cls(
input_cls(RunningStage.TRAINING, train_file, target_key=target_field, **ds_kw),
Expand Down Expand Up @@ -580,9 +571,7 @@ def from_datasets(
>>> _ = [os.remove(f"predict_speech_{i}.wav") for i in range(1, 4)]
"""

ds_kw = {
"sampling_rate": sampling_rate,
}
ds_kw = {"sampling_rate": sampling_rate}

return cls(
input_cls(RunningStage.TRAINING, train_dataset, **ds_kw),
Expand Down