
Commit

apply renaming
parikls committed Oct 9, 2024
1 parent 85979b7 commit 3861a9a
Showing 3 changed files with 69 additions and 69 deletions.
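The commit renames the GPU model fields nvidia_model, amd_model and intel_model to nvidia_gpu_model, amd_gpu_model and intel_gpu_model across the entities, the payload factories and the tests. A minimal sketch of the effect on callers is shown below; the dataclass is a hypothetical, trimmed-down stand-in that lists only the fields visible in this diff (the real NodePoolOptions has more fields):

```python
from dataclasses import dataclass


# Hypothetical, trimmed-down stand-in for neuro_config_client.entities.NodePoolOptions;
# only the fields touched by this commit are shown.
@dataclass(frozen=True)
class NodePoolOptions:
    nvidia_gpu: int | None = None
    amd_gpu: int | None = None
    intel_gpu: int | None = None
    nvidia_gpu_model: str | None = None  # was: nvidia_model
    amd_gpu_model: str | None = None  # was: amd_model
    intel_gpu_model: str | None = None  # was: intel_model


# Callers now use the *_gpu_model keyword arguments; the old *_model names
# no longer exist after this commit.
options = NodePoolOptions(nvidia_gpu=4, nvidia_gpu_model="nvidia-tesla-p40")
print(options.nvidia_gpu_model)  # nvidia-tesla-p40
```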
24 changes: 12 additions & 12 deletions neuro_config_client/entities.py
@@ -68,9 +68,9 @@ class NodePoolOptions:
nvidia_gpu: int | None = None
amd_gpu: int | None = None
intel_gpu: int | None = None
- nvidia_model: str | None = None
- amd_model: str | None = None
- intel_model: str | None = None
+ nvidia_gpu_model: str | None = None
+ amd_gpu_model: str | None = None
+ intel_gpu_model: str | None = None


@dataclass(frozen=True)
@@ -128,9 +128,9 @@ class NodePool:
nvidia_gpu: int | None = None
amd_gpu: int | None = None
intel_gpu: int | None = None
- nvidia_model: str | None = None
- amd_model: str | None = None
- intel_model: str | None = None
+ nvidia_gpu_model: str | None = None
+ amd_gpu_model: str | None = None
+ intel_gpu_model: str | None = None

price: Decimal | None = None
currency: str | None = None
@@ -476,9 +476,9 @@ class ResourcePreset:
nvidia_gpu: int | None = None
amd_gpu: int | None = None
intel_gpu: int | None = None
- nvidia_model: str | None = None
- amd_model: str | None = None
- intel_model: str | None = None
+ nvidia_gpu_model: str | None = None
+ amd_gpu_model: str | None = None
+ intel_gpu_model: str | None = None
tpu: TPUPreset | None = None
scheduler_enabled: bool = False
preemptible_node: bool = False
@@ -503,9 +503,9 @@ class ResourcePoolType:
nvidia_gpu: int | None = None
amd_gpu: int | None = None
intel_gpu: int | None = None
- nvidia_model: str | None = None
- amd_model: str | None = None
- intel_model: str | None = None
+ nvidia_gpu_model: str | None = None
+ amd_gpu_model: str | None = None
+ intel_gpu_model: str | None = None
tpu: TPUResource | None = None

price: Decimal = Decimal()
48 changes: 24 additions & 24 deletions neuro_config_client/factories.py
@@ -133,9 +133,9 @@ def create_node_pool_options(payload: dict[str, Any]) -> NodePoolOptions:
nvidia_gpu=payload.get("nvidia_gpu"),
amd_gpu=payload.get("amd_gpu"),
intel_gpu=payload.get("intel_gpu"),
- nvidia_model=payload.get("nvidia_model"),
- amd_model=payload.get("amd_model"),
- intel_model=payload.get("intel_model")
+ nvidia_gpu_model=payload.get("nvidia_gpu_model"),
+ amd_gpu_model=payload.get("amd_gpu_model"),
+ intel_gpu_model=payload.get("intel_gpu_model")
)

@classmethod
@@ -269,9 +269,9 @@ def create_resource_pool_type(self, payload: dict[str, Any]) -> ResourcePoolType
nvidia_gpu=payload.get("nvidia_gpu"),
amd_gpu=payload.get("amd_gpu"),
intel_gpu=payload.get("intel_gpu"),
- nvidia_model=payload.get("nvidia_model"),
- amd_model=payload.get("amd_model"),
- intel_model=payload.get("intel_model"),
+ nvidia_gpu_model=payload.get("nvidia_gpu_model"),
+ amd_gpu_model=payload.get("amd_gpu_model"),
+ intel_gpu_model=payload.get("intel_gpu_model"),
price=Decimal(payload.get("price", ResourcePoolType.price)),
currency=payload.get("currency"),
tpu=tpu,
@@ -301,9 +301,9 @@ def create_resource_preset(self, payload: dict[str, Any]) -> ResourcePreset:
nvidia_gpu=payload.get("nvidia_gpu"),
amd_gpu=payload.get("amd_gpu"),
intel_gpu=payload.get("intel_gpu"),
- nvidia_model=payload.get("nvidia_model"),
- amd_model=payload.get("amd_model"),
- intel_model=payload.get("intel_model"),
+ nvidia_gpu_model=payload.get("nvidia_gpu_model"),
+ amd_gpu_model=payload.get("amd_gpu_model"),
+ intel_gpu_model=payload.get("intel_gpu_model"),
tpu=tpu,
scheduler_enabled=payload.get("scheduler_enabled", False),
preemptible_node=payload.get("preemptible_node", False),
@@ -451,9 +451,9 @@ def create_node_pool(self, payload: dict[str, Any]) -> NodePool:
nvidia_gpu=payload.get("nvidia_gpu"),
amd_gpu=payload.get("amd_gpu"),
intel_gpu=payload.get("intel_gpu"),
- nvidia_model=payload.get("nvidia_model"),
- amd_model=payload.get("amd_model"),
- intel_model=payload.get("intel_model"),
+ nvidia_gpu_model=payload.get("nvidia_gpu_model"),
+ amd_gpu_model=payload.get("amd_gpu_model"),
+ intel_gpu_model=payload.get("intel_gpu_model"),
price=price,
currency=payload.get("currency", NodePool.currency),
machine_type=payload.get("machine_type"),
@@ -901,12 +901,12 @@ def create_resource_pool_type(
result["amd_gpu"] = resource_pool_type.amd_gpu
if resource_pool_type.intel_gpu:
result["intel_gpu"] = resource_pool_type.intel_gpu
- if resource_pool_type.nvidia_model:
- result["nvidia_model"] = resource_pool_type.nvidia_model
- if resource_pool_type.amd_model:
- result["amd_model"] = resource_pool_type.amd_model
- if resource_pool_type.intel_model:
- result["intel_model"] = resource_pool_type.intel_model
+ if resource_pool_type.nvidia_gpu_model:
+ result["nvidia_gpu_model"] = resource_pool_type.nvidia_gpu_model
+ if resource_pool_type.amd_gpu_model:
+ result["amd_gpu_model"] = resource_pool_type.amd_gpu_model
+ if resource_pool_type.intel_gpu_model:
+ result["intel_gpu_model"] = resource_pool_type.intel_gpu_model
if resource_pool_type.currency:
result["price"] = str(resource_pool_type.price)
result["currency"] = resource_pool_type.currency
@@ -1069,12 +1069,12 @@ def create_node_pool(cls, node_pool: NodePool) -> dict[str, Any]:
result["amd_gpu"] = node_pool.amd_gpu
if node_pool.intel_gpu:
result["intel_gpu"] = node_pool.intel_gpu
- if node_pool.nvidia_model:
- result["nvidia_model"] = node_pool.nvidia_model
- if node_pool.amd_model:
- result["amd_model"] = node_pool.amd_model
- if node_pool.intel_model:
- result["intel_model"] = node_pool.intel_model
+ if node_pool.nvidia_gpu_model:
+ result["nvidia_gpu_model"] = node_pool.nvidia_gpu_model
+ if node_pool.amd_gpu_model:
+ result["amd_gpu_model"] = node_pool.amd_gpu_model
+ if node_pool.intel_gpu_model:
+ result["intel_gpu_model"] = node_pool.intel_gpu_model
if node_pool.price:
result["price"] = str(node_pool.price)
if node_pool.currency:
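Because the factories read and emit these payload keys verbatim (payload.get("nvidia_gpu_model"), result["nvidia_gpu_model"], and so on), the rename also changes the wire format: a payload that still uses the old keys will parse with the model fields silently set to None. A sketch of a migration helper (hypothetical, not part of neuro_config_client) that renames the old keys before handing a payload to the factories:

```python
from typing import Any

# Mapping of the pre-rename payload keys to the names used after this commit.
_RENAMED_KEYS = {
    "nvidia_model": "nvidia_gpu_model",
    "amd_model": "amd_gpu_model",
    "intel_model": "intel_gpu_model",
}


def migrate_gpu_model_keys(payload: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *payload* with the old GPU model keys renamed."""
    return {_RENAMED_KEYS.get(key, key): value for key, value in payload.items()}


assert migrate_gpu_model_keys({"nvidia_gpu": 1, "nvidia_model": "nvidia-tesla-k80"}) == {
    "nvidia_gpu": 1,
    "nvidia_gpu_model": "nvidia-tesla-k80",
}
```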
66 changes: 33 additions & 33 deletions tests/test_factories.py
@@ -302,9 +302,9 @@ def test_create_resource_pool_type(
"nvidia_gpu": 1,
"amd_gpu": 2,
"intel_gpu": 3,
- "nvidia_model": nvidia_small_gpu,
- "amd_model": amd_small_gpu,
- "intel_model": intel_small_gpu,
+ "nvidia_gpu_model": nvidia_small_gpu,
+ "amd_gpu_model": amd_small_gpu,
+ "intel_gpu_model": intel_small_gpu,
"tpu": {
"ipv4_cidr_block": "10.0.0.0/8",
"types": ["tpu"],
@@ -330,9 +330,9 @@ def test_create_resource_pool_type(
nvidia_gpu=1,
amd_gpu=2,
intel_gpu=3,
- nvidia_model=nvidia_small_gpu,
- amd_model=amd_small_gpu,
- intel_model=intel_small_gpu,
+ nvidia_gpu_model=nvidia_small_gpu,
+ amd_gpu_model=amd_small_gpu,
+ intel_gpu_model=intel_small_gpu,
tpu=mock.ANY,
is_preemptible=True,
price=Decimal("1.0"),
@@ -389,9 +389,9 @@ def test_create_resource_preset_custom(
"nvidia_gpu": 1,
"amd_gpu": 2,
"intel_gpu": 3,
- "nvidia_model": nvidia_small_gpu,
- "amd_model": amd_small_gpu,
- "intel_model": intel_small_gpu,
+ "nvidia_gpu_model": nvidia_small_gpu,
+ "amd_gpu_model": amd_small_gpu,
+ "intel_gpu_model": intel_small_gpu,
"tpu": {"type": "tpu", "software_version": "v1"},
"scheduler_enabled": True,
"preemptible_node": True,
@@ -409,9 +409,9 @@
nvidia_gpu=1,
amd_gpu=2,
intel_gpu=3,
- nvidia_model=nvidia_small_gpu,
- amd_model=amd_small_gpu,
- intel_model=intel_small_gpu,
+ nvidia_gpu_model=nvidia_small_gpu,
+ amd_gpu_model=amd_small_gpu,
+ intel_gpu_model=intel_small_gpu,
tpu=TPUPreset(type="tpu", software_version="v1"),
scheduler_enabled=True,
preemptible_node=True,
@@ -591,7 +591,7 @@ def google_cloud_provider_response(self) -> dict[str, Any]:
"available_memory": 201 * 1024,
"disk_size": 700,
"nvidia_gpu": 1,
- "nvidia_model": "nvidia-tesla-k80",
+ "nvidia_gpu_model": "nvidia-tesla-k80",
"is_preemptible": True,
},
],
@@ -652,7 +652,7 @@ def google_cloud_provider(self) -> GoogleCloudProvider:
available_memory=201 * 1024,
disk_size=700,
nvidia_gpu=1,
- nvidia_model="nvidia-tesla-k80",
+ nvidia_gpu_model="nvidia-tesla-k80",
is_preemptible=True,
),
],
@@ -715,7 +715,7 @@ def aws_cloud_provider_response(self) -> dict[str, Any]:
"available_memory": 57 * 1024,
"disk_size": 700,
"nvidia_gpu": 1,
- "nvidia_model": "nvidia-tesla-k80",
+ "nvidia_gpu_model": "nvidia-tesla-k80",
"is_preemptible": True,
},
],
@@ -766,7 +766,7 @@ def aws_cloud_provider(self) -> AWSCloudProvider:
available_memory=57 * 1024,
disk_size=700,
nvidia_gpu=1,
- nvidia_model="nvidia-tesla-k80",
+ nvidia_gpu_model="nvidia-tesla-k80",
is_preemptible=True,
),
],
@@ -831,7 +831,7 @@ def azure_cloud_provider_response(self) -> dict[str, Any]:
"available_memory": 50 * 1024,
"disk_size": 700,
"nvidia_gpu": 1,
- "nvidia_model": "nvidia-tesla-k80",
+ "nvidia_gpu_model": "nvidia-tesla-k80",
"is_preemptible": True,
},
],
@@ -884,7 +884,7 @@ def azure_cloud_provider(self) -> AzureCloudProvider:
available_memory=50 * 1024,
disk_size=700,
nvidia_gpu=1,
- nvidia_model="nvidia-tesla-k80",
+ nvidia_gpu_model="nvidia-tesla-k80",
is_preemptible=True,
),
],
@@ -943,7 +943,7 @@ def on_prem_cloud_provider_response(self) -> dict[str, Any]:
"available_memory": 1024,
"disk_size": 700,
"nvidia_gpu": 1,
- "nvidia_model": "nvidia-tesla-k80",
+ "nvidia_gpu_model": "nvidia-tesla-k80",
"price": "0.9",
"currency": "USD",
"cpu_min_watts": 0.1,
@@ -981,7 +981,7 @@ def on_prem_cloud_provider(self) -> OnPremCloudProvider:
available_memory=1024,
disk_size=700,
nvidia_gpu=1,
- nvidia_model="nvidia-tesla-k80",
+ nvidia_gpu_model="nvidia-tesla-k80",
price=Decimal("0.9"),
currency="USD",
machine_type="gpu-machine-1xk80",
@@ -1044,7 +1044,7 @@ def vcd_cloud_provider_response(self) -> dict[str, Any]:
"available_memory": 37 * 1024,
"disk_size": 700,
"nvidia_gpu": 1,
- "nvidia_model": "nvidia-tesla-k80",
+ "nvidia_gpu_model": "nvidia-tesla-k80",
"price": "0.9",
"currency": "USD",
"cpu_min_watts": 0.1,
@@ -1101,7 +1101,7 @@ def vcd_cloud_provider(self) -> VCDCloudProvider:
available_memory=37 * 1024,
disk_size=700,
nvidia_gpu=1,
- nvidia_model="nvidia-tesla-k80",
+ nvidia_gpu_model="nvidia-tesla-k80",
price=Decimal("0.9"),
currency="USD",
cpu_min_watts=0.1,
@@ -1274,7 +1274,7 @@ def node_pool_options_response(self) -> dict[str, Any]:
"memory": 458752,
"available_memory": 452608,
"nvidia_gpu": 4,
- "nvidia_model": "nvidia-tesla-p40",
+ "nvidia_gpu_model": "nvidia-tesla-p40",
"extra_info": "will be ignored",
}

@@ -1288,7 +1288,7 @@ def node_pool_options(self) -> NodePoolOptions:
memory=458752,
available_memory=452608,
nvidia_gpu=4,
- nvidia_model="nvidia-tesla-p40",
+ nvidia_gpu_model="nvidia-tesla-p40",
)

def test_aws_cloud_provider_options(
@@ -1618,9 +1618,9 @@ def test_create_resource_pool_type(
nvidia_gpu=1,
amd_gpu=2,
intel_gpu=3,
- nvidia_model=nvidia_small_gpu,
- amd_model=amd_small_gpu,
- intel_model=intel_small_gpu,
+ nvidia_gpu_model=nvidia_small_gpu,
+ amd_gpu_model=amd_small_gpu,
+ intel_gpu_model=intel_small_gpu,
tpu=TPUResource(
ipv4_cidr_block="10.0.0.0/8",
types=["tpu"],
@@ -1647,9 +1647,9 @@
"nvidia_gpu": 1,
"amd_gpu": 2,
"intel_gpu": 3,
- "nvidia_model": nvidia_small_gpu,
- "amd_model": amd_small_gpu,
- "intel_model": intel_small_gpu,
+ "nvidia_gpu_model": nvidia_small_gpu,
+ "amd_gpu_model": amd_small_gpu,
+ "intel_gpu_model": intel_small_gpu,
"tpu": {
"ipv4_cidr_block": "10.0.0.0/8",
"types": ["tpu"],
@@ -2008,7 +2008,7 @@ def node_pool(self) -> NodePool:
disk_size=100500,
disk_type="some-disk-type",
nvidia_gpu=1,
- nvidia_model="some-gpu-model",
+ nvidia_gpu_model="some-gpu-model",
price=Decimal(180),
currency="rabbits",
is_preemptible=True,
@@ -2035,7 +2035,7 @@ def test_node_pool(self, factory: PayloadFactory, node_pool: NodePool) -> None:
"disk_size": 100500,
"disk_type": "some-disk-type",
"nvidia_gpu": 1,
- "nvidia_model": "some-gpu-model",
+ "nvidia_gpu_model": "some-gpu-model",
"zones": ("here", "there"),
"price": "180",
"currency": "rabbits",
@@ -2065,7 +2065,7 @@ def test_node_pool(self, factory: PayloadFactory, node_pool: NodePool) -> None:
"disk_size": 100500,
"disk_type": "some-disk-type",
"nvidia_gpu": 1,
- "nvidia_model": "some-gpu-model",
+ "nvidia_gpu_model": "some-gpu-model",
"price": "180",
"currency": "rabbits",
"cpu_min_watts": 0.01,

