diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
index 8d9da97538e11..3ebb11e1732d0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
@@ -714,26 +714,7 @@ public static long estimateMemoryUsageBytes(
             int numberOfAllocations
         ) {
             // While loading the model in the process we need twice the model size.
-
-            // 1. If ELSER v1 or v2 then 2004MB
-            // 2. If static memory and dynamic memory are not set then 240MB + 2 * model size
-            // 3. Else static memory + dynamic memory * allocations + model size
-
-            // The model size is still added in option 3 to account for the temporary requirement to hold the zip file in memory
-            // in `pytorch_inference`.
-            if (isElserV1Or2Model(modelId)) {
-                return ELSER_1_OR_2_MEMORY_USAGE.getBytes();
-            } else {
-                long baseSize = MEMORY_OVERHEAD.getBytes() + 2 * totalDefinitionLength;
-                if (perDeploymentMemoryBytes == 0 && perAllocationMemoryBytes == 0) {
-                    return baseSize;
-                } else {
-                    return Math.max(
-                        baseSize,
-                        perDeploymentMemoryBytes + perAllocationMemoryBytes * numberOfAllocations + totalDefinitionLength
-                    );
-                }
-            }
+            return isElserV1Or2Model(modelId) ? ELSER_1_OR_2_MEMORY_USAGE.getBytes() : MEMORY_OVERHEAD.getBytes() + 2 * totalDefinitionLength;
         }
 
         private static boolean isElserV1Or2Model(String modelId) {
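
For reference, below is a minimal standalone Java sketch of the simplified estimate this hunk reintroduces: a fixed 2004MB for ELSER v1/v2, otherwise 240MB of overhead plus twice the model definition size. The two constant values come from the comments removed in this hunk; the class, method names, and example model size here are purely illustrative and are not part of the production code.

// Standalone illustration only; not part of StartTrainedModelDeploymentAction.
public final class MemoryEstimateSketch {

    private static final long MB = 1024L * 1024L;
    // Values taken from the removed comments: "If ELSER v1 or v2 then 2004MB" and "240MB + 2 * model size".
    private static final long ELSER_1_OR_2_MEMORY_USAGE_BYTES = 2004 * MB;
    private static final long MEMORY_OVERHEAD_BYTES = 240 * MB;

    static long estimate(boolean isElserV1Or2, long totalDefinitionLength) {
        // Twice the model size is needed while the model is loaded in the process.
        return isElserV1Or2 ? ELSER_1_OR_2_MEMORY_USAGE_BYTES : MEMORY_OVERHEAD_BYTES + 2 * totalDefinitionLength;
    }

    public static void main(String[] args) {
        long modelSize = 500 * MB; // hypothetical model definition size
        System.out.println("non-ELSER estimate: " + estimate(false, modelSize) / MB + " MB"); // 240 + 2 * 500 = 1240 MB
        System.out.println("ELSER estimate: " + estimate(true, modelSize) / MB + " MB");      // fixed 2004 MB
    }
}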