Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement model ready OIP implementation #2950

Draft
wants to merge 5 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,11 @@
import org.pytorch.serve.archive.DownloadArchiveException;
import org.pytorch.serve.archive.model.ModelException;
import org.pytorch.serve.archive.workflow.WorkflowException;
import org.pytorch.serve.grpc.openinference.OpenInferenceGrpc.ModelMetadataResponse;
import org.pytorch.serve.http.HttpRequestHandlerChain;
import org.pytorch.serve.util.ConfigManager;
import org.pytorch.serve.util.NettyUtils;
import org.pytorch.serve.wlm.ModelManager;
import org.pytorch.serve.wlm.WorkerInitializationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand All @@ -29,6 +31,8 @@ public class OpenInferenceProtocolRequestHandler extends HttpRequestHandlerChain
private static final String SERVER_METADATA_API = "/v2";
private static final String SERVER_LIVE_API = "/v2/health/live";
private static final String SERVER_READY_API = "/v2/health/ready";
private static final String MODEL_READY_ENDPOINT_PATTERN = "^/v2/models/([^/]+)(?:/versions/([^/]+))?/ready$";
private static final String MODEL_METADATA_ENDPOINT_PATTERN = "^/v2/models/([^/]+)(?:/versions/([^/]+))?";

/** Creates a new {@code OpenInferenceProtocolRequestHandler} instance. */
public OpenInferenceProtocolRequestHandler() {}
Expand Down Expand Up @@ -65,6 +69,35 @@ public void handleRequest(
supportedExtensions.add("kubeflow");
response.add("extenstion", supportedExtensions);
NettyUtils.sendJsonResponse(ctx, response);
} else if (concatenatedSegments.matches(MODEL_READY_ENDPOINT_PATTERN)) {
String modelName = segments[3];
String modelVersion = null;
if (segments.length > 5) {
modelVersion = segments[5];
}

ModelManager modelManager = ModelManager.getInstance();

boolean isModelReady = modelManager.isModelReady(modelName, modelVersion);

JsonObject response = new JsonObject();
response.addProperty("name", modelName);
response.addProperty("ready", isModelReady);
NettyUtils.sendJsonResponse(ctx, response);

} else if (concatenatedSegments.matches(MODEL_METADATA_ENDPOINT_PATTERN)) {
String modelName = segments[3];
String modelVersion = null;
if (segments.length > 5) {
modelVersion = segments[5];
}

ModelManager modelManager = ModelManager.getInstance();

ModelMetadataResponse.Builder response = modelManager.modelMetadata(modelName, modelVersion);

NettyUtils.sendJsonResponse(ctx, response.build());

} else if (segments.length > 5 && concatenatedSegments.contains("/versions")) {
// As of now, KServe has not implemented versioning, so we just return Not Implemented.
JsonObject response = new JsonObject();
Expand Down
151 changes: 100 additions & 51 deletions frontend/server/src/main/java/org/pytorch/serve/wlm/ModelManager.java
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@
import org.pytorch.serve.archive.model.ModelException;
import org.pytorch.serve.archive.model.ModelNotFoundException;
import org.pytorch.serve.archive.model.ModelVersionNotFoundException;
import org.pytorch.serve.grpc.openinference.OpenInferenceGrpc.ModelMetadataResponse;
import org.pytorch.serve.grpc.openinference.OpenInferenceGrpc.ModelMetadataResponse.TensorMetadata;
import org.pytorch.serve.http.ConflictStatusException;
import org.pytorch.serve.http.InvalidModelVersionException;
import org.pytorch.serve.http.messages.RegisterModelRequest;
Expand Down Expand Up @@ -87,7 +89,7 @@ public ModelArchive registerModel(String url, String defaultModelName)

public void registerAndUpdateModel(String modelName, JsonObject modelInfo)
throws ModelException, IOException, InterruptedException, DownloadArchiveException,
WorkerInitializationException {
WorkerInitializationException {

boolean defaultVersion = modelInfo.get(Model.DEFAULT_VERSION).getAsBoolean();
String url = modelInfo.get(Model.MAR_NAME).getAsString();
Expand Down Expand Up @@ -127,7 +129,7 @@ public ModelArchive registerModel(
throws ModelException, IOException, InterruptedException, DownloadArchiveException {

ModelArchive archive;
if (isWorkflowModel && url == null) { // This is a workflow function
if (isWorkflowModel && url == null) { // This is a workflow function
Manifest manifest = new Manifest();
manifest.getModel().setVersion("1.0");
manifest.getModel().setModelVersion("1.0");
Expand All @@ -138,12 +140,12 @@ public ModelArchive registerModel(
archive = new ModelArchive(manifest, url, f.getParentFile(), true);
} else {
archive =
createModelArchive(
modelName, url, handler, runtime, defaultModelName, s3SseKms);
createModelArchive(
modelName, url, handler, runtime, defaultModelName, s3SseKms);
}

Model tempModel =
createModel(archive, batchSize, maxBatchDelay, responseTimeout, isWorkflowModel);
createModel(archive, batchSize, maxBatchDelay, responseTimeout, isWorkflowModel);

String versionId = archive.getModelVersion();

Expand Down Expand Up @@ -174,11 +176,11 @@ private ModelArchive createModelArchive(
throws ModelException, IOException, DownloadArchiveException {

ModelArchive archive =
ModelArchive.downloadModel(
configManager.getAllowedUrls(),
configManager.getModelStore(),
url,
s3SseKms);
ModelArchive.downloadModel(
configManager.getAllowedUrls(),
configManager.getModelStore(),
url,
s3SseKms);
Manifest.Model model = archive.getManifest().getModel();
if (modelName == null || modelName.isEmpty()) {
if (archive.getModelName() == null || archive.getModelName().isEmpty()) {
Expand Down Expand Up @@ -237,10 +239,10 @@ private void setupModelVenv(Model model)
}
Map<String, String> environment = processBuilder.environment();
String[] envp =
EnvironmentUtils.getEnvString(
configManager.getModelServerHome(),
model.getModelDir().getAbsolutePath(),
null);
EnvironmentUtils.getEnvString(
configManager.getModelServerHome(),
model.getModelDir().getAbsolutePath(),
null);
for (String envVar : envp) {
String[] parts = envVar.split("=", 2);
if (parts.length == 2) {
Expand Down Expand Up @@ -278,15 +280,15 @@ private void setupModelVenv(Model model)
private void setupModelDependencies(Model model)
throws IOException, InterruptedException, ModelException {
String requirementsFile =
model.getModelArchive().getManifest().getModel().getRequirementsFile();
model.getModelArchive().getManifest().getModel().getRequirementsFile();

if (!configManager.getInstallPyDepPerModel() || requirementsFile == null) {
return;
}

String pythonRuntime = EnvironmentUtils.getPythonRunTime(model);
Path requirementsFilePath =
Paths.get(model.getModelDir().getAbsolutePath(), requirementsFile).toAbsolutePath();
Paths.get(model.getModelDir().getAbsolutePath(), requirementsFile).toAbsolutePath();
List<String> commandParts = new ArrayList<>();
ProcessBuilder processBuilder = new ProcessBuilder();

Expand Down Expand Up @@ -336,10 +338,10 @@ private void setupModelDependencies(Model model)

processBuilder.command(commandParts);
String[] envp =
EnvironmentUtils.getEnvString(
configManager.getModelServerHome(),
model.getModelDir().getAbsolutePath(),
null);
EnvironmentUtils.getEnvString(
configManager.getModelServerHome(),
model.getModelDir().getAbsolutePath(),
null);
Map<String, String> environment = processBuilder.environment();
for (String envVar : envp) {
String[] parts = envVar.split("=", 2);
Expand Down Expand Up @@ -393,20 +395,20 @@ private Model createModel(
if (archive.getModelConfig() != null) {
int marBatchSize = archive.getModelConfig().getBatchSize();
batchSize =
marBatchSize > 0
? marBatchSize
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.BATCH_SIZE,
RegisterModelRequest.DEFAULT_BATCH_SIZE);
} else {
batchSize =
configManager.getJsonIntValue(
marBatchSize > 0
? marBatchSize
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.BATCH_SIZE,
RegisterModelRequest.DEFAULT_BATCH_SIZE);
} else {
batchSize =
configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.BATCH_SIZE,
RegisterModelRequest.DEFAULT_BATCH_SIZE);
}
}
model.setBatchSize(batchSize);
Expand All @@ -415,41 +417,41 @@ private Model createModel(
if (archive.getModelConfig() != null) {
int marMaxBatchDelay = archive.getModelConfig().getMaxBatchDelay();
maxBatchDelay =
marMaxBatchDelay > 0
? marMaxBatchDelay
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.MAX_BATCH_DELAY,
RegisterModelRequest.DEFAULT_MAX_BATCH_DELAY);
} else {
maxBatchDelay =
configManager.getJsonIntValue(
marMaxBatchDelay > 0
? marMaxBatchDelay
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.MAX_BATCH_DELAY,
RegisterModelRequest.DEFAULT_MAX_BATCH_DELAY);
} else {
maxBatchDelay =
configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.MAX_BATCH_DELAY,
RegisterModelRequest.DEFAULT_MAX_BATCH_DELAY);
}
}
model.setMaxBatchDelay(maxBatchDelay);

if (archive.getModelConfig() != null) {
int marResponseTimeout = archive.getModelConfig().getResponseTimeout();
responseTimeout =
marResponseTimeout > 0
? marResponseTimeout
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.RESPONSE_TIMEOUT,
responseTimeout);
} else {
responseTimeout =
configManager.getJsonIntValue(
marResponseTimeout > 0
? marResponseTimeout
: configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.RESPONSE_TIMEOUT,
responseTimeout);
} else {
responseTimeout =
configManager.getJsonIntValue(
archive.getModelName(),
archive.getModelVersion(),
Model.RESPONSE_TIMEOUT,
responseTimeout);
}
model.setResponseTimeout(responseTimeout);
model.setWorkflowModel(isWorkflowModel);
Expand Down Expand Up @@ -680,6 +682,53 @@ public boolean scaleRequestStatus(String modelName, String versionId) {
return model == null || model.getMinWorkers() <= numWorkers;
}

public boolean isModelReady(String modelName, String modelVersion)
throws ModelVersionNotFoundException, ModelNotFoundException {

if (modelVersion == null || "".equals(modelVersion)) {
modelVersion = null;
}

Model model = getModel(modelName, modelVersion);
if (model == null) {
throw new ModelNotFoundException("Model not found: " + modelName);
}

int numScaled = model.getMinWorkers();
int numHealthy = modelManager.getNumHealthyWorkers(model.getModelVersionName());

return numHealthy >= numScaled;
}

public ModelMetadataResponse.Builder modelMetadata(String modelName, String modelVersion)
throws ModelVersionNotFoundException, ModelNotFoundException {

ModelManager modelManager = ModelManager.getInstance();
ModelMetadataResponse.Builder response = ModelMetadataResponse.newBuilder();
List<TensorMetadata> inputs = new ArrayList<>();
List<TensorMetadata> outputs = new ArrayList<>();
List<String> versions = new ArrayList<>();

if (modelVersion == null || "".equals(modelVersion)) {
modelVersion = null;
}

Model model = modelManager.getModel(modelName, modelVersion);
if (model == null) {
throw new ModelNotFoundException("Model not found: " + modelName);
}
modelManager.getAllModelVersions(modelName).forEach(entry -> versions.add(entry.getKey()));
response.setName(modelName);
response.addAllVersions(versions);
response.setPlatform("");
response.addAllInputs(inputs);
response.addAllOutputs(outputs);

return response;
}

// return numHealthy >= numScaled;

    /**
     * Schedules the given task for asynchronous execution on the WorkLoadManager's executor.
     *
     * @param runnable task to run asynchronously
     */
    public void submitTask(Runnable runnable) {
        wlm.scheduleAsync(runnable);
    }
Expand Down Expand Up @@ -722,4 +771,4 @@ public int getNumRunningWorkers(ModelVersionName modelVersionName) {
    /**
     * Returns the number of healthy workers for the given model version, as reported by the
     * WorkLoadManager.
     *
     * @param modelVersionName model name/version pair to query
     * @return count of healthy workers
     */
    public int getNumHealthyWorkers(ModelVersionName modelVersionName) {
        return wlm.getNumHealthyWorkers(modelVersionName);
    }
}
}
12 changes: 12 additions & 0 deletions kubernetes/kserve/tests/scripts/test_mnist.sh
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,18 @@ URL="http://${INGRESS_HOST}:${INGRESS_PORT}/v2/health/live"
EXPECTED_OUTPUT='{"live":true}'
make_cluster_accessible ${SERVICE_NAME} ${URL} "" ${EXPECTED_OUTPUT}

# ModelReady: OIP v2 per-model readiness endpoint should report the model by name.
echo "HTTP ModelReady method call"
URL="http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/${MODEL_NAME}/ready"
EXPECTED_OUTPUT='{"name":"mnist","ready":true}'
make_cluster_accessible ${SERVICE_NAME} ${URL} "" ${EXPECTED_OUTPUT}

# ModelMetadata: OIP v2 per-model metadata endpoint.
# NOTE(review): this expected body contains protobuf-internal fields (name_, versions_,
# memoizedIsInitialized, unknownFields, ...) — it looks like the server serializes the
# ModelMetadataResponse message with a generic JSON serializer instead of the protobuf
# JSON printer. Confirm this is the intended wire format before relying on it.
echo "HTTP ModelMetadata method call"
URL="http://${INGRESS_HOST}:${INGRESS_PORT}/v2/models/${MODEL_NAME}"
EXPECTED_OUTPUT='{"name_":"mnist","versions_":["1.0"],"platform_":"","inputs_":[],"outputs_":[],"memoizedIsInitialized":1,"unknownFields":{"fields":{}},"memoizedSize":-1,"memoizedHashCode":0}'
make_cluster_accessible ${SERVICE_NAME} ${URL} "" ${EXPECTED_OUTPUT}

# delete oip http isvc
kubectl delete inferenceservice ${SERVICE_NAME}

Expand Down