From e537f9e6e978d5843fefdc75f7c46e9cee0756da Mon Sep 17 00:00:00 2001
From: Yaliang Wu
Date: Wed, 25 Jan 2023 11:48:45 -0800
Subject: [PATCH] fix profile API in example doc

Signed-off-by: Yaliang Wu
---
 .../text_embedding_model_examples.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/model_serving_framework/text_embedding_model_examples.md b/docs/model_serving_framework/text_embedding_model_examples.md
index 102fcfc4c3..a9952c8974 100644
--- a/docs/model_serving_framework/text_embedding_model_examples.md
+++ b/docs/model_serving_framework/text_embedding_model_examples.md
@@ -178,20 +178,20 @@ By default, it will monitor last 100 predict requests. You can tune this setting
 ```
 # Sample request
-POST /_plugins/_ml/profile/models/yQlW5YUB1qmVrJFlPDXc
+GET /_plugins/_ml/profile/models/zwla5YUB1qmVrJFlwzXJ
 
 # Sample response
 {
     "nodes": {
-        "0TLL4hHxRv6_G3n6y1l0BQ": {
+        "0TLL4hHxRv6_G3n6y1l0BQ": { # node id
             "models": {
-                "yQlW5YUB1qmVrJFlPDXc": {
+                "zwla5YUB1qmVrJFlwzXJ": { # model id
                     "model_state": "LOADED",
                     "predictor": "org.opensearch.ml.engine.algorithms.text_embedding.TextEmbeddingModel@1a0b0793",
-                    "target_worker_nodes": [
+                    "target_worker_nodes": [ # plan to deploy model to these nodes
                         "0TLL4hHxRv6_G3n6y1l0BQ"
                     ],
-                    "worker_nodes": [
+                    "worker_nodes": [ # model deployed to these nodes
                         "0TLL4hHxRv6_G3n6y1l0BQ"
                     ],
                     "model_inference_stats": { // in Millisecond, time used in model part
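
Note (not part of the patch hunk above): the hunk context says the profile API monitors the last 100 predict requests by default and that this is tunable. A minimal sketch of adjusting that value, assuming the `plugins.ml_commons.monitoring_request_count` cluster setting, might look like:

```
# Sketch only; the setting name is an assumption and is not shown in this patch
PUT /_cluster/settings
{
    "persistent": {
        "plugins.ml_commons.monitoring_request_count": 100
    }
}
```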