From 0e58640331aae40b39917c7c631f075f21e1eb10 Mon Sep 17 00:00:00 2001 From: Finn Carroll Date: Mon, 16 Sep 2024 23:58:01 +0000 Subject: [PATCH] Partial merge from vachaPOC Signed-off-by: Finn Carroll --- .../common/io/stream/ProtobufWriteable.java | 70 + ...ProtobufActionListenerResponseHandler.java | 77 + .../action/ProtobufActionRequest.java | 47 + .../action/ProtobufActionRequestBuilder.java | 66 + .../action/ProtobufActionResponse.java | 27 + .../opensearch/action/ProtobufActionType.java | 64 + .../cluster/node/info/ProtobufNodeInfo.java | 252 +++ .../node/info/ProtobufNodesInfoAction.java | 26 + .../node/info/ProtobufNodesInfoRequest.java | 123 ++ .../node/info/ProtobufNodesInfoResponse.java | 127 ++ .../ProtobufTransportNodesInfoAction.java | 128 ++ .../cluster/node/stats/ProtobufNodeStats.java | 245 +++ .../node/stats/ProtobufNodesStatsAction.java | 26 + .../node/stats/ProtobufNodesStatsRequest.java | 206 +++ .../stats/ProtobufNodesStatsResponse.java | 96 ++ .../ProtobufTransportNodesStatsAction.java | 136 ++ .../state/ProtobufClusterStateAction.java | 26 + .../state/ProtobufClusterStateRequest.java | 225 +++ .../state/ProtobufClusterStateResponse.java | 109 ++ .../ProtobufTransportClusterStateAction.java | 151 ++ .../action/main/ProtobufMainAction.java | 26 + .../action/main/ProtobufMainRequest.java | 33 + .../action/main/ProtobufMainResponse.java | 173 +++ .../main/ProtobufTransportMainAction.java | 55 + .../ProtobufAbstractSearchAsyncAction.java | 835 ++++++++++ .../ProtobufCanMatchPreFilterSearchPhase.java | 241 +++ .../search/ProtobufExpandSearchPhase.java | 38 + .../search/ProtobufFetchSearchPhase.java | 277 ++++ .../action/search/ProtobufSearchAction.java | 28 + ...otobufSearchQueryThenFetchAsyncAction.java | 160 ++ .../action/search/ProtobufSearchRequest.java | 766 ++++++++++ .../action/search/ProtobufSearchResponse.java | 575 +++++++ .../search/ProtobufSearchShardTask.java | 62 + .../action/search/ProtobufSearchTask.java | 83 + .../search/ProtobufTransportSearchAction.java | 1355 +++++++++++++++++ .../action/support/ProtobufActionFilter.java | 65 + .../support/ProtobufActionFilterChain.java | 28 + .../action/support/ProtobufActionFilters.java | 40 + .../ProtobufChannelActionListener.java | 48 + .../ProtobufHandledTransportAction.java | 89 ++ .../support/ProtobufTransportAction.java | 241 +++ ...ProtobufClusterManagerNodeReadRequest.java | 49 + .../ProtobufClusterManagerNodeRequest.java | 84 + ...obufTransportClusterManagerNodeAction.java | 355 +++++ ...TransportClusterManagerNodeReadAction.java | 67 + .../nodes/ProtobufBaseNodeResponse.java | 39 + .../nodes/ProtobufBaseNodesRequest.java | 95 ++ .../nodes/ProtobufBaseNodesResponse.java | 100 ++ .../nodes/ProtobufTransportNodesAction.java | 314 ++++ .../client/ProtobufAdminClient.java | 24 + .../org/opensearch/client/ProtobufClient.java | 51 + .../client/ProtobufClusterAdminClient.java | 82 + .../client/ProtobufFilterClient.java | 72 + .../client/ProtobufOpenSearchClient.java | 59 + .../client/ProtobufOriginSettingClient.java | 51 + .../client/node/ProtobufNodeClient.java | 134 ++ .../support/ProtobufAbstractClient.java | 172 +++ .../common/io/stream/BaseWriteable.java | 53 + .../common/io/stream/TryWriteable.java | 70 + .../opensearch/node/ProtobufNodeService.java | 237 +++ .../plugins/ProtobufActionPlugin.java | 205 +++ .../rest/ProtobufBaseRestHandler.java | 310 ++++ .../opensearch/rest/ProtobufRestHandler.java | 259 ++++ .../ProtobufRestCancellableNodeClient.java | 178 +++ .../action/cat/ProtobufAbstractCatAction.java | 
78 + .../action/cat/ProtobufRestCatAction.java | 58 + .../action/cat/ProtobufRestNodesAction.java | 522 +++++++ .../search/ProtobufRestSearchAction.java | 389 +++++ .../fetch/ProtobufShardFetchRequest.java | 139 ++ .../ProtobufShardFetchSearchRequest.java | 103 ++ .../internal/ProtobufShardSearchRequest.java | 673 ++++++++ .../query/ProtobufQuerySearchRequest.java | 116 ++ .../tasks/ProtobufCancellableTask.java | 99 ++ .../org/opensearch/tasks/ProtobufTask.java | 456 ++++++ .../tasks/ProtobufTaskAwareRequest.java | 57 + .../ProtobufTaskCancellationService.java | 237 +++ .../org/opensearch/tasks/ProtobufTaskId.java | 83 + .../opensearch/tasks/ProtobufTaskInfo.java | 205 +++ .../tasks/ProtobufTaskListener.java | 40 + .../tasks/ProtobufTaskResourceStats.java | 66 + .../ProtobufTaskResourceTrackingService.java | 271 ++++ .../opensearch/tasks/ProtobufTaskResult.java | 158 ++ ...ProtobufEmptyTransportResponseHandler.java | 58 + .../transport/ProtobufOutboundMessage.java | 398 +++++ .../ProtobufRequestHandlerRegistry.java | 120 ++ .../ProtobufTransportRequestHandler.java | 21 + .../server/ClusterStateRequestProto.proto | 26 + .../server/ClusterStateResponseProto.proto | 59 + .../proto/server/FetchSearchResultProto.proto | 22 + .../src/main/proto/server/MessageProto.proto | 57 + .../main/proto/server/NodesInfoProto.proto | 25 + .../proto/server/NodesInfoRequestProto.proto | 20 + .../proto/server/NodesInfoResponseProto.proto | 22 + .../main/proto/server/NodesStatsProto.proto | 147 ++ .../proto/server/NodesStatsRequestProto.proto | 20 + .../server/NodesStatsResponseProto.proto | 22 + .../server/QueryFetchSearchResultProto.proto | 23 + .../proto/server/QuerySearchResultProto.proto | 78 + .../server/ShardSearchRequestProto.proto | 71 + server/src/main/proto/tasks/TaskIdProto.proto | 20 + .../proto/tasks/TaskResourceStatsProto.proto | 23 + 101 files changed, 15487 insertions(+) create mode 100644 libs/core/src/main/java/org/opensearch/core/common/io/stream/ProtobufWriteable.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionRequest.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionResponse.java create mode 100644 server/src/main/java/org/opensearch/action/ProtobufActionType.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java create mode 100644 
server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java create mode 100644 server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufAbstractSearchAsyncAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufCanMatchPreFilterSearchPhase.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufExpandSearchPhase.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufFetchSearchPhase.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchQueryThenFetchAsyncAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchShardTask.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufSearchTask.java create mode 100644 server/src/main/java/org/opensearch/action/search/ProtobufTransportSearchAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilter.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilterChain.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufActionFilters.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufChannelActionListener.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java create mode 100644 
server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufAdminClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufFilterClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java create mode 100644 server/src/main/java/org/opensearch/client/ProtobufOriginSettingClient.java create mode 100644 server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java create mode 100644 server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java create mode 100644 server/src/main/java/org/opensearch/common/io/stream/TryWriteable.java create mode 100644 server/src/main/java/org/opensearch/node/ProtobufNodeService.java create mode 100644 server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java create mode 100644 server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java create mode 100644 server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java create mode 100644 server/src/main/java/org/opensearch/rest/action/ProtobufRestCancellableNodeClient.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/search/ProtobufRestSearchAction.java create mode 100644 server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchRequest.java create mode 100644 server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchSearchRequest.java create mode 100644 server/src/main/java/org/opensearch/search/internal/ProtobufShardSearchRequest.java create mode 100644 server/src/main/java/org/opensearch/search/query/ProtobufQuerySearchRequest.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTask.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java create mode 100644 server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java create mode 100644 server/src/main/java/org/opensearch/transport/ProtobufRequestHandlerRegistry.java create mode 100644 
server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java create mode 100644 server/src/main/proto/server/ClusterStateRequestProto.proto create mode 100644 server/src/main/proto/server/ClusterStateResponseProto.proto create mode 100644 server/src/main/proto/server/FetchSearchResultProto.proto create mode 100644 server/src/main/proto/server/MessageProto.proto create mode 100644 server/src/main/proto/server/NodesInfoProto.proto create mode 100644 server/src/main/proto/server/NodesInfoRequestProto.proto create mode 100644 server/src/main/proto/server/NodesInfoResponseProto.proto create mode 100644 server/src/main/proto/server/NodesStatsProto.proto create mode 100644 server/src/main/proto/server/NodesStatsRequestProto.proto create mode 100644 server/src/main/proto/server/NodesStatsResponseProto.proto create mode 100644 server/src/main/proto/server/QueryFetchSearchResultProto.proto create mode 100644 server/src/main/proto/server/QuerySearchResultProto.proto create mode 100644 server/src/main/proto/server/ShardSearchRequestProto.proto create mode 100644 server/src/main/proto/tasks/TaskIdProto.proto create mode 100644 server/src/main/proto/tasks/TaskResourceStatsProto.proto diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/ProtobufWriteable.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/ProtobufWriteable.java new file mode 100644 index 0000000000000..e65b7731a2d7e --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/ProtobufWriteable.java @@ -0,0 +1,70 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.core.common.io.stream; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Implementers can be written to write to output and read from input using Protobuf. +* +* @opensearch.internal +*/ +public interface ProtobufWriteable { + + /** + * Write this into the stream output. + */ + public void writeTo(OutputStream out) throws IOException; + + /** + * Reference to a method that can write some object to a {@link OutputStream}. + * Most classes should implement {@link ProtobufWriteable} and the {@link ProtobufWriteable#writeTo(OutputStream)} method should use + * {@link OutputStream} methods directly or this indirectly: + *
+     * <pre><code>
+     * public void writeTo(OutputStream out) throws IOException {
+     *     out.writeVInt(someValue);
+     * }
+     * </code></pre>
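Note that OutputStream has no writeVInt, so the snippet above is purely illustrative; the concrete classes in this patch delegate to a generated protobuf message instead. A minimal sketch of that pattern, with MessageLite standing in for any generated type (the field name here is hypothetical):

```java
import com.google.protobuf.MessageLite;

import java.io.IOException;
import java.io.OutputStream;

// Sketch only: "message" stands in for any generated protobuf type, mirroring
// how ProtobufNodeInfo#writeTo works later in this patch.
final class DelegatingWriteableSketch {
    private final MessageLite message;

    DelegatingWriteableSketch(MessageLite message) {
        this.message = message;
    }

    public void writeTo(OutputStream out) throws IOException {
        // Serialize the generated protobuf message directly to the stream.
        out.write(message.toByteArray());
    }
}
```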
+ */ + @FunctionalInterface + interface Writer { + + /** + * Write {@code V}-type {@code value} to the {@code out}put stream. + * + * @param out Output to write the {@code value} too + * @param value The value to add + */ + void write(OutputStream out, V value) throws IOException; + + } + + /** + * Reference to a method that can read some object from a stream. By convention this is a constructor that takes + * {@linkplain byte[]} as an argument for most classes and a static method for things like enums. + *
+     * <pre><code>
+     * public MyClass(final byte[] in) throws IOException {
+     *     this.someValue = in.readVInt();
+     * }
+     * </code></pre>
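Similarly, a raw byte[] has no readVInt; in practice the byte[] constructor parses a serialized payload. A minimal, hypothetical implementer showing how such a constructor doubles as a Reader (this assumes Writer and Reader keep the <V> type parameter that the {@code V} references above imply):

```java
import org.opensearch.core.common.io.stream.ProtobufWriteable;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

// Hypothetical implementer, not part of this patch: the byte[] constructor
// doubles as the Reader, following the convention described above.
final class GreetingMessage implements ProtobufWriteable {
    private final String greeting;

    GreetingMessage(String greeting) {
        this.greeting = greeting;
    }

    // Reader-style constructor: rebuilds the object from raw bytes.
    GreetingMessage(byte[] in) throws IOException {
        this.greeting = new String(in, StandardCharsets.UTF_8);
    }

    @Override
    public void writeTo(OutputStream out) throws IOException {
        out.write(greeting.getBytes(StandardCharsets.UTF_8));
    }

    static GreetingMessage roundTrip(GreetingMessage original) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        original.writeTo(out);
        // A method reference to the byte[] constructor satisfies Reader.
        ProtobufWriteable.Reader<GreetingMessage> reader = GreetingMessage::new;
        return reader.read(out.toByteArray());
    }
}
```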
+ */ + @FunctionalInterface + interface Reader { + + /** + * Read {@code V}-type value from a stream. + * + * @param in Input to read the value from + */ + V read(byte[] in) throws IOException; + + } + +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java new file mode 100644 index 0000000000000..0e7996dc6ab32 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionListenerResponseHandler.java @@ -0,0 +1,77 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportException; +import org.opensearch.core.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * A simple base class for action response listeners, defaulting to using the SAME executor (as its +* very common on response handlers). +* +* @opensearch.api +*/ +public class ProtobufActionListenerResponseHandler implements TransportResponseHandler { + + private final ActionListener listener; + private final ProtobufWriteable.Reader reader; + private final String executor; + + public ProtobufActionListenerResponseHandler( + ActionListener listener, + ProtobufWriteable.Reader reader, + String executor + ) { + this.listener = Objects.requireNonNull(listener); + this.reader = Objects.requireNonNull(reader); + this.executor = Objects.requireNonNull(executor); + } + + public ProtobufActionListenerResponseHandler(ActionListener listener, ProtobufWriteable.Reader reader) { + this(listener, reader, ThreadPool.Names.SAME); + } + + @Override + public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException e) { + listener.onFailure(e); + } + + @Override + public String executor() { + return executor; + } + + @Override + public String toString() { + return super.toString() + "/" + listener; + } + + @Override + public Response read(StreamInput in) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'read'"); + } + + @Override + public Response read(byte[] in) throws IOException { + return reader.read(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java b/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java new file mode 100644 index 0000000000000..ca20364a58145 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionRequest.java @@ -0,0 +1,47 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Base action request implemented by plugins. 
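For orientation, a minimal hypothetical subclass of the abstract request defined just below; validate() returning null means the request is valid, matching the existing ActionRequest contract:

```java
import org.opensearch.action.ActionRequestValidationException;
import org.opensearch.action.ProtobufActionRequest;

// Hypothetical request type, not part of this patch.
final class EchoRequest extends ProtobufActionRequest {
    private final String text;

    EchoRequest(String text) {
        this.text = text;
    }

    @Override
    public ActionRequestValidationException validate() {
        // Null return signals a valid request; otherwise collect errors.
        if (text == null || text.isEmpty()) {
            ActionRequestValidationException e = new ActionRequestValidationException();
            e.addValidationError("text must be set");
            return e;
        }
        return null;
    }
}
```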
+* +* @opensearch.api +*/ +public abstract class ProtobufActionRequest extends TransportRequest { + + public ProtobufActionRequest() { + super(); + // this does not set the listenerThreaded API, if needed, its up to the caller to set it + // since most times, we actually want it to not be threaded... + // this.listenerThreaded = request.listenerThreaded(); + } + + public ProtobufActionRequest(byte[] in) throws IOException { + super(in); + } + + public abstract ActionRequestValidationException validate(); + + /** + * Should this task store its result after it has finished? + */ + public boolean getShouldStoreResult() { + return false; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java new file mode 100644 index 0000000000000..6c752e24c4f2c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionRequestBuilder.java @@ -0,0 +1,66 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.client.ProtobufOpenSearchClient; +import org.opensearch.common.unit.TimeValue; + +import java.util.Objects; + +/** + * Base Action Request Builder +* +* @opensearch.api +*/ +public abstract class ProtobufActionRequestBuilder { + + protected final ProtobufActionType action; + protected final Request request; + protected final ProtobufOpenSearchClient client; + + protected ProtobufActionRequestBuilder(ProtobufOpenSearchClient client, ProtobufActionType action, Request request) { + Objects.requireNonNull(action, "action must not be null"); + this.action = action; + this.request = request; + this.client = client; + } + + public Request request() { + return this.request; + } + + public ActionFuture execute() { + return client.execute(action, request); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get() { + return execute().actionGet(); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get(TimeValue timeout) { + return execute().actionGet(timeout); + } + + /** + * Short version of execute().actionGet(). + */ + public Response get(String timeout) { + return execute().actionGet(timeout); + } + + public void execute(ActionListener listener) { + client.execute(action, request, listener); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java b/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java new file mode 100644 index 0000000000000..aa2b9705fa01a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionResponse.java @@ -0,0 +1,27 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.core.transport.TransportResponse; + +import java.io.IOException; + +/** + * Base class for responses to action requests implemented by plugins. 
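Since ProtobufActionType (the next file in this diff) pairs every response with a byte[] reader, here is a sketch of a matching action/response pair; it assumes ProtobufActionType keeps a <Response> type parameter, as its Reader-based constructor implies:

```java
import org.opensearch.action.ProtobufActionResponse;
import org.opensearch.action.ProtobufActionType;

import java.io.IOException;
import java.io.OutputStream;

// Hypothetical singleton action, mirroring how ProtobufNodesInfoAction is
// wired to ProtobufNodesInfoResponse::new later in this patch.
final class PingAction extends ProtobufActionType<PingAction.PingResponse> {
    static final PingAction INSTANCE = new PingAction();
    static final String NAME = "cluster:monitor/ping";

    private PingAction() {
        // The byte[] constructor doubles as the response Reader.
        super(NAME, PingResponse::new);
    }

    static final class PingResponse extends ProtobufActionResponse {
        PingResponse(byte[] in) throws IOException {
            super(in);
        }

        // ProtobufWriteable-style hook; empty payload for this sketch.
        public void writeTo(OutputStream out) throws IOException {}
    }
}
```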
+* +* @opensearch.api +*/ +public abstract class ProtobufActionResponse extends TransportResponse { + + public ProtobufActionResponse() {} + + public ProtobufActionResponse(byte[] in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/ProtobufActionType.java b/server/src/main/java/org/opensearch/action/ProtobufActionType.java new file mode 100644 index 0000000000000..1be9c28f3a17f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/ProtobufActionType.java @@ -0,0 +1,64 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action; + +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.transport.TransportRequestOptions; + +/** + * A generic action. Should strive to make it a singleton. +* +* @opensearch.api +*/ +public class ProtobufActionType { + + private final String name; + private final ProtobufWriteable.Reader responseReader; + + /** + * @param name The name of the action, must be unique across actions. + * @param responseReader A reader for the response type + */ + public ProtobufActionType(String name, ProtobufWriteable.Reader responseReader) { + this.name = name; + this.responseReader = responseReader; + } + + /** + * The name of the action. Must be unique across actions. + */ + public String name() { + return this.name; + } + + /** + * Get a reader that can create a new instance of the class from a {@link byte[]} + */ + public ProtobufWriteable.Reader getResponseReaderTry() { + return responseReader; + } + + /** + * Optional request options for the action. + */ + public TransportRequestOptions transportOptions(Settings settings) { + return TransportRequestOptions.EMPTY; + } + + @Override + public boolean equals(Object o) { + return o instanceof ProtobufActionType && name.equals(((ProtobufActionType) o).name()); + } + + @Override + public int hashCode() { + return name.hashCode(); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java new file mode 100644 index 0000000000000..7c6ac6606cb43 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodeInfo.java @@ -0,0 +1,252 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import com.google.protobuf.InvalidProtocolBufferException; + +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.http.HttpInfo; +import org.opensearch.ingest.IngestInfo; +import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.monitor.os.OsInfo; +import org.opensearch.monitor.process.ProcessInfo; +import org.opensearch.search.aggregations.support.AggregationInfo; +import org.opensearch.search.pipeline.SearchPipelineInfo; +import org.opensearch.server.proto.NodesInfoProto; +import org.opensearch.threadpool.ThreadPoolInfo; +import org.opensearch.transport.TransportInfo; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Node information (static, does not change over time). +* +* @opensearch.internal +*/ +public class ProtobufNodeInfo extends ProtobufBaseNodeResponse { + + private Version version; + private Build build; + + @Nullable + private Settings settings; + + private NodesInfoProto.NodesInfo nodesInfoResponse; + + @Nullable + private ByteSizeValue totalIndexingBuffer; + + public ProtobufNodeInfo(byte[] data) throws InvalidProtocolBufferException { + super(data); + this.nodesInfoResponse = NodesInfoProto.NodesInfo.parseFrom(data); + } + + public ProtobufNodeInfo(NodesInfoProto.NodesInfo nodesInfo) throws InvalidProtocolBufferException { + super(nodesInfo.toByteArray()); + this.nodesInfoResponse = nodesInfo; + } + + public ProtobufNodeInfo( + Version version, + Build build, + DiscoveryNode node, + @Nullable Settings settings, + @Nullable OsInfo os, + @Nullable ProcessInfo process, + @Nullable JvmInfo jvm, + @Nullable ThreadPoolInfo threadPool, + @Nullable TransportInfo transport, + @Nullable HttpInfo http, + @Nullable PluginsAndModules plugins, + @Nullable IngestInfo ingest, + @Nullable AggregationInfo aggsInfo, + @Nullable ByteSizeValue totalIndexingBuffer, + @Nullable SearchPipelineInfo searchPipelineInfo + ) { + super(node); + this.version = version; + this.build = build; + this.settings = settings; + this.totalIndexingBuffer = totalIndexingBuffer; + this.nodesInfoResponse = NodesInfoProto.NodesInfo.newBuilder() + .setNodeId(node.getId()) + .setProcessId(process.getId()) + .setAddress(http.getAddress().publishAddress().toString()) + .setDisplayName(this.build.type().displayName()) + .setHash(this.build.hash()) + .setJvmInfoVersion(jvm.version()) + .setJvmHeapMax(jvm.getMem().getHeapMax().toString()) + .build(); + } + + /** + * System's hostname. null in case of UnknownHostException + */ + @Nullable + public String getHostname() { + return getNode().getHostName(); + } + + /** + * The current OpenSearch version + */ + public Version getVersion() { + return version; + } + + /** + * The build version of the node. + */ + public Build getBuild() { + return this.build; + } + + /** + * The settings of the node. 
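A sketch of how the Builder defined later in this class is meant to be used. One caveat grounded in the constructor above: process, jvm and http must be non-null, because the constructor reads process.getId(), jvm.version() and http.getAddress() unconditionally while assembling the proto, so leaving any of them unset would throw a NullPointerException:

```java
import org.opensearch.Build;
import org.opensearch.Version;
import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.http.HttpInfo;
import org.opensearch.monitor.jvm.JvmInfo;
import org.opensearch.monitor.process.ProcessInfo;

// Hypothetical helper showing the Builder flow added in this patch.
final class NodeInfoBuilderSketch {
    static ProtobufNodeInfo minimalInfo(Version version, Build build, DiscoveryNode node,
                                        ProcessInfo process, JvmInfo jvm, HttpInfo http) {
        // process, jvm and http are required by the underlying constructor.
        return ProtobufNodeInfo.builder(version, build, node)
            .setProcess(process)
            .setJvm(jvm)
            .setHttp(http)
            .build();
    }
}
```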
+ */ + @Nullable + public Settings getSettings() { + return this.settings; + } + + @Nullable + public ByteSizeValue getTotalIndexingBuffer() { + return totalIndexingBuffer; + } + + public static ProtobufNodeInfo.Builder builder(Version version, Build build, DiscoveryNode node) { + return new Builder(version, build, node); + } + + /** + * Builder class to accommodate new Info types being added to NodeInfo. + */ + public static class Builder { + private final Version version; + private final Build build; + private final DiscoveryNode node; + + private Builder(Version version, Build build, DiscoveryNode node) { + this.version = version; + this.build = build; + this.node = node; + } + + private Settings settings; + private OsInfo os; + private ProcessInfo process; + private JvmInfo jvm; + private ThreadPoolInfo threadPool; + private TransportInfo transport; + private HttpInfo http; + private PluginsAndModules plugins; + private IngestInfo ingest; + private AggregationInfo aggsInfo; + private ByteSizeValue totalIndexingBuffer; + private SearchPipelineInfo searchPipelineInfo; + + public Builder setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public Builder setOs(OsInfo os) { + this.os = os; + return this; + } + + public Builder setProcess(ProcessInfo process) { + this.process = process; + return this; + } + + public Builder setJvm(JvmInfo jvm) { + this.jvm = jvm; + return this; + } + + public Builder setThreadPool(ThreadPoolInfo threadPool) { + this.threadPool = threadPool; + return this; + } + + public Builder setTransport(TransportInfo transport) { + this.transport = transport; + return this; + } + + public Builder setHttp(HttpInfo http) { + this.http = http; + return this; + } + + public Builder setPlugins(PluginsAndModules plugins) { + this.plugins = plugins; + return this; + } + + public Builder setIngest(IngestInfo ingest) { + this.ingest = ingest; + return this; + } + + public Builder setAggsInfo(AggregationInfo aggsInfo) { + this.aggsInfo = aggsInfo; + return this; + } + + public Builder setTotalIndexingBuffer(ByteSizeValue totalIndexingBuffer) { + this.totalIndexingBuffer = totalIndexingBuffer; + return this; + } + + public Builder setProtobufSearchPipelineInfo(SearchPipelineInfo searchPipelineInfo) { + this.searchPipelineInfo = searchPipelineInfo; + return this; + } + + public ProtobufNodeInfo build() { + return new ProtobufNodeInfo( + version, + build, + node, + settings, + os, + process, + jvm, + threadPool, + transport, + http, + plugins, + ingest, + aggsInfo, + totalIndexingBuffer, + searchPipelineInfo + ); + } + + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.nodesInfoResponse.toByteArray()); + } + + public NodesInfoProto.NodesInfo response() { + return this.nodesInfoResponse; + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java new file mode 100644 index 0000000000000..7cb1201f2ccef --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import org.opensearch.action.ProtobufActionType; + +/** + * Transport action for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufNodesInfoAction extends ProtobufActionType { + + public static final ProtobufNodesInfoAction INSTANCE = new ProtobufNodesInfoAction(); + public static final String NAME = "cluster:monitor/nodes/info"; + + private ProtobufNodesInfoAction() { + super(NAME, ProtobufNodesInfoResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java new file mode 100644 index 0000000000000..12c59b03189b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoRequest.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.node.info; + +import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.server.proto.NodesInfoRequestProto.NodesInfoRequest; +import org.opensearch.server.proto.NodesInfoRequestProto; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** + * A request to get node (cluster) level information. + * + * @opensearch.internal + */ +public class ProtobufNodesInfoRequest extends ProtobufBaseNodesRequest { + + private NodesInfoRequestProto.NodesInfoRequest nodesInfoRequest; + private final TimeValue DEFAULT_TIMEOUT_SECS = TimeValue.timeValueSeconds(30); + + /** + * Get information from nodes based on the nodes ids specified. If none are passed, information + * for all nodes will be returned. + */ + public ProtobufNodesInfoRequest(String... nodesIds) { + super(nodesIds); + } + + /** + * Get the names of requested metrics + */ + public Set requestedMetrics() { + return new HashSet<>(this.nodesInfoRequest.getRequestedMetricsList()); + } + + /** + * Add multiple metrics + */ + public ProtobufNodesInfoRequest addMetrics(String timeout, String... metrics) { + SortedSet metricsSet = new TreeSet<>(Arrays.asList(metrics)); + if (Metric.allMetrics().containsAll(metricsSet) == false) { + metricsSet.removeAll(Metric.allMetrics()); + String plural = metricsSet.size() == 1 ? "" : "s"; + throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet); + } + this.nodesInfoRequest = NodesInfoRequestProto.NodesInfoRequest.newBuilder() + .addAllRequestedMetrics(metricsSet) + .setTimeout(DEFAULT_TIMEOUT_SECS.toString()) + .build(); + return this; + } + + public ProtobufNodesInfoRequest(byte[] data) throws IOException { + super(data); + this.nodesInfoRequest = NodesInfoRequestProto.NodesInfoRequest.parseFrom(data); + } + + public ProtobufNodesInfoRequest(NodesInfoRequestProto.NodesInfoRequest nodesInfoRequest) throws IOException { + super(nodesInfoRequest.toByteArray()); + this.nodesInfoRequest = nodesInfoRequest; + } + + /** + * An enumeration of the "core" sections of metrics that may be requested + * from the nodes information endpoint. 
Eventually this list list will be + * pluggable. + */ + public enum Metric { + SETTINGS("settings"), + OS("os"), + PROCESS("process"), + JVM("jvm"), + THREAD_POOL("thread_pool"), + TRANSPORT("transport"), + HTTP("http"), + PLUGINS("plugins"), + INGEST("ingest"), + AGGREGATIONS("aggregations"), + INDICES("indices"), + SEARCH_PIPELINES("search_pipelines"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + + boolean containedIn(Set metricNames) { + return metricNames.contains(this.metricName()); + } + + public static Set allMetrics() { + return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); + } + } + + @Override + public void writeTo(OutputStream out) throws IOException { + this.nodesInfoRequest.writeTo(out); + } + + public NodesInfoRequest request() { + return this.nodesInfoRequest; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java new file mode 100644 index 0000000000000..08d00d57084d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufNodesInfoResponse.java @@ -0,0 +1,127 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.info; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.server.proto.NodesInfoResponseProto; +import org.opensearch.server.proto.NodesInfoProto.NodesInfo; +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Transport response for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufNodesInfoResponse extends ProtobufBaseNodesResponse implements ToXContentFragment { + + private NodesInfoResponseProto.NodesInfoResponse nodesInfoRes; + private Map nodesMap = new HashMap<>(); + + public ProtobufNodesInfoResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + List nodesInfo = new ArrayList<>(); + for (ProtobufNodeInfo nodeInfo : nodes) { + nodesInfo.add(nodeInfo.response()); + this.nodesMap.put(nodeInfo.response().getNodeId(), nodeInfo.response()); + } + this.nodesInfoRes = NodesInfoResponseProto.NodesInfoResponse.newBuilder() + .setClusterName(clusterName.value()) + .addAllNodesInfo(nodesInfo) + .build(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (ProtobufNodeInfo nodeInfo : getNodes()) { + builder.startObject(nodeInfo.getNode().getId()); + + builder.field("name", nodeInfo.getNode().getName()); + builder.field("transport_address", nodeInfo.getNode().getAddress().toString()); + 
builder.field("host", nodeInfo.getNode().getHostName()); + builder.field("ip", nodeInfo.getNode().getHostAddress()); + + builder.field("version", nodeInfo.getVersion()); + builder.field("build_type", nodeInfo.getBuild().type().displayName()); + builder.field("build_hash", nodeInfo.getBuild().hash()); + if (nodeInfo.getTotalIndexingBuffer() != null) { + builder.humanReadableField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer()); + } + + builder.startArray("roles"); + for (DiscoveryNodeRole role : nodeInfo.getNode().getRoles()) { + builder.value(role.roleName()); + } + builder.endArray(); + + if (!nodeInfo.getNode().getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry entry : nodeInfo.getNode().getAttributes().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + } + + if (nodeInfo.getSettings() != null) { + builder.startObject("settings"); + Settings settings = nodeInfo.getSettings(); + settings.toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } + + public NodesInfoResponseProto.NodesInfoResponse response() { + return nodesInfoRes; + } + + public Map nodesMap() { + return nodesMap; + } + + public ProtobufNodesInfoResponse(byte[] data) throws IOException { + super(data); + this.nodesInfoRes = NodesInfoResponseProto.NodesInfoResponse.parseFrom(data); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.nodesInfoRes.toByteArray()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java new file mode 100644 index 0000000000000..a420d4fe65486 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/ProtobufTransportNodesInfoAction.java @@ -0,0 +1,128 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.admin.cluster.node.info; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.nodes.ProtobufTransportNodesAction; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.node.ProtobufNodeService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import java.util.Set; + +/** + * Transport action for OpenSearch Node Information +* +* @opensearch.internal +*/ +public class ProtobufTransportNodesInfoAction extends ProtobufTransportNodesAction< + ProtobufNodesInfoRequest, + ProtobufNodesInfoResponse, + ProtobufTransportNodesInfoAction.NodeInfoRequest, + ProtobufNodeInfo> { + + private final ProtobufNodeService nodeService; + + @Inject + public ProtobufTransportNodesInfoAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ProtobufNodeService nodeService, + ProtobufActionFilters actionFilters + ) { + super( + NodesInfoAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + ProtobufNodesInfoRequest::new, + NodeInfoRequest::new, + ThreadPool.Names.MANAGEMENT, + ProtobufNodeInfo.class + ); + this.nodeService = nodeService; + } + + @Override + protected ProtobufNodesInfoResponse newResponse( + ProtobufNodesInfoRequest nodesInfoRequest, + List responses, + List failures + ) { + return new ProtobufNodesInfoResponse(new ClusterName(clusterService.getClusterName().value()), responses, failures); + } + + @Override + protected NodeInfoRequest newNodeRequest(ProtobufNodesInfoRequest request) { + return new NodeInfoRequest(request); + } + + @Override + protected ProtobufNodeInfo nodeOperation(NodeInfoRequest nodeRequest) { + ProtobufNodesInfoRequest request = nodeRequest.request; + Set metrics = request.requestedMetrics(); + ProtobufNodeInfo protobufNodeInfo = nodeService.info( + metrics.contains(ProtobufNodesInfoRequest.Metric.SETTINGS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.OS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.PROCESS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.JVM.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.THREAD_POOL.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.TRANSPORT.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.HTTP.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.PLUGINS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.INGEST.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.AGGREGATIONS.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.INDICES.metricName()), + metrics.contains(ProtobufNodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()) + ); + return protobufNodeInfo; + } + + /** + * Inner Node Info Request + * + * @opensearch.internal + */ + public static class NodeInfoRequest extends TransportRequest { + + ProtobufNodesInfoRequest request; + + public NodeInfoRequest(byte[] data) throws IOException { + request = new ProtobufNodesInfoRequest(data); + } + + public NodeInfoRequest(ProtobufNodesInfoRequest request) { + this.request = request; + } + + public ProtobufNodesInfoRequest request() 
{ + return request; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + request.writeTo(out); + } + } + + @Override + protected ProtobufNodeInfo newNodeResponse(byte[] in) throws IOException { + return new ProtobufNodeInfo(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java new file mode 100644 index 0000000000000..bd91837758a21 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodeStats.java @@ -0,0 +1,245 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.node.stats; + +import com.google.protobuf.InvalidProtocolBufferException; + +import org.opensearch.action.support.nodes.ProtobufBaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.Nullable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.http.HttpStats; +import org.opensearch.indices.NodeIndicesStats; +import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; +import org.opensearch.ingest.IngestStats; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.monitor.os.OsStats; +import org.opensearch.monitor.process.ProcessStats; +import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.script.ScriptStats; +import org.opensearch.server.proto.NodesStatsProto; +import org.opensearch.threadpool.ThreadPoolStats; +import org.opensearch.transport.TransportStats; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Node statistics (dynamic, changes depending on when created). 
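One observation on the class below: the QueryCacheStats wiring in its constructor appears shifted by one field (hitCount is populated from getEvictions(), missCount from getHitCount(), and cacheCount from getMissCount()). A sketch of the presumably intended mapping:

```java
import org.opensearch.indices.NodeIndicesStats;
import org.opensearch.server.proto.NodesStatsProto;

// Sketch of the query-cache mapping with each proto field fed from the
// same-named accessor, rather than the shifted wiring in the constructor below.
final class QueryCacheProtoSketch {
    static NodesStatsProto.NodesStats.QueryCacheStats of(NodeIndicesStats indices) {
        return NodesStatsProto.NodesStats.QueryCacheStats.newBuilder()
            .setRamBytesUsed(indices.getQueryCache().getMemorySizeInBytes())
            .setHitCount(indices.getQueryCache().getHitCount())
            .setMissCount(indices.getQueryCache().getMissCount())
            .setCacheCount(indices.getQueryCache().getCacheCount())
            .setCacheSize(indices.getQueryCache().getCacheSize())
            .build();
    }
}
```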
+* +* @opensearch.internal +*/ +public class ProtobufNodeStats extends ProtobufBaseNodeResponse implements ToXContentFragment { + + private long timestamp; + + private NodesStatsProto.NodesStats nodesStatsResponse; + + public ProtobufNodeStats(byte[] data) throws InvalidProtocolBufferException { + super(data); + this.nodesStatsResponse = NodesStatsProto.NodesStats.parseFrom(data); + } + + public ProtobufNodeStats(NodesStatsProto.NodesStats nodesStats) throws InvalidProtocolBufferException { + super(nodesStats.toByteArray()); + this.nodesStatsResponse = nodesStats; + } + + public ProtobufNodeStats( + DiscoveryNode node, + long timestamp, + @Nullable NodeIndicesStats indices, + @Nullable OsStats os, + @Nullable ProcessStats process, + @Nullable JvmStats jvm, + @Nullable ThreadPoolStats threadPool, + @Nullable FsInfo fs, + @Nullable TransportStats transport, + @Nullable HttpStats http, + @Nullable AllCircuitBreakerStats breaker, + @Nullable ScriptStats scriptStats, + @Nullable DiscoveryStats discoveryStats, + @Nullable IngestStats ingestStats, + @Nullable AdaptiveSelectionStats adaptiveSelectionStats + ) { + super(node); + this.timestamp = timestamp; + NodesStatsProto.NodesStats.CompletionStats completionStats = NodesStatsProto.NodesStats.CompletionStats.newBuilder() + .setSize(indices.getCompletion().getSizeInBytes()) + .build(); + NodesStatsProto.NodesStats.FieldDataStats fieldDataStats = NodesStatsProto.NodesStats.FieldDataStats.newBuilder() + .setMemSize(indices.getFieldData().getMemorySizeInBytes()) + .setEvictions(indices.getFieldData().getEvictions()) + .build(); + NodesStatsProto.NodesStats.QueryCacheStats queryCacheStats = NodesStatsProto.NodesStats.QueryCacheStats.newBuilder() + .setRamBytesUsed(indices.getQueryCache().getMemorySizeInBytes()) + .setHitCount(indices.getQueryCache().getEvictions()) + .setMissCount(indices.getQueryCache().getHitCount()) + .setCacheCount(indices.getQueryCache().getMissCount()) + .setCacheSize(indices.getQueryCache().getCacheSize()) + .build(); + NodesStatsProto.NodesStats.RequestCacheStats requestCacheStats = NodesStatsProto.NodesStats.RequestCacheStats.newBuilder() + .setMemorySize(indices.getRequestCache().getMemorySizeInBytes()) + .setEvictions(indices.getRequestCache().getEvictions()) + .setHitCount(indices.getRequestCache().getHitCount()) + .setMissCount(indices.getRequestCache().getMissCount()) + .build(); + NodesStatsProto.NodesStats.FlushStats flushStats = NodesStatsProto.NodesStats.FlushStats.newBuilder() + .setTotal(indices.getFlush().getTotal()) + .setPeriodic(indices.getFlush().getPeriodic()) + .setTotalTimeInMillis(indices.getFlush().getTotalTimeInMillis()) + .build(); + NodesStatsProto.NodesStats.GetStats getStats = NodesStatsProto.NodesStats.GetStats.newBuilder() + .setExistsCount(indices.getGet().getExistsCount()) + .setExistsTimeInMillis(indices.getGet().getExistsTimeInMillis()) + .setMissingCount(indices.getGet().getMissingCount()) + .setMissingTimeInMillis(indices.getGet().getMissingTimeInMillis()) + .setCurrent(indices.getGet().current()) + .setCount(indices.getGet().getCount()) + .setTime(indices.getGet().getTimeInMillis()) + .build(); + NodesStatsProto.NodesStats.IndexingStats indexingStats = NodesStatsProto.NodesStats.IndexingStats.newBuilder() + .setIndexCount(indices.getIndexing().getTotal().getIndexCount()) + .setIndexTimeInMillis(indices.getIndexing().getTotal().getIndexTime().getMillis()) + .setIndexCurrent(indices.getIndexing().getTotal().getIndexCurrent()) + 
.setIndexFailedCount(indices.getIndexing().getTotal().getIndexFailedCount()) + .setDeleteCount(indices.getIndexing().getTotal().getDeleteCount()) + .setDeleteTimeInMillis(indices.getIndexing().getTotal().getDeleteTime().getMillis()) + .setDeleteCurrent(indices.getIndexing().getTotal().getDeleteCurrent()) + .setNoopUpdateCount(indices.getIndexing().getTotal().getNoopUpdateCount()) + .setIsThrottled(indices.getIndexing().getTotal().isThrottled()) + .setThrottleTimeInMillis(indices.getIndexing().getTotal().getThrottleTime().getMillis()) + .build(); + NodesStatsProto.NodesStats.MergeStats mergeStats = NodesStatsProto.NodesStats.MergeStats.newBuilder() + .setTotal(indices.getMerge().getTotal()) + .setTotalTimeInMillis(indices.getMerge().getTotalTimeInMillis()) + .setTotalNumDocs(indices.getMerge().getTotalNumDocs()) + .setTotalSizeInBytes(indices.getMerge().getTotalSizeInBytes()) + .setCurrent(indices.getMerge().getCurrent()) + .setCurrentNumDocs(indices.getMerge().getCurrentNumDocs()) + .setCurrentSizeInBytes(indices.getMerge().getCurrentSizeInBytes()) + .build(); + NodesStatsProto.NodesStats.RefreshStats refreshStats = NodesStatsProto.NodesStats.RefreshStats.newBuilder() + .setTotal(indices.getRefresh().getTotal()) + .setTotalTimeInMillis(indices.getRefresh().getTotalTimeInMillis()) + .setExternalTotal(indices.getRefresh().getExternalTotal()) + .setExternalTotalTimeInMillis(indices.getRefresh().getExternalTotalTimeInMillis()) + .setListeners(indices.getRefresh().getListeners()) + .build(); + NodesStatsProto.NodesStats.ScriptStats scStats = NodesStatsProto.NodesStats.ScriptStats.newBuilder() + .setCompilations(scriptStats.getCompilations()) + .setCacheEvictions(scriptStats.getCacheEvictions()) + .setCompilationLimitTriggered(scriptStats.getCompilationLimitTriggered()) + .build(); + NodesStatsProto.NodesStats.SearchStats searchStats = NodesStatsProto.NodesStats.SearchStats.newBuilder() + .setQueryCount(indices.getSearch().getTotal().getQueryCount()) + .setQueryTimeInMillis(indices.getSearch().getTotal().getQueryTimeInMillis()) + .setQueryCurrent(indices.getSearch().getTotal().getQueryCurrent()) + .setFetchCount(indices.getSearch().getTotal().getFetchCount()) + .setFetchTimeInMillis(indices.getSearch().getTotal().getFetchTimeInMillis()) + .setFetchCurrent(indices.getSearch().getTotal().getFetchCurrent()) + .setScrollCount(indices.getSearch().getTotal().getScrollCount()) + .setScrollTimeInMillis(indices.getSearch().getTotal().getScrollTimeInMillis()) + .setScrollCurrent(indices.getSearch().getTotal().getScrollCurrent()) + .setSuggestCount(indices.getSearch().getTotal().getSuggestCount()) + .setSuggestTimeInMillis(indices.getSearch().getTotal().getSuggestTimeInMillis()) + .setSuggestCurrent(indices.getSearch().getTotal().getSuggestCurrent()) + .setPitCount(indices.getSearch().getTotal().getPitCount()) + .setPitTimeInMillis(indices.getSearch().getTotal().getPitTimeInMillis()) + .setPitCurrent(indices.getSearch().getTotal().getPitCurrent()) + .setOpenContexts(indices.getSearch().getOpenContexts()) + .build(); + NodesStatsProto.NodesStats.SegmentStats segmentStats = NodesStatsProto.NodesStats.SegmentStats.newBuilder() + .setCount(indices.getSegments().getCount()) + .setIndexWriterMemoryInBytes(indices.getSegments().getIndexWriterMemoryInBytes()) + .setVersionMapMemoryInBytes(indices.getSegments().getVersionMapMemoryInBytes()) + .setMaxUnsafeAutoIdTimestamp(indices.getSegments().getMaxUnsafeAutoIdTimestamp()) + .setBitsetMemoryInBytes(indices.getSegments().getBitsetMemoryInBytes()) + .build(); + List 
<Double>
list = Arrays.stream(os.getCpu().getLoadAverage()).boxed().collect(Collectors.toList()); + this.nodesStatsResponse = NodesStatsProto.NodesStats.newBuilder() + .setNodeId(node.getId()) + .setJvmHeapUsed(jvm.getMem().getHeapUsed().getBytes()) + .setJvmHeapUsedPercent(Short.toString(jvm.getMem().getHeapUsedPercent())) + .setJvmUpTime(jvm.getUptime().getMillis()) + .setDiskTotal(fs.getTotal().getTotal().getBytes()) + .setDiskAvailable(fs.getTotal().getAvailable().getBytes()) + .setOsMemUsed(os.getMem().getUsed().getBytes()) + .setOsMemUsedPercent(Short.toString(os.getMem().getUsedPercent())) + .setOsMemTotal(os.getMem().getTotal().getBytes()) + .setOsCpuPercent(Short.toString(os.getCpu().getPercent())) + .addAllOsCpuLoadAverage(list) + .setProcessOpenFileDescriptors(process.getOpenFileDescriptors()) + .setProcessMaxFileDescriptors(process.getMaxFileDescriptors()) + .setCompletionStats(completionStats) + .setFieldDataStats(fieldDataStats) + .setQueryCacheStats(queryCacheStats) + .setRequestCacheStats(requestCacheStats) + .setFlushStats(flushStats) + .setGetStats(getStats) + .setIndexingStats(indexingStats) + .setMergeStats(mergeStats) + .setRefreshStats(refreshStats) + .setScriptStats(scStats) + .setSearchStats(searchStats) + .setSegmentStats(segmentStats) + .build(); + } + + public long getTimestamp() { + return this.timestamp; + } + + @Nullable + public String getHostname() { + return getNode().getHostName(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + builder.field("name", getNode().getName()); + builder.field("transport_address", getNode().getAddress().toString()); + builder.field("host", getNode().getHostName()); + builder.field("ip", getNode().getAddress()); + + builder.startArray("roles"); + for (DiscoveryNodeRole role : getNode().getRoles()) { + builder.value(role.roleName()); + } + builder.endArray(); + + if (!getNode().getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry attrEntry : getNode().getAttributes().entrySet()) { + builder.field(attrEntry.getKey(), attrEntry.getValue()); + } + builder.endObject(); + } + + return builder; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.nodesStatsResponse.toByteArray()); + } + + public NodesStatsProto.NodesStats response() { + return this.nodesStatsResponse; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java new file mode 100644 index 0000000000000..a03ee6119f0b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.action.admin.cluster.node.stats;
+
+import org.opensearch.action.ProtobufActionType;
+
+/**
+ * Action type for obtaining OpenSearch Node Stats
+*
+* @opensearch.internal
+*/
+public class ProtobufNodesStatsAction extends ProtobufActionType<ProtobufNodesStatsResponse> {
+
+    public static final ProtobufNodesStatsAction INSTANCE = new ProtobufNodesStatsAction();
+    public static final String NAME = "cluster:monitor/nodes/stats";
+
+    private ProtobufNodesStatsAction() {
+        super(NAME, ProtobufNodesStatsResponse::new);
+    }
+}
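A minimal usage sketch for the action type above (illustrative only; it assumes the ProtobufNodeClient added elsewhere in this patch exposes an execute(action, request, listener) method mirroring NodeClient):

    ProtobufNodesStatsRequest statsRequest = new ProtobufNodesStatsRequest("node-1").addMetrics("os", "jvm");
    client.execute(ProtobufNodesStatsAction.INSTANCE, statsRequest, ActionListener.wrap(
        response -> response.nodesMap().keySet().forEach(System.out::println), // node ids that responded
        e -> { /* transport-level failure */ }
    ));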
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java
new file mode 100644
index 0000000000000..1e2f2f22731df
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsRequest.java
@@ -0,0 +1,206 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.cluster.node.stats;
+
+import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
+import org.opensearch.action.support.nodes.ProtobufBaseNodesRequest;
+import org.opensearch.server.proto.NodesStatsRequestProto.NodesStatsRequest;
+import org.opensearch.server.proto.NodesStatsRequestProto;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+
+/**
+ * A request to get node (cluster) level stats.
+*
+* @opensearch.internal
+*/
+public class ProtobufNodesStatsRequest extends ProtobufBaseNodesRequest<ProtobufNodesStatsRequest> {
+
+    private CommonStatsFlags indices = new CommonStatsFlags();
+    private final Set<String> requestedMetrics = new HashSet<>();
+    private NodesStatsRequestProto.NodesStatsRequest nodesStatsRequest;
+
+    public ProtobufNodesStatsRequest() {
+        super((String[]) null);
+    }
+
+    /**
+     * Get stats from nodes based on the nodes ids specified. If none are passed, stats
+     * for all nodes will be returned.
+     */
+    public ProtobufNodesStatsRequest(String... nodesIds) {
+        super(nodesIds);
+    }
+
+    /**
+     * Sets all the request flags.
+     */
+    public ProtobufNodesStatsRequest all() {
+        this.indices.all();
+        this.requestedMetrics.addAll(Metric.allMetrics());
+        return this;
+    }
+
+    /**
+     * Clears all the request flags.
+     */
+    public ProtobufNodesStatsRequest clear() {
+        this.indices.clear();
+        this.requestedMetrics.clear();
+        return this;
+    }
+
+    /**
+     * Get indices. Handled separately from other metrics because it may or
+     * may not have submetrics.
+     * @return flags indicating which indices stats to return
+     */
+    public CommonStatsFlags indices() {
+        return indices;
+    }
+
+    /**
+     * Set indices. Handled separately from other metrics because it may or
+     * may not involve submetrics.
+     * @param indices flags indicating which indices stats to return
+     * @return This object, for request chaining.
+     */
+    public ProtobufNodesStatsRequest indices(CommonStatsFlags indices) {
+        this.indices = indices;
+        return this;
+    }
+
+    /**
+     * Should indices stats be returned.
+     */
+    public ProtobufNodesStatsRequest indices(boolean indices) {
+        if (indices) {
+            this.indices.all();
+        } else {
+            this.indices.clear();
+        }
+        return this;
+    }
+
+    /**
+     * Get the names of requested metrics, excluding indices, which are
+     * handled separately.
+     */
+    public Set<String> requestedMetrics() {
+        return new HashSet<>(this.nodesStatsRequest.getRequestedMetricsList());
+    }
+
+    /**
+     * Add metric
+     */
+    public ProtobufNodesStatsRequest addMetric(String metric) {
+        if (Metric.allMetrics().contains(metric) == false) {
+            throw new IllegalStateException("Used an illegal metric: " + metric);
+        }
+        requestedMetrics.add(metric);
+        return this;
+    }
+
+    /**
+     * Add an array of metric names
+     */
+    public ProtobufNodesStatsRequest addMetrics(String... metrics) {
+        // use sorted set for reliable ordering in error messages
+        SortedSet<String> metricsSet = new TreeSet<>(Arrays.asList(metrics));
+        if (Metric.allMetrics().containsAll(metricsSet) == false) {
+            metricsSet.removeAll(Metric.allMetrics());
+            String plural = metricsSet.size() == 1 ? "" : "s";
+            throw new IllegalStateException("Used illegal metric" + plural + ": " + metricsSet);
+        }
+        this.nodesStatsRequest = NodesStatsRequestProto.NodesStatsRequest.newBuilder().addAllRequestedMetrics(metricsSet).build();
+        requestedMetrics.addAll(metricsSet);
+        return this;
+    }
+
+    /**
+     * Remove metric
+     */
+    public ProtobufNodesStatsRequest removeMetric(String metric) {
+        if (Metric.allMetrics().contains(metric) == false) {
+            throw new IllegalStateException("Used an illegal metric: " + metric);
+        }
+        requestedMetrics.remove(metric);
+        return this;
+    }
+
+    public ProtobufNodesStatsRequest(byte[] data) throws IOException {
+        super(data);
+        this.nodesStatsRequest = NodesStatsRequestProto.NodesStatsRequest.parseFrom(data);
+    }
+
+    public ProtobufNodesStatsRequest(NodesStatsRequestProto.NodesStatsRequest nodesStatsRequest) throws IOException {
+        super(nodesStatsRequest.toByteArray());
+        this.nodesStatsRequest = nodesStatsRequest;
+    }
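+    // Round-trip sketch (illustrative, not part of the class): writeTo(OutputStream)
+    // and the byte[] constructor are inverses via the generated protobuf code,
+    // assuming metrics were added so nodesStatsRequest is non-null:
+    //   ByteArrayOutputStream out = new ByteArrayOutputStream();
+    //   request.writeTo(out);
+    //   ProtobufNodesStatsRequest copy = new ProtobufNodesStatsRequest(out.toByteArray());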
+
+    /**
+     * An enumeration of the "core" sections of metrics that may be requested
+     * from the nodes stats endpoint. Eventually this list will be pluggable.
+     */
+    public enum Metric {
+        OS("os"),
+        PROCESS("process"),
+        JVM("jvm"),
+        THREAD_POOL("thread_pool"),
+        FS("fs"),
+        TRANSPORT("transport"),
+        HTTP("http"),
+        BREAKER("breaker"),
+        SCRIPT("script"),
+        DISCOVERY("discovery"),
+        INGEST("ingest"),
+        ADAPTIVE_SELECTION("adaptive_selection"),
+        SCRIPT_CACHE("script_cache"),
+        INDEXING_PRESSURE("indexing_pressure"),
+        SHARD_INDEXING_PRESSURE("shard_indexing_pressure"),
+        SEARCH_BACKPRESSURE("search_backpressure"),
+        CLUSTER_MANAGER_THROTTLING("cluster_manager_throttling"),
+        WEIGHTED_ROUTING_STATS("weighted_routing"),
+        FILE_CACHE_STATS("file_cache");
+
+        private String metricName;
+
+        Metric(String name) {
+            this.metricName = name;
+        }
+
+        public String metricName() {
+            return this.metricName;
+        }
+
+        boolean containedIn(Set<String> metricNames) {
+            return metricNames.contains(this.metricName());
+        }
+
+        static Set<String> allMetrics() {
+            return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet());
+        }
+    }
+
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+        this.nodesStatsRequest.writeTo(out);
+    }
+
+    public NodesStatsRequest request() {
+        return this.nodesStatsRequest;
+    }
+}
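A short sketch of how the Metric enum is consumed (the transport action further below performs exactly these containedIn checks, from within the same package, when deciding which stats sections to gather):

    Set<String> metrics = new HashSet<>(Arrays.asList("os", "jvm"));
    boolean wantOs = ProtobufNodesStatsRequest.Metric.OS.containedIn(metrics); // true
    boolean wantFs = ProtobufNodesStatsRequest.Metric.FS.containedIn(metrics); // false
    assert ProtobufNodesStatsRequest.Metric.allMetrics().contains("breaker");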
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java
new file mode 100644
index 0000000000000..c70c32af009a5
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufNodesStatsResponse.java
@@ -0,0 +1,96 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.cluster.node.stats;
+
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.nodes.ProtobufBaseNodesResponse;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.common.Strings;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.server.proto.NodesStatsResponseProto;
+import org.opensearch.server.proto.NodesStatsProto.NodesStats;
+import org.opensearch.common.xcontent.XContentFactory;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Transport response for obtaining OpenSearch Node Stats
+*
+* @opensearch.internal
+*/
+public class ProtobufNodesStatsResponse extends ProtobufBaseNodesResponse<ProtobufNodeStats> implements ToXContentFragment {
+
+    private NodesStatsResponseProto.NodesStatsResponse nodesStatsRes;
+    private Map<String, NodesStats> nodesMap = new HashMap<>();
+
+    public ProtobufNodesStatsResponse(byte[] data) throws IOException {
+        super(data);
+        this.nodesStatsRes = NodesStatsResponseProto.NodesStatsResponse.parseFrom(data);
+    }
+
+    public ProtobufNodesStatsResponse(ClusterName clusterName, List<ProtobufNodeStats> nodes, List<FailedNodeException> failures) {
+        super(clusterName, nodes, failures);
+        List<NodesStats> nodesStats = new ArrayList<>();
+        for (ProtobufNodeStats nodeStats : nodes) {
+            nodesStats.add(nodeStats.response());
+            this.nodesMap.put(nodeStats.response().getNodeId(), nodeStats.response());
+        }
+        this.nodesStatsRes = NodesStatsResponseProto.NodesStatsResponse.newBuilder()
+            .setClusterName(clusterName.value())
+            .addAllNodesStats(nodesStats)
+            .build();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("nodes");
+        for (ProtobufNodeStats nodeStats : getNodes()) {
+            builder.startObject(nodeStats.getNode().getId());
+            builder.field("timestamp", nodeStats.getTimestamp());
+            nodeStats.toXContent(builder, params);
+
+            builder.endObject();
+        }
+        builder.endObject();
+
+        return builder;
+    }
+
+    @Override
+    public String toString() {
+        try {
+            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            builder.startObject();
+            toXContent(builder, EMPTY_PARAMS);
+            builder.endObject();
+            return Strings.toString(builder);
+        } catch (IOException e) {
+            return "{ \"error\" : \"" + e.getMessage() + "\"}";
+        }
+    }
+
+    public NodesStatsResponseProto.NodesStatsResponse response() {
+        return nodesStatsRes;
+    }
+
+    public Map<String, NodesStats> nodesMap() {
+        return nodesMap;
+    }
+
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+        out.write(this.nodesStatsRes.toByteArray());
+    }
+}
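For quick inspection during development, the toString() override above pretty-prints the XContent fragment, so a response can be dumped directly (a sketch, not part of the patch):

    static void debugPrint(ProtobufNodesStatsResponse response) {
        // prints JSON with a top-level "nodes" object keyed by node id
        System.out.println(response);
    }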
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java
new file mode 100644
index 0000000000000..0fd8b758e4c22
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/ProtobufTransportNodesStatsAction.java
@@ -0,0 +1,136 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.cluster.node.stats;
+
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.ProtobufActionFilters;
+import org.opensearch.action.support.nodes.ProtobufTransportNodesAction;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.node.ProtobufNodeService;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Transport action for obtaining OpenSearch Node Stats
+*
+* @opensearch.internal
+*/
+public class ProtobufTransportNodesStatsAction extends ProtobufTransportNodesAction<
+    ProtobufNodesStatsRequest,
+    ProtobufNodesStatsResponse,
+    ProtobufTransportNodesStatsAction.NodeStatsRequest,
+    ProtobufNodeStats> {
+
+    private final ProtobufNodeService nodeService;
+
+    @Inject
+    public ProtobufTransportNodesStatsAction(
+        ThreadPool threadPool,
+        ClusterService clusterService,
+        TransportService transportService,
+        ProtobufNodeService nodeService,
+        ProtobufActionFilters actionFilters
+    ) {
+        super(
+            NodesStatsAction.NAME,
+            threadPool,
+            clusterService,
+            transportService,
+            actionFilters,
+            ProtobufNodesStatsRequest::new,
+            NodeStatsRequest::new,
+            ThreadPool.Names.MANAGEMENT,
+            ProtobufNodeStats.class
+        );
+        this.nodeService = nodeService;
+    }
+
+    @Override
+    protected ProtobufNodesStatsResponse newResponse(
+        ProtobufNodesStatsRequest request,
+        List<ProtobufNodeStats> responses,
+        List<FailedNodeException> failures
+    ) {
+        return new ProtobufNodesStatsResponse(new ClusterName(clusterService.getClusterName().value()), responses, failures);
+    }
+
+    @Override
+    protected NodeStatsRequest newNodeRequest(ProtobufNodesStatsRequest request) {
+        return new NodeStatsRequest(request);
+    }
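+    // Each boolean passed to ProtobufNodeService#stats below answers "was this
+    // metric requested?", so a node gathers only the sections named in the request;
+    // the flags are positional and must stay in sync with the service signature.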
+
+    @Override
+    protected ProtobufNodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
+        ProtobufNodesStatsRequest request = nodeStatsRequest.request;
+        Set<String> metrics = request.requestedMetrics();
+        ProtobufNodeStats protobufNodeStats = nodeService.stats(
+            request.indices(),
+            ProtobufNodesStatsRequest.Metric.OS.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.PROCESS.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.JVM.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.THREAD_POOL.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.FS.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.TRANSPORT.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.HTTP.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.BREAKER.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.SCRIPT.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.DISCOVERY.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.INGEST.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.ADAPTIVE_SELECTION.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.SCRIPT_CACHE.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.INDEXING_PRESSURE.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.SHARD_INDEXING_PRESSURE.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.SEARCH_BACKPRESSURE.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.CLUSTER_MANAGER_THROTTLING.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics),
+            ProtobufNodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics)
+        );
+        return protobufNodeStats;
+    }
+
+    /**
+     * Inner Node Stats Request
+     *
+     * @opensearch.internal
+     */
+    public static class NodeStatsRequest extends TransportRequest {
+
+        ProtobufNodesStatsRequest request;
+
+        public NodeStatsRequest(byte[] data) throws IOException {
+            request = new ProtobufNodesStatsRequest(data);
+        }
+
+        public NodeStatsRequest(ProtobufNodesStatsRequest request) {
+            this.request = request;
+        }
+
+        public ProtobufNodesStatsRequest request() {
+            return request;
+        }
+
+        @Override
+        public void writeTo(OutputStream out) throws IOException {
+            request.writeTo(out);
+        }
+    }
+
+    @Override
+    protected ProtobufNodeStats newNodeResponse(byte[] in) throws IOException {
+        return new ProtobufNodeStats(in);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java
new file mode 100644
index 0000000000000..3f3c2a4d80d0e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateAction.java
@@ -0,0 +1,26 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.cluster.state;
+
+import org.opensearch.action.ProtobufActionType;
+
+/**
+ * Action type for obtaining cluster state
+*
+* @opensearch.internal
+*/
+public class ProtobufClusterStateAction extends ProtobufActionType<ProtobufClusterStateResponse> {
+
+    public static final ProtobufClusterStateAction INSTANCE = new ProtobufClusterStateAction();
+    public static final String NAME = "cluster:monitor/state";
+
+    private ProtobufClusterStateAction() {
+        super(NAME, ProtobufClusterStateResponse::new);
+    }
+}
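A usage sketch for the request type that follows (illustrative only; it assumes a protobuf-aware client as in the earlier sketch). Note that all() populates every section flag in a single builder pass:

    ProtobufClusterStateRequest request = new ProtobufClusterStateRequest().all();
    assert request.routingTable() && request.nodes() && request.metadata() && request.blocks() && request.customs();
    client.execute(ProtobufClusterStateAction.INSTANCE, request, listener);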
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java
new file mode 100644
index 0000000000000..9df8e80f4d336
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateRequest.java
@@ -0,0 +1,225 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.admin.cluster.state;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.IndicesRequest;
+import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeReadRequest;
+import org.opensearch.common.Strings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.server.proto.ClusterStateRequestProto;
+import org.opensearch.server.proto.ClusterStateRequestProto.ClusterStateRequest;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Transport request for obtaining cluster state
+*
+* @opensearch.internal
+*/
+public class ProtobufClusterStateRequest extends ProtobufClusterManagerNodeReadRequest<ProtobufClusterStateRequest>
+    implements
+        IndicesRequest.Replaceable {
+
+    public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1);
+    private ClusterStateRequestProto.ClusterStateRequest clusterStateRequest;
+
+    public ProtobufClusterStateRequest() {}
+
+    public ProtobufClusterStateRequest(
+        boolean routingTable,
+        boolean nodes,
+        boolean metadata,
+        boolean blocks,
+        boolean customs,
+        long waitForMetadataVersion,
+        TimeValue waitForTimeout,
+        List<String> indices
+    ) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setRoutingTable(routingTable)
+            .setNodes(nodes)
+            .setMetadata(metadata)
+            .setBlocks(blocks)
+            .setCustoms(customs)
+            .setWaitForMetadataVersion(waitForMetadataVersion)
+            .setWaitForTimeout(waitForTimeout.toString())
+            .addAllIndices(indices)
+            .build();
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    public ProtobufClusterStateRequest all() {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setRoutingTable(true)
+            .setNodes(true)
+            .setMetadata(true)
+            .setBlocks(true)
+            .setCustoms(true)
+            .addAllIndices(Arrays.asList(Strings.EMPTY_ARRAY))
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    public ProtobufClusterStateRequest clear() {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setRoutingTable(false)
+            .setNodes(false)
+            .setMetadata(false)
+            .setBlocks(false)
+            .setCustoms(false)
+            .addAllIndices(Arrays.asList(Strings.EMPTY_ARRAY))
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    public boolean routingTable() {
+        return this.clusterStateRequest.getRoutingTable();
+    }
+
+    public ProtobufClusterStateRequest routingTable(boolean routingTable) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setRoutingTable(routingTable)
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    public boolean nodes() {
+        return this.clusterStateRequest.getNodes();
+    }
+
+    public ProtobufClusterStateRequest nodes(boolean nodes) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setNodes(nodes)
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    public boolean metadata() {
+        return this.clusterStateRequest.getMetadata();
+    }
+
+    public ProtobufClusterStateRequest metadata(boolean metadata) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setMetadata(metadata)
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
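+    // Note: each single-flag setter in this class builds a fresh protobuf message
+    // from newBuilder(), so flags set by earlier calls are not carried over; only
+    // all(), clear() and the long-form constructor populate several fields at once.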
+
+    public boolean blocks() {
+        return this.clusterStateRequest.getBlocks();
+    }
+
+    public ProtobufClusterStateRequest blocks(boolean blocks) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .setBlocks(blocks)
+            .build();
+        return this;
+    }
+
+    @Override
+    public String[] indices() {
+        return this.clusterStateRequest.getIndicesList().toArray(new String[0]);
+    }
+
+    @Override
+    public ProtobufClusterStateRequest indices(String... indices) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .addAllIndices(Arrays.asList(indices))
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    @Override
+    public boolean includeDataStreams() {
+        return true;
+    }
+
+    public ProtobufClusterStateRequest customs(boolean customs) {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setCustoms(customs)
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    public boolean customs() {
+        return this.clusterStateRequest.getCustoms();
+    }
+
+    public TimeValue waitForTimeout() {
+        return TimeValue.parseTimeValue(
+            this.clusterStateRequest.getWaitForTimeout(),
+            getClass().getSimpleName() + ".clusterManagerNodeTimeout"
+        );
+    }
+
+    public ProtobufClusterStateRequest waitForTimeout(TimeValue waitForTimeout) {
+        // build with the caller-supplied timeout only; setting the default afterwards
+        // would silently overwrite the requested value
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setWaitForTimeout(waitForTimeout.toString())
+            .build();
+        return this;
+    }
+
+    public Long waitForMetadataVersion() {
+        return this.clusterStateRequest.getWaitForMetadataVersion();
+    }
+
+    public ProtobufClusterStateRequest waitForMetadataVersion(long waitForMetadataVersion) {
+        if (waitForMetadataVersion < 1) {
+            throw new IllegalArgumentException(
+                "provided waitForMetadataVersion should be >= 1, but instead is [" + waitForMetadataVersion + "]"
+            );
+        }
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
+            .setWaitForMetadataVersion(waitForMetadataVersion)
+            .setWaitForTimeout(DEFAULT_WAIT_FOR_NODE_TIMEOUT.toString())
+            .build();
+        return this;
+    }
+
+    @Override
+    public IndicesOptions indicesOptions() {
+        // TODO Auto-generated method stub
+        throw new UnsupportedOperationException("Unimplemented method 'indicesOptions'");
+    }
+
+    public ProtobufClusterStateRequest(byte[] data) throws IOException {
+        this.clusterStateRequest = ClusterStateRequestProto.ClusterStateRequest.parseFrom(data);
+    }
+
+    public ProtobufClusterStateRequest(ClusterStateRequestProto.ClusterStateRequest clusterStateRequest) {
+        this.clusterStateRequest = clusterStateRequest;
+    }
+
+    @Override
+    public void writeTo(OutputStream out) throws IOException {
+        out.write(this.clusterStateRequest.toByteArray());
+    }
+
+    public ClusterStateRequest request() {
+        return this.clusterStateRequest;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java
new file mode 100644
index 0000000000000..15c39de1c049a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufClusterStateResponse.java
@@ -0,0 +1,109 @@
+/*
+*
SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.admin.cluster.state; + +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.server.proto.ClusterStateResponseProto; +import org.opensearch.server.proto.ClusterStateResponseProto.ClusterStateResponse; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; + +/** + * The response for getting the cluster state. +* +* @opensearch.internal +*/ +public class ProtobufClusterStateResponse extends ProtobufActionResponse { + + private ClusterStateResponseProto.ClusterStateResponse clusterStateRes; + + public ProtobufClusterStateResponse(String clusterName, DiscoveryNodes nodes, long version, String stateUUID, boolean waitForTimedOut) { + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Builder discoveryNodesBuilder = + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.newBuilder(); + + List allNodes = convertNodes(nodes); + discoveryNodesBuilder.addAllAllNodes(allNodes) + .setClusterManagerNodeId(nodes.getClusterManagerNodeId()) + .setLocalNodeId(nodes.getLocalNodeId()) + .setMinNonClientNodeVersion(nodes.getSmallestNonClientNodeVersion().toString()) + .setMaxNonClientNodeVersion(nodes.getLargestNonClientNodeVersion().toString()) + .setMinNodeVersion(nodes.getMinNodeVersion().toString()) + .setMaxNodeVersion(nodes.getMaxNodeVersion().toString()); + ClusterStateResponseProto.ClusterStateResponse.ClusterState.Builder clusterStateBuilder = + ClusterStateResponseProto.ClusterStateResponse.ClusterState.newBuilder(); + clusterStateBuilder.setClusterName(clusterName).setVersion(version).setStateUUID(stateUUID).setNodes(discoveryNodesBuilder.build()); + this.clusterStateRes = ClusterStateResponseProto.ClusterStateResponse.newBuilder() + .setClusterName(clusterName) + .setClusterState(clusterStateBuilder.build()) + .setWaitForTimedOut(waitForTimedOut) + .build(); + } + + private List convertNodes(DiscoveryNodes nodes) { + List convertedNodes = new ArrayList<>(); + if (nodes.getNodes().isEmpty()) { + return convertedNodes; + } + for (DiscoveryNode node : nodes.getNodes().values()) { + List nodeRoles = new ArrayList<>(); + node.getRoles().forEach(role -> { + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.NodeRole.Builder nodeRoleBuilder = + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.NodeRole.newBuilder(); + nodeRoleBuilder.setIsKnownRole(role.isKnownRole()) + .setIsDynamicRole(role.isDynamicRole()) + .setRoleName(role.roleName()) + .setRoleNameAbbreviation(role.roleNameAbbreviation()) + .setCanContainData(role.canContainData()) + .build(); + nodeRoles.add(nodeRoleBuilder.build()); + }); + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.Builder nodeBuilder = + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.newBuilder(); + nodeBuilder.setNodeName(node.getName()) + .setNodeId(node.getId()) + .setEphemeralId(node.getEphemeralId()) + .setHostName(node.getHostName()) + .setHostAddress(node.getHostAddress()) + .setTransportAddress(node.getAddress().toString()) + .putAllAttributes(node.getAttributes()) + 
.addAllRoles(nodeRoles) + .setVersion(node.getVersion().toString()) + .build(); + convertedNodes.add(nodeBuilder.build()); + } + return convertedNodes; + } + + @Override + public String toString() { + return "ProtobufClusterStateResponse{" + "clusterState=" + this.clusterStateRes.getClusterState() + '}'; + } + + public ClusterStateResponse response() { + return this.clusterStateRes; + } + + public ProtobufClusterStateResponse(byte[] data) throws IOException { + this.clusterStateRes = ClusterStateResponseProto.ClusterStateResponse.parseFrom(data); + } + + public ProtobufClusterStateResponse(ClusterStateResponseProto.ClusterStateResponse clusterStateRes) { + this.clusterStateRes = clusterStateRes; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.clusterStateRes.toByteArray()); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java new file mode 100644 index 0000000000000..a9de5245cc2d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ProtobufTransportClusterStateAction.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.state; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.clustermanager.ProtobufTransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateObserver; +import org.opensearch.cluster.NotClusterManagerException; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.NodeClosedException; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.function.Predicate; + +/** + * Transport action for obtaining cluster state + * + * @opensearch.internal + */ +public class ProtobufTransportClusterStateAction extends ProtobufTransportClusterManagerNodeReadAction< + ProtobufClusterStateRequest, + ProtobufClusterStateResponse> { + + private final Logger logger = LogManager.getLogger(getClass()); + + static { + final String property = System.getProperty("opensearch.cluster_state.size"); + if (property != null) { + throw new IllegalArgumentException("opensearch.cluster_state.size is no longer respected but was [" + property + "]"); + } + } + + @Inject + public ProtobufTransportClusterStateAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + ClusterStateAction.NAME, + false, + transportService, + clusterService, + threadPool, + actionFilters, + ProtobufClusterStateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + // very 
lightweight operation in memory, no need to fork to a thread + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterBlockException checkBlock(ProtobufClusterStateRequest request, ClusterState state) { + // cluster state calls are done also on a fully blocked cluster to figure out what is going + // on in the cluster. For example, which nodes have joined yet the recovery has not yet kicked + // in, we need to make sure we allow those calls + // return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return null; + } + + @Override + protected void clusterManagerOperation( + final ProtobufClusterStateRequest request, + final ClusterState state, + final ActionListener listener + ) throws IOException { + final Predicate acceptableClusterStatePredicate = request.waitForMetadataVersion() == null + ? clusterState -> true + : clusterState -> clusterState.metadata().version() >= request.waitForMetadataVersion(); + + final Predicate acceptableClusterStateOrNotMasterPredicate = request.local() + ? acceptableClusterStatePredicate + : acceptableClusterStatePredicate.or(clusterState -> clusterState.nodes().isLocalNodeElectedClusterManager() == false); + + if (acceptableClusterStatePredicate.test(state)) { + ActionListener.completeWith(listener, () -> buildResponse(request, state)); + } else { + assert acceptableClusterStateOrNotMasterPredicate.test(state) == false; + new ClusterStateObserver(state, clusterService, request.waitForTimeout(), logger, threadPool.getThreadContext()) + .waitForNextChange(new ClusterStateObserver.Listener() { + + @Override + public void onNewClusterState(ClusterState newState) { + if (acceptableClusterStatePredicate.test(newState)) { + ActionListener.completeWith(listener, () -> buildResponse(request, newState)); + } else { + listener.onFailure( + new NotClusterManagerException( + "cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion() + ) + ); + } + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + try { + listener.onResponse(new ProtobufClusterStateResponse(state.getClusterName().value(), null, 1, null, true)); + } catch (Exception e) { + listener.onFailure(e); + } + } + }, acceptableClusterStateOrNotMasterPredicate); + } + } + + private ProtobufClusterStateResponse buildResponse(final ProtobufClusterStateRequest request, final ClusterState currentState) { + logger.trace("Serving cluster state request using version {}", currentState.version()); + return new ProtobufClusterStateResponse( + currentState.getClusterName().value(), + currentState.nodes(), + currentState.version(), + currentState.stateUUID(), + false + ); + } + + @Override + protected ProtobufClusterStateResponse read(byte[] in) throws IOException { + return new ProtobufClusterStateResponse(in); + } + +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java new file mode 100644 index 0000000000000..1a457423f0b00 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainAction.java @@ -0,0 +1,26 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.action.main;
+
+import org.opensearch.action.ProtobufActionType;
+
+/**
+ * The main OpenSearch Action
+*
+* @opensearch.internal
+*/
+public class ProtobufMainAction extends ProtobufActionType<ProtobufMainResponse> {
+
+    public static final String NAME = "cluster:monitor/main";
+    public static final ProtobufMainAction INSTANCE = new ProtobufMainAction();
+
+    public ProtobufMainAction() {
+        super(NAME, ProtobufMainResponse::new);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
new file mode 100644
index 0000000000000..a28b75b330fe5
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainRequest.java
@@ -0,0 +1,33 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.main;
+
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+
+import java.io.IOException;
+
+/**
+ * Transport request for main action
+*
+* @opensearch.internal
+*/
+public class ProtobufMainRequest extends ProtobufActionRequest {
+
+    public ProtobufMainRequest() {}
+
+    ProtobufMainRequest(byte[] in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
new file mode 100644
index 0000000000000..27f2a00864dc8
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/main/ProtobufMainResponse.java
@@ -0,0 +1,173 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/ + +package org.opensearch.action.main; + +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.ObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Objects; + +/** + * The main response of opensearch +* +* @opensearch.internal +*/ +public class ProtobufMainResponse extends ProtobufActionResponse implements ToXContentObject { + + private String nodeName; + private Version version; + private ClusterName clusterName; + private String clusterUuid; + private Build build; + public static final String TAGLINE = "The OpenSearch Project: https://opensearch.org/"; + + ProtobufMainResponse() {} + + ProtobufMainResponse(byte[] in) throws IOException { + super(in); + } + + public ProtobufMainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { + this.nodeName = nodeName; + this.version = version; + this.clusterName = clusterName; + this.clusterUuid = clusterUuid; + this.build = build; + } + + public String getNodeName() { + return nodeName; + } + + public Version getVersion() { + return version; + } + + public ClusterName getClusterName() { + return clusterName; + } + + public String getClusterUuid() { + return clusterUuid; + } + + public Build getBuild() { + return build; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", nodeName); + builder.field("cluster_name", clusterName.value()); + builder.field("cluster_uuid", clusterUuid); + builder.startObject("version") + .field("distribution", build.getDistribution()) + .field("number", build.getQualifiedVersion()) + .field("build_type", build.type().displayName()) + .field("build_hash", build.hash()) + .field("build_date", build.date()) + .field("build_snapshot", build.isSnapshot()) + .field("lucene_version", version.luceneVersion.toString()) + .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString()) + .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString()) + .endObject(); + builder.field("tagline", TAGLINE); + builder.endObject(); + return builder; + } + + private static final ObjectParser PARSER = new ObjectParser<>( + ProtobufMainResponse.class.getName(), + true, + ProtobufMainResponse::new + ); + + static { + PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name")); + PARSER.declareString((response, value) -> response.clusterName = new ClusterName(value), new ParseField("cluster_name")); + PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid")); + PARSER.declareString((response, value) -> {}, new ParseField("tagline")); + PARSER.declareObject((response, value) -> { + final String buildType = (String) value.get("build_type"); + response.build = new Build( + /* + * Be lenient when reading on the wire, the enumeration values from other versions might be different than what + * we know. + */ + buildType == null ? 
Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType, false), + (String) value.get("build_hash"), + (String) value.get("build_date"), + (boolean) value.get("build_snapshot"), + (String) value.get("number"), + (String) value.get("distribution") + ); + response.version = Version.fromString( + ((String) value.get("number")).replace("-SNAPSHOT", "").replaceFirst("-(alpha\\d+|beta\\d+|rc\\d+)", "") + ); + }, (parser, context) -> parser.map(), new ParseField("version")); + } + + public static ProtobufMainResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ProtobufMainResponse other = (ProtobufMainResponse) o; + return Objects.equals(nodeName, other.nodeName) + && Objects.equals(version, other.version) + && Objects.equals(clusterUuid, other.clusterUuid) + && Objects.equals(build, other.build) + && Objects.equals(clusterName, other.clusterName); + } + + @Override + public int hashCode() { + return Objects.hash(nodeName, version, clusterUuid, build, clusterName); + } + + @Override + public String toString() { + return "ProtobufMainResponse{" + + "nodeName='" + + nodeName + + '\'' + + ", version=" + + version + + ", clusterName=" + + clusterName + + ", clusterUuid='" + + clusterUuid + + '\'' + + ", build=" + + build + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java new file mode 100644 index 0000000000000..a9340c10f541d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/main/ProtobufTransportMainAction.java @@ -0,0 +1,55 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.action.main; + +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.ProtobufHandledTransportAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.Node; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.transport.TransportService; + +/** + * Performs the main action +* +* @opensearch.internal +*/ +public class ProtobufTransportMainAction extends ProtobufHandledTransportAction { + + private final String nodeName; + private final ClusterService clusterService; + + @Inject + public ProtobufTransportMainAction( + Settings settings, + TransportService transportService, + ProtobufActionFilters actionFilters, + ClusterService clusterService + ) { + super(MainAction.NAME, transportService, actionFilters, ProtobufMainRequest::new); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + this.clusterService = clusterService; + } + + @Override + protected void doExecute(ProtobufTask task, ProtobufMainRequest request, ActionListener listener) { + ClusterState clusterState = clusterService.state(); + ClusterName clusterName = new ClusterName(clusterState.getClusterName().value()); + listener.onResponse( + new ProtobufMainResponse(nodeName, Version.CURRENT, clusterName, clusterState.metadata().clusterUUID(), Build.CURRENT) + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufAbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/ProtobufAbstractSearchAsyncAction.java new file mode 100644 index 0000000000000..0e05bbf58375f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufAbstractSearchAsyncAction.java @@ -0,0 +1,835 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.NoShardAvailableActionException; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.action.support.TransportActions; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.FailAwareWeightedRouting; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.internal.ProtobufShardSearchRequest; +import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.transport.Transport; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +/** + * This is an abstract base class that encapsulates the logic to fan out to all shards in provided {@link GroupShardsIterator} + * and collect the results. If a shard request returns a failure this class handles the advance to the next replica of the shard until + * the shards replica iterator is exhausted. Each shard is referenced by position in the {@link GroupShardsIterator} which is later + * referred to as the {@code shardIndex}. + * The fan out and collect algorithm is traditionally used as the initial phase which can either be a query execution or collection of + * distributed frequencies + * + * @opensearch.internal + */ +abstract class ProtobufAbstractSearchAsyncAction extends SearchPhase implements SearchPhaseContext { + private static final float DEFAULT_INDEX_BOOST = 1.0f; + private final Logger logger; + private final SearchTransportService searchTransportService; + private final Executor executor; + private final ActionListener listener; + private final ProtobufSearchRequest request; + /** + * Used by subclasses to resolve node ids to DiscoveryNodes. 
+ **/ + private final BiFunction nodeIdToConnection; + private final ProtobufSearchTask task; + protected final SearchPhaseResults results; + private final ClusterState clusterState; + private final Map aliasFilter; + private final Map concreteIndexBoosts; + private final Map> indexRoutings; + private final SetOnce> shardFailures = new SetOnce<>(); + private final Object shardFailuresMutex = new Object(); + private final AtomicBoolean hasShardResponse = new AtomicBoolean(false); + private final AtomicInteger successfulOps = new AtomicInteger(); + private final AtomicInteger skippedOps = new AtomicInteger(); + private final ProtobufTransportSearchAction.SearchTimeProvider timeProvider; + private final ProtobufSearchResponse.Clusters clusters; + + protected final GroupShardsIterator toSkipShardsIts; + protected final GroupShardsIterator shardsIts; + private final int expectedTotalOps; + private final AtomicInteger totalOps = new AtomicInteger(); + private final int maxConcurrentRequestsPerNode; + private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); + private final boolean throttleConcurrentRequests; + + private final List releasables = new ArrayList<>(); + + ProtobufAbstractSearchAsyncAction( + String name, + Logger logger, + SearchTransportService searchTransportService, + BiFunction nodeIdToConnection, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + Executor executor, + ProtobufSearchRequest protobufSearchRequest, + ActionListener listener, + GroupShardsIterator shardsIts, + ProtobufTransportSearchAction.SearchTimeProvider timeProvider, + ClusterState clusterState, + ProtobufSearchTask task, + SearchPhaseResults resultConsumer, + int maxConcurrentRequestsPerNode, + ProtobufSearchResponse.Clusters clusters + ) { + super(name); + final List toSkipIterators = new ArrayList<>(); + final List iterators = new ArrayList<>(); + for (final SearchShardIterator iterator : shardsIts) { + if (iterator.skip()) { + toSkipIterators.add(iterator); + } else { + iterators.add(iterator); + } + } + this.toSkipShardsIts = new GroupShardsIterator<>(toSkipIterators); + this.shardsIts = new GroupShardsIterator<>(iterators); + // we need to add 1 for non active partition, since we count it in the total. This means for each shard in the iterator we sum up + // it's number of active shards but use 1 as the default if no replica of a shard is active at this point. + // on a per shards level we use shardIt.remaining() to increment the totalOps pointer but add 1 for the current shard result + // we process hence we add one for the non active partition here. 
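+        // e.g. three shard groups with sizes {2, 0, 1} give expectedTotalOps = 2 + 1 + 1 = 4,
+        // since totalSizeWith1ForEmpty() counts an empty group as a single expected operation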
+ this.expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); + this.maxConcurrentRequestsPerNode = maxConcurrentRequestsPerNode; + // in the case were we have less shards than maxConcurrentRequestsPerNode we don't need to throttle + this.throttleConcurrentRequests = maxConcurrentRequestsPerNode < shardsIts.size(); + this.timeProvider = timeProvider; + this.logger = logger; + this.searchTransportService = searchTransportService; + this.executor = executor; + this.request = protobufSearchRequest; + this.task = task; + this.listener = ActionListener.runAfter(listener, this::releaseContext); + this.nodeIdToConnection = nodeIdToConnection; + this.clusterState = clusterState; + this.concreteIndexBoosts = concreteIndexBoosts; + this.aliasFilter = aliasFilter; + this.indexRoutings = indexRoutings; + this.results = resultConsumer; + this.clusters = clusters; + } + + @Override + public void addReleasable(Releasable releasable) { + releasables.add(releasable); + } + + public void releaseContext() { + Releasables.close(releasables); + } + + /** + * Builds how long it took to execute the search. + */ + long buildTookInMillis() { + return timeProvider.buildTookInMillis(); + } + + /** + * This is the main entry point for a search. This method starts the search execution of the initial phase. + */ + public final void start() { + System.out.println("ProtobufAbstractSearchAsyncAction start"); + if (getNumShards() == 0) { + System.out.println("Number of shards is 0"); + // no search shards to search on, bail with empty response + // (it happens with search across _all with no indices around and consistent with broadcast operations) + int trackTotalHitsUpTo = request.source() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO + : request.source().trackTotalHitsUpTo() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO + : request.source().trackTotalHitsUpTo(); + // total hits is null in the response if the tracking of total hits is disabled + boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; + listener.onResponse( + new ProtobufSearchResponse( + InternalSearchResponse.empty(withTotalHits), + null, + 0, + 0, + 0, + buildTookInMillis(), + ShardSearchFailure.EMPTY_ARRAY, + clusters, + null + ) + ); + return; + } + executePhase(this); + } + + @Override + public final void run() { + System.out.println("ProtobufAbstractSearchAsyncAction run: running the phase coming from phase.recordAndRun()"); + for (final SearchShardIterator iterator : toSkipShardsIts) { + assert iterator.skip(); + skipShard(iterator); + } + if (shardsIts.size() > 0) { + assert request.allowPartialSearchResults() != null : "ProtobufSearchRequest missing setting for allowPartialSearchResults"; + if (request.allowPartialSearchResults() == false) { + final StringBuilder missingShards = new StringBuilder(); + // Fail-fast verification of all shards being available + for (int index = 0; index < shardsIts.size(); index++) { + final SearchShardIterator shardRoutings = shardsIts.get(index); + if (shardRoutings.size() == 0) { + if (missingShards.length() > 0) { + missingShards.append(", "); + } + missingShards.append(shardRoutings.shardId()); + } + } + if (missingShards.length() > 0) { + // Status red - shard is missing all copies and would produce partial results for an index search + final String msg = "Search rejected due to missing shards [" + + missingShards + + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; + throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); + } + } + for (int index = 0; index < shardsIts.size(); index++) { + final SearchShardIterator shardRoutings = shardsIts.get(index); + assert shardRoutings.skip() == false; + performPhaseOnShard(index, shardRoutings, shardRoutings.nextOrNull()); + } + } + } + + void skipShard(SearchShardIterator iterator) { + successfulOps.incrementAndGet(); + skippedOps.incrementAndGet(); + assert iterator.skip(); + successfulShardExecution(iterator); + } + + private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { + /* + * We capture the thread that this phase is starting on. When we are called back after executing the phase, we are either on the + * same thread (because we never went async, or the same thread was selected from the thread pool) or a different thread. If we + * continue on the same thread in the case that we never went async and this happens repeatedly we will end up recursing deeply and + * could stack overflow. To prevent this, we fork if we are called back on the same thread that execution started on and otherwise + * we can continue (cf. InitialSearchPhase#maybeFork). + */ + if (shard == null) { + fork(() -> onShardFailure(shardIndex, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()))); + } else { + final PendingExecutions pendingExecutions = throttleConcurrentRequests + ? pendingExecutionsPerNode.computeIfAbsent(shard.getNodeId(), n -> new PendingExecutions(maxConcurrentRequestsPerNode)) + : null; + Runnable r = () -> { + final Thread thread = Thread.currentThread(); + try { + executePhaseOnShard(shardIt, shard, new SearchActionListener(shard, shardIndex) { + @Override + public void innerOnResponse(Result result) { + try { + onShardResult(result, shardIt); + } finally { + executeNext(pendingExecutions, thread); + } + } + + @Override + public void onFailure(Exception t) { + try { + onShardFailure(shardIndex, shard, shardIt, t); + } finally { + executeNext(pendingExecutions, thread); + } + } + }); + } catch (final Exception e) { + try { + /* + * It is possible to run into connection exceptions here because we are getting the connection early and might + * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. + */ + fork(() -> { + // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception. + // In this case calling onShardFailure() would overflow the operations counter, so the best we could do + // here is to fail the phase and move on to the next one. + if (totalOps.get() == expectedTotalOps) { + onPhaseFailure(this, "The phase has failed", e); + } else { + onShardFailure(shardIndex, shard, shardIt, e); + } + }); + } finally { + executeNext(pendingExecutions, thread); + } + } + }; + if (throttleConcurrentRequests) { + pendingExecutions.tryRun(r); + } else { + r.run(); + } + } + } + + /** + * Sends the request to the actual shard. 
+     * @param shardIt the shards iterator
+     * @param shard the shard routing to send the request for
+     * @param listener the listener to notify on response
+     */
+    protected abstract void executePhaseOnShard(
+        SearchShardIterator shardIt,
+        SearchShardTarget shard,
+        SearchActionListener<Result> listener
+    );
+
+    private void fork(final Runnable runnable) {
+        executor.execute(new AbstractRunnable() {
+            @Override
+            public void onFailure(Exception e) {
+
+            }
+
+            @Override
+            protected void doRun() {
+                runnable.run();
+            }
+
+            @Override
+            public boolean isForceExecution() {
+                // we can not allow a stuffed queue to reject execution here
+                return true;
+            }
+        });
+    }
+
+    @Override
+    public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) {
+        /* This is the main search phase transition where we move to the next phase. At this point we check if there is
+         * at least one successful operation left and if so we move to the next phase. If not we immediately fail the
+         * search phase as "all shards failed" */
+        if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure
+            final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
+            Throwable cause = shardSearchFailures.length == 0
+                ? null
+                : OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
+            logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause);
+            onPhaseFailure(currentPhase, "all shards failed", cause);
+        } else {
+            Boolean allowPartialResults = request.allowPartialSearchResults();
+            assert allowPartialResults != null : "ProtobufSearchRequest missing setting for allowPartialSearchResults";
+            if (allowPartialResults == false && successfulOps.get() != getNumShards()) {
+                // check if there are actual failures in the atomic array since
+                // successful retries can reset the failures to null
+                ShardOperationFailedException[] shardSearchFailures = buildShardFailures();
+                if (shardSearchFailures.length > 0) {
+                    if (logger.isDebugEnabled()) {
+                        int numShardFailures = shardSearchFailures.length;
+                        shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures);
+                        Throwable cause = OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
+                        logger.debug(
+                            () -> new ParameterizedMessage("{} shards failed for phase: [{}]", numShardFailures, getName()),
+                            cause
+                        );
+                    }
+                    onPhaseFailure(currentPhase, "Partial shards failure", null);
+                    return;
+                } else {
+                    int discrepancy = getNumShards() - successfulOps.get();
+                    assert discrepancy > 0 : "discrepancy: " + discrepancy;
+                    if (logger.isDebugEnabled()) {
+                        logger.debug(
+                            "Partial shards failure (unavailable: {}, successful: {}, skipped: {}, num-shards: {}, phase: {})",
+                            discrepancy,
+                            successfulOps.get(),
+                            skippedOps.get(),
+                            getNumShards(),
+                            currentPhase.getName()
+                        );
+                    }
+                    onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null);
+                    return;
+                }
+            }
+            if (logger.isTraceEnabled()) {
+                final String resultsFrom = results.getSuccessfulResults()
+                    .map(r -> r.getSearchShardTarget().toString())
+                    .collect(Collectors.joining(","));
+                logger.trace(
+                    "[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
+                    currentPhase.getName(),
+                    nextPhase.getName(),
+                    resultsFrom,
+                    clusterState.version()
+                );
+            }
+            executePhase(nextPhase);
+        }
+    }
+
+    private void executePhase(SearchPhase phase) {
+        try {
+            phase.run();
+        } catch (Exception e) {
+            if (logger.isDebugEnabled()) {
+                logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e);
+            }
+            onPhaseFailure(phase, "", e);
+        }
+    }
+
+    ShardSearchFailure[] buildShardFailures() {
+        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
+        if (shardFailures == null) {
+            return ShardSearchFailure.EMPTY_ARRAY;
+        }
+        List<ShardSearchFailure> entries = shardFailures.asList();
+        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
+        for (int i = 0; i < failures.length; i++) {
+            failures[i] = entries.get(i);
+        }
+        return failures;
+    }
+
+    private void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shard, final SearchShardIterator shardIt, Exception e) {
+        // we always add the shard failure for a specific shard instance
+        // we do make sure to clean it on a successful response from a shard
+        onShardFailure(shardIndex, shard, e);
+        SearchShardTarget nextShard = FailAwareWeightedRouting.getInstance()
+            .findNext(shardIt, clusterState, e, () -> totalOps.incrementAndGet());
+
+        final boolean lastShard = nextShard == null;
+        if (logger.isTraceEnabled()) {
+            logger.trace(
+                () -> new ParameterizedMessage(
+                    "{}: Failed to execute [{}] lastShard [{}]",
+                    shard != null ? shard : shardIt.shardId(),
+                    request,
+                    lastShard
+                ),
+                e
+            );
+        } else {
+            // Log the message without an exception.
+            logger.debug(
+                new ParameterizedMessage(
+                    "{}: Failed to execute [{}] lastShard [{}]",
+                    shard != null ? shard : shardIt.shardId(),
+                    request,
+                    lastShard
+                )
+            );
+        }
+        if (lastShard) {
+            onShardGroupFailure(shardIndex, shard, e);
+        }
+        final int totalOps = this.totalOps.incrementAndGet();
+        if (totalOps == expectedTotalOps) {
+            try {
+                onPhaseDone();
+            } catch (final Exception ex) {
+                onPhaseFailure(this, "The phase has failed", ex);
+            }
+        } else if (totalOps > expectedTotalOps) {
+            throw new AssertionError(
+                "unexpected higher total ops [" + totalOps + "] compared to expected [" + expectedTotalOps + "]",
+                new SearchPhaseExecutionException(getName(), "Shard failures", null, buildShardFailures())
+            );
+        } else {
+            if (lastShard == false) {
+                performPhaseOnShard(shardIndex, shardIt, nextShard);
+            }
+        }
+    }
+
+    /**
+     * Executed once for every {@link ShardId} that failed on all available shard routings.
+     *
+     * @param shardIndex the shard index that failed
+     * @param shardTarget the last shard target for this failure
+     * @param exc the last failure reason
+     */
+    protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {}
+
+    /**
+     * Executed once for every failed shard level request. This method is invoked before the next replica is tried for the given
+     * shard target.
+     * @param shardIndex the internal index for this shard.
+     * Each shard has an index / ordinal assigned that is used to reference its results
+     * @param shardTarget the shard target for this failure
+     * @param e the failure reason
+     */
+    @Override
+    public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
+        // we don't aggregate shard failures on non active shards (but do keep the header counts right)
+        if (TransportActions.isShardNotAvailableException(e) == false) {
+            AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
+            // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
+            if (shardFailures == null) { // this is double checked locking but it's fine since SetOnce uses a volatile read internally
+                synchronized (shardFailuresMutex) {
+                    shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it?
+                    if (shardFailures == null) { // still null so we are the first and create a new instance
+                        shardFailures = new AtomicArray<>(getNumShards());
+                        this.shardFailures.set(shardFailures);
+                    }
+                }
+            }
+            ShardSearchFailure failure = shardFailures.get(shardIndex);
+            if (failure == null) {
+                shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
+            } else {
+                // the failure is already present, try not to override it with an exception that is less meaningful,
+                // for example an illegal shard state
+                if (TransportActions.isReadOverrideException(e)) {
+                    shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
+                }
+            }
+
+            if (results.hasResult(shardIndex)) {
+                assert failure == null : "shard failed before but shouldn't: " + failure;
+                successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
+            }
+        }
+        results.consumeShardFailure(shardIndex);
+    }
+
+    /**
+     * Executed once for every successful shard level request.
+     * @param result the result returned from the shard
+     * @param shardIt the shard iterator
+     */
+    protected void onShardResult(Result result, SearchShardIterator shardIt) {
+        assert result.getShardIndex() != -1 : "shard index is not set";
+        assert result.getSearchShardTarget() != null : "search shard target must not be null";
+        hasShardResponse.set(true);
+        if (logger.isTraceEnabled()) {
+            logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null);
+        }
+        results.consumeResult(result, () -> onShardResultConsumed(result, shardIt));
+    }
+
+    private void onShardResultConsumed(Result result, SearchShardIterator shardIt) {
+        successfulOps.incrementAndGet();
+        // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
+        // so it's ok concurrency wise to miss potentially the shard failures being created because of another failure
+        // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
+        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
+        if (shardFailures != null) {
+            shardFailures.set(result.getShardIndex(), null);
+        }
+        // we need to increment successful ops first before we compare the exit condition otherwise if we
+        // are fast we could concurrently update totalOps but then preempt one of the threads which can
+        // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.
+        // increment all the "future" shards to update the total ops since some may work and some may not...
+ // and when that happens, we break on total ops, so we must maintain them + successfulShardExecution(shardIt); + } + + private void successfulShardExecution(SearchShardIterator shardsIt) { + final int remainingOpsOnIterator; + if (shardsIt.skip()) { + remainingOpsOnIterator = shardsIt.remaining(); + } else { + remainingOpsOnIterator = shardsIt.remaining() + 1; + } + final int xTotalOps = totalOps.addAndGet(remainingOpsOnIterator); + if (xTotalOps == expectedTotalOps) { + try { + onPhaseDone(); + } catch (final Exception ex) { + onPhaseFailure(this, "The phase has failed", ex); + } + } else if (xTotalOps > expectedTotalOps) { + throw new AssertionError( + "unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]", + new SearchPhaseExecutionException(getName(), "Shard failures", null, buildShardFailures()) + ); + } + } + + @Override + public final int getNumShards() { + return results.getNumShards(); + } + + @Override + public final Logger getLogger() { + return logger; + } + + @Override + public final ProtobufSearchTask getProtobufTask() { + return task; + } + + @Override + public final ProtobufSearchRequest getProtobufRequest() { + return request; + } + + @Override + public final SearchTask getTask() { + throw new UnsupportedOperationException("Unimplemented method 'getTask'"); + } + + @Override + public final SearchRequest getRequest() { + throw new UnsupportedOperationException("Unimplemented method 'getRequest'"); + } + + protected final ProtobufSearchResponse buildSearchResponse( + InternalSearchResponse internalSearchResponse, + ShardSearchFailure[] failures, + String scrollId, + String searchContextId + ) { + return new ProtobufSearchResponse( + internalSearchResponse, + scrollId, + getNumShards(), + successfulOps.get(), + skippedOps.get(), + buildTookInMillis(), + failures, + clusters, + searchContextId + ); + } + + boolean buildPointInTimeFromSearchResults() { + // TODO: Until we implement the retry mechanism for point in times (i.e., replace an unavailable shard with an equivalent copy), + // we can simply return the point in time of the search request. + return false; + } + + @Override + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + ShardSearchFailure[] failures = buildShardFailures(); + Boolean allowPartialResults = request.allowPartialSearchResults(); + assert allowPartialResults != null : "ProtobufSearchRequest missing setting for allowPartialSearchResults"; + if (allowPartialResults == false && failures.length > 0) { + raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); + } else { + final Version minNodeVersion = clusterState.nodes().getMinNodeVersion(); + final String scrollId = request.scroll() != null ? 
TransportSearchHelper.buildScrollId(queryResults, minNodeVersion) : null; + final String searchContextId; + if (buildPointInTimeFromSearchResults()) { + searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minNodeVersion); + } else { + if (request.source() != null && request.source().pointInTimeBuilder() != null) { + searchContextId = request.source().pointInTimeBuilder().getId(); + } else { + searchContextId = null; + } + } + listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); + } + } + + @Override + public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); + } + + /** + * This method should be called if a search phase failed to ensure all relevant reader contexts are released. + * This method will also notify the listener and sends back a failure to the user. + * + * @param exception the exception explaining or causing the phase failure + */ + private void raisePhaseFailure(SearchPhaseExecutionException exception) { + // we don't release persistent readers (point in time). + if (request.pointInTimeBuilder() == null) { + results.getSuccessfulResults().forEach((entry) -> { + if (entry.getContextId() != null) { + try { + SearchShardTarget searchShardTarget = entry.getSearchShardTarget(); + Transport.Connection connection = getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); + sendReleaseSearchContext(entry.getContextId(), connection, searchShardTarget.getOriginalIndices()); + } catch (Exception inner) { + inner.addSuppressed(exception); + logger.trace("failed to release context", inner); + } + } + }); + } + Releasables.close(releasables); + listener.onFailure(exception); + } + + /** + * Executed once all shard results have been received and processed + * @see #onShardFailure(int, SearchShardTarget, Exception) + * @see #onShardResult(SearchPhaseResult, SearchShardIterator) + */ + final void onPhaseDone() { // as a tribute to @kimchy aka. 
finishHim() + final SearchPhase nextPhase = getNextPhase(results, this); + // if (request instanceof PipelinedRequest && nextPhase != null) { + // ((PipelinedRequest) request).transformSearchPhaseResults(results, this, this.getName(), nextPhase.getName()); + // } + executeNextPhase(this, nextPhase); + } + + @Override + public final Transport.Connection getConnection(String clusterAlias, String nodeId) { + return nodeIdToConnection.apply(clusterAlias, nodeId); + } + + @Override + public final SearchTransportService getSearchTransport() { + return searchTransportService; + } + + @Override + public final void execute(Runnable command) { + executor.execute(command); + } + + @Override + public final void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + public final ShardSearchRequest buildShardSearchRequest(SearchShardIterator shardIt) { + throw new UnsupportedOperationException("Unimplemented method 'buildShardSearchRequest'"); + } + + @Override + public final ProtobufShardSearchRequest buildProtobufShardSearchRequest(SearchShardIterator shardIt) { + AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID()); + assert filter != null; + float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST); + String indexName = shardIt.shardId().getIndex().getName(); + final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet()).toArray(new String[0]); + ProtobufShardSearchRequest shardRequest = new ProtobufShardSearchRequest( + shardIt.getOriginalIndices(), + request, + shardIt.shardId(), + getNumShards(), + filter, + indexBoost, + timeProvider.getAbsoluteStartMillis(), + shardIt.getClusterAlias(), + routings, + shardIt.getSearchContextId(), + shardIt.getSearchContextKeepAlive() + ); + // if we already received a search result we can inform the shard that it + // can return a null response if the request rewrites to match none rather + // than creating an empty response in the search thread pool. + // Note that, we have to disable this shortcut for queries that create a context (scroll and search context). + shardRequest.canReturnNullResponseIfMatchNoDocs(hasShardResponse.get() && shardRequest.scroll() == null); + return shardRequest; + } + + /** + * Returns the next phase based on the results of the initial search phase + * @param results the results of the initial search phase. Each non null element in the result array represent a successfully + * executed shard request + * @param context the search context for the next phase + */ + protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); + + private void executeNext(PendingExecutions pendingExecutions, Thread originalThread) { + executeNext(pendingExecutions == null ? 
null : pendingExecutions::finishAndRunNext, originalThread); + } + + void executeNext(Runnable runnable, Thread originalThread) { + if (throttleConcurrentRequests) { + if (originalThread == Thread.currentThread()) { + fork(runnable); + } else { + runnable.run(); + } + } else { + assert runnable == null; + } + } + + /** + * Pending Executions + * + * @opensearch.internal + */ + private static final class PendingExecutions { + private final int permits; + private int permitsTaken = 0; + private ArrayDeque queue = new ArrayDeque<>(); + + PendingExecutions(int permits) { + assert permits > 0 : "not enough permits: " + permits; + this.permits = permits; + } + + void finishAndRunNext() { + synchronized (this) { + permitsTaken--; + assert permitsTaken >= 0 : "illegal taken permits: " + permitsTaken; + } + tryRun(null); + } + + void tryRun(Runnable runnable) { + Runnable r = tryQueue(runnable); + if (r != null) { + r.run(); + } + } + + private synchronized Runnable tryQueue(Runnable runnable) { + Runnable toExecute = null; + if (permitsTaken < permits) { + permitsTaken++; + toExecute = runnable; + if (toExecute == null) { // only poll if we don't have anything to execute + toExecute = queue.poll(); + } + if (toExecute == null) { + permitsTaken--; + } + } else if (runnable != null) { + queue.add(runnable); + } + return toExecute; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufCanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/ProtobufCanMatchPreFilterSearchPhase.java new file mode 100644 index 0000000000000..2f77b1bc68af7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufCanMatchPreFilterSearchPhase.java @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.common.lease.Releasable; +import org.opensearch.search.SearchService.CanMatchResponse; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.search.sort.MinAndMax; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.transport.Transport; + +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Executor; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +/** + * This search phase can be used as an initial search phase to pre-filter search shards based on query rewriting. + * The queries are rewritten against the shards and based on the rewrite result shards might be able to be excluded + * from the search. The extra round trip to the search shards is very cheap and is not subject to rejections + * which allows to fan out to more shards at the same time without running into rejections even if we are hitting a + * large portion of the clusters indices. 
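+ * <p>
+ * For example (illustrative request, not from the original Javadoc): a query such as
+ * <pre>{@code
+ * { "query": { "range": { "@timestamp": { "gte": "now-15m" } } } }
+ * }</pre>
+ * rewrites to "match none" on every shard whose {@code @timestamp} bounds are disjoint with the
+ * range, so those shards can be skipped without ever executing the query phase.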
+ * This phase can also be used to pre-sort shards based on min/max values in each shard of the provided primary sort.
+ * When the query primary sort is performed on a field, this phase extracts the min/max value in each shard and
+ * sorts them according to the provided order. This can be useful for instance to ensure that shards that contain recent
+ * data are executed first when sorting by descending timestamp.
+ *
+ * @opensearch.internal
+ */
+final class ProtobufCanMatchPreFilterSearchPhase extends ProtobufAbstractSearchAsyncAction<CanMatchResponse> {
+
+    private final Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory;
+    private final GroupShardsIterator<SearchShardIterator> shardsIts;
+
+    ProtobufCanMatchPreFilterSearchPhase(
+        Logger logger,
+        SearchTransportService searchTransportService,
+        BiFunction<String, String, Transport.Connection> nodeIdToConnection,
+        Map<String, AliasFilter> aliasFilter,
+        Map<String, Float> concreteIndexBoosts,
+        Map<String, Set<String>> indexRoutings,
+        Executor executor,
+        ProtobufSearchRequest request,
+        ActionListener<ProtobufSearchResponse> listener,
+        GroupShardsIterator<SearchShardIterator> shardsIts,
+        ProtobufTransportSearchAction.SearchTimeProvider timeProvider,
+        ClusterState clusterState,
+        ProtobufSearchTask task,
+        Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory,
+        ProtobufSearchResponse.Clusters clusters
+    ) {
+        // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests
+        super(
+            SearchPhaseName.CAN_MATCH.getName(),
+            logger,
+            searchTransportService,
+            nodeIdToConnection,
+            aliasFilter,
+            concreteIndexBoosts,
+            indexRoutings,
+            executor,
+            request,
+            listener,
+            shardsIts,
+            timeProvider,
+            clusterState,
+            task,
+            new CanMatchSearchPhaseResults(shardsIts.size()),
+            shardsIts.size(),
+            clusters
+        );
+        this.phaseFactory = phaseFactory;
+        this.shardsIts = shardsIts;
+    }
+
+    @Override
+    public void addReleasable(Releasable releasable) {
+        throw new RuntimeException("cannot add releasable in " + getName() + " phase");
+    }
+
+    @Override
+    protected void executePhaseOnShard(
+        SearchShardIterator shardIt,
+        SearchShardTarget shard,
+        SearchActionListener<CanMatchResponse> listener
+    ) {
+        getSearchTransport().sendCanMatch(
+            getConnection(shard.getClusterAlias(), shard.getNodeId()),
+            buildShardSearchRequest(shardIt),
+            getTask(),
+            listener
+        );
+    }
+
+    @Override
+    protected SearchPhase getNextPhase(SearchPhaseResults<CanMatchResponse> results, SearchPhaseContext context) {
+        return phaseFactory.apply(getIterator((CanMatchSearchPhaseResults) results, shardsIts));
+    }
+
+    private GroupShardsIterator<SearchShardIterator> getIterator(
+        CanMatchSearchPhaseResults results,
+        GroupShardsIterator<SearchShardIterator> shardsIts
+    ) {
+        int cardinality = results.getNumPossibleMatches();
+        FixedBitSet possibleMatches = results.getPossibleMatches();
+        if (cardinality == 0) {
+            // this is a special case where we have no hit but we need to get at least one search response in order
+            // to produce a valid search result with all the aggs etc.
+ possibleMatches.set(0); + } + SearchSourceBuilder source = getProtobufRequest().source(); + int i = 0; + for (SearchShardIterator iter : shardsIts) { + if (possibleMatches.get(i++)) { + iter.reset(); + } else { + iter.resetAndSkip(); + } + } + if (shouldSortShards(results.minAndMaxes) == false) { + return shardsIts; + } + FieldSortBuilder fieldSort = FieldSortBuilder.getPrimaryFieldSortOrNull(source); + return new GroupShardsIterator<>(sortShards(shardsIts, results.minAndMaxes, fieldSort.order())); + } + + private static List sortShards( + GroupShardsIterator shardsIts, + MinAndMax[] minAndMaxes, + SortOrder order + ) { + return IntStream.range(0, shardsIts.size()) + .boxed() + .sorted(shardComparator(shardsIts, minAndMaxes, order)) + .map(shardsIts::get) + .collect(Collectors.toList()); + } + + private static boolean shouldSortShards(MinAndMax[] minAndMaxes) { + Class clazz = null; + for (MinAndMax minAndMax : minAndMaxes) { + if (clazz == null) { + clazz = minAndMax == null ? null : minAndMax.getMin().getClass(); + } else if (minAndMax != null && clazz != minAndMax.getMin().getClass()) { + // we don't support sort values that mix different types (e.g.: long/double, numeric/keyword). + // TODO: we could fail the request because there is a high probability + // that the merging of topdocs will fail later for the same reason ? + return false; + } + } + return clazz != null; + } + + private static Comparator shardComparator( + GroupShardsIterator shardsIts, + MinAndMax[] minAndMaxes, + SortOrder order + ) { + final Comparator comparator = Comparator.comparing(index -> minAndMaxes[index], MinAndMax.getComparator(order)); + return comparator.thenComparing(index -> shardsIts.get(index).shardId()); + } + + /** + * Inner class for determining if canMatch search phase results + * + * @opensearch.internal + */ + private static final class CanMatchSearchPhaseResults extends SearchPhaseResults { + private final FixedBitSet possibleMatches; + private final MinAndMax[] minAndMaxes; + private int numPossibleMatches; + + CanMatchSearchPhaseResults(int size) { + super(size); + possibleMatches = new FixedBitSet(size); + minAndMaxes = new MinAndMax[size]; + } + + @Override + void consumeResult(CanMatchResponse result, Runnable next) { + try { + consumeResult(result.getShardIndex(), result.canMatch(), result.estimatedMinAndMax()); + } finally { + next.run(); + } + } + + @Override + boolean hasResult(int shardIndex) { + return false; // unneeded + } + + @Override + void consumeShardFailure(int shardIndex) { + // we have to carry over shard failures in order to account for them in the response. 
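+            // (marking the failed shard as a possible match keeps it out of the skipped set, so the
+            // failure is surfaced in the final response instead of being silently dropped)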
+ consumeResult(shardIndex, true, null); + } + + synchronized void consumeResult(int shardIndex, boolean canMatch, MinAndMax minAndMax) { + if (canMatch) { + possibleMatches.set(shardIndex); + numPossibleMatches++; + } + minAndMaxes[shardIndex] = minAndMax; + } + + synchronized int getNumPossibleMatches() { + return numPossibleMatches; + } + + synchronized FixedBitSet getPossibleMatches() { + return possibleMatches; + } + + @Override + Stream getSuccessfulResults() { + return Stream.empty(); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufExpandSearchPhase.java b/server/src/main/java/org/opensearch/action/search/ProtobufExpandSearchPhase.java new file mode 100644 index 0000000000000..e5eaa7b6674d8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufExpandSearchPhase.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.internal.InternalSearchResponse; + +/** + * This search phase is an optional phase that will be executed once all hits are fetched from the shards that executes + * field-collapsing on the inner hits. This phase only executes if field collapsing is requested in the search request and otherwise + * forwards to the next phase immediately. + * + * @opensearch.internal + */ +final class ProtobufExpandSearchPhase extends SearchPhase { + private final SearchPhaseContext context; + private final InternalSearchResponse searchResponse; + private final AtomicArray queryResults; + + ProtobufExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, AtomicArray queryResults) { + super(SearchPhaseName.EXPAND.getName()); + this.context = context; + this.searchResponse = searchResponse; + this.queryResults = queryResults; + } + + @Override + public void run() { + context.sendSearchResponse(searchResponse, queryResults); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufFetchSearchPhase.java b/server/src/main/java/org/opensearch/action/search/ProtobufFetchSearchPhase.java new file mode 100644 index 0000000000000..0afc569acf988 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufFetchSearchPhase.java @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.search.ScoreDoc; +import org.opensearch.action.OriginalIndices; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.search.RescoreDocIds; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.dfs.AggregatedDfs; +import org.opensearch.search.fetch.FetchSearchResult; +import org.opensearch.search.fetch.ProtobufShardFetchSearchRequest; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.internal.ProtobufShardSearchRequest; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.transport.Transport; + +import java.util.List; +import java.util.function.BiFunction; + +/** + * This search phase merges the query results from the previous phase together and calculates the topN hits for this search. + * Then it reaches out to all relevant shards to fetch the topN hits. + * + * @opensearch.internal + */ +final class ProtobufFetchSearchPhase extends SearchPhase { + private final ArraySearchPhaseResults fetchResults; + private final SearchPhaseController searchPhaseController; + private final AtomicArray queryResults; + private final BiFunction, SearchPhase> nextPhaseFactory; + private final SearchPhaseContext context; + private final Logger logger; + private final SearchPhaseResults resultConsumer; + private final SearchProgressListener progressListener; + private final AggregatedDfs aggregatedDfs; + + ProtobufFetchSearchPhase( + SearchPhaseResults resultConsumer, + SearchPhaseController searchPhaseController, + AggregatedDfs aggregatedDfs, + SearchPhaseContext context + ) { + this( + resultConsumer, + searchPhaseController, + aggregatedDfs, + context, + (response, queryPhaseResults) -> new ProtobufExpandSearchPhase(context, response, queryPhaseResults) + ); + } + + ProtobufFetchSearchPhase( + SearchPhaseResults resultConsumer, + SearchPhaseController searchPhaseController, + AggregatedDfs aggregatedDfs, + SearchPhaseContext context, + BiFunction, SearchPhase> nextPhaseFactory + ) { + super(SearchPhaseName.FETCH.getName()); + if (context.getNumShards() != resultConsumer.getNumShards()) { + throw new IllegalStateException( + "number of shards must match the length of the query results but doesn't:" + + context.getNumShards() + + "!=" + + resultConsumer.getNumShards() + ); + } + this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); + this.searchPhaseController = searchPhaseController; + this.queryResults = resultConsumer.getAtomicArray(); + this.aggregatedDfs = aggregatedDfs; + this.nextPhaseFactory = nextPhaseFactory; + this.context = context; + this.logger = context.getLogger(); + this.resultConsumer = resultConsumer; + this.progressListener = context.getProtobufTask().getProgressListener(); + } + + @Override + public void run() { + context.execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + // we do the heavy lifting in this inner run method where we reduce aggs etc. that's why we fork this phase + // off immediately instead of forking when we send back the response to the user since there we only need + // to merge together the fetched results which is a linear operation. 
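+                    // (resultConsumer.reduce() inside innerRun() is where that heavy merge happens)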
+ innerRun(); + } + + @Override + public void onFailure(Exception e) { + context.onPhaseFailure(ProtobufFetchSearchPhase.this, "", e); + } + }); + } + + private void innerRun() throws Exception { + final int numShards = context.getNumShards(); + final boolean isScrollSearch = context.getProtobufRequest().scroll() != null; + final List phaseResults = queryResults.asList(); + final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); + final boolean queryAndFetchOptimization = queryResults.length() == 1; + final Runnable finishPhase = () -> moveToNextPhase( + searchPhaseController, + queryResults, + reducedQueryPhase, + queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray() + ); + if (queryAndFetchOptimization) { + assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null : "phaseResults empty [" + + phaseResults.isEmpty() + + "], single result: " + + phaseResults.get(0).fetchResult(); + // query AND fetch optimization + finishPhase.run(); + } else { + ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; + final List[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, scoreDocs); + // no docs to fetch -- sidestep everything and return + if (scoreDocs.length == 0) { + // we have to release contexts here to free up resources + phaseResults.stream().map(SearchPhaseResult::queryResult).forEach(this::releaseIrrelevantSearchContext); + finishPhase.run(); + } else { + final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch + ? searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards) + : null; + final CountedCollector counter = new CountedCollector<>( + fetchResults, + docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not + finishPhase, + context + ); + for (int i = 0; i < docIdsToLoad.length; i++) { + List entry = docIdsToLoad[i]; + SearchPhaseResult queryResult = queryResults.get(i); + if (entry == null) { // no results for this shard ID + if (queryResult != null) { + // if we got some hits from this shard we have to release the context there + // we do this as we go since it will free up resources and passing on the request on the + // transport layer is cheap. + releaseIrrelevantSearchContext(queryResult.queryResult()); + progressListener.notifyFetchResult(i); + } + // in any case we count down this result since we don't talk to this shard anymore + counter.countDown(); + } else { + SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget(); + Transport.Connection connection = context.getConnection( + searchShardTarget.getClusterAlias(), + searchShardTarget.getNodeId() + ); + ProtobufShardFetchSearchRequest fetchSearchRequest = createFetchRequest( + queryResult.queryResult().getContextId(), + i, + entry, + lastEmittedDocPerShard, + searchShardTarget.getOriginalIndices(), + queryResult.getProtobufShardSearchRequest(), + queryResult.getRescoreDocIds() + ); + executeFetch(i, searchShardTarget, counter, fetchSearchRequest, queryResult.queryResult(), connection); + } + } + } + } + } + + protected ProtobufShardFetchSearchRequest createFetchRequest( + ShardSearchContextId contextId, + int index, + List entry, + ScoreDoc[] lastEmittedDocPerShard, + OriginalIndices originalIndices, + ProtobufShardSearchRequest shardSearchRequest, + RescoreDocIds rescoreDocIds + ) { + final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? 
lastEmittedDocPerShard[index] : null; + return new ProtobufShardFetchSearchRequest( + originalIndices, + contextId, + shardSearchRequest, + entry, + lastEmittedDoc, + rescoreDocIds, + aggregatedDfs + ); + } + + private void executeFetch( + final int shardIndex, + final SearchShardTarget shardTarget, + final CountedCollector counter, + final ProtobufShardFetchSearchRequest fetchSearchRequest, + final QuerySearchResult querySearchResult, + final Transport.Connection connection + ) { + context.getSearchTransport() + .sendExecuteFetchProtobuf( + connection, + fetchSearchRequest, + context.getProtobufTask(), + new SearchActionListener(shardTarget, shardIndex) { + @Override + public void innerOnResponse(FetchSearchResult result) { + try { + progressListener.notifyFetchResult(shardIndex); + counter.onResult(result); + } catch (Exception e) { + context.onPhaseFailure(ProtobufFetchSearchPhase.this, "", e); + } + } + + @Override + public void onFailure(Exception e) { + try { + logger.debug( + () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.contextId()), + e + ); + progressListener.notifyFetchFailure(shardIndex, shardTarget, e); + counter.onFailure(shardIndex, shardTarget, e); + } finally { + // the search context might not be cleared on the node where the fetch was executed for example + // because the action was rejected by the thread pool. in this case we need to send a dedicated + // request to clear the search context. + releaseIrrelevantSearchContext(querySearchResult); + } + } + } + ); + } + + /** + * Releases shard targets that are not used in the docsIdsToLoad. + */ + private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { + // we only release search context that we did not fetch from, if we are not scrolling + // or using a PIT and if it has at least one hit that didn't make it to the global topDocs + if (queryResult.hasSearchContext() && context.getProtobufRequest().scroll() == null && context.getProtobufRequest().pointInTimeBuilder() == null) { + try { + SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget(); + Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); + context.sendReleaseSearchContext(queryResult.getContextId(), connection, searchShardTarget.getOriginalIndices()); + } catch (Exception e) { + context.getLogger().trace("failed to release context", e); + } + } + } + + private void moveToNextPhase( + SearchPhaseController searchPhaseController, + AtomicArray queryPhaseResults, + SearchPhaseController.ReducedQueryPhase reducedQueryPhase, + AtomicArray fetchResultsArr + ) { + final InternalSearchResponse internalResponse = searchPhaseController.merge( + context.getProtobufRequest().scroll() != null, + reducedQueryPhase, + fetchResultsArr.asList(), + fetchResultsArr::get + ); + context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryPhaseResults)); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchAction.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchAction.java new file mode 100644 index 0000000000000..41a7e59eb7379 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchAction.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.action.ActionType;
+import org.opensearch.action.ProtobufActionType;
+
+/**
+ * Transport action for executing a search
+ *
+ * @opensearch.internal
+ */
+public class ProtobufSearchAction extends ProtobufActionType<ProtobufSearchResponse> {
+
+    public static final ProtobufSearchAction INSTANCE = new ProtobufSearchAction();
+    public static final String NAME = "indices:data/read/search";
+
+    private ProtobufSearchAction() {
+        super(NAME, ProtobufSearchResponse::new);
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchQueryThenFetchAsyncAction.java
new file mode 100644
index 0000000000000..01147f8724f57
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchQueryThenFetchAsyncAction.java
@@ -0,0 +1,160 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.search.TopFieldDocs;
+import org.opensearch.action.ActionListener;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.routing.GroupShardsIterator;
+import org.opensearch.search.SearchPhaseResult;
+import org.opensearch.search.SearchShardTarget;
+import org.opensearch.search.internal.AliasFilter;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.internal.ProtobufShardSearchRequest;
+import org.opensearch.search.query.QuerySearchResult;
+import org.opensearch.transport.Transport;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Executor;
+import java.util.function.BiFunction;
+
+/**
+ * Async transport action for query then fetch
+ *
+ * @opensearch.internal
+ */
+class ProtobufSearchQueryThenFetchAsyncAction extends ProtobufAbstractSearchAsyncAction<SearchPhaseResult> {
+
+    private final SearchPhaseController searchPhaseController;
+    private final SearchProgressListener progressListener;
+
+    // information used to track the best bottom doc globally.
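+    // (the collector is created lazily on the first sorted TopFieldDocs result and then shared
+    // across shard responses; see onShardResult below)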
+ private final int topDocsSize; + private final int trackTotalHitsUpTo; + private volatile BottomSortValuesCollector bottomSortCollector; + + ProtobufSearchQueryThenFetchAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final BiFunction nodeIdToConnection, + final Map aliasFilter, + final Map concreteIndexBoosts, + final Map> indexRoutings, + final SearchPhaseController searchPhaseController, + final Executor executor, + final QueryPhaseResultConsumer resultConsumer, + final ProtobufSearchRequest request, + final ActionListener listener, + final GroupShardsIterator shardsIts, + final ProtobufTransportSearchAction.SearchTimeProvider timeProvider, + ClusterState clusterState, + ProtobufSearchTask task, + ProtobufSearchResponse.Clusters clusters + ) { + super( + "query", + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterState, + task, + resultConsumer, + request.getMaxConcurrentShardRequests(), + clusters + ); + this.topDocsSize = SearchPhaseController.getTopDocsSizeProtobuf(request); + this.trackTotalHitsUpTo = request.resolveTrackTotalHitsUpTo(); + this.searchPhaseController = searchPhaseController; + this.progressListener = task.getProgressListener(); + + // register the release of the query consumer to free up the circuit breaker memory + // at the end of the search + addReleasable(resultConsumer); + + boolean hasFetchPhase = request.source() == null ? true : request.source().size() > 0; + progressListener.notifyListShardsProtobuf( + SearchProgressListener.buildSearchShards(this.shardsIts), + SearchProgressListener.buildSearchShards(toSkipShardsIts), + clusters, + hasFetchPhase + ); + } + + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener listener + ) { + ProtobufShardSearchRequest request = rewriteShardSearchRequest(super.buildProtobufShardSearchRequest(shardIt)); + // update inbound network time with current time before sending request over n/w to data node + if (request != null) { + request.setInboundNetworkTime(System.currentTimeMillis()); + } + getSearchTransport().sendExecuteQueryProtobuf(getConnection(shard.getClusterAlias(), shard.getNodeId()), request, getProtobufTask(), listener); + } + + @Override + protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) { + progressListener.notifyQueryFailure(shardIndex, shardTarget, exc); + } + + @Override + protected void onShardResult(SearchPhaseResult result, SearchShardIterator shardIt) { + QuerySearchResult queryResult = result.queryResult(); + if (queryResult.isNull() == false + // disable sort optims for scroll requests because they keep track of the last bottom doc locally (per shard) + && getProtobufRequest().scroll() == null + && queryResult.topDocs() != null + && queryResult.topDocs().topDocs.getClass() == TopFieldDocs.class) { + TopFieldDocs topDocs = (TopFieldDocs) queryResult.topDocs().topDocs; + if (bottomSortCollector == null) { + synchronized (this) { + if (bottomSortCollector == null) { + bottomSortCollector = new BottomSortValuesCollector(topDocsSize, topDocs.fields); + } + } + } + bottomSortCollector.consumeTopDocs(topDocs, queryResult.sortValueFormats()); + } + super.onShardResult(result, shardIt); + } + + @Override + protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { + 
        return new ProtobufFetchSearchPhase(results, searchPhaseController, null, this);
+    }
+
+    private ProtobufShardSearchRequest rewriteShardSearchRequest(ProtobufShardSearchRequest request) {
+        if (bottomSortCollector == null) {
+            return request;
+        }
+
+        // disable tracking total hits if we already reached the required estimation.
+        if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE && bottomSortCollector.getTotalHits() > trackTotalHitsUpTo) {
+            request.source(request.source().shallowCopy().trackTotalHits(false));
+        }
+
+        // set the current best bottom field doc
+        if (bottomSortCollector.getBottomSortValues() != null) {
+            // TODO: ProtobufShardSearchRequest cannot carry bottom sort values yet; wire this through once supported:
+            // request.setBottomSortValues(bottomSortCollector.getBottomSortValues());
+        }
+        return request;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchRequest.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchRequest.java
new file mode 100644
index 0000000000000..0e48f517500cb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchRequest.java
@@ -0,0 +1,766 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.Version;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.IndicesRequest;
+import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.common.Nullable;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.search.Scroll;
+import org.opensearch.search.builder.PointInTimeBuilder;
+import org.opensearch.search.builder.SearchSourceBuilder;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.tasks.ProtobufTaskId;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.opensearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to execute search against one or more indices (or all). Best created using
+ * {@link org.opensearch.client.Requests#searchRequest(String...)}.
+ *

+ * Note, the search {@link #source(org.opensearch.search.builder.SearchSourceBuilder)} + * is required. The search source is the different search options, including aggregations and such. + *
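+ * <p>
+ * A minimal construction sketch (index name and query are illustrative only):
+ * <pre>{@code
+ * ProtobufSearchRequest request = new ProtobufSearchRequest(
+ *     new String[] { "my-index" },
+ *     new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(10));
+ * }</pre>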

+ * + * @see org.opensearch.client.Requests#searchRequest(String...) + * @see org.opensearch.client.Client#search(SearchRequest) + * @see SearchResponse + * + * @opensearch.internal + */ +public class ProtobufSearchRequest extends ProtobufActionRequest implements IndicesRequest.Replaceable { + + public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + public static final int DEFAULT_PRE_FILTER_SHARD_SIZE = 128; + public static final int DEFAULT_BATCHED_REDUCE_SIZE = 512; + + private static final long DEFAULT_ABSOLUTE_START_MILLIS = -1; + + private final String localClusterAlias; + private final long absoluteStartMillis; + private final boolean finalReduce; + + private SearchType searchType = SearchType.DEFAULT; + + private String[] indices = Strings.EMPTY_ARRAY; + + @Nullable + private String routing; + @Nullable + private String preference; + + private SearchSourceBuilder source; + + private Boolean requestCache; + + private Boolean allowPartialSearchResults; + + private Scroll scroll; + + private int batchedReduceSize = DEFAULT_BATCHED_REDUCE_SIZE; + + private int maxConcurrentShardRequests = 0; + + private Integer preFilterShardSize; + + private boolean ccsMinimizeRoundtrips = true; + + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); + + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; + + private TimeValue cancelAfterTimeInterval; + + private String pipeline; + + public ProtobufSearchRequest() { + this.localClusterAlias = null; + this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + this.finalReduce = true; + } + + /** + * Constructs a new search request from the provided search request + */ + public ProtobufSearchRequest(ProtobufSearchRequest searchRequest) { + this( + searchRequest, + searchRequest.indices, + searchRequest.localClusterAlias, + searchRequest.absoluteStartMillis, + searchRequest.finalReduce + ); + } + + /** + * Constructs a new search request against the indices. No indices provided here means that search + * will run against all indices. + */ + public ProtobufSearchRequest(String... indices) { + this(indices, new SearchSourceBuilder()); + } + + /** + * Constructs a new search request against the provided indices with the given search source. + */ + public ProtobufSearchRequest(String[] indices, SearchSourceBuilder source) { + this(); + if (source == null) { + throw new IllegalArgumentException("source must not be null"); + } + indices(indices); + this.source = source; + } + + /** + * Creates a new sub-search request starting from the original search request that is provided. + * For internal use only, allows to fork a search request into multiple search requests that will be executed independently. + * Such requests will not be finally reduced, so that their results can be merged together in one response at completion. + * Used when a {@link ProtobufSearchRequest} is created and executed as part of a cross-cluster search request + * performing reduction on each cluster in order to minimize network round-trips between the coordinating node and the remote clusters. 
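+     * <p>
+     * A hedged usage sketch (cluster alias and index pattern are illustrative): the coordinating
+     * node forks one sub-request per remote cluster, e.g.
+     * <pre>{@code
+     * ProtobufSearchRequest sub = ProtobufSearchRequest.subSearchRequest(
+     *     original, new String[] { "logs-*" }, "remote1", timeProvider.getAbsoluteStartMillis(), false);
+     * }</pre>
+     * so that hits come back prefixed with the cluster alias and only the coordinating node
+     * performs the final reduce.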
+ * + * @param originalSearchRequest the original search request + * @param indices the indices to search against + * @param clusterAlias the alias to prefix index names with in the returned search results + * @param absoluteStartMillis the absolute start time to be used on the remote clusters to ensure that the same value is used + * @param finalReduce whether the reduction should be final or not + */ + static ProtobufSearchRequest subSearchRequest( + ProtobufSearchRequest originalSearchRequest, + String[] indices, + String clusterAlias, + long absoluteStartMillis, + boolean finalReduce + ) { + Objects.requireNonNull(originalSearchRequest, "search request must not be null"); + validateIndices(indices); + Objects.requireNonNull(clusterAlias, "cluster alias must not be null"); + if (absoluteStartMillis < 0) { + throw new IllegalArgumentException("absoluteStartMillis must not be negative but was [" + absoluteStartMillis + "]"); + } + return new ProtobufSearchRequest(originalSearchRequest, indices, clusterAlias, absoluteStartMillis, finalReduce); + } + + private ProtobufSearchRequest( + ProtobufSearchRequest searchRequest, + String[] indices, + String localClusterAlias, + long absoluteStartMillis, + boolean finalReduce + ) { + this.allowPartialSearchResults = searchRequest.allowPartialSearchResults; + this.batchedReduceSize = searchRequest.batchedReduceSize; + this.ccsMinimizeRoundtrips = searchRequest.ccsMinimizeRoundtrips; + this.indices = indices; + this.indicesOptions = searchRequest.indicesOptions; + this.maxConcurrentShardRequests = searchRequest.maxConcurrentShardRequests; + this.preference = searchRequest.preference; + this.preFilterShardSize = searchRequest.preFilterShardSize; + this.requestCache = searchRequest.requestCache; + this.routing = searchRequest.routing; + this.scroll = searchRequest.scroll; + this.searchType = searchRequest.searchType; + this.source = searchRequest.source; + this.localClusterAlias = localClusterAlias; + this.absoluteStartMillis = absoluteStartMillis; + this.finalReduce = finalReduce; + this.cancelAfterTimeInterval = searchRequest.cancelAfterTimeInterval; + } + + /** + * Constructs a new search request from reading the specified stream. 
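+     * <p>
+     * (Currently a stub: the implementation below defaults every field until the corresponding
+     * protobuf message is defined; see the TODO in the constructor body.)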
+ * + * @param in The stream the request is read from + * @throws IOException if there is an issue reading the stream + */ + public ProtobufSearchRequest(byte[] in) throws IOException { + super(in); + // TODO: proto message + searchType = null; + indices = null; + routing = ""; + preference = ""; + scroll = null; + source = null; + // if (in.getVersion().before(Version.V_2_0_0)) { + // // types no longer relevant so ignore + // String[] types = in.readStringArray(); + // if (types.length > 0) { + // throw new IllegalStateException( + // "types are no longer supported in search requests but found [" + Arrays.toString(types) + "]" + // ); + // } + // } + indicesOptions = null; + requestCache = false; + batchedReduceSize = 0; + maxConcurrentShardRequests = 0; + preFilterShardSize = 0; + allowPartialSearchResults = false; + localClusterAlias = ""; + if (localClusterAlias != null) { + absoluteStartMillis = 0; + finalReduce = false; + } else { + absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; + finalReduce = true; + } + ccsMinimizeRoundtrips = false; + cancelAfterTimeInterval = null; + // if (in.getVersion().onOrAfter(Version.V_2_7_0)) { + // pipeline = in.readOptionalString(); + // } + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + // out.writeByte(searchType.id()); + // out.writeStringArray(indices); + // out.writeOptionalString(routing); + // out.writeOptionalString(preference); + // out.writeOptionalWriteable(scroll); + // out.writeOptionalWriteable(source); + // if (out.getVersion().before(Version.V_2_0_0)) { + // // types not supported so send an empty array to previous versions + // out.writeStringArray(Strings.EMPTY_ARRAY); + // } + // indicesOptions.writeIndicesOptions(out); + // out.writeOptionalBoolean(requestCache); + // out.writeVInt(batchedReduceSize); + // out.writeVInt(maxConcurrentShardRequests); + // out.writeOptionalVInt(preFilterShardSize); + // out.writeOptionalBoolean(allowPartialSearchResults); + // out.writeOptionalString(localClusterAlias); + // if (localClusterAlias != null) { + // out.writeVLong(absoluteStartMillis); + // out.writeBoolean(finalReduce); + // } + // out.writeBoolean(ccsMinimizeRoundtrips); + // out.writeOptionalTimeValue(cancelAfterTimeInterval); + // if (out.getVersion().onOrAfter(Version.V_2_7_0)) { + // out.writeOptionalString(pipeline); + // } + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + boolean scroll = scroll() != null; + if (scroll) { + if (source != null) { + if (source.trackTotalHitsUpTo() != null && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + validationException = addValidationError( + "disabling [track_total_hits] is not allowed in a scroll context", + validationException + ); + } + if (source.from() > 0) { + validationException = addValidationError("using [from] is not allowed in a scroll context", validationException); + } + if (source.size() == 0) { + validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException); + } + if (source.rescores() != null && source.rescores().isEmpty() == false) { + validationException = addValidationError("using [rescore] is not allowed in a scroll context", validationException); + } + } + if (requestCache != null && requestCache) { + validationException = addValidationError("[request_cache] cannot be used in a scroll context", validationException); + } + } + if (source != null) { + if 
(source.aggregations() != null) { + validationException = source.aggregations().validate(validationException); + } + } + if (pointInTimeBuilder() != null) { + if (scroll) { + validationException = addValidationError("using [point in time] is not allowed in a scroll context", validationException); + } + } + return validationException; + } + + /** + * Returns the alias of the cluster that this search request is being executed on. A non-null value indicates that this search request + * is being executed as part of a locally reduced cross-cluster search request. The cluster alias is used to prefix index names + * returned as part of search hits with the alias of the cluster they came from. + */ + @Nullable + String getLocalClusterAlias() { + return localClusterAlias; + } + + /** + * Returns whether the reduction phase that will be performed needs to be final or not. + */ + boolean isFinalReduce() { + return finalReduce; + } + + /** + * Returns the current time in milliseconds from the time epoch, to be used for the execution of this search request. Used to + * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search + * request. When created through {@link #subSearchRequest(ProtobufSearchRequest, String[], String, long, boolean)}, this method returns + * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. + */ + long getOrCreateAbsoluteStartMillis() { + return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; + } + + /** + * Returns the provided absoluteStartMillis when created through {@link #subSearchRequest} and + * -1 otherwise. + */ + long getAbsoluteStartMillis() { + return absoluteStartMillis; + } + + /** + * Sets the indices the search will be executed on. + */ + @Override + public ProtobufSearchRequest indices(String... indices) { + validateIndices(indices); + this.indices = indices; + return this; + } + + private static void validateIndices(String... indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public ProtobufSearchRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + @Override + public boolean includeDataStreams() { + return true; + } + + /** + * Returns whether network round-trips should be minimized when executing cross-cluster search requests. + * Defaults to true. + */ + public boolean isCcsMinimizeRoundtrips() { + return ccsMinimizeRoundtrips; + } + + /** + * Sets whether network round-trips should be minimized when executing cross-cluster search requests. Defaults to true. + */ + public void setCcsMinimizeRoundtrips(boolean ccsMinimizeRoundtrips) { + this.ccsMinimizeRoundtrips = ccsMinimizeRoundtrips; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public String routing() { + return this.routing; + } + + /** + * A comma separated list of routing values to control the shards the search will be executed on. + */ + public ProtobufSearchRequest routing(String routing) { + this.routing = routing; + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. 
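+     * For example, {@code request.routing("user1", "user2")} stores the comma separated value
+     * {@code "user1,user2"}.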
+ */
+ public ProtobufSearchRequest routing(String... routings) {
+ this.routing = Strings.arrayToCommaDelimitedString(routings);
+ return this;
+ }
+
+ /**
+ * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
+ * {@code _local} to prefer local shards, {@code _primary} to execute only on primary shards,
+ * or a custom value, which guarantees that the same order
+ * will be used across different requests.
+ */
+ public ProtobufSearchRequest preference(String preference) {
+ this.preference = preference;
+ return this;
+ }
+
+ public String preference() {
+ return this.preference;
+ }
+
+ /**
+ * The search type to execute, defaults to {@link SearchType#DEFAULT}.
+ */
+ public ProtobufSearchRequest searchType(SearchType searchType) {
+ this.searchType = Objects.requireNonNull(searchType, "searchType must not be null");
+ return this;
+ }
+
+ /**
+ * A string representation of the search type to execute, defaults to {@link SearchType#DEFAULT}. Can be
+ * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
+ * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
+ */
+ public ProtobufSearchRequest searchType(String searchType) {
+ return searchType(SearchType.fromString(searchType));
+ }
+
+ /**
+ * The source of the search request.
+ */
+ public ProtobufSearchRequest source(SearchSourceBuilder sourceBuilder) {
+ this.source = Objects.requireNonNull(sourceBuilder, "source must not be null");
+ return this;
+ }
+
+ /**
+ * The search source to execute.
+ */
+ public SearchSourceBuilder source() {
+ return source;
+ }
+
+ public PointInTimeBuilder pointInTimeBuilder() {
+ if (source != null) {
+ return source.pointInTimeBuilder();
+ }
+ return null;
+ }
+
+ /**
+ * The type of search to execute.
+ */
+ public SearchType searchType() {
+ return searchType;
+ }
+
+ /**
+ * The indices the search will be executed on.
+ */
+ @Override
+ public String[] indices() {
+ return indices;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public Scroll scroll() {
+ return scroll;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request.
+ */
+ public ProtobufSearchRequest scroll(Scroll scroll) {
+ this.scroll = scroll;
+ return this;
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public ProtobufSearchRequest scroll(TimeValue keepAlive) {
+ return scroll(new Scroll(keepAlive));
+ }
+
+ /**
+ * If set, will enable scrolling of the search request for the specified timeout.
+ */
+ public ProtobufSearchRequest scroll(String keepAlive) {
+ return scroll(new Scroll(TimeValue.parseTimeValue(keepAlive, null, getClass().getSimpleName() + ".Scroll.keepAlive")));
+ }
+
+ /**
+ * Sets if this request should use the request cache or not, assuming that it can (for
+ * example, if "now" is used, it will never be cached). By default (not set, or null)
+ * it falls back to the index level setting that controls whether the request cache is enabled.
+ */
+ public ProtobufSearchRequest requestCache(Boolean requestCache) {
+ this.requestCache = requestCache;
+ return this;
+ }
+
+ public Boolean requestCache() {
+ return this.requestCache;
+ }
+
+ /**
+ * Sets if this request should allow partial results. (If this method is not called,
+ * it will default to the cluster level setting.)
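+ * <p>A hedged fluent-configuration sketch, assuming this class keeps a no-arg
+ * constructor like {@code SearchRequest} (the constructor is not shown in this diff):
+ * <pre>{@code
+ * ProtobufSearchRequest request = new ProtobufSearchRequest() // hypothetical no-arg constructor
+ *     .indices("logs-*")
+ *     .requestCache(true)
+ *     .allowPartialSearchResults(false);
+ * }</pre>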
+ */ + public ProtobufSearchRequest allowPartialSearchResults(boolean allowPartialSearchResults) { + this.allowPartialSearchResults = allowPartialSearchResults; + return this; + } + + public Boolean allowPartialSearchResults() { + return this.allowPartialSearchResults; + } + + /** + * Sets the number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection + * mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + */ + public void setBatchedReduceSize(int batchedReduceSize) { + if (batchedReduceSize <= 1) { + throw new IllegalArgumentException("batchedReduceSize must be >= 2"); + } + this.batchedReduceSize = batchedReduceSize; + } + + /** + * Returns the number of shard results that should be reduced at once on the coordinating node. This value should be used as a + * protection mechanism to reduce the memory overhead per search request if the potential number of shards in the request can be large. + */ + public int getBatchedReduceSize() { + return batchedReduceSize; + } + + /** + * Returns the number of shard requests that should be executed concurrently on a single node. This value should be used as a + * protection mechanism to reduce the number of shard requests fired per high level search request. Searches that hit the entire + * cluster can be throttled with this number to reduce the cluster load. The default is {@code 5} + */ + public int getMaxConcurrentShardRequests() { + return maxConcurrentShardRequests == 0 ? 5 : maxConcurrentShardRequests; + } + + /** + * Sets the number of shard requests that should be executed concurrently on a single node. This value should be used as a + * protection mechanism to reduce the number of shard requests fired per high level search request. Searches that hit the entire + * cluster can be throttled with this number to reduce the cluster load. The default is {@code 5} + */ + public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { + if (maxConcurrentShardRequests < 1) { + throw new IllegalArgumentException("maxConcurrentShardRequests must be >= 1"); + } + this.maxConcurrentShardRequests = maxConcurrentShardRequests; + } + + /** + * Sets a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards + * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for + * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard + * bounds and the query are disjoint. + * + * When unspecified, the pre-filter phase is executed if any of these conditions is met: + *
+ * <ul>
+ * <li>The request targets more than 128 shards</li>
+ * <li>The request targets one or more read-only index</li>
+ * <li>The primary sort of the query targets an indexed field</li>
+ * </ul>
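+ * <p>For example, forcing the pre-filter roundtrip whenever a request expands to more
+ * than 64 shards (the threshold value here is illustrative):
+ * <pre>{@code
+ * request.setPreFilterShardSize(64);
+ * }</pre>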
+ */ + public void setPreFilterShardSize(int preFilterShardSize) { + if (preFilterShardSize < 1) { + throw new IllegalArgumentException("preFilterShardSize must be >= 1"); + } + this.preFilterShardSize = preFilterShardSize; + } + + /** + * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards + * the search request expands to exceeds the threshold, or null if the threshold is unspecified. + * This filter roundtrip can limit the number of shards significantly if for + * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard + * bounds and the query are disjoint. + * + * When unspecified, the pre-filter phase is executed if any of these conditions is met: + *
+ * <ul>
+ * <li>The request targets more than 128 shards</li>
+ * <li>The request targets one or more read-only index</li>
+ * <li>The primary sort of the query targets an indexed field</li>
+ * </ul>
+ */ + @Nullable + public Integer getPreFilterShardSize() { + return preFilterShardSize; + } + + /** + * @return true if the request only has suggest + */ + public boolean isSuggestOnly() { + return source != null && source.isSuggestOnly(); + } + + public int resolveTrackTotalHitsUpTo() { + return resolveTrackTotalHitsUpTo(scroll, source); + } + + public static int resolveTrackTotalHitsUpTo(Scroll scroll, SearchSourceBuilder source) { + if (scroll != null) { + // no matter what the value of track_total_hits is + return SearchContext.TRACK_TOTAL_HITS_ACCURATE; + } + return source == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO + : source.trackTotalHitsUpTo() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO + : source.trackTotalHitsUpTo(); + } + + public void setCancelAfterTimeInterval(TimeValue cancelAfterTimeInterval) { + this.cancelAfterTimeInterval = cancelAfterTimeInterval; + } + + public TimeValue getCancelAfterTimeInterval() { + return cancelAfterTimeInterval; + } + + public ProtobufSearchRequest pipeline(String pipeline) { + this.pipeline = pipeline; + return this; + } + + public String pipeline() { + return pipeline; + } + + @Override + public ProtobufSearchTask createProtobufTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map headers) { + return new ProtobufSearchTask(id, type, action, this::buildDescription, parentTaskId, headers, cancelAfterTimeInterval); + } + + public final String buildDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("indices["); + Strings.arrayToDelimitedString(indices, ",", sb); + sb.append("], "); + sb.append("search_type[").append(searchType).append("], "); + if (scroll != null) { + sb.append("scroll[").append(scroll.keepAlive()).append("], "); + } + if (source != null) { + sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + } else { + sb.append("source[]"); + } + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ProtobufSearchRequest that = (ProtobufSearchRequest) o; + return searchType == that.searchType + && Arrays.equals(indices, that.indices) + && Objects.equals(routing, that.routing) + && Objects.equals(preference, that.preference) + && Objects.equals(source, that.source) + && Objects.equals(requestCache, that.requestCache) + && Objects.equals(scroll, that.scroll) + && Objects.equals(batchedReduceSize, that.batchedReduceSize) + && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) + && Objects.equals(preFilterShardSize, that.preFilterShardSize) + && Objects.equals(indicesOptions, that.indicesOptions) + && Objects.equals(allowPartialSearchResults, that.allowPartialSearchResults) + && Objects.equals(localClusterAlias, that.localClusterAlias) + && absoluteStartMillis == that.absoluteStartMillis + && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips + && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) + && Objects.equals(pipeline, that.pipeline); + } + + @Override + public int hashCode() { + return Objects.hash( + searchType, + Arrays.hashCode(indices), + routing, + preference, + source, + requestCache, + scroll, + indicesOptions, + batchedReduceSize, + maxConcurrentShardRequests, + preFilterShardSize, + allowPartialSearchResults, + localClusterAlias, + absoluteStartMillis, + ccsMinimizeRoundtrips, + cancelAfterTimeInterval + ); + } + + @Override + public String toString() { + return 
"ProtobufSearchRequest{" + + "searchType=" + + searchType + + ", indices=" + + Arrays.toString(indices) + + ", indicesOptions=" + + indicesOptions + + ", routing='" + + routing + + '\'' + + ", preference='" + + preference + + '\'' + + ", requestCache=" + + requestCache + + ", scroll=" + + scroll + + ", maxConcurrentShardRequests=" + + maxConcurrentShardRequests + + ", batchedReduceSize=" + + batchedReduceSize + + ", preFilterShardSize=" + + preFilterShardSize + + ", allowPartialSearchResults=" + + allowPartialSearchResults + + ", localClusterAlias=" + + localClusterAlias + + ", getOrCreateAbsoluteStartMillis=" + + absoluteStartMillis + + ", ccsMinimizeRoundtrips=" + + ccsMinimizeRoundtrips + + ", source=" + + source + + ", cancelAfterTimeInterval=" + + cancelAfterTimeInterval + + ", pipeline=" + + pipeline + + "}"; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchResponse.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchResponse.java new file mode 100644 index 0000000000000..89a8a74b1c125 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchResponse.java @@ -0,0 +1,575 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParser.Token; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.action.RestActions; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.Aggregations; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.ProfileShardResult; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.suggest.Suggest; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * A response of a search request. 
+ * + * @opensearch.internal + */ +public class ProtobufSearchResponse extends ProtobufActionResponse implements StatusToXContentObject { + + private static final ParseField SCROLL_ID = new ParseField("_scroll_id"); + private static final ParseField POINT_IN_TIME_ID = new ParseField("pit_id"); + private static final ParseField TOOK = new ParseField("took"); + private static final ParseField TIMED_OUT = new ParseField("timed_out"); + private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); + private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases"); + + private final SearchResponseSections internalResponse; + private final String scrollId; + private final String pointInTimeId; + private final int totalShards; + private final int successfulShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + private final Clusters clusters; + private final long tookInMillis; + + public ProtobufSearchResponse(byte[] in) throws IOException { + super(in); + // TODO: proto message + internalResponse = null; + totalShards = 0; + successfulShards = 0; + int size = 0; + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = null; + } + } + clusters = null; + scrollId = ""; + tookInMillis = 0; + skippedShards = 0; + pointInTimeId = ""; + } + + public ProtobufSearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters + ) { + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + } + + public ProtobufSearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this.internalResponse = internalResponse; + this.scrollId = scrollId; + this.pointInTimeId = pointInTimeId; + this.clusters = clusters; + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.skippedShards = skippedShards; + this.tookInMillis = tookInMillis; + this.shardFailures = shardFailures; + assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; + assert scrollId == null || pointInTimeId == null : "ProtobufSearchResponse can't have both scrollId [" + + scrollId + + "] and searchContextId [" + + pointInTimeId + + "]"; + } + + @Override + public RestStatus status() { + return RestStatus.status(successfulShards, totalShards, shardFailures); + } + + public SearchResponseSections getInternalResponse() { + return internalResponse; + } + + /** + * The search hits. + */ + public SearchHits getHits() { + return internalResponse.hits(); + } + + public Aggregations getAggregations() { + return internalResponse.aggregations(); + } + + public Suggest getSuggest() { + return internalResponse.suggest(); + } + + /** + * Has the search operation timed out. 
+ */
+ public boolean isTimedOut() {
+ return internalResponse.timedOut();
+ }
+
+ /**
+ * Has the search operation terminated early due to reaching
+ * {@code terminateAfter}.
+ */
+ public Boolean isTerminatedEarly() {
+ return internalResponse.terminatedEarly();
+ }
+
+ /**
+ * Returns the number of reduce phases applied to obtain this search response.
+ */
+ public int getNumReducePhases() {
+ return internalResponse.getNumReducePhases();
+ }
+
+ /**
+ * How long the search took.
+ */
+ public TimeValue getTook() {
+ return new TimeValue(tookInMillis);
+ }
+
+ /**
+ * The total number of shards the search was executed on.
+ */
+ public int getTotalShards() {
+ return totalShards;
+ }
+
+ /**
+ * The number of shards on which the search was executed successfully.
+ */
+ public int getSuccessfulShards() {
+ return successfulShards;
+ }
+
+ /**
+ * The number of shards skipped due to pre-filtering.
+ */
+ public int getSkippedShards() {
+ return skippedShards;
+ }
+
+ /**
+ * The number of shards on which the search failed.
+ */
+ public int getFailedShards() {
+ // we don't return totalShards - successfulShards: "no shards available" is not counted as a failed shard,
+ // it is simply not counted in the successful counter
+ return shardFailures.length;
+ }
+
+ /**
+ * The failures that occurred during the search.
+ */
+ public ShardSearchFailure[] getShardFailures() {
+ return this.shardFailures;
+ }
+
+ /**
+ * If scrolling was enabled ({@link SearchRequest#scroll(org.opensearch.search.Scroll)}), the
+ * scroll id that can be used to continue scrolling.
+ */
+ public String getScrollId() {
+ return scrollId;
+ }
+
+ /**
+ * Returns the encoded string of the search context that the search request was executed with.
+ */
+ public String pointInTimeId() {
+ return pointInTimeId;
+ }
+
+ /**
+ * If profiling was enabled, this returns an object containing the profile results from
+ * each shard. If profiling was not enabled, this will return null.
+ *
+ * @return the profile results or an empty map
+ */
+ @Nullable
+ public Map getProfileResults() {
+ return internalResponse.profile();
+ }
+
+ /**
+ * Returns info about what clusters the search was executed against.
Available only in responses obtained + * from a Cross Cluster Search request, otherwise null + * @see Clusters + */ + public Clusters getClusters() { + return clusters; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (scrollId != null) { + builder.field(SCROLL_ID.getPreferredName(), scrollId); + } + if (pointInTimeId != null) { + builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); + } + builder.field(TOOK.getPreferredName(), tookInMillis); + builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); + if (isTerminatedEarly() != null) { + builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); + } + if (getNumReducePhases() != 1) { + builder.field(NUM_REDUCE_PHASES.getPreferredName(), getNumReducePhases()); + } + RestActions.buildBroadcastShardsHeader( + builder, + params, + getTotalShards(), + getSuccessfulShards(), + getSkippedShards(), + getFailedShards(), + getShardFailures() + ); + clusters.toXContent(builder, params); + internalResponse.toXContent(builder, params); + return builder; + } + + public static ProtobufSearchResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser); + parser.nextToken(); + return innerFromXContent(parser); + } + + public static ProtobufSearchResponse innerFromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName = parser.currentName(); + SearchHits hits = null; + Aggregations aggs = null; + Suggest suggest = null; + SearchProfileShardResults profile = null; + boolean timedOut = false; + Boolean terminatedEarly = null; + int numReducePhases = 1; + long tookInMillis = -1; + int successfulShards = -1; + int totalShards = -1; + int skippedShards = 0; // 0 for BWC + String scrollId = null; + String searchContextId = null; + List failures = new ArrayList<>(); + Clusters clusters = Clusters.EMPTY; + for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) { + scrollId = parser.text(); + } else if (POINT_IN_TIME_ID.match(currentFieldName, parser.getDeprecationHandler())) { + searchContextId = parser.text(); + } else if (TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + tookInMillis = parser.longValue(); + } else if (TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) { + timedOut = parser.booleanValue(); + } else if (TERMINATED_EARLY.match(currentFieldName, parser.getDeprecationHandler())) { + terminatedEarly = parser.booleanValue(); + } else if (NUM_REDUCE_PHASES.match(currentFieldName, parser.getDeprecationHandler())) { + numReducePhases = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == Token.START_OBJECT) { + if (SearchHits.Fields.HITS.equals(currentFieldName)) { + hits = SearchHits.fromXContent(parser); + } else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) { + aggs = Aggregations.fromXContent(parser); + } else if (Suggest.NAME.equals(currentFieldName)) { + suggest = 
Suggest.fromXContent(parser); + } else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) { + profile = SearchProfileShardResults.fromXContent(parser); + } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + parser.intValue(); // we don't need it but need to consume it + } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == Token.START_ARRAY) { + if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { + failures.add(ShardSearchFailure.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + int successful = -1; + int total = -1; + int skipped = -1; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (Clusters.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successful = parser.intValue(); + } else if (Clusters.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + total = parser.intValue(); + } else if (Clusters.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skipped = parser.intValue(); + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + clusters = new Clusters(total, successful, skipped); + } else { + parser.skipChildren(); + } + } + } + SearchResponseSections searchResponseSections = new SearchResponseSections( + hits, + aggs, + suggest, + timedOut, + terminatedEarly, + profile, + numReducePhases + ); + return new ProtobufSearchResponse( + searchResponseSections, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + failures.toArray(ShardSearchFailure.EMPTY_ARRAY), + clusters, + searchContextId + ); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + // internalResponse.writeTo(out); + // out.writeVInt(totalShards); + // out.writeVInt(successfulShards); + + // out.writeVInt(shardFailures.length); + // for (ShardSearchFailure shardSearchFailure : shardFailures) { + // shardSearchFailure.writeTo(out); + // } + // clusters.writeTo(out); + // out.writeOptionalString(scrollId); + // out.writeVLong(tookInMillis); + // out.writeVInt(skippedShards); + // out.writeOptionalString(pointInTimeId); + } + + @Override + public String toString() { + return Strings.toString(XContentType.JSON, this); + } + + /** + * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful + * and how many of them were skipped. 
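+ * <p>The constructor asserts the invariant {@code skipped == total - successful};
+ * a minimal sketch:
+ * <pre>{@code
+ * // 3 clusters targeted, 2 responded, 1 skipped
+ * ProtobufSearchResponse.Clusters clusters = new ProtobufSearchResponse.Clusters(3, 2, 1);
+ * }</pre>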
+ *
+ * @opensearch.internal
+ */
+ public static class Clusters implements ToXContentFragment, Writeable {
+
+ public static final Clusters EMPTY = new Clusters(0, 0, 0);
+
+ static final ParseField _CLUSTERS_FIELD = new ParseField("_clusters");
+ static final ParseField SUCCESSFUL_FIELD = new ParseField("successful");
+ static final ParseField SKIPPED_FIELD = new ParseField("skipped");
+ static final ParseField TOTAL_FIELD = new ParseField("total");
+
+ private final int total;
+ private final int successful;
+ private final int skipped;
+
+ public Clusters(int total, int successful, int skipped) {
+ assert total >= 0 && successful >= 0 && skipped >= 0 : "total: " + total + " successful: " + successful + " skipped: " + skipped;
+ assert successful <= total && skipped == total - successful : "total: " + total + " successful: " + successful + " skipped: " + skipped;
+ this.total = total;
+ this.successful = successful;
+ this.skipped = skipped;
+ }
+
+ private Clusters(StreamInput in) throws IOException {
+ this(in.readVInt(), in.readVInt(), in.readVInt());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(total);
+ out.writeVInt(successful);
+ out.writeVInt(skipped);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ if (total > 0) {
+ builder.startObject(_CLUSTERS_FIELD.getPreferredName());
+ builder.field(TOTAL_FIELD.getPreferredName(), total);
+ builder.field(SUCCESSFUL_FIELD.getPreferredName(), successful);
+ builder.field(SKIPPED_FIELD.getPreferredName(), skipped);
+ builder.endObject();
+ }
+ return builder;
+ }
+
+ /**
+ * Returns how many total clusters the search was requested to be executed on
+ */
+ public int getTotal() {
+ return total;
+ }
+
+ /**
+ * Returns how many total clusters the search was executed successfully on
+ */
+ public int getSuccessful() {
+ return successful;
+ }
+
+ /**
+ * Returns how many total clusters were skipped during the execution of the search request
+ */
+ public int getSkipped() {
+ return skipped;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ Clusters clusters = (Clusters) o;
+ return total == clusters.total && successful == clusters.successful && skipped == clusters.skipped;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(total, successful, skipped);
+ }
+
+ @Override
+ public String toString() {
+ return "Clusters{total=" + total + ", successful=" + successful + ", skipped=" + skipped + '}';
+ }
+ }
+
+ static ProtobufSearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) {
+ SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN);
+ InternalSearchResponse internalSearchResponse = new InternalSearchResponse(
+ searchHits,
+ InternalAggregations.EMPTY,
+ null,
+ null,
+ false,
+ null,
+ 0
+ );
+ return new ProtobufSearchResponse(
+ internalSearchResponse,
+ null,
+ 0,
+ 0,
+ 0,
+ tookInMillisSupplier.get(),
+ ShardSearchFailure.EMPTY_ARRAY,
+ clusters,
+ null
+ );
+ }
+}
diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchShardTask.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchShardTask.java
new file mode 100644
index 0000000000000..6abe4d34870a7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchShardTask.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.MemoizedSupplier; +import org.opensearch.search.fetch.ShardFetchSearchRequest; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.ProtobufCancellableTask; +import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.tasks.ProtobufTaskId; + +import java.util.Map; +import java.util.function.Supplier; + +/** + * Task storing information about a currently running search shard request. + * See {@link ShardSearchRequest}, {@link ShardFetchSearchRequest}, ... + * + * @opensearch.internal + */ +public class ProtobufSearchShardTask extends ProtobufCancellableTask implements SearchBackpressureTask { + // generating metadata in a lazy way since source can be quite big + private final MemoizedSupplier metadataSupplier; + + public ProtobufSearchShardTask(long id, String type, String action, String description, ProtobufTaskId parentTaskId, Map headers) { + this(id, type, action, description, parentTaskId, headers, () -> ""); + } + + public ProtobufSearchShardTask( + long id, + String type, + String action, + String description, + ProtobufTaskId parentTaskId, + Map headers, + Supplier metadataSupplier + ) { + super(id, type, action, description, parentTaskId, headers); + this.metadataSupplier = new MemoizedSupplier<>(metadataSupplier); + } + + public String getTaskMetadata() { + return metadataSupplier.get(); + } + + @Override + public boolean supportsResourceTracking() { + return true; + } + + @Override + public boolean shouldCancelChildrenOnCancellation() { + return false; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufSearchTask.java b/server/src/main/java/org/opensearch/action/search/ProtobufSearchTask.java new file mode 100644 index 0000000000000..1a85b83d3fc83 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufSearchTask.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.tasks.ProtobufCancellableTask; +import org.opensearch.tasks.SearchBackpressureTask; +import org.opensearch.tasks.ProtobufTaskId; + +import java.util.Map; +import java.util.function.Supplier; + +import static org.opensearch.search.SearchService.NO_TIMEOUT; + +/** + * Task storing information about a currently running {@link SearchRequest}. 
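+ * <p>The description is supplied lazily because the request source can be large. A
+ * hedged construction sketch (the id, type, parent task id, and headers are placeholders):
+ * <pre>{@code
+ * ProtobufSearchTask task = new ProtobufSearchTask(
+ *     42L, "transport", ProtobufSearchAction.NAME,
+ *     request::buildDescription, parentTaskId, headers);
+ * }</pre>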
+ * + * @opensearch.internal + */ +public class ProtobufSearchTask extends ProtobufCancellableTask implements SearchBackpressureTask { + // generating description in a lazy way since source can be quite big + private final Supplier descriptionSupplier; + private SearchProgressListener progressListener = SearchProgressListener.NOOP; + + public ProtobufSearchTask( + long id, + String type, + String action, + Supplier descriptionSupplier, + ProtobufTaskId parentTaskId, + Map headers + ) { + this(id, type, action, descriptionSupplier, parentTaskId, headers, NO_TIMEOUT); + } + + public ProtobufSearchTask( + long id, + String type, + String action, + Supplier descriptionSupplier, + ProtobufTaskId parentTaskId, + Map headers, + TimeValue cancelAfterTimeInterval + ) { + super(id, type, action, null, parentTaskId, headers, cancelAfterTimeInterval); + this.descriptionSupplier = descriptionSupplier; + } + + @Override + public final String getDescription() { + return descriptionSupplier.get(); + } + + @Override + public boolean supportsResourceTracking() { + return true; + } + + /** + * Attach a {@link SearchProgressListener} to this task. + */ + public final void setProgressListener(SearchProgressListener progressListener) { + this.progressListener = progressListener; + } + + /** + * Return the {@link SearchProgressListener} attached to this task. + */ + public final SearchProgressListener getProgressListener() { + return progressListener; + } + + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ProtobufTransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/ProtobufTransportSearchAction.java new file mode 100644 index 0000000000000..d33cebb1969d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ProtobufTransportSearchAction.java @@ -0,0 +1,1355 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.ProtobufHandledTransportAction; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.TimeoutTaskCancellationUtility; +import org.opensearch.client.Client; +import org.opensearch.client.ProtobufOriginSettingClient; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.OperationRouting; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.common.Strings; +import org.opensearch.core.index.Index; +import org.opensearch.index.query.Rewriteable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchService; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.search.pipeline.SearchPipelineService; +import org.opensearch.search.profile.ProfileShardResult; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.tasks.ProtobufCancellableTask; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.TaskId; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterAware; +import org.opensearch.transport.RemoteClusterService; +import org.opensearch.transport.RemoteTransportException; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import 
java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.opensearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; +import static org.opensearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; +import static org.opensearch.action.search.SearchType.QUERY_THEN_FETCH; +import static org.opensearch.search.sort.FieldSortBuilder.hasPrimaryFieldSort; + +/** + * Perform search action + * + * @opensearch.internal + */ +public class ProtobufTransportSearchAction extends ProtobufHandledTransportAction { + + /** The maximum number of shards for a single search request. */ + public static final Setting SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( + "action.search.shard_count.limit", + Long.MAX_VALUE, + 1L, + Property.Dynamic, + Property.NodeScope + ); + + // cluster level setting for timeout based search cancellation. If search request level parameter is present then that will take + // precedence over the cluster setting value + public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; + public static final Setting SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING = Setting.timeSetting( + SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY, + SearchService.NO_TIMEOUT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private final ProtobufNodeClient client; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final SearchTransportService searchTransportService; + private final RemoteClusterService remoteClusterService; + private final SearchPhaseController searchPhaseController; + private final SearchService searchService; + private final IndexNameExpressionResolver indexNameExpressionResolver; + private final NamedWriteableRegistry namedWriteableRegistry; + private final CircuitBreaker circuitBreaker; + private final SearchPipelineService searchPipelineService; + + @Inject + public ProtobufTransportSearchAction( + ProtobufNodeClient client, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + TransportService transportService, + SearchService searchService, + SearchTransportService searchTransportService, + SearchPhaseController searchPhaseController, + ClusterService clusterService, + ProtobufActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NamedWriteableRegistry namedWriteableRegistry, + SearchPipelineService searchPipelineService + ) { + super(ProtobufSearchAction.NAME, transportService, actionFilters, (ProtobufWriteable.Reader) ProtobufSearchRequest::new); + this.client = client; + this.threadPool = threadPool; + this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST); + this.searchPhaseController = searchPhaseController; + this.searchTransportService = searchTransportService; + this.remoteClusterService = searchTransportService.getRemoteClusterService(); + SearchTransportService.registerRequestHandlerProtobuf(transportService, searchService); + 
this.clusterService = clusterService; + this.searchService = searchService; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.namedWriteableRegistry = namedWriteableRegistry; + this.searchPipelineService = searchPipelineService; + } + + private Map buildPerIndexAliasFilter( + ProtobufSearchRequest request, + ClusterState clusterState, + Index[] concreteIndices, + Map remoteAliasMap + ) { + final Map aliasFilterMap = new HashMap<>(); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + for (Index index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); + AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, index.getName(), indicesAndAliases); + assert aliasFilter != null; + aliasFilterMap.put(index.getUUID(), aliasFilter); + } + aliasFilterMap.putAll(remoteAliasMap); + return aliasFilterMap; + } + + private Map resolveIndexBoosts(ProtobufSearchRequest searchRequest, ClusterState clusterState) { + if (searchRequest.source() == null) { + return Collections.emptyMap(); + } + + SearchSourceBuilder source = searchRequest.source(); + if (source.indexBoosts() == null) { + return Collections.emptyMap(); + } + + Map concreteIndexBoosts = new HashMap<>(); + for (SearchSourceBuilder.IndexBoost ib : source.indexBoosts()) { + Index[] concreteIndices = indexNameExpressionResolver.concreteIndices( + clusterState, + searchRequest.indicesOptions(), + ib.getIndex() + ); + + for (Index concreteIndex : concreteIndices) { + concreteIndexBoosts.putIfAbsent(concreteIndex.getUUID(), ib.getBoost()); + } + } + return Collections.unmodifiableMap(concreteIndexBoosts); + } + + /** + * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving + * "now" to an index name). Another clock is needed for measuring how long a search operation + * took. These two uses are at odds with each other. There are many issues with using a real + * clock for measuring how long an operation took (they often lack precision, they are subject + * to moving backwards due to NTP and other such complexities, etc.). There are also issues with + * using a relative clock for reporting real time. Thus, we simply separate these two uses. + * + * @opensearch.internal + */ + static final class SearchTimeProvider { + + private final long absoluteStartMillis; + private final long relativeStartNanos; + private final LongSupplier relativeCurrentNanosProvider; + + /** + * Instantiates a new search time provider. The absolute start time is the real clock time + * used for resolving index expressions that include dates. The relative start time is the + * start of the search operation according to a relative clock. The total time the search + * operation took can be measured against the provided relative clock and the relative start + * time. 
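+ * <p>A sketch mirroring how {@code executeRequest} wires this provider up below:
+ * <pre>{@code
+ * SearchTimeProvider provider = new SearchTimeProvider(
+ *     request.getOrCreateAbsoluteStartMillis(), System.nanoTime(), System::nanoTime);
+ * long tookMillis = provider.buildTookInMillis(); // measured on the relative clock
+ * }</pre>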
+ *
+ * @param absoluteStartMillis the absolute start time in milliseconds since the epoch
+ * @param relativeStartNanos the relative start time in nanoseconds
+ * @param relativeCurrentNanosProvider provides the current relative time
+ */
+ SearchTimeProvider(final long absoluteStartMillis, final long relativeStartNanos, final LongSupplier relativeCurrentNanosProvider) {
+ this.absoluteStartMillis = absoluteStartMillis;
+ this.relativeStartNanos = relativeStartNanos;
+ this.relativeCurrentNanosProvider = relativeCurrentNanosProvider;
+ }
+
+ long getAbsoluteStartMillis() {
+ return absoluteStartMillis;
+ }
+
+ long buildTookInMillis() {
+ return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos);
+ }
+ }
+
+ @Override
+ protected void doExecute(ProtobufTask task, ProtobufSearchRequest searchRequest, ActionListener listener) {
+ System.out.println("ProtobufTransportSearchAction doExecute");
+ // Only if the task is of type ProtobufCancellableTask and supports cancellation on timeout is this request eligible for
+ // timeout based cancellation. There may be other top level requests like AsyncSearch which use ProtobufSearchRequest
+ // internally and have their own cancellation mechanism. For such cases, the ProtobufSearchRequest can override createTask
+ // when it is created, set the cancelAfterTimeInterval to NO_TIMEOUT, and bypass this mechanism.
+ // if (task instanceof ProtobufCancellableTask) {
+ // listener = TimeoutTaskCancellationUtility.wrapWithCancellationListener(
+ // client,
+ // (ProtobufCancellableTask) task,
+ // clusterService.getClusterSettings(),
+ // listener
+ // );
+ // }
+ executeRequest(task, searchRequest, this::searchAsyncAction, listener);
+ }
+
+ /**
+ * The single phase search action.
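+ * <p>Since the interface declares a single abstract method, an implementation can be
+ * a lambda; a minimal sketch (a real implementation would dispatch to the shard target):
+ * <pre>{@code
+ * SinglePhaseSearchAction phase = (searchTask, target, connection, listener) ->
+ *     listener.onResponse(null);
+ * }</pre>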
+ * + * @opensearch.internal + */ + public interface SinglePhaseSearchAction { + void executeOnShardTarget( + ProtobufSearchTask searchTask, + SearchShardTarget target, + Transport.Connection connection, + ActionListener listener + ); + } + + public void executeRequest( + ProtobufTask task, + ProtobufSearchRequest searchRequest, + String actionName, + boolean includeSearchContext, + SinglePhaseSearchAction phaseSearchAction, + ActionListener listener + ) { + executeRequest(task, searchRequest, new SearchAsyncActionProvider() { + @Override + public ProtobufAbstractSearchAsyncAction asyncSearchAction( + ProtobufSearchTask task, + ProtobufSearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardsIts, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + ProtobufSearchResponse.Clusters clusters + ) { + return new ProtobufAbstractSearchAsyncAction( + actionName, + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + searchRequest, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + searchRequest.getMaxConcurrentShardRequests(), + clusters + ) { + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase(getName()) { + @Override + public void run() { + final AtomicArray atomicArray = results.getAtomicArray(); + sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + } + }; + } + + @Override + boolean buildPointInTimeFromSearchResults() { + return includeSearchContext; + } + }; + } + }, listener); + } + + private void executeRequest( + ProtobufTask task, + ProtobufSearchRequest originalSearchRequest, + SearchAsyncActionProvider searchAsyncActionProvider, + ActionListener originalListener + ) { + System.out.println("TrasportSearchAction executeRequest"); + final long relativeStartNanos = System.nanoTime(); + final SearchTimeProvider timeProvider = new SearchTimeProvider( + originalSearchRequest.getOrCreateAbsoluteStartMillis(), + relativeStartNanos, + System::nanoTime + ); + // PipelinedRequest searchRequest; + ActionListener listener; + // try { + // searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); + // listener = ActionListener.wrap( + // r -> originalListener.onResponse(searchRequest.transformResponse(r)), + // originalListener::onFailure + // ); + // } catch (Exception e) { + // originalListener.onFailure(e); + // return; + // } + + ActionListener rewriteListener = ActionListener.wrap(source -> { + if (source != originalSearchRequest.source()) { + // only set it if it changed - we don't allow null values to be set but it might be already null. 
this way we catch + // situations when source is rewritten to null due to a bug + originalSearchRequest.source(source); + } + final ClusterState clusterState = clusterService.state(); + final SearchContextId searchContext; + final Map remoteClusterIndices; + if (originalSearchRequest.pointInTimeBuilder() != null) { + searchContext = SearchContextId.decode(namedWriteableRegistry, originalSearchRequest.pointInTimeBuilder().getId()); + remoteClusterIndices = getIndicesFromSearchContexts(searchContext, originalSearchRequest.indicesOptions()); + } else { + searchContext = null; + remoteClusterIndices = remoteClusterService.groupIndices( + originalSearchRequest.indicesOptions(), + originalSearchRequest.indices(), + idx -> indexNameExpressionResolver.hasIndexAbstraction(idx, clusterState) + ); + } + OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + if (remoteClusterIndices.isEmpty()) { + executeLocalSearch( + task, + timeProvider, + originalSearchRequest, + localIndices, + clusterState, + originalListener, + searchContext, + searchAsyncActionProvider + ); + } else { + // if (shouldMinimizeRoundtrips(originalSearchRequest)) { + // ccsRemoteReduce( + // originalSearchRequest, + // localIndices, + // remoteClusterIndices, + // timeProvider, + // searchService.aggReduceContextBuilder(originalSearchRequest.source()), + // remoteClusterService, + // threadPool, + // listener, + // (r, l) -> executeLocalSearch( + // task, + // timeProvider, + // r, + // localIndices, + // clusterState, + // l, + // searchContext, + // searchAsyncActionProvider + // ) + // ); + // } else { + // AtomicInteger skippedClusters = new AtomicInteger(0); + // collectSearchShards( + // originalSearchRequest.indicesOptions(), + // originalSearchRequest.preference(), + // originalSearchRequest.routing(), + // skippedClusters, + // remoteClusterIndices, + // remoteClusterService, + // threadPool, + // ActionListener.wrap(searchShardsResponses -> { + // final BiFunction clusterNodeLookup = getRemoteClusterNodeLookup( + // searchShardsResponses + // ); + // final Map remoteAliasFilters; + // final List remoteShardIterators; + // if (searchContext != null) { + // remoteAliasFilters = searchContext.aliasFilter(); + // remoteShardIterators = getRemoteShardsIteratorFromPointInTime( + // searchShardsResponses, + // searchContext, + // originalSearchRequest.pointInTimeBuilder().getKeepAlive(), + // remoteClusterIndices + // ); + // } else { + // remoteAliasFilters = getRemoteAliasFilters(searchShardsResponses); + // remoteShardIterators = getRemoteShardsIterator( + // searchShardsResponses, + // remoteClusterIndices, + // remoteAliasFilters + // ); + // } + // int localClusters = localIndices == null ? 
0 : 1; + // int totalClusters = remoteClusterIndices.size() + localClusters; + // int successfulClusters = searchShardsResponses.size() + localClusters; + // executeSearch( + // (ProtobufSearchTask) task, + // timeProvider, + // originalSearchRequest, + // localIndices, + // remoteShardIterators, + // clusterNodeLookup, + // clusterState, + // remoteAliasFilters, + // listener, + // new ProtobufSearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), + // searchContext, + // searchAsyncActionProvider + // ); + // }, listener::onFailure) + // ); + // } + } + }, originalListener::onFailure); + if (originalSearchRequest.source() == null) { + rewriteListener.onResponse(originalSearchRequest.source()); + } else { + Rewriteable.rewriteAndFetch( + originalSearchRequest.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + } + + static boolean shouldMinimizeRoundtrips(ProtobufSearchRequest searchRequest) { + if (searchRequest.isCcsMinimizeRoundtrips() == false) { + return false; + } + if (searchRequest.scroll() != null) { + return false; + } + if (searchRequest.pointInTimeBuilder() != null) { + return false; + } + if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { + return false; + } + SearchSourceBuilder source = searchRequest.source(); + return source == null + || source.collapse() == null + || source.collapse().getInnerHits() == null + || source.collapse().getInnerHits().isEmpty(); + } + + // static void ccsRemoteReduce( + // ProtobufSearchRequest searchRequest, + // OriginalIndices localIndices, + // Map remoteIndices, + // SearchTimeProvider timeProvider, + // InternalAggregation.ReduceContextBuilder aggReduceContextBuilder, + // RemoteClusterService remoteClusterService, + // ThreadPool threadPool, + // ActionListener listener, + // BiConsumer> localSearchConsumer + // ) { + + // if (localIndices == null && remoteIndices.size() == 1) { + // // if we are searching against a single remote cluster, we simply forward the original search request to such cluster + // // and we directly perform final reduction in the remote cluster + // Map.Entry entry = remoteIndices.entrySet().iterator().next(); + // String clusterAlias = entry.getKey(); + // boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + // OriginalIndices indices = entry.getValue(); + // ProtobufSearchRequest ccsSearchRequest = ProtobufSearchRequest.subSearchRequest( + // searchRequest, + // indices.indices(), + // clusterAlias, + // timeProvider.getAbsoluteStartMillis(), + // true + // ); + // Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + // remoteClusterClient.search(ccsSearchRequest, new ActionListener() { + // @Override + // public void onResponse(ProtobufSearchResponse searchResponse) { + // Map profileResults = searchResponse.getProfileResults(); + // SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() + // ? 
null + // : new SearchProfileShardResults(profileResults); + // InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + // searchResponse.getHits(), + // (InternalAggregations) searchResponse.getAggregations(), + // searchResponse.getSuggest(), + // profile, + // searchResponse.isTimedOut(), + // searchResponse.isTerminatedEarly(), + // searchResponse.getNumReducePhases() + // ); + // listener.onResponse( + // new ProtobufSearchResponse( + // internalSearchResponse, + // searchResponse.getScrollId(), + // searchResponse.getTotalShards(), + // searchResponse.getSuccessfulShards(), + // searchResponse.getSkippedShards(), + // timeProvider.buildTookInMillis(), + // searchResponse.getShardFailures(), + // new ProtobufSearchResponse.Clusters(1, 1, 0), + // searchResponse.pointInTimeId() + // ) + // ); + // } + + // @Override + // public void onFailure(Exception e) { + // if (skipUnavailable) { + // listener.onResponse(ProtobufSearchResponse.empty(timeProvider::buildTookInMillis, new ProtobufSearchResponse.Clusters(1, 0, 1))); + // } else { + // listener.onFailure(wrapRemoteClusterFailure(clusterAlias, e)); + // } + // } + // }); + // } else { + // SearchResponseMerger searchResponseMerger = createSearchResponseMerger( + // searchRequest.source(), + // timeProvider, + // aggReduceContextBuilder + // ); + // AtomicInteger skippedClusters = new AtomicInteger(0); + // final AtomicReference exceptions = new AtomicReference<>(); + // int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1); + // final CountDown countDown = new CountDown(totalClusters); + // for (Map.Entry entry : remoteIndices.entrySet()) { + // String clusterAlias = entry.getKey(); + // boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + // OriginalIndices indices = entry.getValue(); + // ProtobufSearchRequest ccsSearchRequest = ProtobufSearchRequest.subSearchRequest( + // searchRequest, + // indices.indices(), + // clusterAlias, + // timeProvider.getAbsoluteStartMillis(), + // false + // ); + // ActionListener ccsListener = createCCSListener( + // clusterAlias, + // skipUnavailable, + // countDown, + // skippedClusters, + // exceptions, + // searchResponseMerger, + // totalClusters, + // listener + // ); + // Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + // remoteClusterClient.search(ccsSearchRequest, ccsListener); + // } + // if (localIndices != null) { + // ActionListener ccsListener = createCCSListener( + // RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + // false, + // countDown, + // skippedClusters, + // exceptions, + // searchResponseMerger, + // totalClusters, + // listener + // ); + // ProtobufSearchRequest ccsLocalSearchRequest = ProtobufSearchRequest.subSearchRequest( + // searchRequest, + // localIndices.indices(), + // RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + // timeProvider.getAbsoluteStartMillis(), + // false + // ); + // localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener); + // } + // } + // } + + // static SearchResponseMerger createSearchResponseMerger( + // SearchSourceBuilder source, + // SearchTimeProvider timeProvider, + // InternalAggregation.ReduceContextBuilder aggReduceContextBuilder + // ) { + // final int from; + // final int size; + // final int trackTotalHitsUpTo; + // if (source == null) { + // from = SearchService.DEFAULT_FROM; + // size = SearchService.DEFAULT_SIZE; + // trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; + // } else { + // from = 
source.from() == -1 ? SearchService.DEFAULT_FROM : source.from(); + // size = source.size() == -1 ? SearchService.DEFAULT_SIZE : source.size(); + // trackTotalHitsUpTo = source.trackTotalHitsUpTo() == null + // ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO + // : source.trackTotalHitsUpTo(); + // // here we modify the original source so we can re-use it by setting it to each outgoing search request + // source.from(0); + // source.size(from + size); + // } + // return new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, aggReduceContextBuilder); + // } + + static void collectSearchShards( + IndicesOptions indicesOptions, + String preference, + String routing, + AtomicInteger skippedClusters, + Map remoteIndicesByCluster, + RemoteClusterService remoteClusterService, + ThreadPool threadPool, + ActionListener> listener + ) { + final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); + final Map searchShardsResponses = new ConcurrentHashMap<>(); + final AtomicReference exceptions = new AtomicReference<>(); + for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { + final String clusterAlias = entry.getKey(); + boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + Client clusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + final String[] indices = entry.getValue().indices(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions(indicesOptions) + .local(true) + .preference(preference) + .routing(routing); + clusterClient.admin() + .cluster() + .searchShards( + searchShardsRequest, + new CCSActionListener>( + clusterAlias, + skipUnavailable, + responsesCountDown, + skippedClusters, + exceptions, + listener + ) { + @Override + void innerOnResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { + searchShardsResponses.put(clusterAlias, clusterSearchShardsResponse); + } + + @Override + Map createFinalResponse() { + return searchShardsResponses; + } + } + ); + } + } + + // private static ActionListener createCCSListener( + // String clusterAlias, + // boolean skipUnavailable, + // CountDown countDown, + // AtomicInteger skippedClusters, + // AtomicReference exceptions, + // SearchResponseMerger searchResponseMerger, + // int totalClusters, + // ActionListener originalListener + // ) { + // return new CCSActionListener( + // clusterAlias, + // skipUnavailable, + // countDown, + // skippedClusters, + // exceptions, + // originalListener + // ) { + // @Override + // void innerOnResponse(ProtobufSearchResponse searchResponse) { + // searchResponseMerger.add(searchResponse); + // } + + // @Override + // ProtobufSearchResponse createFinalResponse() { + // ProtobufSearchResponse.Clusters clusters = new ProtobufSearchResponse.Clusters( + // totalClusters, + // searchResponseMerger.numResponses(), + // skippedClusters.get() + // ); + // return searchResponseMerger.getMergedResponse(clusters); + // } + // }; + // } + + private void executeLocalSearch( + ProtobufTask task, + SearchTimeProvider timeProvider, + ProtobufSearchRequest searchRequest, + OriginalIndices localIndices, + ClusterState clusterState, + ActionListener listener, + SearchContextId searchContext, + SearchAsyncActionProvider searchAsyncActionProvider + ) { + System.out.println("Task is: " + task.getClass().getName()); + System.out.println("Task is: " + task); + executeSearch( + (ProtobufSearchTask) task, + timeProvider, + searchRequest, + localIndices, + 
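+            // local-only search: no cross-cluster targets, so empty remote shard iterators
+            // and a remote cluster-node lookup that always returns null are passed through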
Collections.emptyList(), + (clusterName, nodeId) -> null, + clusterState, + Collections.emptyMap(), + listener, + ProtobufSearchResponse.Clusters.EMPTY, + searchContext, + searchAsyncActionProvider + ); + } + + static BiFunction getRemoteClusterNodeLookup(Map searchShardsResp) { + Map> clusterToNode = new HashMap<>(); + for (Map.Entry entry : searchShardsResp.entrySet()) { + String clusterAlias = entry.getKey(); + for (DiscoveryNode remoteNode : entry.getValue().getNodes()) { + clusterToNode.computeIfAbsent(clusterAlias, k -> new HashMap<>()).put(remoteNode.getId(), remoteNode); + } + } + return (clusterAlias, nodeId) -> { + Map clusterNodes = clusterToNode.get(clusterAlias); + if (clusterNodes == null) { + throw new IllegalArgumentException("unknown remote cluster: " + clusterAlias); + } + return clusterNodes.get(nodeId); + }; + } + + static Map getRemoteAliasFilters(Map searchShardsResp) { + final Map aliasFilterMap = new HashMap<>(); + for (Map.Entry entry : searchShardsResp.entrySet()) { + ClusterSearchShardsResponse searchShardsResponse = entry.getValue(); + final Map indicesAndFilters = searchShardsResponse.getIndicesAndFilters(); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + final AliasFilter aliasFilter; + if (indicesAndFilters == null) { + aliasFilter = AliasFilter.EMPTY; + } else { + aliasFilter = indicesAndFilters.get(shardId.getIndexName()); + assert aliasFilter != null : "alias filter must not be null for index: " + shardId.getIndex(); + } + // here we have to map the filters to the UUID since from now on we use the uuid for the lookup + aliasFilterMap.put(shardId.getIndex().getUUID(), aliasFilter); + } + } + return aliasFilterMap; + } + + static List getRemoteShardsIterator( + Map searchShardsResponses, + Map remoteIndicesByCluster, + Map aliasFilterMap + ) { + final List remoteShardIterators = new ArrayList<>(); + for (Map.Entry entry : searchShardsResponses.entrySet()) { + for (ClusterSearchShardsGroup clusterSearchShardsGroup : entry.getValue().getGroups()) { + // add the cluster name to the remote index names for indices disambiguation + // this ends up in the hits returned with the search response + ShardId shardId = clusterSearchShardsGroup.getShardId(); + AliasFilter aliasFilter = aliasFilterMap.get(shardId.getIndex().getUUID()); + String[] aliases = aliasFilter.getAliases(); + String clusterAlias = entry.getKey(); + String[] finalIndices = aliases.length == 0 ? 
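+                // when the index was addressed through aliases, report those alias names in
+                // the response instead of the concrete index name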
new String[] { shardId.getIndexName() } : aliases; + final OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias); + assert originalIndices != null : "original indices are null for clusterAlias: " + clusterAlias; + SearchShardIterator shardIterator = new SearchShardIterator( + clusterAlias, + shardId, + Arrays.asList(clusterSearchShardsGroup.getShards()), + new OriginalIndices(finalIndices, originalIndices.indicesOptions()) + ); + remoteShardIterators.add(shardIterator); + } + } + return remoteShardIterators; + } + + static List getRemoteShardsIteratorFromPointInTime( + Map searchShardsResponses, + SearchContextId searchContextId, + TimeValue searchContextKeepAlive, + Map remoteClusterIndices + ) { + final List remoteShardIterators = new ArrayList<>(); + for (Map.Entry entry : searchShardsResponses.entrySet()) { + for (ClusterSearchShardsGroup group : entry.getValue().getGroups()) { + final ShardId shardId = group.getShardId(); + final String clusterAlias = entry.getKey(); + final SearchContextIdForNode perNode = searchContextId.shards().get(shardId); + assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias(); + final List targetNodes = Collections.singletonList(perNode.getNode()); + SearchShardIterator shardIterator = new SearchShardIterator( + clusterAlias, + shardId, + targetNodes, + remoteClusterIndices.get(clusterAlias), + perNode.getSearchContextId(), + searchContextKeepAlive + ); + remoteShardIterators.add(shardIterator); + } + } + return remoteShardIterators; + } + + private Index[] resolveLocalIndices(OriginalIndices localIndices, ClusterState clusterState, SearchTimeProvider timeProvider) { + if (localIndices == null) { + return Index.EMPTY_ARRAY; // don't search on any local index (happens when only remote indices were specified) + } + return indexNameExpressionResolver.concreteIndices(clusterState, localIndices, timeProvider.getAbsoluteStartMillis()); + } + + private void executeSearch( + ProtobufSearchTask task, + SearchTimeProvider timeProvider, + ProtobufSearchRequest searchRequest, + OriginalIndices localIndices, + List remoteShardIterators, + BiFunction remoteConnections, + ClusterState clusterState, + Map remoteAliasMap, + ActionListener listener, + ProtobufSearchResponse.Clusters clusters, + @Nullable SearchContextId searchContext, + SearchAsyncActionProvider searchAsyncActionProvider + ) { + System.out.println("ProtobufTransportSearchAction executeSearch"); + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + + // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name + // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead + // of just for the _search api + final List localShardIterators; + final Map aliasFilter; + final Map> indexRoutings; + + final String[] concreteLocalIndices; + if (searchContext != null) { + assert searchRequest.pointInTimeBuilder() != null; + aliasFilter = searchContext.aliasFilter(); + indexRoutings = Collections.emptyMap(); + concreteLocalIndices = localIndices == null ? 
new String[0] : localIndices.indices(); + localShardIterators = getLocalLocalShardsIteratorFromPointInTime( + clusterState, + localIndices, + searchRequest.getLocalClusterAlias(), + searchContext, + searchRequest.pointInTimeBuilder().getKeepAlive() + ); + } else { + final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting( + clusterState, + searchRequest.routing(), + searchRequest.indices() + ); + routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap); + concreteLocalIndices = new String[indices.length]; + for (int i = 0; i < indices.length; i++) { + concreteLocalIndices[i] = indices[i].getName(); + } + Map nodeSearchCounts = searchTransportService.getPendingSearchRequests(); + GroupShardsIterator localShardRoutings = clusterService.operationRouting() + .searchShards( + clusterState, + concreteLocalIndices, + routingMap, + searchRequest.preference(), + searchService.getResponseCollectorService(), + nodeSearchCounts + ); + localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false) + .map(it -> new SearchShardIterator(searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices)) + .collect(Collectors.toList()); + aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); + indexRoutings = routingMap; + } + final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); + + failIfOverShardCountLimit(clusterService, shardIterators.size()); + + Map concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); + + // optimize search type for cases where there is only one shard group to search on + if (shardIterators.size() == 1) { + // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard + searchRequest.searchType(QUERY_THEN_FETCH); + } + if (searchRequest.allowPartialSearchResults() == null) { + // No user preference defined in search request - apply cluster service default + searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults()); + } + if (searchRequest.isSuggestOnly()) { + // disable request cache if we have only suggest + searchRequest.requestCache(false); + switch (searchRequest.searchType()) { + case DFS_QUERY_THEN_FETCH: + // convert to Q_T_F if we have only suggest + searchRequest.searchType(QUERY_THEN_FETCH); + break; + } + } + final DiscoveryNodes nodes = clusterState.nodes(); + BiFunction connectionLookup = buildConnectionLookup( + searchRequest.getLocalClusterAlias(), + nodes::get, + remoteConnections, + searchTransportService::getConnection + ); + final Executor asyncSearchExecutor = asyncSearchExecutor(concreteLocalIndices, clusterState); + final boolean preFilterSearchShards = shouldPreFilterSearchShards( + clusterState, + searchRequest, + concreteLocalIndices, + localShardIterators.size() + remoteShardIterators.size() + ); + System.out.println("Going to ProtobufAbstractSearchAsyncAction"); + System.out.println("Search request: " + searchRequest); + System.out.println("Cluster state: " + clusterState); + searchAsyncActionProvider.asyncSearchAction( + task, + searchRequest, + asyncSearchExecutor, + shardIterators, + timeProvider, + connectionLookup, + clusterState, + Collections.unmodifiableMap(aliasFilter), + concreteIndexBoosts, + indexRoutings, + listener, + preFilterSearchShards, + threadPool, + 
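+            // the final argument is the cross-cluster Clusters summary; start() below kicks
+            // off the first phase of the async action the provider builds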
clusters + ).start(); + } + + Executor asyncSearchExecutor(final String[] indices, final ClusterState clusterState) { + final boolean onlySystemIndices = Arrays.stream(indices).allMatch(index -> { + final IndexMetadata indexMetadata = clusterState.metadata().index(index); + return indexMetadata != null && indexMetadata.isSystem(); + }); + return onlySystemIndices ? threadPool.executor(ThreadPool.Names.SYSTEM_READ) : threadPool.executor(ThreadPool.Names.SEARCH); + } + + static BiFunction buildConnectionLookup( + String requestClusterAlias, + Function localNodes, + BiFunction remoteNodes, + BiFunction nodeToConnection + ) { + return (clusterAlias, nodeId) -> { + final DiscoveryNode discoveryNode; + final boolean remoteCluster; + if (clusterAlias == null || requestClusterAlias != null) { + assert requestClusterAlias == null || requestClusterAlias.equals(clusterAlias); + discoveryNode = localNodes.apply(nodeId); + remoteCluster = false; + } else { + discoveryNode = remoteNodes.apply(clusterAlias, nodeId); + remoteCluster = true; + } + if (discoveryNode == null) { + throw new IllegalStateException("no node found for id: " + nodeId); + } + return nodeToConnection.apply(remoteCluster ? clusterAlias : null, discoveryNode); + }; + } + + static boolean shouldPreFilterSearchShards(ClusterState clusterState, ProtobufSearchRequest searchRequest, String[] indices, int numShards) { + SearchSourceBuilder source = searchRequest.source(); + Integer preFilterShardSize = searchRequest.getPreFilterShardSize(); + if (preFilterShardSize == null && (hasReadOnlyIndices(indices, clusterState) || hasPrimaryFieldSort(source))) { + preFilterShardSize = 1; + } else if (preFilterShardSize == null) { + preFilterShardSize = ProtobufSearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE; + } + return searchRequest.searchType() == QUERY_THEN_FETCH // we can't do this for DFS it needs to fan out to all shards all the time + && (SearchService.canRewriteToMatchNone(source) || hasPrimaryFieldSort(source)) + && preFilterShardSize < numShards; + } + + private static boolean hasReadOnlyIndices(String[] indices, ClusterState clusterState) { + for (String index : indices) { + ClusterBlockException writeBlock = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, index); + if (writeBlock != null) { + return true; + } + } + return false; + } + + static GroupShardsIterator mergeShardsIterators( + List localShardIterators, + List remoteShardIterators + ) { + List shards = new ArrayList<>(remoteShardIterators); + shards.addAll(localShardIterators); + return GroupShardsIterator.sortAndCreate(shards); + } + + interface SearchAsyncActionProvider { + ProtobufAbstractSearchAsyncAction asyncSearchAction( + ProtobufSearchTask task, + ProtobufSearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardIterators, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + ProtobufSearchResponse.Clusters clusters + ); + } + + private ProtobufAbstractSearchAsyncAction searchAsyncAction( + ProtobufSearchTask task, + ProtobufSearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardIterators, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + 
ProtobufSearchResponse.Clusters clusters + ) { + if (preFilter) { + return new ProtobufCanMatchPreFilterSearchPhase( + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + searchRequest, + listener, + shardIterators, + timeProvider, + clusterState, + task, + (iter) -> { + ProtobufAbstractSearchAsyncAction action = searchAsyncAction( + task, + searchRequest, + executor, + iter, + timeProvider, + connectionLookup, + clusterState, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + listener, + false, + threadPool, + clusters + ); + return new SearchPhase(action.getName()) { + @Override + public void run() { + action.start(); + } + }; + }, + clusters + ); + } else { + final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( + executor, + circuitBreaker, + task.getProgressListener(), + searchRequest, + shardIterators.size(), + exc -> cancelTask(task, exc) + ); + ProtobufAbstractSearchAsyncAction searchAsyncAction; + switch (searchRequest.searchType()) { + // case DFS_QUERY_THEN_FETCH: + // searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction( + // logger, + // searchTransportService, + // connectionLookup, + // aliasFilter, + // concreteIndexBoosts, + // indexRoutings, + // searchPhaseController, + // executor, + // queryResultConsumer, + // searchRequest, + // listener, + // shardIterators, + // timeProvider, + // clusterState, + // task, + // clusters + // ); + // break; + case QUERY_THEN_FETCH: + searchAsyncAction = new ProtobufSearchQueryThenFetchAsyncAction( + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + searchPhaseController, + executor, + queryResultConsumer, + searchRequest, + listener, + shardIterators, + timeProvider, + clusterState, + task, + clusters + ); + break; + default: + throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); + } + return searchAsyncAction; + } + } + + private void cancelTask(ProtobufSearchTask task, Exception exc) { + String errorMsg = exc.getMessage() != null ? exc.getMessage() : ""; + CancelTasksRequest req = new CancelTasksRequest().setTaskId(new TaskId(client.getLocalNodeId(), task.getId())) + .setReason("Fatal failure during search: " + errorMsg); + // force the origin to execute the cancellation as a system user + // new ProtobufOriginSettingClient(client, TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); + } + + private static void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { + final long shardCountLimit = clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING); + if (shardCount > shardCountLimit) { + throw new IllegalArgumentException( + "Trying to query " + + shardCount + + " shards, which is over the limit of " + + shardCountLimit + + ". This limit exists because querying many shards at the same time can make the " + + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to " + + "have a smaller number of larger shards. Update [" + + SHARD_COUNT_LIMIT_SETTING.getKey() + + "] to a greater value if you really want to query that many shards at the same time." 
+            );
+        }
+    }
+
+    /**
+     * Cross-cluster search (CCS) listener
+     *
+     * @opensearch.internal
+     */
+    abstract static class CCSActionListener<Response, FinalResponse> implements ActionListener<Response> {
+        private final String clusterAlias;
+        private final boolean skipUnavailable;
+        private final CountDown countDown;
+        private final AtomicInteger skippedClusters;
+        private final AtomicReference<Exception> exceptions;
+        private final ActionListener<FinalResponse> originalListener;
+
+        CCSActionListener(
+            String clusterAlias,
+            boolean skipUnavailable,
+            CountDown countDown,
+            AtomicInteger skippedClusters,
+            AtomicReference<Exception> exceptions,
+            ActionListener<FinalResponse> originalListener
+        ) {
+            this.clusterAlias = clusterAlias;
+            this.skipUnavailable = skipUnavailable;
+            this.countDown = countDown;
+            this.skippedClusters = skippedClusters;
+            this.exceptions = exceptions;
+            this.originalListener = originalListener;
+        }
+
+        @Override
+        public final void onResponse(Response response) {
+            innerOnResponse(response);
+            maybeFinish();
+        }
+
+        abstract void innerOnResponse(Response response);
+
+        @Override
+        public final void onFailure(Exception e) {
+            if (skipUnavailable) {
+                skippedClusters.incrementAndGet();
+            } else {
+                Exception exception = e;
+                if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) {
+                    exception = wrapRemoteClusterFailure(clusterAlias, e);
+                }
+                if (exceptions.compareAndSet(null, exception) == false) {
+                    exceptions.accumulateAndGet(exception, (previous, current) -> {
+                        current.addSuppressed(previous);
+                        return current;
+                    });
+                }
+            }
+            maybeFinish();
+        }
+
+        private void maybeFinish() {
+            if (countDown.countDown()) {
+                Exception exception = exceptions.get();
+                if (exception == null) {
+                    FinalResponse response;
+                    try {
+                        response = createFinalResponse();
+                    } catch (Exception e) {
+                        originalListener.onFailure(e);
+                        return;
+                    }
+                    originalListener.onResponse(response);
+                } else {
+                    originalListener.onFailure(exceptions.get());
+                }
+            }
+        }
+
+        abstract FinalResponse createFinalResponse();
+    }
+
+    private static RemoteTransportException wrapRemoteClusterFailure(String clusterAlias, Exception e) {
+        return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e);
+    }
+
+    static Map<String, OriginalIndices> getIndicesFromSearchContexts(SearchContextId searchContext, IndicesOptions indicesOptions) {
+        final Map<String, Set<String>> indices = new HashMap<>();
+        for (Map.Entry<ShardId, SearchContextIdForNode> entry : searchContext.shards().entrySet()) {
+            String clusterAlias = entry.getValue().getClusterAlias() == null
+                ?
RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY + : entry.getValue().getClusterAlias(); + indices.computeIfAbsent(clusterAlias, k -> new HashSet<>()).add(entry.getKey().getIndexName()); + } + return indices.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new OriginalIndices(e.getValue().toArray(new String[0]), indicesOptions))); + } + + static List getLocalLocalShardsIteratorFromPointInTime( + ClusterState clusterState, + OriginalIndices originalIndices, + String localClusterAlias, + SearchContextId searchContext, + TimeValue keepAlive + ) { + final List iterators = new ArrayList<>(searchContext.shards().size()); + for (Map.Entry entry : searchContext.shards().entrySet()) { + final SearchContextIdForNode perNode = entry.getValue(); + if (Strings.isEmpty(perNode.getClusterAlias())) { + final ShardId shardId = entry.getKey(); + OperationRouting.getShards(clusterState, shardId); + final List targetNodes = Collections.singletonList(perNode.getNode()); + iterators.add( + new SearchShardIterator( + localClusterAlias, + shardId, + targetNodes, + originalIndices, + perNode.getSearchContextId(), + keepAlive + ) + ); + } + } + return iterators; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufActionFilter.java b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilter.java new file mode 100644 index 0000000000000..bf20ddae5fbce --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilter.java @@ -0,0 +1,65 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.tasks.ProtobufTask; + +/** + * A filter allowing to filter transport actions +* +* @opensearch.internal +*/ +public interface ProtobufActionFilter { + + /** + * The position of the filter in the chain. Execution is done from lowest order to highest. + */ + int order(); + + /** + * Enables filtering the execution of an action on the request side, either by sending a response through the + * {@link ActionListener} or by continuing the execution through the given {@link ActionFilterChain chain} + */ + void apply( + ProtobufTask task, + String action, + Request request, + ActionListener listener, + ProtobufActionFilterChain chain + ); + + /** + * A simple base class for injectable action filters that spares the implementation from handling the + * filter chain. This base class should serve any action filter implementations that doesn't require + * to apply async filtering logic. + */ + abstract class Simple implements ProtobufActionFilter { + @Override + public final void apply( + ProtobufTask task, + String action, + Request request, + ActionListener listener, + ProtobufActionFilterChain chain + ) { + if (apply(action, request, listener)) { + chain.proceed(task, action, request, listener); + } + } + + /** + * Applies this filter and returns {@code true} if the execution chain should proceed, or {@code false} + * if it should be aborted since the filter already handled the request and called the given listener. 
+         */
+        protected abstract boolean apply(String action, ProtobufActionRequest request, ActionListener<?> listener);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufActionFilterChain.java b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilterChain.java
new file mode 100644
index 0000000000000..216797f1ea1a0
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilterChain.java
@@ -0,0 +1,28 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.support;
+
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.tasks.ProtobufTask;
+
+/**
+ * A filter chain allowing to continue and process the transport action request
+*
+* @opensearch.internal
+*/
+public interface ProtobufActionFilterChain<Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> {
+
+    /**
+     * Continue processing the request. Should only be called if a response has not been sent through
+     * the given {@link ActionListener listener}
+     */
+    void proceed(ProtobufTask task, String action, Request request, ActionListener<Response> listener);
+}
diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufActionFilters.java b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilters.java
new file mode 100644
index 0000000000000..a1395cbf73150
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/support/ProtobufActionFilters.java
@@ -0,0 +1,40 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.action.support;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Set;
+
+/**
+ * Holds the action filters injected through plugins, properly sorted by {@link org.opensearch.action.support.ProtobufActionFilter#order()}
+*
+* @opensearch.internal
+*/
+public class ProtobufActionFilters {
+
+    private final ProtobufActionFilter[] filters;
+
+    public ProtobufActionFilters(Set<ProtobufActionFilter> actionFilters) {
+        this.filters = actionFilters.toArray(new ProtobufActionFilter[0]);
+        Arrays.sort(filters, new Comparator<ProtobufActionFilter>() {
+            @Override
+            public int compare(ProtobufActionFilter o1, ProtobufActionFilter o2) {
+                return Integer.compare(o1.order(), o2.order());
+            }
+        });
+    }
+
+    /**
+     * Returns the action filters that have been injected
+     */
+    public ProtobufActionFilter[] filters() {
+        return filters;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufChannelActionListener.java b/server/src/main/java/org/opensearch/action/support/ProtobufChannelActionListener.java
new file mode 100644
index 0000000000000..5e1bf4d6e9835
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/support/ProtobufChannelActionListener.java
@@ -0,0 +1,48 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/ + +package org.opensearch.action.support; + +import org.opensearch.action.ActionListener; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.core.transport.TransportResponse; + +/** + * Listener for transport channel actions +* +* @opensearch.internal +*/ +public final class ProtobufChannelActionListener + implements + ActionListener { + + private final TransportChannel channel; + private final Request request; + private final String actionName; + + public ProtobufChannelActionListener(TransportChannel channel, String actionName, Request request) { + this.channel = channel; + this.request = request; + this.actionName = actionName; + } + + @Override + public void onResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + TransportChannel.sendErrorResponse(channel, actionName, request, e); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java new file mode 100644 index 0000000000000..5c7007bd8b05d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/ProtobufHandledTransportAction.java @@ -0,0 +1,89 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support; + +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufTransportRequestHandler; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportService; + +/** + * A ProtobufTransportAction that self registers a handler into the transport service +* +* @opensearch.internal +*/ +public abstract class ProtobufHandledTransportAction extends + ProtobufTransportAction { + + protected ProtobufHandledTransportAction( + String actionName, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader + ) { + this(actionName, true, transportService, actionFilters, requestReader); + } + + protected ProtobufHandledTransportAction( + String actionName, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader, + String executor + ) { + this(actionName, true, transportService, actionFilters, requestReader, executor); + } + + protected ProtobufHandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader + ) { + this(actionName, canTripCircuitBreaker, transportService, actionFilters, requestReader, ThreadPool.Names.SAME); + } + + protected ProtobufHandledTransportAction( + String actionName, + boolean canTripCircuitBreaker, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader requestReader, + String executor + ) { + super(actionName, actionFilters, transportService.getTaskManager()); + transportService.registerRequestHandlerProtobuf( + actionName, + 
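+            // remaining args (assuming registerRequestHandlerProtobuf mirrors
+            // TransportService#registerRequestHandler): executor, forceExecution=false,
+            // canTripCircuitBreaker, request reader, and the handler itself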
executor, + false, + canTripCircuitBreaker, + requestReader, + new TransportHandler() + ); + } + + /** + * Inner transport handler + * + * @opensearch.internal + */ + class TransportHandler implements ProtobufTransportRequestHandler { + @Override + public final void messageReceived(final Request request, final TransportChannel channel, ProtobufTask task) { + // We already got the task created on the network layer - no need to create it again on the transport layer + execute(task, request, new ProtobufChannelActionListener<>(channel, actionName, request)); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java new file mode 100644 index 0000000000000..45ea84321d113 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/ProtobufTransportAction.java @@ -0,0 +1,241 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.ProtobufTaskId; +import org.opensearch.tasks.TaskCancelledException; +import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.ProtobufTaskListener; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Base class for a transport action +* +* @opensearch.internal +*/ +public abstract class ProtobufTransportAction { + + public final String actionName; + private final ProtobufActionFilter[] filters; + protected final TaskManager taskManager; + /** + * @deprecated declare your own logger. + */ + @Deprecated + protected Logger logger = LogManager.getLogger(getClass()); + + protected ProtobufTransportAction(String actionName, ProtobufActionFilters actionFilters, TaskManager taskManager) { + this.actionName = actionName; + this.filters = actionFilters.filters(); + this.taskManager = taskManager; + } + + private Releasable registerChildNode(ProtobufTaskId parentTask) { + if (parentTask.isSet()) { + return taskManager.registerProtobufChildNode(parentTask.getId(), taskManager.localNode()); + } else { + return () -> {}; + } + } + + /** + * Use this method when the transport action call should result in creation of a new task associated with the call. + * + * This is a typical behavior. + */ + public final ProtobufTask execute(Request request, ActionListener listener) { + /* + * While this version of execute could delegate to the ProtobufTaskListener + * version of execute that'd add yet another layer of wrapping on the + * listener and prevent us from using the listener bare if there isn't a + * task. That just seems like too many objects. Thus the two versions of + * this method. 
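+         * The ProtobufTaskListener variant below reports completion together with the task.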
+         */
+        final Releasable unregisterChildNode = registerChildNode(request.getProtobufParentTask());
+        final ProtobufTask task;
+
+        try {
+            task = taskManager.registerProtobuf("transport", actionName, request);
+            logger.trace("protobuf task registered from execute: {}", task);
+        } catch (TaskCancelledException e) {
+            unregisterChildNode.close();
+            throw e;
+        }
+
+        ThreadContext.StoredContext storedContext = taskManager.protobufTaskExecutionStarted(task);
+        try {
+            execute(task, request, new ActionListener<Response>() {
+                @Override
+                public void onResponse(Response response) {
+                    try {
+                        Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task));
+                    } finally {
+                        listener.onResponse(response);
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    try {
+                        Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task));
+                    } finally {
+                        listener.onFailure(e);
+                    }
+                }
+            });
+        } finally {
+            storedContext.close();
+        }
+
+        return task;
+    }
+
+    /**
+     * Execute the transport action on the local node, returning the {@link ProtobufTask} used to track its execution and accepting a
+     * {@link ProtobufTaskListener} which listens for the completion of the action.
+     */
+    public final ProtobufTask execute(Request request, ProtobufTaskListener<Response> listener) {
+        final Releasable unregisterChildNode = registerChildNode(request.getProtobufParentTask());
+        final ProtobufTask task;
+        try {
+            task = taskManager.registerProtobuf("transport", actionName, request);
+        } catch (TaskCancelledException e) {
+            unregisterChildNode.close();
+            throw e;
+        }
+        ThreadContext.StoredContext storedContext = taskManager.protobufTaskExecutionStarted(task);
+        try {
+            execute(task, request, new ActionListener<Response>() {
+                @Override
+                public void onResponse(Response response) {
+                    try {
+                        Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task));
+                    } finally {
+                        listener.onResponse(task, response);
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    try {
+                        Releasables.close(unregisterChildNode, () -> taskManager.unregisterProtobufTask(task));
+                    } finally {
+                        listener.onFailure(task, e);
+                    }
+                }
+            });
+        } finally {
+            storedContext.close();
+        }
+        return task;
+    }
+
+    /**
+     * Use this method when the transport action should continue to run in the context of the current task
+     */
+    public final void execute(ProtobufTask task, Request request, ActionListener<Response> listener) {
+        ActionRequestValidationException validationException = request.validate();
+        if (validationException != null) {
+            listener.onFailure(validationException);
+            return;
+        }
+
+        if (task != null && request.getShouldStoreResult()) {
+            listener = new TaskResultStoringActionListener<>(taskManager, task, listener);
+        }
+
+        RequestFilterChain<Request, Response> requestFilterChain = new RequestFilterChain<>(this, logger);
+        requestFilterChain.proceed(task, actionName, request, listener);
+    }
+
+    protected abstract void doExecute(ProtobufTask task, Request request, ActionListener<Response> listener);
+
+    /**
+     * A request filter chain
+     *
+     * @opensearch.internal
+     */
+    private static class RequestFilterChain<Request extends ProtobufActionRequest, Response extends ProtobufActionResponse>
+        implements
+        ProtobufActionFilterChain<Request, Response> {
+
+        private final ProtobufTransportAction<Request, Response> action;
+        private final AtomicInteger index = new AtomicInteger();
+        private final Logger logger;
+
+        private RequestFilterChain(ProtobufTransportAction<Request, Response> action, Logger logger) {
+            this.action = action;
+            this.logger = logger;
+        }
+
+        @Override
+        public void proceed(ProtobufTask task, String actionName, Request request, ActionListener<Response> listener) {
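+            // each call claims the next slot in the chain: slots below filters.length run that
+            // filter, the slot equal to filters.length runs doExecute, anything past it is a bug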
+ int i = index.getAndIncrement(); + try { + if (i < this.action.filters.length) { + this.action.filters[i].apply(task, actionName, request, listener, this); + } else if (i == this.action.filters.length) { + this.action.doExecute(task, request, listener); + } else { + listener.onFailure(new IllegalStateException("proceed was called too many times")); + } + } catch (Exception e) { + logger.trace("Error during transport action execution.", e); + listener.onFailure(e); + } + } + + } + + /** + * Wrapper for an action listener that stores the result at the end of the execution + * + * @opensearch.internal + */ + private static class TaskResultStoringActionListener implements ActionListener { + private final ActionListener delegate; + private final ProtobufTask task; + private final TaskManager taskManager; + + private TaskResultStoringActionListener(TaskManager taskManager, ProtobufTask task, ActionListener delegate) { + this.taskManager = taskManager; + this.task = task; + this.delegate = delegate; + } + + @Override + public void onResponse(Response response) { + try { + taskManager.storeResultProtobuf(task, response, delegate); + } catch (Exception e) { + delegate.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + taskManager.storeResultProtobuf(task, e, delegate); + } catch (Exception inner) { + inner.addSuppressed(e); + delegate.onFailure(inner); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadRequest.java new file mode 100644 index 0000000000000..8da301b87716c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeReadRequest.java @@ -0,0 +1,49 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.clustermanager; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Base request for cluster-manager based read operations that allows to read the cluster state from the local node if needed +* +* @opensearch.internal +*/ +public abstract class ProtobufClusterManagerNodeReadRequest> extends + ProtobufClusterManagerNodeRequest { + + protected boolean local = false; + + protected ProtobufClusterManagerNodeReadRequest() {} + + protected ProtobufClusterManagerNodeReadRequest(byte[] in) throws IOException { + + } + + @Override + public void writeTo(OutputStream out) throws IOException { + + } + + @SuppressWarnings("unchecked") + public final Request local(boolean local) { + this.local = local; + return (Request) this; + } + + /** + * Return local information, do not retrieve the state from cluster-manager node (default: false). + * @return true if local information is to be returned; + * false if information is to be retrieved from cluster-manager node (default). 
+ */ + public final boolean local() { + return local; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeRequest.java new file mode 100644 index 0000000000000..6b6704d36ecc6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufClusterManagerNodeRequest.java @@ -0,0 +1,84 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.clustermanager; + +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.common.unit.TimeValue; + +/** + * A protobuf request for cluster-manager based operation. +* +* @opensearch.internal +*/ +public abstract class ProtobufClusterManagerNodeRequest> extends + ProtobufActionRequest { + + public static final TimeValue DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); + + /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT} */ + @Deprecated + public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; + + protected TimeValue clusterManagerNodeTimeout = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; + + /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout} */ + @Deprecated + protected TimeValue masterNodeTimeout = clusterManagerNodeTimeout; + + protected ProtobufClusterManagerNodeRequest() {} + + /** + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. + */ + @SuppressWarnings("unchecked") + public final Request clusterManagerNodeTimeout(TimeValue timeout) { + this.clusterManagerNodeTimeout = timeout; + return (Request) this; + } + + /** + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. + * + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout(TimeValue)} + */ + @SuppressWarnings("unchecked") + @Deprecated + public final Request masterNodeTimeout(TimeValue timeout) { + return clusterManagerNodeTimeout(timeout); + } + + /** + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. + */ + public final Request clusterManagerNodeTimeout(String timeout) { + return clusterManagerNodeTimeout( + TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".clusterManagerNodeTimeout") + ); + } + + /** + * A timeout value in case the cluster-manager has not been discovered yet or disconnected. 
+ * + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout(String)} + */ + @Deprecated + public final Request masterNodeTimeout(String timeout) { + return clusterManagerNodeTimeout(timeout); + } + + public final TimeValue clusterManagerNodeTimeout() { + return this.clusterManagerNodeTimeout; + } + + /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout()} */ + @Deprecated + public final TimeValue masterNodeTimeout() { + return clusterManagerNodeTimeout(); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java new file mode 100644 index 0000000000000..93ef1c8f2cbe5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeAction.java @@ -0,0 +1,355 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.clustermanager; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionListenerResponseHandler; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ActionRunnable; +import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.ProtobufHandledTransportAction; +import org.opensearch.action.support.RetryableAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateObserver; +import org.opensearch.cluster.ClusterManagerNodeChangePredicate; +import org.opensearch.cluster.NotClusterManagerException; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; +import org.opensearch.cluster.service.ClusterManagerThrottlingException; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; +import org.opensearch.node.NodeClosedException; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.RemoteTransportException; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.function.Predicate; + +/** + * A base class for operations that needs to be performed on the cluster-manager node. 
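+ * Requests are routed to the elected cluster-manager node; on a cluster-manager change or
+ * connection failure the action observes the cluster state and retries.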
+ * + * @opensearch.internal + */ +public abstract class ProtobufTransportClusterManagerNodeAction< + Request extends ProtobufClusterManagerNodeRequest, + Response extends ProtobufActionResponse> extends ProtobufHandledTransportAction { + + private static final Logger logger = LogManager.getLogger(ProtobufTransportClusterManagerNodeAction.class); + + protected final ThreadPool threadPool; + protected final TransportService transportService; + protected final ClusterService clusterService; + protected final IndexNameExpressionResolver indexNameExpressionResolver; + + private final String executor; + + protected ProtobufTransportClusterManagerNodeAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + } + + protected ProtobufTransportClusterManagerNodeAction( + String actionName, + boolean canTripCircuitBreaker, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, canTripCircuitBreaker, transportService, actionFilters, request); + this.transportService = transportService; + this.clusterService = clusterService; + this.threadPool = threadPool; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.executor = executor(); + } + + protected abstract String executor(); + + protected abstract Response read(byte[] in) throws IOException; + + /** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ProtobufClusterManagerNodeRequest, ClusterState, ActionListener)} + */ + @Deprecated + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + throw new UnsupportedOperationException("Must be overridden"); + } + + // TODO: Add abstract keyword after removing the deprecated masterOperation() + protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + masterOperation(request, state, listener); + } + + /** + * Override this operation if access to the task parameter is needed + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ProtobufTask, ProtobufClusterManagerNodeRequest, ClusterState, ActionListener)} + */ + @Deprecated + protected void masterOperation(ProtobufTask task, Request request, ClusterState state, ActionListener listener) + throws Exception { + clusterManagerOperation(request, state, listener); + } + + /** + * Override this operation if access to the task parameter is needed + */ + // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation() + protected void clusterManagerOperation(ProtobufTask task, Request request, ClusterState state, ActionListener listener) + throws Exception { + masterOperation(task, request, state, listener); + } + + protected boolean localExecute(Request request) { + return false; + } + + protected abstract ClusterBlockException checkBlock(Request request, ClusterState state); + + @Override + protected void doExecute(ProtobufTask 
task, final Request request, ActionListener listener) { + if (task != null) { + request.setParentTask(clusterService.localNode().getId(), task.getId()); + } + new AsyncSingleAction(task, request, listener).run(); + } + + /** + * Asynchronous single action + * + * @opensearch.internal + */ + class AsyncSingleAction extends RetryableAction { + + private ActionListener listener; + private final Request request; + private ClusterStateObserver observer; + private final long startTime; + private final ProtobufTask task; + + AsyncSingleAction(ProtobufTask task, Request request, ActionListener listener) { + super( + logger, + threadPool, + ClusterManagerTaskThrottler.getBaseDelayForRetry(), + request.clusterManagerNodeTimeout, + listener, + BackoffPolicy.exponentialEqualJitterBackoff( + ClusterManagerTaskThrottler.getBaseDelayForRetry().millis(), + ClusterManagerTaskThrottler.getMaxDelayForRetry().millis() + ), + ThreadPool.Names.SAME + ); + this.task = task; + this.request = request; + this.startTime = threadPool.relativeTimeInMillis(); + } + + @Override + public void tryAction(ActionListener retryListener) { + ClusterState state = clusterService.state(); + logger.trace("starting processing request [{}] with cluster state version [{}]", request, state.version()); + this.listener = retryListener; + doStart(state); + } + + @Override + public boolean shouldRetry(Exception e) { + // If remote address is null, i.e request is generated from same node and we would want to perform retry for it + // If remote address is not null, i.e request is generated from remote node and received on this master node on transport layer + // in that case we would want throttling retry to perform on remote node only not on this master node. + if (request.remoteAddress() == null) { + if (e instanceof TransportException) { + return ((TransportException) e).unwrapCause() instanceof ClusterManagerThrottlingException; + } + return e instanceof ClusterManagerThrottlingException; + } + return false; + } + + /** + * If tasks gets timed out in retrying on throttling, + * it should send cluster event timeout exception. 
+ */ + @Override + public Exception getTimeoutException(Exception e) { + return new ProcessClusterEventTimeoutException(request.masterNodeTimeout, actionName); + } + + protected void doStart(ClusterState clusterState) { + try { + final DiscoveryNodes nodes = clusterService.state().nodes(); + if (nodes.isLocalNodeElectedClusterManager() || localExecute(request)) { + // check for block, if blocked, retry, else, execute locally + final ClusterBlockException blockException = checkBlock(request, clusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.debug("can't execute due to a cluster block, retrying", blockException); + retry(clusterState, blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) { + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); + } + } else { + ActionListener delegate = ActionListener.delegateResponse(listener, (delegatedListener, t) -> { + if (t instanceof FailedToCommitClusterStateException || t instanceof NotClusterManagerException) { + logger.debug( + () -> new ParameterizedMessage( + "master could not publish cluster state or " + + "stepped down before publishing action [{}], scheduling a retry", + actionName + ), + t + ); + retryOnMasterChange(clusterState, t); + } else { + delegatedListener.onFailure(t); + } + }); + threadPool.executor(executor) + .execute(ActionRunnable.wrap(delegate, l -> clusterManagerOperation(task, request, clusterState, l))); + } + } else { + if (nodes.getClusterManagerNode() == null) { + logger.debug("no known cluster-manager node, scheduling a retry"); + retryOnMasterChange(clusterState, null); + } else { + DiscoveryNode clusterManagerNode = nodes.getClusterManagerNode(); + final String actionName = getClusterManagerActionName(clusterManagerNode); + transportService.sendRequest( + clusterManagerNode, + actionName, + request, + new ProtobufActionListenerResponseHandler( + listener, + ProtobufTransportClusterManagerNodeAction.this::read + ) { + @Override + public void handleException(final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException + || (exp instanceof RemoteTransportException && cause instanceof NodeClosedException)) { + // we want to retry here a bit to see if a new cluster-manager is elected + logger.debug( + "connection exception while trying to forward request with action name [{}] to " + + "master node [{}], scheduling a retry. 
Error: [{}]", + actionName, + nodes.getClusterManagerNode(), + exp.getDetailedMessage() + ); + retryOnMasterChange(clusterState, cause); + } else { + listener.onFailure(exp); + } + } + } + ); + } + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + private void retryOnMasterChange(ClusterState state, Throwable failure) { + retry(state, failure, ClusterManagerNodeChangePredicate.build(state)); + } + + private void retry(ClusterState state, final Throwable failure, final Predicate statePredicate) { + if (observer == null) { + final long remainingTimeoutMS = request.clusterManagerNodeTimeout().millis() - (threadPool.relativeTimeInMillis() + - startTime); + if (remainingTimeoutMS <= 0) { + logger.debug(() -> new ParameterizedMessage("timed out before retrying [{}] after failure", actionName), failure); + listener.onFailure(new ClusterManagerNotDiscoveredException(failure)); + return; + } + this.observer = new ClusterStateObserver( + state, + clusterService, + TimeValue.timeValueMillis(remainingTimeoutMS), + logger, + threadPool.getThreadContext() + ); + } + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + doStart(state); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(clusterService.localNode())); + } + + @Override + public void onTimeout(TimeValue timeout) { + logger.debug( + () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), + failure + ); + listener.onFailure(new ClusterManagerNotDiscoveredException(failure)); + } + }, statePredicate); + } + } + + /** + * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed. + * This mainly for backwards compatibility should be used rarely + */ + protected String getClusterManagerActionName(DiscoveryNode node) { + return actionName; + } + + /** + * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed. + * This mainly for backwards compatibility should be used rarely + * + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #getClusterManagerActionName(DiscoveryNode)} + */ + @Deprecated + protected String getMasterActionName(DiscoveryNode node) { + return getClusterManagerActionName(node); + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java new file mode 100644 index 0000000000000..5d82ed341611b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ProtobufTransportClusterManagerNodeReadAction.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.support.clustermanager; + +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +/** + * A base class for read operations that needs to be performed on the cluster-manager node. + * Can also be executed on the local node if needed. + * + * @opensearch.internal + */ +public abstract class ProtobufTransportClusterManagerNodeReadAction< + Request extends ProtobufClusterManagerNodeReadRequest, + Response extends ProtobufActionResponse> extends ProtobufTransportClusterManagerNodeAction { + + protected ProtobufTransportClusterManagerNodeReadAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + } + + protected ProtobufTransportClusterManagerNodeReadAction( + String actionName, + boolean checkSizeLimit, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + checkSizeLimit, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + + @Override + protected final boolean localExecute(Request request) { + return request.local(); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java new file mode 100644 index 0000000000000..a164d423c5bc7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodeResponse.java @@ -0,0 +1,39 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.nodes; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.transport.TransportResponse; + +/** + * A base class for node level operations. +* +* @opensearch.internal +*/ +public abstract class ProtobufBaseNodeResponse extends TransportResponse { + + private DiscoveryNode node; + + protected ProtobufBaseNodeResponse(byte[] data) { + + } + + protected ProtobufBaseNodeResponse(DiscoveryNode node) { + assert node != null; + this.node = node; + } + + /** + * The node this information relates to. 
+ */ + public DiscoveryNode getNode() { + return node; + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java new file mode 100644 index 0000000000000..861315f599ade --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesRequest.java @@ -0,0 +1,95 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.nodes; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +/** + * Base class for requests targeting a list of nodes +* +* @opensearch.internal +*/ +public abstract class ProtobufBaseNodesRequest> extends ProtobufActionRequest { + + /** + * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} + * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds + * will be ignored. + * + * See {@link DiscoveryNodes#resolveNodes} for a full description of the options. + * + * TODO: get rid of this and resolve it to concrete nodes in the rest layer + **/ + private String[] nodesIds; + + /** + * once {@link #nodesIds} are resolved this will contain the concrete nodes that are part of this request. If set, {@link #nodesIds} + * will be ignored and this will be used. + * */ + private DiscoveryNode[] concreteNodes; + private final TimeValue DEFAULT_TIMEOUT_SECS = TimeValue.timeValueSeconds(30); + + private TimeValue timeout; + + protected ProtobufBaseNodesRequest(byte[] data) throws IOException {} + + protected ProtobufBaseNodesRequest(String... nodesIds) { + this.nodesIds = nodesIds; + } + + protected ProtobufBaseNodesRequest(DiscoveryNode... concreteNodes) { + this.nodesIds = null; + this.concreteNodes = concreteNodes; + } + + public final String[] nodesIds() { + return nodesIds; + } + + @SuppressWarnings("unchecked") + public final Request nodesIds(String... 
nodesIds) { + this.nodesIds = nodesIds; + return (Request) this; + } + + public TimeValue timeout() { + return this.timeout; + } + + @SuppressWarnings("unchecked") + public final Request timeout(TimeValue timeout) { + this.timeout = timeout; + return (Request) this; + } + + @SuppressWarnings("unchecked") + public final Request timeout(String timeout) { + this.timeout = TimeValue.parseTimeValue(timeout, DEFAULT_TIMEOUT_SECS, getClass().getSimpleName() + ".timeout"); + return (Request) this; + } + + public DiscoveryNode[] concreteNodes() { + return concreteNodes; + } + + public void setConcreteNodes(DiscoveryNode[] concreteNodes) { + this.concreteNodes = concreteNodes; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java new file mode 100644 index 0000000000000..798f06bf5ce75 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufBaseNodesResponse.java @@ -0,0 +1,100 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.nodes; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.cluster.ClusterName; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Transport response for nodes requests +* +* @opensearch.internal +*/ +public abstract class ProtobufBaseNodesResponse extends ProtobufActionResponse { + + private ClusterName clusterName; + private List failures; + private List nodes; + private Map nodesMap; + + protected ProtobufBaseNodesResponse(byte[] in) throws IOException { + super(in); + } + + protected ProtobufBaseNodesResponse(ClusterName clusterName, List nodes, List failures) { + this.clusterName = Objects.requireNonNull(clusterName); + this.failures = Objects.requireNonNull(failures); + this.nodes = Objects.requireNonNull(nodes); + } + + /** + * Get the {@link ClusterName} associated with all of the nodes. + * + * @return Never {@code null}. + */ + public ClusterName getClusterName() { + return clusterName; + } + + /** + * Get the failed node exceptions. + * + * @return Never {@code null}. Can be empty. + */ + public List failures() { + return failures; + } + + /** + * Determine if there are any node failures in {@link #failures}. + * + * @return {@code true} if {@link #failures} contains at least 1 {@link FailedNodeException}. + */ + public boolean hasFailures() { + return failures.isEmpty() == false; + } + + /** + * Get the successful node responses. + * + * @return Never {@code null}. Can be empty. + * @see #hasFailures() + */ + public List getNodes() { + return nodes; + } + + /** + * Lazily build and get a map of Node ID to node response. + * + * @return Never {@code null}. Can be empty. 
+ * @see #getNodes() + */ + public Map getNodesMap() { + if (nodesMap == null) { + nodesMap = new HashMap<>(); + for (TNodeResponse nodeResponse : nodes) { + nodesMap.put(nodeResponse.getNode().getId(), nodeResponse); + } + } + return nodesMap; + } + + @Override + public void writeTo(OutputStream out) throws IOException {} + +} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java new file mode 100644 index 0000000000000..1ab16a429f205 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/nodes/ProtobufTransportNodesAction.java @@ -0,0 +1,314 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.action.support.nodes; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRunnable; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ProtobufActionFilters; +import org.opensearch.action.support.ProtobufHandledTransportAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NodeShouldNotConnectException; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.ProtobufTransportRequestHandler; +import org.opensearch.transport.TransportRequestOptions; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +/** + * Base action class for transport nodes +* +* @opensearch.internal +*/ +public abstract class ProtobufTransportNodesAction< + NodesRequest extends ProtobufBaseNodesRequest, + NodesResponse extends ProtobufBaseNodesResponse, + NodeRequest extends TransportRequest, + NodeResponse extends ProtobufBaseNodeResponse> extends ProtobufHandledTransportAction { + + protected final ThreadPool threadPool; + protected final ClusterService clusterService; + protected final TransportService transportService; + protected final Class nodeResponseClass; + protected final String transportNodeAction; + + private final String finalExecutor; + + /** + * @param actionName action name + * @param threadPool thread-pool + * @param clusterService cluster service + * @param transportService transport service + * @param actionFilters action filters + * @param request node request writer + * @param nodeRequest node request reader + * @param nodeExecutor executor to execute node action on + * @param finalExecutor executor to execute final collection of all responses on + * @param nodeResponseClass class of the node responses + */ + protected ProtobufTransportNodesAction( + String actionName, 
+ ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + ProtobufWriteable.Reader nodeRequest, + String nodeExecutor, + String finalExecutor, + Class nodeResponseClass + ) { + super(actionName, transportService, actionFilters, request); + this.threadPool = threadPool; + this.clusterService = Objects.requireNonNull(clusterService); + this.transportService = Objects.requireNonNull(transportService); + this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass); + + this.transportNodeAction = actionName + "[n]"; + this.finalExecutor = finalExecutor; + transportService.registerRequestHandlerProtobuf(transportNodeAction, nodeExecutor, nodeRequest, new NodeTransportHandler()); + } + + /** + * Same as {@link #ProtobufTransportNodesAction(String, ThreadPool, ClusterService, TransportService, ProtobufActionFilters, ProtobufWriteable.Reader, + * ProtobufWriteable.Reader, String, String, Class)} but executes final response collection on the transport thread except for when the final + * node response is received from the local node, in which case {@code nodeExecutor} is used. + * This constructor should only be used for actions for which the creation of the final response is fast enough to be safely executed + * on a transport thread. + */ + protected ProtobufTransportNodesAction( + String actionName, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ProtobufActionFilters actionFilters, + ProtobufWriteable.Reader request, + ProtobufWriteable.Reader nodeRequest, + String nodeExecutor, + Class nodeResponseClass + ) { + this( + actionName, + threadPool, + clusterService, + transportService, + actionFilters, + request, + nodeRequest, + nodeExecutor, + ThreadPool.Names.SAME, + nodeResponseClass + ); + } + + @Override + protected void doExecute(ProtobufTask task, NodesRequest request, ActionListener listener) { + new AsyncAction(task, request, listener).start(); + } + + /** + * Map the responses into {@code nodeResponseClass} responses and {@link FailedNodeException}s. + * + * @param request The associated request. + * @param nodesResponses All node-level responses + * @return Never {@code null}. + * @throws NullPointerException if {@code nodesResponses} is {@code null} + * @see #newResponse(ProtobufBaseNodesRequest, List, List) + */ + protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray nodesResponses) { + final List responses = new ArrayList<>(); + final List failures = new ArrayList<>(); + + for (int i = 0; i < nodesResponses.length(); ++i) { + Object response = nodesResponses.get(i); + + if (response instanceof FailedNodeException) { + failures.add((FailedNodeException) response); + } else { + responses.add(nodeResponseClass.cast(response)); + } + } + + return newResponse(request, responses, failures); + } + + /** + * Create a new {@link NodesResponse} (multi-node response). + * + * @param request The associated request. + * @param responses All successful node-level responses. + * @param failures All node-level failures. + * @return Never {@code null}. + * @throws NullPointerException if any parameter is {@code null}. 
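+     * <p>
+     * A hedged sketch of a concrete subclass delegating to this constructor; the {@code Example*}
+     * names are hypothetical and not part of this change:
+     * <pre>{@code
+     * super(
+     *     ExampleNodesAction.NAME,
+     *     threadPool,
+     *     clusterService,
+     *     transportService,
+     *     actionFilters,
+     *     ExampleNodesRequest::new,
+     *     ExampleNodeRequest::new,
+     *     ThreadPool.Names.MANAGEMENT,
+     *     ExampleNodeResponse.class
+     * );
+     * }</pre>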
+ */ + protected abstract NodesResponse newResponse(NodesRequest request, List responses, List failures); + + protected abstract NodeRequest newNodeRequest(NodesRequest request); + + protected abstract NodeResponse newNodeResponse(byte[] in) throws IOException; + + protected abstract NodeResponse nodeOperation(NodeRequest request); + + protected NodeResponse nodeOperation(NodeRequest request, ProtobufTask task) { + return nodeOperation(request); + } + + /** + * resolve node ids to concrete nodes of the incoming request + **/ + protected void resolveRequest(NodesRequest request, ClusterState clusterState) { + assert request.concreteNodes() == null : "request concreteNodes shouldn't be set"; + String[] nodesIds = clusterState.nodes().resolveNodes(request.nodesIds()); + request.setConcreteNodes(Arrays.stream(nodesIds).map(clusterState.nodes()::get).toArray(DiscoveryNode[]::new)); + } + + /** + * Get a backwards compatible transport action name + */ + protected String getTransportNodeAction(DiscoveryNode node) { + return transportNodeAction; + } + + /** + * Asynchronous action + * + * @opensearch.internal + */ + class AsyncAction { + + private final NodesRequest request; + private final ActionListener listener; + private final AtomicReferenceArray responses; + private final AtomicInteger counter = new AtomicInteger(); + private final ProtobufTask task; + + AsyncAction(ProtobufTask task, NodesRequest request, ActionListener listener) { + this.task = task; + this.request = request; + this.listener = listener; + if (request.concreteNodes() == null) { + resolveRequest(request, clusterService.state()); + assert request.concreteNodes() != null; + } + this.responses = new AtomicReferenceArray<>(request.concreteNodes().length); + } + + void start() { + final DiscoveryNode[] nodes = request.concreteNodes(); + if (nodes.length == 0) { + // nothing to notify + threadPool.generic().execute(() -> listener.onResponse(newResponse(request, responses))); + return; + } + TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); + if (request.timeout() != null) { + builder.withTimeout(request.timeout()); + } + for (int i = 0; i < nodes.length; i++) { + final int idx = i; + final DiscoveryNode node = nodes[i]; + final String nodeId = node.getId(); + try { + TransportRequest nodeRequest = newNodeRequest(request); + if (task != null) { + nodeRequest.setProtobufParentTask(clusterService.localNode().getId(), task.getId()); + } + + transportService.sendRequest( + node, + getTransportNodeAction(node), + nodeRequest, + builder.build(), + new TransportResponseHandler() { + @Override + public void handleResponse(NodeResponse response) { + onOperation(idx, response); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public NodeResponse read(StreamInput in) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'read'"); + } + + @Override + public void handleException(TransportException exp) { + onFailure(idx, node.getId(), exp); + } + + @Override + public NodeResponse read(byte[] in) throws IOException { + return newNodeResponse(in); + } + } + ); + } catch (Exception e) { + onFailure(idx, nodeId, e); + } + } + } + + private void onOperation(int idx, NodeResponse nodeResponse) { + responses.set(idx, nodeResponse); + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + + private void onFailure(int idx, String nodeId, Throwable t) { + if (logger.isDebugEnabled() && 
!(t instanceof NodeShouldNotConnectException)) { + logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t); + } + responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t)); + if (counter.incrementAndGet() == responses.length()) { + finishHim(); + } + } + + private void finishHim() { + threadPool.executor(finalExecutor).execute(ActionRunnable.supply(listener, () -> newResponse(request, responses))); + } + } + + /** + * A node transport handler + * + * @opensearch.internal + */ + class NodeTransportHandler implements ProtobufTransportRequestHandler { + + @Override + public void messageReceived(NodeRequest request, TransportChannel channel, ProtobufTask task) throws Exception { + channel.sendResponse(nodeOperation(request, task)); + } + } + +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufAdminClient.java b/server/src/main/java/org/opensearch/client/ProtobufAdminClient.java new file mode 100644 index 0000000000000..2b3ab327d1829 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufAdminClient.java @@ -0,0 +1,24 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.client; + +/** + * Administrative actions/operations against the cluster or the indices. +* +* @see org.opensearch.client.Client#admin() +* +* @opensearch.internal +*/ +public interface ProtobufAdminClient { + + /** + * A client allowing to perform actions/operations against the cluster. + */ + ProtobufClusterAdminClient cluster(); +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufClient.java b/server/src/main/java/org/opensearch/client/ProtobufClient.java new file mode 100644 index 0000000000000..2b75e919e11d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufClient.java @@ -0,0 +1,51 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.client; + +import org.opensearch.action.ActionListener; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; + +/** + * A client provides a one stop interface for performing actions/operations against the cluster. +*
+* <p>
+* All operations performed are asynchronous by nature. Each action/operation has two flavors, the first
+* simply returns an {@link org.opensearch.action.ActionFuture}, while the second accepts an
+* {@link ActionListener}.
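+* <p>
+* For illustration only, a hypothetical caller using both flavors with the Protobuf nodes-info
+* action introduced in this change:
+* <pre>{@code
+* // blocking: wait on the ActionFuture
+* ProtobufNodesInfoResponse info =
+*     client.execute(ProtobufNodesInfoAction.INSTANCE, new ProtobufNodesInfoRequest()).actionGet();
+* // non-blocking: pass an ActionListener
+* client.execute(ProtobufNodesInfoAction.INSTANCE, new ProtobufNodesInfoRequest(),
+*     ActionListener.wrap(r -> {}, e -> {}));
+* }</pre>
+* <p>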
+* A client can be retrieved from a started {@link org.opensearch.node.Node}. +* +* @see org.opensearch.node.Node#client() +* +* @opensearch.internal +*/ +public interface ProtobufClient extends ProtobufOpenSearchClient, Releasable { + + Setting CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { + switch (s) { + case "node": + case "transport": + return s; + default: + throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]"); + } + }, Property.NodeScope); + + /** + * The admin client that can be used to perform administrative operations. + */ + ProtobufAdminClient admin(); + + /** + * Returns this clients settings + */ + Settings settings(); +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java new file mode 100644 index 0000000000000..d3977cc74a6d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufClusterAdminClient.java @@ -0,0 +1,82 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.client; + +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; + +/** + * Administrative actions/operations against indices. +* +* @see AdminClient#cluster() +* +* @opensearch.internal +*/ +public interface ProtobufClusterAdminClient extends ProtobufOpenSearchClient { + + /** + * The state of the cluster. + * + * @param request The cluster state request. + * @return The result future + * @see Requests#clusterStateRequest() + */ + ActionFuture state(ProtobufClusterStateRequest request); + + /** + * The state of the cluster. + * + * @param request The cluster state request. + * @param listener A listener to be notified with a result + * @see Requests#clusterStateRequest() + */ + void state(ProtobufClusterStateRequest request, ActionListener listener); + + /** + * Nodes info of the cluster. + * + * @param request The nodes info request + * @return The result future + * @see org.opensearch.client.Requests#nodesInfoRequest(String...) + */ + ActionFuture nodesInfo(ProtobufNodesInfoRequest request); + + /** + * Nodes info of the cluster. + * + * @param request The nodes info request + * @param listener A listener to be notified with a result + * @see org.opensearch.client.Requests#nodesInfoRequest(String...) + */ + void nodesInfo(ProtobufNodesInfoRequest request, ActionListener listener); + + /** + * Nodes stats of the cluster. + * + * @param request The nodes stats request + * @return The result future + * @see org.opensearch.client.Requests#nodesStatsRequest(String...) + */ + ActionFuture nodesStats(ProtobufNodesStatsRequest request); + + /** + * Nodes stats of the cluster. 
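+     * <p>
+     * Illustrative only; a hypothetical caller printing the id of every responding node:
+     * <pre>{@code
+     * client.admin().cluster().nodesStats(new ProtobufNodesStatsRequest(), ActionListener.wrap(
+     *     resp -> resp.getNodes().forEach(n -> System.out.println(n.getNode().getId())),
+     *     e -> logger.warn("nodes stats failed", e)
+     * ));
+     * }</pre>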
+ * + * @param request The nodes info request + * @param listener A listener to be notified with a result + * @see org.opensearch.client.Requests#nodesStatsRequest(String...) + */ + void nodesStats(ProtobufNodesStatsRequest request, ActionListener listener); +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java b/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java new file mode 100644 index 0000000000000..5982419cc0caa --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufFilterClient.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.client.support.AbstractClient; +import org.opensearch.client.support.ProtobufAbstractClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.threadpool.ThreadPool; + +/** + * A {@link Client} that contains another {@link Client} which it + * uses as its basic source, possibly transforming the requests / responses along the + * way or providing additional functionality. + * + * @opensearch.internal + */ +public abstract class ProtobufFilterClient extends ProtobufAbstractClient { + + protected final ProtobufClient in; + + /** + * Creates a new FilterClient + * + * @param in the client to delegate to + * @see #in() + */ + public ProtobufFilterClient(ProtobufClient in) { + this(in.settings(), in.threadPool(), in); + } + + /** + * A Constructor that allows to pass settings and threadpool separately. This is useful if the + * client is a proxy and not yet fully constructed ie. both dependencies are not available yet. + */ + protected ProtobufFilterClient(Settings settings, ThreadPool threadPool, ProtobufClient in) { + super(settings, threadPool); + this.in = in; + } + + @Override + public void close() { + in().close(); + } + + @Override + protected void doExecute( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + in().execute(action, request, listener); + } + + /** + * Returns the delegate {@link ProtobufClient} + */ + protected ProtobufClient in() { + return in; + } +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java new file mode 100644 index 0000000000000..2db9f9c93e940 --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufOpenSearchClient.java @@ -0,0 +1,59 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.client; + +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.threadpool.ThreadPool; + +/** + * Interface for an OpenSearch client implementation +* +* @opensearch.internal +*/ +public interface ProtobufOpenSearchClient { + + /** + * Executes a generic action, denoted by an {@link ProtobufActionType}. + * + * @param action The action type to execute. + * @param request The action request. + * @param The request type. + * @param the response type. + * @return A future allowing to get back the response. + */ + ActionFuture execute( + ProtobufActionType action, + Request request + ); + + /** + * Executes a generic action, denoted by an {@link ProtobufActionType}. + * + * @param action The action type to execute. + * @param request The action request. + * @param listener The listener to receive the response back. + * @param The request type. + * @param The response type. + */ + void execute( + ProtobufActionType action, + Request request, + ActionListener listener + ); + + /** + * Returns the threadpool used to execute requests on this client + */ + ThreadPool threadPool(); + +} diff --git a/server/src/main/java/org/opensearch/client/ProtobufOriginSettingClient.java b/server/src/main/java/org/opensearch/client/ProtobufOriginSettingClient.java new file mode 100644 index 0000000000000..ee7182de9e1fb --- /dev/null +++ b/server/src/main/java/org/opensearch/client/ProtobufOriginSettingClient.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.ContextPreservingActionListener; +import org.opensearch.common.util.concurrent.ThreadContext; + +import java.util.function.Supplier; + +/** + * A {@linkplain Client} that sends requests with the + * {@link ThreadContext#stashWithOrigin origin} set to a particular + * value and calls its {@linkplain ActionListener} in its original + * {@link ThreadContext}. 
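+ * <p>
+ * A hedged sketch of the intended wrapping (the origin string is hypothetical):
+ * <pre>{@code
+ * ProtobufClient monitoringClient = new ProtobufOriginSettingClient(client, "monitoring");
+ * // requests sent through monitoringClient carry the "monitoring" origin in the thread context,
+ * // and listeners are completed in the caller's original context
+ * }</pre>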
+ * + * @opensearch.internal + */ +public final class ProtobufOriginSettingClient extends ProtobufFilterClient { + + private final String origin; + + public ProtobufOriginSettingClient(ProtobufClient in, String origin) { + super(in); + this.origin = origin; + } + + @Override + protected void doExecute( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + final Supplier supplier = in().threadPool().getThreadContext().newRestorableContext(false); + try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashWithOrigin(origin)) { + super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } +} diff --git a/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java new file mode 100644 index 0000000000000..e2e1136966f3c --- /dev/null +++ b/server/src/main/java/org/opensearch/client/node/ProtobufNodeClient.java @@ -0,0 +1,134 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.client.node; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionModule.ProtobufDynamicActionRegistry; +import org.opensearch.action.support.ProtobufTransportAction; +import org.opensearch.client.ProtobufClient; +import org.opensearch.client.support.ProtobufAbstractClient; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.ProtobufTaskListener; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterService; + +import java.util.function.Supplier; + +/** + * ProtobufClient that executes actions on the local node. +* +* @opensearch.internal +*/ +public class ProtobufNodeClient extends ProtobufAbstractClient { + + private ProtobufDynamicActionRegistry actionRegistry; + /** + * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by + * {@link #executeLocally(ProtobufActionType, ProtobufActionRequest, ProtobufTaskListener)}. + */ + private Supplier localNodeId; + private RemoteClusterService remoteClusterService; + private NamedWriteableRegistry namedWriteableRegistry; + + public ProtobufNodeClient(Settings settings, ThreadPool threadPool) { + super(settings, threadPool); + } + + public void initialize( + ProtobufDynamicActionRegistry actionRegistry, + Supplier localNodeId, + RemoteClusterService remoteClusterService, + NamedWriteableRegistry namedWriteableRegistry + ) { + this.actionRegistry = actionRegistry; + this.localNodeId = localNodeId; + this.remoteClusterService = remoteClusterService; + this.namedWriteableRegistry = namedWriteableRegistry; + } + + @Override + public void close() { + // nothing really to do + } + + @Override + public void doExecute( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + // Discard the task because the ProtobufClient interface doesn't use it. 
+ executeLocally(action, request, listener); + } + + /** + * Execute an {@link ActionType} locally, returning that {@link ProtobufTask} used to track it, and linking an {@link ActionListener}. + * Prefer this method if you don't need access to the task when listening for the response. This is the method used to implement + * the {@link ProtobufClient} interface. + */ + public ProtobufTask executeLocally( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + return transportAction(action).execute(request, listener); + } + + /** + * Execute an {@link ActionType} locally, returning that {@link ProtobufTask} used to track it, and linking an {@link ProtobufTaskListener}. Prefer this + * method if you need access to the task when listening for the response. + */ + public ProtobufTask executeLocally( + ProtobufActionType action, + Request request, + ProtobufTaskListener listener + ) { + return transportAction(action).execute(request, listener); + } + + /** + * The id of the local {@link DiscoveryNode}. Useful for generating task ids from tasks returned by + * {@link #executeLocally(ProtobufActionType, ProtobufActionRequest, ProtobufTaskListener)}. + */ + public String getLocalNodeId() { + return localNodeId.get(); + } + + /** + * Get the {@link ProtobufTransportAction} for an {@link ActionType}, throwing exceptions if the action isn't available. + */ + @SuppressWarnings("unchecked") + private < + Request extends ProtobufActionRequest, + Response extends ProtobufActionResponse> ProtobufTransportAction transportAction( + ProtobufActionType action + ) { + if (actionRegistry == null) { + throw new IllegalStateException("NodeClient has not been initialized"); + } + ProtobufTransportAction transportAction = (ProtobufTransportAction) actionRegistry.get( + action + ); + if (transportAction == null) { + throw new IllegalStateException("failed to find action [" + action + "] to execute"); + } + return transportAction; + } + + public NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } +} diff --git a/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java b/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java new file mode 100644 index 0000000000000..f1494c46fd7de --- /dev/null +++ b/server/src/main/java/org/opensearch/client/support/ProtobufAbstractClient.java @@ -0,0 +1,172 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.client.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoAction; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsAction; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateAction; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.ProtobufClient; +import org.opensearch.client.ProtobufAdminClient; +import org.opensearch.client.ProtobufClusterAdminClient; +import org.opensearch.client.ProtobufOpenSearchClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.threadpool.ThreadPool; + +/** + * Base client used to create concrete client implementations +* +* @opensearch.internal +*/ +public abstract class ProtobufAbstractClient implements ProtobufClient { + + protected final Logger logger; + + protected final Settings settings; + private final ThreadPool threadPool; + private final Admin admin; + + public ProtobufAbstractClient(Settings settings, ThreadPool threadPool) { + this.settings = settings; + this.threadPool = threadPool; + this.admin = new Admin(this); + this.logger = LogManager.getLogger(this.getClass()); + } + + @Override + public final Settings settings() { + return this.settings; + } + + @Override + public final ThreadPool threadPool() { + return this.threadPool; + } + + @Override + public final ProtobufAdminClient admin() { + return admin; + } + + @Override + public final ActionFuture execute( + ProtobufActionType action, + Request request + ) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; + } + + /** + * This is the single execution point of *all* clients. 
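+     * <p>
+     * Because every client call funnels through here, cross-cutting behavior can be added by
+     * overriding {@code doExecute} in a delegating client. A hedged, illustrative sketch:
+     * <pre>{@code
+     * ProtobufClient logged = new ProtobufFilterClient(client) {
+     *     {@literal @}Override
+     *     protected <Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> void doExecute(
+     *         ProtobufActionType<Response> action, Request request, ActionListener<Response> listener) {
+     *         logger.trace("dispatching [{}]", action);
+     *         super.doExecute(action, request, listener);
+     *     }
+     * };
+     * }</pre>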
+ */ + @Override + public final void execute( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + doExecute(action, request, listener); + } + + protected abstract void doExecute( + ProtobufActionType action, + Request request, + ActionListener listener + ); + + static class Admin implements ProtobufAdminClient { + + private final ClusterAdmin clusterAdmin; + + Admin(ProtobufOpenSearchClient client) { + this.clusterAdmin = new ClusterAdmin(client); + } + + @Override + public ProtobufClusterAdminClient cluster() { + return clusterAdmin; + } + } + + static class ClusterAdmin implements ProtobufClusterAdminClient { + + private final ProtobufOpenSearchClient client; + + ClusterAdmin(ProtobufOpenSearchClient client) { + this.client = client; + } + + @Override + public ThreadPool threadPool() { + return client.threadPool(); + } + + @Override + public ActionFuture execute( + ProtobufActionType action, + Request request + ) { + return client.execute(action, request); + } + + @Override + public void execute( + ProtobufActionType action, + Request request, + ActionListener listener + ) { + client.execute(action, request, listener); + } + + @Override + public ActionFuture state(final ProtobufClusterStateRequest request) { + return execute(ProtobufClusterStateAction.INSTANCE, request); + } + + @Override + public void state(final ProtobufClusterStateRequest request, final ActionListener listener) { + execute(ProtobufClusterStateAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture nodesInfo(final ProtobufNodesInfoRequest request) { + return execute(ProtobufNodesInfoAction.INSTANCE, request); + } + + @Override + public void nodesInfo(final ProtobufNodesInfoRequest request, final ActionListener listener) { + execute(ProtobufNodesInfoAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture nodesStats(final ProtobufNodesStatsRequest request) { + return execute(ProtobufNodesStatsAction.INSTANCE, request); + } + + @Override + public void nodesStats(final ProtobufNodesStatsRequest request, final ActionListener listener) { + execute(ProtobufNodesStatsAction.INSTANCE, request, listener); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java new file mode 100644 index 0000000000000..7364e1c24e47f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/BaseWriteable.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.io.stream; + +import java.io.IOException; + +/** + * This interface can be extended to different types of serialization and deserialization mechanisms. + * + * @opensearch.internal + */ +public interface BaseWriteable { + + /** + * Write this into the stream output. + */ + void writeTo(T out) throws IOException; + + /** + * Reference to a method that can write some object to a given type. + */ + @FunctionalInterface + interface Writer { + + /** + * Write {@code V}-type {@code value} to the {@code T}-type stream. + * + * @param out Output to write the {@code value} too + * @param value The value to add + */ + void write(T out, V value) throws IOException; + } + + /** + * Reference to a method that can read some object from a given stream type. 
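+     * <p>
+     * For instance (illustrative), with {@code StreamInput} as the stream type:
+     * <pre>{@code
+     * BaseWriteable.Reader<StreamInput, String> readString = in -> in.readString();
+     * }</pre>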
+ */ + @FunctionalInterface + interface Reader { + + /** + * Read {@code V}-type value from a {@code T}-type stream. + * + * @param in Input to read the value from + */ + V read(S in) throws IOException; + } +} diff --git a/server/src/main/java/org/opensearch/common/io/stream/TryWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/TryWriteable.java new file mode 100644 index 0000000000000..3a4ab6974db0d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/io/stream/TryWriteable.java @@ -0,0 +1,70 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.common.io.stream; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Implementers can be written to write to output and read from input using Protobuf. +* +* @opensearch.internal +*/ +public interface TryWriteable { + + /** + * Write this into the stream output. + */ + public void writeTo(OutputStream out) throws IOException; + + /** + * Reference to a method that can write some object to a {@link OutputStream}. + * Most classes should implement {@link TryWriteable} and the {@link TryWriteable#writeTo(OutputStream)} method should use + * {@link OutputStream} methods directly or this indirectly: + *
+     * <pre>{@code
+     * public void writeTo(OutputStream out) throws IOException {
+     *     out.write(someValue);
+     * }
+     * }</pre>
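+     * <p>
+     * A {@code Writer} itself can be referenced as a lambda (illustrative; assumes UTF-8 payloads):
+     * <pre>{@code
+     * TryWriteable.Writer<String> writeString = (out, value) -> out.write(value.getBytes(StandardCharsets.UTF_8));
+     * }</pre>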
+ */ + @FunctionalInterface + interface Writer { + + /** + * Write {@code V}-type {@code value} to the {@code out}put stream. + * + * @param out Output to write the {@code value} too + * @param value The value to add + */ + void write(OutputStream out, V value) throws IOException; + + } + + /** + * Reference to a method that can read some object from a stream. By convention this is a constructor that takes + * {@linkplain byte[]} as an argument for most classes and a static method for things like enums. + *
+     * <pre>{@code
+     * public MyClass(final byte[] in) throws IOException {
+     *     this.someValue = ByteBuffer.wrap(in).getInt();
+     * }
+     * }</pre>
+ */ + @FunctionalInterface + interface Reader { + + /** + * Read {@code V}-type value from a stream. + * + * @param in Input to read the value from + */ + V read(byte[] in) throws IOException; + + } + +} diff --git a/server/src/main/java/org/opensearch/node/ProtobufNodeService.java b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java new file mode 100644 index 0000000000000..7c2a1554c6ef9 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ProtobufNodeService.java @@ -0,0 +1,237 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.node; + +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodeInfo; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodeStats; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchTransportService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.discovery.Discovery; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.ingest.IngestService; +import org.opensearch.monitor.MonitorService; +import org.opensearch.plugins.PluginsService; +import org.opensearch.script.ScriptService; +import org.opensearch.search.aggregations.support.AggregationUsageService; +import org.opensearch.search.backpressure.SearchBackpressureService; +import org.opensearch.search.pipeline.SearchPipelineService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Services exposed to nodes +* +* @opensearch.internal +*/ +public class ProtobufNodeService implements Closeable { + private final Settings settings; + private final ThreadPool threadPool; + private final MonitorService monitorService; + private final TransportService transportService; + private final IndicesService indicesService; + private final PluginsService pluginService; + private final CircuitBreakerService circuitBreakerService; + private final IngestService ingestService; + private final SettingsFilter settingsFilter; + private final ScriptService scriptService; + private final HttpServerTransport httpServerTransport; + private final ResponseCollectorService responseCollectorService; + private final SearchTransportService searchTransportService; + private final IndexingPressureService indexingPressureService; + private final AggregationUsageService aggregationUsageService; + private final SearchBackpressureService searchBackpressureService; + private final SearchPipelineService searchPipelineService; + private final ClusterService clusterService; + private final Discovery discovery; + private final FileCache fileCache; + + ProtobufNodeService( + Settings settings, + ThreadPool threadPool, + MonitorService monitorService, + Discovery discovery, + TransportService 
transportService, + IndicesService indicesService, + PluginsService pluginService, + CircuitBreakerService circuitBreakerService, + ScriptService scriptService, + @Nullable HttpServerTransport httpServerTransport, + IngestService ingestService, + ClusterService clusterService, + SettingsFilter settingsFilter, + ResponseCollectorService responseCollectorService, + SearchTransportService searchTransportService, + IndexingPressureService indexingPressureService, + AggregationUsageService aggregationUsageService, + SearchBackpressureService searchBackpressureService, + SearchPipelineService searchPipelineService, + FileCache fileCache + ) { + this.settings = settings; + this.threadPool = threadPool; + this.monitorService = monitorService; + this.transportService = transportService; + this.indicesService = indicesService; + this.discovery = discovery; + this.pluginService = pluginService; + this.circuitBreakerService = circuitBreakerService; + this.httpServerTransport = httpServerTransport; + this.ingestService = ingestService; + this.settingsFilter = settingsFilter; + this.scriptService = scriptService; + this.responseCollectorService = responseCollectorService; + this.searchTransportService = searchTransportService; + this.indexingPressureService = indexingPressureService; + this.aggregationUsageService = aggregationUsageService; + this.searchBackpressureService = searchBackpressureService; + this.searchPipelineService = searchPipelineService; + this.clusterService = clusterService; + this.fileCache = fileCache; + clusterService.addStateApplier(ingestService); + clusterService.addStateApplier(searchPipelineService); + } + + public ProtobufNodeInfo info( + boolean settings, + boolean os, + boolean process, + boolean jvm, + boolean threadPool, + boolean transport, + boolean http, + boolean plugin, + boolean ingest, + boolean aggs, + boolean indices, + boolean searchPipeline + ) { + ProtobufNodeInfo.Builder builder = ProtobufNodeInfo.builder(Version.CURRENT, Build.CURRENT, transportService.getLocalNode()); + if (settings) { + builder.setSettings(settingsFilter.filter(this.settings)); + } + if (os) { + builder.setOs(monitorService.osService().info()); + } + if (process) { + builder.setProcess(monitorService.processService().info()); + } + if (jvm) { + builder.setJvm(monitorService.jvmService().info()); + } + if (threadPool) { + builder.setThreadPool(this.threadPool.info()); + } + if (transport) { + builder.setTransport(transportService.info()); + } + if (http && httpServerTransport != null) { + builder.setHttp(httpServerTransport.info()); + } + // if (plugin && pluginService != null) { + // builder.setPlugins(pluginService.info()); + // } + if (ingest && ingestService != null) { + builder.setIngest(ingestService.info()); + } + if (aggs && aggregationUsageService != null) { + builder.setAggsInfo(aggregationUsageService.info()); + } + if (indices) { + builder.setTotalIndexingBuffer(indicesService.getTotalIndexingBufferBytes()); + } + if (searchPipeline && searchPipelineService != null) { + builder.setProtobufSearchPipelineInfo(searchPipelineService.info()); + } + return builder.build(); + } + + public ProtobufNodeStats stats( + CommonStatsFlags indices, + boolean os, + boolean process, + boolean jvm, + boolean threadPool, + boolean fs, + boolean transport, + boolean http, + boolean circuitBreaker, + boolean script, + boolean discoveryStats, + boolean ingest, + boolean adaptiveSelection, + boolean scriptCache, + boolean indexingPressure, + boolean shardIndexingPressure, + boolean searchBackpressure, 
+ boolean clusterManagerThrottling, + boolean weightedRoutingStats, + boolean fileCacheStats + ) { + // for indices stats we want to include previous allocated shards stats as well (it will + // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) + return new ProtobufNodeStats( + transportService.getLocalNode(), + System.currentTimeMillis(), + indices.anySet() ? indicesService.stats(indices) : null, + os ? monitorService.osService().stats() : null, + process ? monitorService.processService().stats() : null, + jvm ? monitorService.jvmService().stats() : null, + threadPool ? this.threadPool.stats() : null, + fs ? monitorService.fsService().stats() : null, + transport ? transportService.stats() : null, + http ? (httpServerTransport == null ? null : httpServerTransport.stats()) : null, + circuitBreaker ? circuitBreakerService.stats() : null, + script ? scriptService.stats() : null, + discoveryStats ? discovery.stats() : null, + ingest ? ingestService.stats() : null, + adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null + ); + } + + public IngestService getIngestService() { + return ingestService; + } + + public MonitorService getMonitorService() { + return monitorService; + } + + public SearchBackpressureService getSearchBackpressureService() { + return searchBackpressureService; + } + + @Override + public void close() throws IOException { + IOUtils.close(indicesService); + } + + /** + * Wait for the node to be effectively closed. + * @see IndicesService#awaitClose(long, TimeUnit) + */ + public boolean awaitClose(long timeout, TimeUnit timeUnit) throws InterruptedException { + return indicesService.awaitClose(timeout, timeUnit); + } + +} diff --git a/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java b/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java new file mode 100644 index 0000000000000..f9794aa050eff --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/ProtobufActionPlugin.java @@ -0,0 +1,205 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/
+
+package org.opensearch.plugins;
+
+import org.opensearch.action.ProtobufActionType;
+import org.opensearch.action.ProtobufActionRequest;
+import org.opensearch.action.ProtobufActionResponse;
+import org.opensearch.action.RequestValidators;
+import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest;
+import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.opensearch.action.support.ProtobufActionFilter;
+import org.opensearch.action.support.ProtobufTransportAction;
+import org.opensearch.action.support.TransportActions;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.core.common.Strings;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.IndexScopedSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsFilter;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.rest.RestController;
+import org.opensearch.rest.ProtobufRestHandler;
+import org.opensearch.rest.RestHeaderDefinition;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Supplier;
+import java.util.function.UnaryOperator;
+import java.util.stream.Collectors;
+
+/**
+ * An additional extension point for {@link Plugin}s that extends OpenSearch's action functionality. Implement it like this:
+* <pre>{@code
+*   {@literal @}Override
+*   public List<ActionHandler<? extends ProtobufActionRequest, ? extends ProtobufActionResponse>> getActions() {
+*       return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class),
+*               new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class),
+*               new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class),
+*               new ActionHandler<>(RethrottleAction.INSTANCE, TransportRethrottleAction.class));
+*   }
+* }</pre>
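+* The protobuf handlers introduced by this change can be wired the same way. The snippet below is
+* illustrative only; whether {@code ProtobufNodesInfoAction} exposes an {@code INSTANCE} constant is
+* an assumption, not something this patch guarantees:
+* <pre>{@code
+*   {@literal @}Override
+*   public List<ActionHandler<? extends ProtobufActionRequest, ? extends ProtobufActionResponse>> getActions() {
+*       return Collections.singletonList(
+*           new ActionHandler<>(ProtobufNodesInfoAction.INSTANCE, ProtobufTransportNodesInfoAction.class));
+*   }
+* }</pre>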
+*
+* @opensearch.api
+*/
+public interface ProtobufActionPlugin {
+    /**
+     * Actions added by this plugin.
+     */
+    default List<ActionHandler<? extends ProtobufActionRequest, ? extends ProtobufActionResponse>> getActions() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Client actions added by this plugin. This defaults to all of the {@linkplain ProtobufActionType} in
+     * {@linkplain ProtobufActionPlugin#getActions()}.
+     */
+    default List<ProtobufActionType<? extends ProtobufActionResponse>> getClientActions() {
+        return getActions().stream().map(a -> a.action).collect(Collectors.toList());
+    }
+
+    /**
+     * ProtobufActionType filters added by this plugin.
+     */
+    default List<ProtobufActionFilter> getActionFilters() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Rest handlers added by this plugin.
+     */
+    default List<ProtobufRestHandler> getRestHandlers(
+        Settings settings,
+        RestController restController,
+        ClusterSettings clusterSettings,
+        IndexScopedSettings indexScopedSettings,
+        SettingsFilter settingsFilter,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        Supplier<DiscoveryNodes> nodesInCluster
+    ) {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Returns headers which should be copied through rest requests on to internal requests.
+     */
+    default Collection<RestHeaderDefinition> getRestHeaders() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Returns headers which should be copied from internal requests into tasks.
+     */
+    default Collection<String> getTaskHeaders() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Returns a function used to wrap each rest request before handling the request.
+     * The returned {@link UnaryOperator} is called for every incoming rest request and receives
+     * the original rest handler as its input. This allows adding arbitrary functionality around
+     * rest request handlers, for instance logging or authentication.
+     * A simple example of how to only allow GET requests is here:
+    * <pre>
+    * {@code
+    *    UnaryOperator<ProtobufRestHandler> getRestHandlerWrapper(ThreadContext threadContext) {
+    *      return originalHandler -> (ProtobufRestHandler) (request, channel, client) -> {
+    *        if (request.method() != Method.GET) {
+    *          throw new IllegalStateException("only GET requests are allowed");
+    *        }
+    *        originalHandler.handleRequest(request, channel, client);
+    *      };
+    *    }
+    * }
+    * </pre>
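+    *
+    * The returned wrapper is applied to each registered {@link ProtobufRestHandler}, so keeping it
+    * stateless and cheap per request is advisable (this mirrors the existing rest-wrapper contract
+    * and is an assumption about how the protobuf handlers are wired in this partial merge).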
+     *
+     * Note: Only one installed plugin may implement a rest wrapper.
+     */
+    default UnaryOperator<ProtobufRestHandler> getRestHandlerWrapper(ThreadContext threadContext) {
+        return null;
+    }
+
+    /**
+     * Class responsible for handling transport actions
+     *
+     * @opensearch.internal
+     */
+    final class ActionHandler<Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> {
+        private final ProtobufActionType<Response> action;
+        private final Class<? extends ProtobufTransportAction<Request, Response>> transportAction;
+        private final Class<?>[] supportTransportActions;
+
+        /**
+         * Create a record of an action, the {@linkplain ProtobufTransportAction} that handles it, and any supporting {@linkplain TransportActions}
+         * that are needed by that {@linkplain ProtobufTransportAction}.
+         */
+        public ActionHandler(
+            ProtobufActionType<Response> action,
+            Class<? extends ProtobufTransportAction<Request, Response>> transportAction,
+            Class<?>... supportTransportActions
+        ) {
+            this.action = action;
+            this.transportAction = transportAction;
+            this.supportTransportActions = supportTransportActions;
+        }
+
+        public ProtobufActionType<Response> getAction() {
+            return action;
+        }
+
+        public Class<? extends ProtobufTransportAction<Request, Response>> getTransportAction() {
+            return transportAction;
+        }
+
+        public Class<?>[] getSupportTransportActions() {
+            return supportTransportActions;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder b = new StringBuilder().append(action.name()).append(" is handled by ").append(transportAction.getName());
+            if (supportTransportActions.length > 0) {
+                b.append('[').append(Strings.arrayToCommaDelimitedString(supportTransportActions)).append(']');
+            }
+            return b.toString();
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null || obj.getClass() != ActionHandler.class) {
+                return false;
+            }
+            ActionHandler<?, ?> other = (ActionHandler<?, ?>) obj;
+            return Objects.equals(action, other.action)
+                && Objects.equals(transportAction, other.transportAction)
+                && Objects.deepEquals(supportTransportActions, other.supportTransportActions);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(action, transportAction, supportTransportActions);
+        }
+    }
+
+    /**
+     * Returns a collection of validators that are used by {@link RequestValidators} to validate a
+     * {@link org.opensearch.action.admin.indices.mapping.put.PutMappingRequest} before executing it.
+     */
+    default Collection<RequestValidators.RequestValidator<PutMappingRequest>> mappingRequestValidators() {
+        return Collections.emptyList();
+    }
+
+    default Collection<RequestValidators.RequestValidator<IndicesAliasesRequest>> indicesAliasesRequestValidators() {
+        return Collections.emptyList();
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java
new file mode 100644
index 0000000000000..6245c213ea09b
--- /dev/null
+++ b/server/src/main/java/org/opensearch/rest/ProtobufBaseRestHandler.java
@@ -0,0 +1,310 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/ + +package org.opensearch.rest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.spell.LevenshteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.support.clustermanager.ProtobufClusterManagerNodeRequest; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.rest.action.admin.cluster.RestNodesUsageAction; +import org.opensearch.tasks.ProtobufTask; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.LongAdder; +import java.util.stream.Collectors; + +/** + * Base handler for REST requests. +*
+* This handler makes sure that the headers & context of the handled {@link RestRequest requests} are copied over to +* the transport requests executed by the associated client. While the context is fully copied over, not all the headers +* are copied, but a selected few. It is possible to control what headers are copied over by returning them in +* {@link ActionPlugin#getRestHeaders()}. +* +* @opensearch.api +*/ +public abstract class ProtobufBaseRestHandler implements ProtobufRestHandler { + + public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting( + "rest.action.multi.allow_explicit_index", + true, + Property.NodeScope + ); + + private final LongAdder usageCount = new LongAdder(); + /** + * @deprecated declare your own logger. + */ + @Deprecated + protected Logger logger = LogManager.getLogger(getClass()); + + public final long getUsageCount() { + return usageCount.sum(); + } + + /** + * @return the name of this handler. The name should be human readable and + * should describe the action that will performed when this API is + * called. This name is used in the response to the + * {@link RestNodesUsageAction}. + */ + public abstract String getName(); + + @Override + public final void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception { + // prepare the request for execution; has the side effect of touching the request parameters + final RestChannelConsumer action = prepareRequest(request, client); + + // validate unconsumed params, but we must exclude params used to format the response + // use a sorted set so the unconsumed parameters appear in a reliable sorted order + final SortedSet unconsumedParams = request.unconsumedParams() + .stream() + .filter(p -> !responseParams().contains(p)) + .collect(Collectors.toCollection(TreeSet::new)); + + // validate the non-response params + if (!unconsumedParams.isEmpty()) { + final Set candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams()); + throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); + } + + if (request.hasContent() && request.isContentConsumed() == false) { + throw new IllegalArgumentException("request [" + request.method() + " " + request.path() + "] does not support having a body"); + } + + usageCount.increment(); + // execute the action + action.accept(channel); + } + + public static String unrecognizedStrings( + final RestRequest request, + final Set invalids, + final Set candidates, + final String detail + ) { + StringBuilder message = new StringBuilder( + String.format(Locale.ROOT, "request [%s] contains unrecognized %s%s: ", request.path(), detail, invalids.size() > 1 ? 
"s" : "") + ); + boolean first = true; + for (final String invalid : invalids) { + final LevenshteinDistance ld = new LevenshteinDistance(); + final List> scoredParams = new ArrayList<>(); + for (final String candidate : candidates) { + final float distance = ld.getDistance(invalid, candidate); + if (distance > 0.5f) { + scoredParams.add(new Tuple<>(distance, candidate)); + } + } + CollectionUtil.timSort(scoredParams, (a, b) -> { + // sort by distance in reverse order, then parameter name for equal distances + int compare = a.v1().compareTo(b.v1()); + if (compare != 0) return -compare; + else return a.v2().compareTo(b.v2()); + }); + if (first == false) { + message.append(", "); + } + message.append("[").append(invalid).append("]"); + final List keys = scoredParams.stream().map(Tuple::v2).collect(Collectors.toList()); + if (keys.isEmpty() == false) { + message.append(" -> did you mean "); + if (keys.size() == 1) { + message.append("[").append(keys.get(0)).append("]"); + } else { + message.append("any of ").append(keys.toString()); + } + message.append("?"); + } + first = false; + } + + return message.toString(); + } + + /** + * Returns a String message of the detail of any unrecognized error occurred. The string is intended for use in error messages to be returned to the user. + * + * @param request The request that caused the exception + * @param invalids Strings from the request which were unable to be understood. + * @param candidates A set of words that are most likely to be the valid strings determined invalid, to be suggested to the user. + * @param detail The parameter contains the details of the exception. + * @return a String that contains the message. + */ + protected final String unrecognized( + final RestRequest request, + final Set invalids, + final Set candidates, + final String detail + ) { + return unrecognizedStrings(request, invalids, candidates, detail); + } + + /** + * REST requests are handled by preparing a channel consumer that represents the execution of + * the request against a channel. + */ + @FunctionalInterface + protected interface RestChannelConsumer extends CheckedConsumer {} + + /** + * Prepare the request for execution. Implementations should consume all request params before + * returning the runnable for actual execution. Unconsumed params will immediately terminate + * execution of the request. However, some params are only used in processing the response; + * implementations can override {@link ProtobufBaseRestHandler#responseParams()} to indicate such + * params. + * + * @param request the request to execute + * @param client client for executing actions on the local node + * @return the action to execute + * @throws IOException if an I/O exception occurred parsing the request and preparing for + * execution + */ + protected abstract RestChannelConsumer prepareRequest(RestRequest request, ProtobufNodeClient client) throws IOException; + + /** + * Parameters used for controlling the response and thus might not be consumed during + * preparation of the request execution in + * {@link ProtobufBaseRestHandler#prepareRequest(RestRequest, ProtobufNodeClient)}. + * + * @return a set of parameters used to control the response and thus should not trip strict + * URL parameter checks. + */ + protected Set responseParams() { + return Collections.emptySet(); + } + + /** + * Parse the deprecated request parameter 'master_timeout', and add deprecated log if the parameter is used. 
+     * It also validates whether the two parameters 'master_timeout' and 'cluster_manager_timeout' are not assigned together.
+     * The method is temporarily added in 2.0 during the application of inclusive language. Remove the method along with MASTER_ROLE.
+     * @param mnr the action request
+     * @param request the REST request to handle
+     * @param logger the logger that logs deprecation notices
+     * @param logMsgKeyPrefix the key prefix of a deprecation message to avoid duplicate messages.
+     */
+    public static void parseDeprecatedMasterTimeoutParameter(
+        ProtobufClusterManagerNodeRequest mnr,
+        RestRequest request,
+        DeprecationLogger logger,
+        String logMsgKeyPrefix
+    ) {
+        final String MASTER_TIMEOUT_DEPRECATED_MESSAGE =
+            "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead.";
+        final String DUPLICATE_PARAMETER_ERROR_MESSAGE =
+            "Please only use one of the request parameters [master_timeout, cluster_manager_timeout].";
+        if (request.hasParam("master_timeout")) {
+            logger.deprecate(logMsgKeyPrefix + "_master_timeout_parameter", MASTER_TIMEOUT_DEPRECATED_MESSAGE);
+            if (request.hasParam("cluster_manager_timeout")) {
+                throw new OpenSearchParseException(DUPLICATE_PARAMETER_ERROR_MESSAGE);
+            }
+            mnr.clusterManagerNodeTimeout(request.paramAsTime("master_timeout", mnr.clusterManagerNodeTimeout()));
+        }
+    }
+
+    /**
+     * A wrapper for the base handler.
+     *
+     * @opensearch.internal
+     */
+    public static class Wrapper extends ProtobufBaseRestHandler {
+
+        protected final ProtobufBaseRestHandler delegate;
+
+        public Wrapper(ProtobufBaseRestHandler delegate) {
+            this.delegate = Objects.requireNonNull(delegate, "ProtobufBaseRestHandler delegate can not be null");
+        }
+
+        @Override
+        public String getName() {
+            return delegate.getName();
+        }
+
+        @Override
+        public List<Route> routes() {
+            return delegate.routes();
+        }
+
+        @Override
+        public List<DeprecatedRoute> deprecatedRoutes() {
+            return delegate.deprecatedRoutes();
+        }
+
+        @Override
+        public List<ReplacedRoute> replacedRoutes() {
+            return delegate.replacedRoutes();
+        }
+
+        @Override
+        protected RestChannelConsumer prepareRequest(RestRequest request, ProtobufNodeClient client) throws IOException {
+            return delegate.prepareRequest(request, client);
+        }
+
+        @Override
+        protected Set<String> responseParams() {
+            return delegate.responseParams();
+        }
+
+        @Override
+        public boolean canTripCircuitBreaker() {
+            return delegate.canTripCircuitBreaker();
+        }
+
+        @Override
+        public boolean supportsContentStream() {
+            return delegate.supportsContentStream();
+        }
+
+        @Override
+        public boolean allowsUnsafeBuffers() {
+            return delegate.allowsUnsafeBuffers();
+        }
+
+        @Override
+        public boolean allowSystemIndexAccessByDefault() {
+            return delegate.allowSystemIndexAccessByDefault();
+        }
+    }
+
+    /**
+     * Return a task immediately when executing some long-running operations asynchronously, like reindex, resize, open, force merge
+     */
+    public RestChannelConsumer sendTask(String nodeId, ProtobufTask task) {
+        return channel -> {
+            try (XContentBuilder builder = channel.newBuilder()) {
+                builder.startObject();
+                builder.field("task", nodeId + ":" + task.getId());
+                builder.endObject();
+                channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
+            }
+        };
+    }
+}
diff --git a/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java
new file mode 100644
index 0000000000000..c585dd52b7c03
--- /dev/null
+++ b/server/src/main/java/org/opensearch/rest/ProtobufRestHandler.java
@@ -0,0 +1,259 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.rest;
+
+import org.opensearch.client.node.ProtobufNodeClient;
+import org.opensearch.core.xcontent.XContent;
+import org.opensearch.rest.RestRequest.Method;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * Handler for REST requests
+ *
+ * @opensearch.api
+ */
+@FunctionalInterface
+public interface ProtobufRestHandler {
+
+    /**
+     * Handles a rest request.
+     * @param request The request to handle
+     * @param channel The channel to write the request response to
+     * @param client A client to use to make internal requests on behalf of the original request
+     */
+    void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception;
+
+    default boolean canTripCircuitBreaker() {
+        return true;
+    }
+
+    /**
+     * Indicates if the ProtobufRestHandler supports content as a stream. A stream would be multiple objects delineated by
+     * {@link XContent#streamSeparator()}. If a handler returns true this will affect the types of content that can be sent to
+     * this endpoint.
+     */
+    default boolean supportsContentStream() {
+        return false;
+    }
+
+    /**
+     * Indicates if the ProtobufRestHandler supports working with pooled buffers. If the request handler will not escape the return
+     * {@link RestRequest#content()} or any buffers extracted from it, then there is no need to make copies of any pooled buffers in the
+     * {@link RestRequest} instance before passing a request to this handler. If this instance does not support pooled/unsafe buffers,
+     * {@link RestRequest#ensureSafeBuffers()} should be called on any request before passing it to {@link #handleRequest}.
+     *
+     * @return true iff the handler supports requests that make use of pooled buffers
+     */
+    default boolean allowsUnsafeBuffers() {
+        return false;
+    }
+
+    /**
+     * The list of {@link Route}s that this ProtobufRestHandler is responsible for handling.
+     */
+    default List<Route> routes() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * A list of routes handled by this ProtobufRestHandler that are deprecated and do not have a direct
+     * replacement. If changing the {@code path} or {@code method} of a route,
+     * use {@link #replacedRoutes()}.
+     */
+    default List<DeprecatedRoute> deprecatedRoutes() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * A list of routes handled by this ProtobufRestHandler that have had their {@code path} and/or
+     * {@code method} changed. The pre-existing {@code route} will be registered
+     * as deprecated alongside the updated {@code route}.
+     */
+    default List<ReplacedRoute> replacedRoutes() {
+        return Collections.emptyList();
+    }
+
+    /**
+     * Controls whether requests handled by this class are allowed to access system indices by default.
+     * @return {@code true} if requests handled by this class should be allowed to access system indices.
+     */
+    default boolean allowSystemIndexAccessByDefault() {
+        return false;
+    }
+
+    static ProtobufRestHandler wrapper(ProtobufRestHandler delegate) {
+        return new Wrapper(delegate);
+    }
+
+    /**
+     * Wrapper for a handler.
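+     * Every method simply delegates to the wrapped handler, so subclasses of a wrapper can override
+     * only the behavior they need to change and inherit the rest of the delegate's behavior.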
+     *
+     * @opensearch.internal
+     */
+    class Wrapper implements ProtobufRestHandler {
+        private final ProtobufRestHandler delegate;
+
+        public Wrapper(ProtobufRestHandler delegate) {
+            this.delegate = Objects.requireNonNull(delegate, "ProtobufRestHandler delegate can not be null");
+        }
+
+        @Override
+        public String toString() {
+            return delegate.toString();
+        }
+
+        @Override
+        public void handleRequest(RestRequest request, RestChannel channel, ProtobufNodeClient client) throws Exception {
+            delegate.handleRequest(request, channel, client);
+        }
+
+        @Override
+        public boolean canTripCircuitBreaker() {
+            return delegate.canTripCircuitBreaker();
+        }
+
+        @Override
+        public boolean supportsContentStream() {
+            return delegate.supportsContentStream();
+        }
+
+        @Override
+        public boolean allowsUnsafeBuffers() {
+            return delegate.allowsUnsafeBuffers();
+        }
+
+        @Override
+        public List<Route> routes() {
+            return delegate.routes();
+        }
+
+        @Override
+        public List<DeprecatedRoute> deprecatedRoutes() {
+            return delegate.deprecatedRoutes();
+        }
+
+        @Override
+        public List<ReplacedRoute> replacedRoutes() {
+            return delegate.replacedRoutes();
+        }
+
+        @Override
+        public boolean allowSystemIndexAccessByDefault() {
+            return delegate.allowSystemIndexAccessByDefault();
+        }
+    }
+
+    /**
+     * Route for the request.
+     *
+     * @opensearch.internal
+     */
+    class Route {
+
+        private final String path;
+        private final Method method;
+
+        public Route(Method method, String path) {
+            this.path = path;
+            this.method = method;
+        }
+
+        public String getPath() {
+            return path;
+        }
+
+        public Method getMethod() {
+            return method;
+        }
+    }
+
+    /**
+     * Represents an API that has been deprecated and is slated for removal.
+     */
+    class DeprecatedRoute extends Route {
+
+        private final String deprecationMessage;
+
+        public DeprecatedRoute(Method method, String path, String deprecationMessage) {
+            super(method, path);
+            this.deprecationMessage = deprecationMessage;
+        }
+
+        public String getDeprecationMessage() {
+            return deprecationMessage;
+        }
+    }
+
+    /**
+     * Represents an API that has had its {@code path} or {@code method} changed. Holds both the
+     * new and previous {@code path} and {@code method} combination.
+     */
+    class ReplacedRoute extends Route {
+
+        private final String deprecatedPath;
+        private final Method deprecatedMethod;
+
+        /**
+         * Construct replaced routes using new and deprecated methods and new and deprecated paths
+         * @param method route method
+         * @param path new route path
+         * @param deprecatedMethod deprecated method
+         * @param deprecatedPath deprecated path
+         */
+        public ReplacedRoute(Method method, String path, Method deprecatedMethod, String deprecatedPath) {
+            super(method, path);
+            this.deprecatedMethod = deprecatedMethod;
+            this.deprecatedPath = deprecatedPath;
+        }
+
+        /**
+         * Construct replaced routes using the route method plus new and deprecated paths.
+         * This constructor can be used when both the new and deprecated paths use the same method.
+         * @param method route method
+         * @param path new route path
+         * @param deprecatedPath deprecated path
+         */
+        public ReplacedRoute(Method method, String path, String deprecatedPath) {
+            this(method, path, method, deprecatedPath);
+        }
+
+        /**
+         * Construct replaced routes using a route and prefixes for the new and deprecated paths
+         * @param route route
+         * @param prefix new route prefix
+         * @param deprecatedPrefix deprecated prefix
+         */
+        public ReplacedRoute(Route route, String prefix, String deprecatedPrefix) {
+            this(route.getMethod(), prefix + route.getPath(), deprecatedPrefix + route.getPath());
+        }
+
+        public String getDeprecatedPath() {
+            return deprecatedPath;
+        }
+
+        public Method getDeprecatedMethod() {
+            return deprecatedMethod;
+        }
+    }
+
+    /**
+     * Construct replaced routes using a routes template and prefixes for the new and deprecated paths
+     * @param routes routes
+     * @param prefix new prefix
+     * @param deprecatedPrefix deprecated prefix
+     * @return new list of API routes prefixed with the prefix string
+     */
+    static List<ReplacedRoute> replaceRoutes(List<Route> routes, final String prefix, final String deprecatedPrefix) {
+        return routes.stream().map(route -> new ReplacedRoute(route, prefix, deprecatedPrefix)).collect(Collectors.toList());
+    }
+}
diff --git a/server/src/main/java/org/opensearch/rest/action/ProtobufRestCancellableNodeClient.java b/server/src/main/java/org/opensearch/rest/action/ProtobufRestCancellableNodeClient.java
new file mode 100644
index 0000000000000..bbbb86ed1499d
--- /dev/null
+++ b/server/src/main/java/org/opensearch/rest/action/ProtobufRestCancellableNodeClient.java
@@ -0,0 +1,178 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.rest.action; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.ProtobufActionRequest; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.ProtobufActionType; +import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.client.Client; +import org.opensearch.client.FilterClient; +import org.opensearch.client.ProtobufOriginSettingClient; +import org.opensearch.client.ProtobufFilterClient; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.http.HttpChannel; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.TaskId; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; + +/** + * A {@linkplain Client} that cancels tasks executed locally when the provided {@link HttpChannel} + * is closed before completion. + * + * @opensearch.api + */ +public class ProtobufRestCancellableNodeClient extends ProtobufFilterClient { + private static final Map httpChannels = new ConcurrentHashMap<>(); + + private final ProtobufNodeClient client; + private final HttpChannel httpChannel; + + public ProtobufRestCancellableNodeClient(ProtobufNodeClient client, HttpChannel httpChannel) { + super(client); + this.client = client; + this.httpChannel = httpChannel; + } + + /** + * Returns the number of channels tracked globally. + */ + public static int getNumChannels() { + return httpChannels.size(); + } + + /** + * Returns the number of tasks tracked globally. + */ + static int getNumTasks() { + return httpChannels.values().stream().mapToInt(CloseListener::getNumTasks).sum(); + } + + /** + * Returns the number of tasks tracked by the provided {@link HttpChannel}. + */ + static int getNumTasks(HttpChannel channel) { + CloseListener listener = httpChannels.get(channel); + return listener == null ? 
0 : listener.getNumTasks();
+    }
+
+    @Override
+    public <Request extends ProtobufActionRequest, Response extends ProtobufActionResponse> void doExecute(
+        ProtobufActionType<Response> action,
+        Request request,
+        ActionListener<Response> listener
+    ) {
+        CloseListener closeListener = httpChannels.computeIfAbsent(httpChannel, channel -> new CloseListener());
+        TaskHolder taskHolder = new TaskHolder();
+        ProtobufTask task = client.executeLocally(action, request, new ActionListener<Response>() {
+            @Override
+            public void onResponse(Response response) {
+                try {
+                    closeListener.unregisterTask(taskHolder);
+                } finally {
+                    listener.onResponse(response);
+                }
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                try {
+                    closeListener.unregisterTask(taskHolder);
+                } finally {
+                    listener.onFailure(e);
+                }
+            }
+        });
+        final TaskId taskId = new TaskId(client.getLocalNodeId(), task.getId());
+        closeListener.registerTask(taskHolder, taskId);
+        closeListener.maybeRegisterChannel(httpChannel);
+    }
+
+    private void cancelTask(TaskId taskId) {
+        CancelTasksRequest req = new CancelTasksRequest().setTaskId(taskId).setReason("channel closed");
+        // force the origin to execute the cancellation as a system user
+        // new ProtobufOriginSettingClient(client, TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {}));
+    }
+
+    private class CloseListener implements ActionListener<Void> {
+        private final AtomicReference<HttpChannel> channel = new AtomicReference<>();
+        private final Set<TaskId> tasks = new HashSet<>();
+
+        CloseListener() {}
+
+        synchronized int getNumTasks() {
+            return tasks.size();
+        }
+
+        void maybeRegisterChannel(HttpChannel httpChannel) {
+            if (channel.compareAndSet(null, httpChannel)) {
+                // In case the channel is already closed when we register the listener, the listener will be immediately executed,
+                // which will remove the channel from the map straight away. That is why we first create the CloseListener and only
+                // later associate it with the channel. This guarantees that the close listener is already in the map when it gets
+                // registered to its corresponding channel, hence it is always found in the map when it gets invoked if the channel
+                // gets closed.
+                httpChannel.addCloseListener(this);
+            }
+        }
+
+        synchronized void registerTask(TaskHolder taskHolder, TaskId taskId) {
+            taskHolder.taskId = taskId;
+            if (taskHolder.completed == false) {
+                this.tasks.add(taskId);
+            }
+        }
+
+        synchronized void unregisterTask(TaskHolder taskHolder) {
+            if (taskHolder.taskId != null) {
+                this.tasks.remove(taskHolder.taskId);
+            }
+            taskHolder.completed = true;
+        }
+
+        @Override
+        public void onResponse(Void aVoid) {
+            final HttpChannel httpChannel = channel.get();
+            assert httpChannel != null : "channel not registered";
+            // when the channel gets closed it won't be reused: we can remove it from the map and forget about it.
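+            // The task ids are copied and cleared under the lock below, but the cancellations themselves
+            // are dispatched after the synchronized block so the monitor is not held while calling out.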
+ CloseListener closeListener = httpChannels.remove(httpChannel); + assert closeListener != null : "channel not found in the map of tracked channels"; + final List toCancel; + synchronized (this) { + toCancel = new ArrayList<>(tasks); + tasks.clear(); + } + for (TaskId taskId : toCancel) { + cancelTask(taskId); + } + } + + @Override + public void onFailure(Exception e) { + onResponse(null); + } + } + + private static class TaskHolder { + private TaskId taskId; + private boolean completed = false; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java new file mode 100644 index 0000000000000..07c1b63981922 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufAbstractCatAction.java @@ -0,0 +1,78 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.Table; +import org.opensearch.common.io.Streams; +import org.opensearch.common.io.UTF8StreamWriter; +import org.opensearch.core.common.io.stream.BytesStream; +import org.opensearch.rest.ProtobufBaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.opensearch.rest.action.cat.RestTable.buildHelpWidths; +import static org.opensearch.rest.action.cat.RestTable.pad; + +/** + * Base Transport action class for _cat API +* +* @opensearch.api +*/ +public abstract class ProtobufAbstractCatAction extends ProtobufBaseRestHandler { + + protected abstract RestChannelConsumer doCatRequest(RestRequest request, ProtobufNodeClient client); + + protected abstract void documentation(StringBuilder sb); + + protected abstract Table getTableWithHeader(RestRequest request); + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final ProtobufNodeClient client) throws IOException { + boolean helpWanted = request.paramAsBoolean("help", false); + if (helpWanted) { + return channel -> { + Table table = getTableWithHeader(request); + int[] width = buildHelpWidths(table, request); + BytesStream bytesOutput = Streams.flushOnCloseStream(channel.bytesOutput()); + UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput); + for (Table.Cell cell : table.getHeaders()) { + // need to do left-align always, so create new cells + pad(new Table.Cell(cell.value), width[0], request, out); + out.append(" | "); + pad(new Table.Cell(cell.attr.containsKey("alias") ? cell.attr.get("alias") : ""), width[1], request, out); + out.append(" | "); + pad(new Table.Cell(cell.attr.containsKey("desc") ? 
cell.attr.get("desc") : "not available"), width[2], request, out); + out.append("\n"); + } + out.close(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOutput.bytes())); + }; + } else { + return doCatRequest(request, client); + } + } + + static Set RESPONSE_PARAMS = Collections.unmodifiableSet( + new HashSet<>(Arrays.asList("format", "h", "v", "ts", "pri", "bytes", "size", "time", "s", "timeout")) + ); + + @Override + protected Set responseParams() { + return RESPONSE_PARAMS; + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java new file mode 100644 index 0000000000000..8d901db9904b2 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestCatAction.java @@ -0,0 +1,58 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.rest.ProtobufBaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Base _cat API endpoint +* +* @opensearch.api +*/ +public class ProtobufRestCatAction extends ProtobufBaseRestHandler { + + private static final String CAT = "=^.^="; + private static final String CAT_NL = CAT + "\n"; + private final String HELP; + + public ProtobufRestCatAction(List catActions) { + StringBuilder sb = new StringBuilder(); + sb.append(CAT_NL); + for (ProtobufAbstractCatAction catAction : catActions) { + catAction.documentation(sb); + } + HELP = sb.toString(); + } + + @Override + public List routes() { + return singletonList(new Route(GET, "/_cat")); + } + + @Override + public String getName() { + return "cat_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final ProtobufNodeClient client) throws IOException { + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP)); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java new file mode 100644 index 0000000000000..6e2ab8582d235 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/ProtobufRestNodesAction.java @@ -0,0 +1,522 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*/ + +package org.opensearch.rest.action.cat; + +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.ProtobufNodesInfoResponse; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.ProtobufNodesStatsResponse; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ProtobufClusterStateResponse; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.Table; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestActionListener; +import org.opensearch.rest.action.RestResponseListener; +import org.opensearch.server.proto.ClusterStateResponseProto; +import org.opensearch.server.proto.NodesInfoProto.NodesInfo; +import org.opensearch.server.proto.NodesStatsProto.NodesStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.FlushStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.GetStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.IndexingStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.MergeStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.QueryCacheStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.RefreshStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.RequestCacheStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.ScriptStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.SearchStats; +import org.opensearch.server.proto.NodesStatsProto.NodesStats.SegmentStats; + +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * _cat API action to get node information +* +* @opensearch.api +*/ +public class ProtobufRestNodesAction extends ProtobufAbstractCatAction { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ProtobufRestNodesAction.class); + static final String LOCAL_DEPRECATED_MESSAGE = "Deprecated parameter [local] used. This parameter does not cause this API to act " + + "locally, and should not be used. 
It will be unsupported in version 8.0.";
+
+    @Override
+    public List<Route> routes() {
+        return singletonList(new Route(GET, "/_cat/nodes_protobuf"));
+    }
+
+    @Override
+    public String getName() {
+        return "cat_nodes_protobuf_action";
+    }
+
+    @Override
+    protected void documentation(StringBuilder sb) {
+        sb.append("/_cat/nodes_protobuf\n");
+    }
+
+    @Override
+    public RestChannelConsumer doCatRequest(final RestRequest request, final ProtobufNodeClient client) {
+        final ProtobufClusterStateRequest clusterStateRequest = new ProtobufClusterStateRequest();
+        clusterStateRequest.clear().nodes(true);
+        if (request.hasParam("local")) {
+            deprecationLogger.deprecate("cat_nodes_local_parameter", LOCAL_DEPRECATED_MESSAGE);
+        }
+        clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
+        clusterStateRequest.clusterManagerNodeTimeout(
+            request.paramAsTime("cluster_manager_timeout", clusterStateRequest.clusterManagerNodeTimeout())
+        );
+        parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName());
+        final boolean fullId = request.paramAsBoolean("full_id", false);
+        return channel -> client.admin()
+            .cluster()
+            .state(clusterStateRequest, new RestActionListener<ProtobufClusterStateResponse>(channel) {
+                @Override
+                public void processResponse(final ProtobufClusterStateResponse clusterStateResponse) {
+                    ProtobufNodesInfoRequest nodesInfoRequest = new ProtobufNodesInfoRequest();
+                    nodesInfoRequest.timeout(request.param("timeout"));
+                    nodesInfoRequest.addMetrics(
+                        ProtobufNodesInfoRequest.Metric.JVM.metricName(),
+                        ProtobufNodesInfoRequest.Metric.OS.metricName(),
+                        ProtobufNodesInfoRequest.Metric.PROCESS.metricName(),
+                        ProtobufNodesInfoRequest.Metric.HTTP.metricName()
+                    );
+                    client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener<ProtobufNodesInfoResponse>(channel) {
+                        @Override
+                        public void processResponse(final ProtobufNodesInfoResponse nodesInfoResponse) {
+                            ProtobufNodesStatsRequest nodesStatsRequest = new ProtobufNodesStatsRequest();
+                            nodesStatsRequest.timeout(request.param("timeout"));
+                            nodesStatsRequest.clear()
+                                .indices(true)
+                                .addMetrics(
+                                    ProtobufNodesStatsRequest.Metric.JVM.metricName(),
+                                    ProtobufNodesStatsRequest.Metric.OS.metricName(),
+                                    ProtobufNodesStatsRequest.Metric.FS.metricName(),
+                                    ProtobufNodesStatsRequest.Metric.PROCESS.metricName(),
+                                    ProtobufNodesStatsRequest.Metric.SCRIPT.metricName()
+                                );
+                            client.admin()
+                                .cluster()
+                                .nodesStats(nodesStatsRequest, new RestResponseListener<ProtobufNodesStatsResponse>(channel) {
+                                    @Override
+                                    public RestResponse buildResponse(ProtobufNodesStatsResponse nodesStatsResponse) throws Exception {
+                                        return RestTable.buildResponse(
+                                            buildTable(fullId, request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse),
+                                            channel
+                                        );
+                                    }
+                                });
+                        }
+                    });
+                }
+            });
+    }
+
+    @Override
+    protected Table getTableWithHeader(final RestRequest request) {
+        Table table = new Table();
+        table.startHeaders();
+        table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
+        table.addCell("pid", "default:false;alias:p;desc:process id");
+        table.addCell("ip", "alias:i;desc:ip address");
+        table.addCell("port", "default:false;alias:po;desc:bound transport port");
+        table.addCell("http_address", "default:false;alias:http;desc:bound http address");
+
+        table.addCell("version", "default:false;alias:v;desc:es version");
+        table.addCell("type", "default:false;alias:t;desc:es distribution type");
+        table.addCell("build", "default:false;alias:b;desc:es build hash");
+        table.addCell("jdk", "default:false;alias:j;desc:jdk version");
+        table.addCell("disk.total",
"default:false;alias:dt,diskTotal;text-align:right;desc:total disk space"); + table.addCell("disk.used", "default:false;alias:du,diskUsed;text-align:right;desc:used disk space"); + table.addCell("disk.avail", "default:false;alias:d,da,disk,diskAvail;text-align:right;desc:available disk space"); + table.addCell("disk.used_percent", "default:false;alias:dup,diskUsedPercent;text-align:right;desc:used disk space percentage"); + table.addCell("heap.current", "default:false;alias:hc,heapCurrent;text-align:right;desc:used heap"); + table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio"); + table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap"); + table.addCell("ram.current", "default:false;alias:rc,ramCurrent;text-align:right;desc:used machine memory"); + table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio"); + table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory"); + table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors"); + table.addCell( + "file_desc.percent", + "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio" + ); + table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors"); + + table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage"); + table.addCell("load_1m", "alias:l;text-align:right;desc:1m load avg"); + table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg"); + table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg"); + table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime"); + // TODO: Deprecate "node.role", use "node.roles" which shows full node role names + table.addCell( + "node.role", + "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only" + ); + table.addCell("node.roles", "alias:rs,all roles;desc: -:coordinating node only"); + // TODO: Remove the header alias 'master', after removing MASTER_ROLE. It's added for compatibility when using parameter 'h=master'. 
+ table.addCell("cluster_manager", "alias:cm,m,master;desc:*:current cluster manager"); + table.addCell("name", "alias:n;desc:node name"); + + table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion"); + + table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); + table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); + + table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts"); + table.addCell( + "query_cache.miss_count", + "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts" + ); + + table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); + table.addCell( + "request_cache.evictions", + "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions" + ); + table.addCell( + "request_cache.hit_count", + "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts" + ); + table.addCell( + "request_cache.miss_count", + "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts" + ); + + table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); + table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); + + table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops"); + table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get"); + table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops"); + table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets"); + table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets"); + table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets"); + table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets"); + + table.addCell( + "indexing.delete_current", + "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions" + ); + table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions"); + table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops"); + table.addCell( + "indexing.index_current", + "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops" + ); + table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing"); + table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops"); + 
table.addCell( + "indexing.index_failed", + "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops" + ); + + table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges"); + table.addCell( + "merges.current_docs", + "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs" + ); + table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges"); + table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops"); + table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged"); + table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); + table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); + + table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); + table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); + table.addCell("refresh.external_total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"); + table.addCell( + "refresh.external_time", + "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes" + ); + table.addCell( + "refresh.listeners", + "alias:rli,refreshListeners;default:false;text-align:right;" + "desc:number of pending refresh listeners" + ); + + table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations"); + table.addCell( + "script.cache_evictions", + "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions" + ); + table.addCell( + "script.compilation_limit_triggered", + "alias:scrclt,scriptCacheCompilationLimitTriggered;default:false;" + + "text-align:right;desc:script cache compilation limit triggered" + ); + + table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops"); + table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase"); + table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops"); + table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts"); + table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); + table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); + table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); + table.addCell( + "search.scroll_time", + "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open" + ); + table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + + table.addCell( + "search.point_in_time_current", + 
"alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + + table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); + table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); + table.addCell( + "segments.index_writer_memory", + "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer" + ); + table.addCell( + "segments.version_map_memory", + "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map" + ); + table.addCell( + "segments.fixed_bitset_memory", + "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types" + + " and type filters for types referred in _parent fields" + ); + + table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops"); + table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest"); + table.addCell("suggest.total", "alias:suto,suggestTotal;default:false;text-align:right;desc:number of suggest ops"); + + table.endHeaders(); + return table; + } + + Table buildTable( + boolean fullId, + RestRequest req, + ProtobufClusterStateResponse state, + ProtobufNodesInfoResponse nodesInfo, + ProtobufNodesStatsResponse nodesStats + ) { + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes nodes = state.response().getClusterState().getNodes(); + String clusterManagerId = nodes.getClusterManagerNodeId(); + Table table = getTableWithHeader(req); + for (ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node node : nodes.getAllNodesList()) { + NodesInfo info = nodesInfo.nodesMap().get(node.getNodeId()); + NodesStats stats = nodesStats.nodesMap().get(node.getNodeId()); + + table.startRow(); + + table.addCell(fullId ? node.getNodeId() : Strings.substring(node.getNodeId(), 0, 4)); + table.addCell(info == null ? null : info.getProcessId()); + table.addCell(node.getHostAddress()); + table.addCell(node.getTransportAddress()); + table.addCell(info == null ? null : info.getAddress()); + + table.addCell(node.getVersion().toString()); + table.addCell(info == null ? null : info.getDisplayName()); + table.addCell(info == null ? null : info.getHash()); + table.addCell(info == null ? null : info.getJvmInfoVersion()); + + ByteSizeValue diskTotal = null; + ByteSizeValue diskUsed = null; + ByteSizeValue diskAvailable = null; + String diskUsedPercent = null; + if (stats != null) { + diskTotal = new ByteSizeValue(stats.getDiskTotal()); + diskAvailable = new ByteSizeValue(stats.getDiskAvailable()); + diskUsed = new ByteSizeValue(diskTotal.getBytes() - diskAvailable.getBytes()); + double diskUsedRatio = diskTotal.getBytes() == 0 ? 
1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes(); + diskUsedPercent = String.format(Locale.ROOT, "%.2f", 100.0 * diskUsedRatio); + } + + table.addCell(diskTotal); + table.addCell(diskUsed); + table.addCell(diskAvailable); + table.addCell(diskUsedPercent); + + table.addCell(stats == null ? null : new ByteSizeValue(stats.getJvmHeapUsed())); + table.addCell(stats == null ? null : stats.getJvmHeapUsedPercent()); + table.addCell(info == null ? null : info.getJvmHeapMax()); + table.addCell(stats == null ? null : new ByteSizeValue(stats.getOsMemUsed())); + table.addCell(stats == null ? null : stats.getOsMemUsedPercent()); + table.addCell(stats == null ? null : new ByteSizeValue(stats.getOsMemTotal())); + table.addCell(stats == null ? null : stats.getProcessOpenFileDescriptors()); + table.addCell( + stats == null ? null : calculatePercentage(stats.getProcessOpenFileDescriptors(), stats.getProcessMaxFileDescriptors()) + ); + table.addCell(stats == null ? null : stats.getProcessMaxFileDescriptors()); + + table.addCell(stats == null ? null : stats.getOsCpuPercent()); + boolean hasLoadAverage = stats != null && stats.getOsCpuLoadAverageList() != null; + table.addCell( + !hasLoadAverage || stats.getOsCpuLoadAverage(0) == -1 + ? null + : String.format(Locale.ROOT, "%.2f", stats.getOsCpuLoadAverage(0)) + ); + table.addCell( + !hasLoadAverage || stats.getOsCpuLoadAverage(1) == -1 + ? null + : String.format(Locale.ROOT, "%.2f", stats.getOsCpuLoadAverage(1)) + ); + table.addCell( + !hasLoadAverage || stats.getOsCpuLoadAverage(2) == -1 + ? null + : String.format(Locale.ROOT, "%.2f", stats.getOsCpuLoadAverage(2)) + ); + table.addCell(stats == null ? null : new TimeValue(stats.getJvmUpTime())); + + final String roles; + final String allRoles; + if (node.getRolesList().isEmpty()) { + roles = "-"; + allRoles = "-"; + } else { + List knownNodeRoles = node + .getRolesList() + .stream() + .filter(ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.NodeRole::getIsKnownRole) + .collect(Collectors.toList()); + roles = knownNodeRoles.size() > 0 + ? knownNodeRoles.stream() + .map( + ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.NodeRole::getRoleNameAbbreviation + ) + .sorted() + .collect(Collectors.joining()) + : "-"; + allRoles = node.getRolesList() + .stream() + .map(ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node.NodeRole::getRoleName) + .sorted() + .collect(Collectors.joining(",")); + } + table.addCell(roles); + table.addCell(allRoles); + table.addCell(clusterManagerId == null ? "x" : clusterManagerId.equals(node.getNodeId()) ? "*" : "-"); + table.addCell(node.getNodeName()); + + table.addCell(stats == null ? null : stats.getCompletionStats() == null ? null : stats.getCompletionStats().getSize()); + + table.addCell(stats == null ? null : stats.getFieldDataStats() == null ? null : stats.getFieldDataStats().getMemSize()); + table.addCell(stats == null ? null : stats.getFieldDataStats() == null ? null : stats.getFieldDataStats().getEvictions()); + + QueryCacheStats fcStats = stats != null && stats.getQueryCacheStats() != null ? stats.getQueryCacheStats() : null; + table.addCell(fcStats == null ? null : new ByteSizeValue(fcStats.getRamBytesUsed())); + table.addCell(fcStats == null ? null : fcStats.getCacheCount() - fcStats.getCacheSize()); + table.addCell(fcStats == null ? null : fcStats.getHitCount()); + table.addCell(fcStats == null ? 
null : fcStats.getMissCount()); + + RequestCacheStats qcStats = stats != null && stats.getRequestCacheStats() != null ? stats.getRequestCacheStats() : null; + table.addCell(qcStats == null ? null : new ByteSizeValue(qcStats.getMemorySize())); + table.addCell(qcStats == null ? null : qcStats.getEvictions()); + table.addCell(qcStats == null ? null : qcStats.getHitCount()); + table.addCell(qcStats == null ? null : qcStats.getMissCount()); + + FlushStats flushStats = stats != null && stats.getFlushStats() != null ? stats.getFlushStats() : null; + table.addCell(flushStats == null ? null : flushStats.getTotal()); + table.addCell(flushStats == null ? null : flushStats.getTotalTimeInMillis()); + + GetStats getStats = stats != null && stats.getGetStats() != null ? stats.getGetStats() : null; + table.addCell(getStats == null ? null : getStats.getCurrent()); + table.addCell(getStats == null ? null : new TimeValue(getStats.getTime())); + table.addCell(getStats == null ? null : getStats.getCount()); + table.addCell(getStats == null ? null : new TimeValue(getStats.getExistsTimeInMillis())); + table.addCell(getStats == null ? null : getStats.getExistsCount()); + table.addCell(getStats == null ? null : new TimeValue(getStats.getMissingTimeInMillis())); + table.addCell(getStats == null ? null : getStats.getMissingCount()); + + IndexingStats indexingStats = stats != null && stats.getIndexingStats() != null ? stats.getIndexingStats() : null; + table.addCell(indexingStats == null ? null : indexingStats.getDeleteCurrent()); + table.addCell(indexingStats == null ? null : new TimeValue(indexingStats.getDeleteTimeInMillis())); + table.addCell(indexingStats == null ? null : indexingStats.getDeleteCount()); + table.addCell(indexingStats == null ? null : indexingStats.getIndexCurrent()); + table.addCell(indexingStats == null ? null : new TimeValue(indexingStats.getIndexTimeInMillis())); + table.addCell(indexingStats == null ? null : indexingStats.getIndexCount()); + table.addCell(indexingStats == null ? null : indexingStats.getIndexFailedCount()); + + MergeStats mergeStats = stats != null && stats.getMergeStats() != null ? stats.getMergeStats() : null; + table.addCell(mergeStats == null ? null : mergeStats.getCurrent()); + table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs()); + table.addCell(mergeStats == null ? null : new ByteSizeValue(mergeStats.getCurrentSizeInBytes())); + table.addCell(mergeStats == null ? null : mergeStats.getTotal()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs()); + table.addCell(mergeStats == null ? null : new ByteSizeValue(mergeStats.getTotalSizeInBytes())); + table.addCell(mergeStats == null ? null : new TimeValue(mergeStats.getTotalTimeInMillis())); + + RefreshStats refreshStats = stats != null && stats.getRefreshStats() != null ? stats.getRefreshStats() : null; + table.addCell(refreshStats == null ? null : refreshStats.getTotal()); + table.addCell(refreshStats == null ? null : new TimeValue(refreshStats.getTotalTimeInMillis())); + table.addCell(refreshStats == null ? null : refreshStats.getExternalTotal()); + table.addCell(refreshStats == null ? null : new TimeValue(refreshStats.getExternalTotalTimeInMillis())); + table.addCell(refreshStats == null ? null : refreshStats.getListeners()); + + ScriptStats scriptStats = stats != null && stats.getScriptStats() != null ? stats.getScriptStats() : null; + table.addCell(scriptStats == null ? null : scriptStats.getCompilations()); + table.addCell(scriptStats == null ? 
null : scriptStats.getCacheEvictions()); + table.addCell(scriptStats == null ? null : scriptStats.getCompilationLimitTriggered()); + + SearchStats searchStats = stats != null && stats.getSearchStats() != null ? stats.getSearchStats() : null; + table.addCell(searchStats == null ? null : searchStats.getFetchCurrent()); + table.addCell(searchStats == null ? null : new TimeValue(searchStats.getFetchTimeInMillis())); + table.addCell(searchStats == null ? null : searchStats.getFetchCount()); + table.addCell(searchStats == null ? null : searchStats.getOpenContexts()); + table.addCell(searchStats == null ? null : searchStats.getQueryCurrent()); + table.addCell(searchStats == null ? null : new TimeValue(searchStats.getQueryTimeInMillis())); + table.addCell(searchStats == null ? null : searchStats.getQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getScrollCurrent()); + table.addCell(searchStats == null ? null : new TimeValue(searchStats.getScrollTimeInMillis())); + table.addCell(searchStats == null ? null : searchStats.getScrollCount()); + table.addCell(searchStats == null ? null : searchStats.getPitCurrent()); + table.addCell(searchStats == null ? null : new TimeValue(searchStats.getPitTimeInMillis())); + table.addCell(searchStats == null ? null : searchStats.getPitCount()); + + SegmentStats segmentsStats = stats != null && stats.getSegmentStats() != null ? stats.getSegmentStats() : null; + table.addCell(segmentsStats == null ? null : segmentsStats.getCount()); + table.addCell(segmentsStats == null ? null : new ByteSizeValue(0L)); + table.addCell(segmentsStats == null ? null : new ByteSizeValue(segmentsStats.getIndexWriterMemoryInBytes())); + table.addCell(segmentsStats == null ? null : new ByteSizeValue(segmentsStats.getVersionMapMemoryInBytes())); + table.addCell(segmentsStats == null ? null : new ByteSizeValue(segmentsStats.getBitsetMemoryInBytes())); + + table.addCell(searchStats == null ? null : searchStats.getSuggestCurrent()); + table.addCell(searchStats == null ? null : new TimeValue(searchStats.getSuggestTimeInMillis())); + table.addCell(searchStats == null ? null : searchStats.getSuggestCount()); + + table.endRow(); + } + + return table; + } + + /** + * Calculate the percentage of {@code used} from the {@code max} number. + * @param used The currently used number. + * @param max The maximum number. + * @return 0 if {@code max} is <= 0. Otherwise 100 * {@code used} / {@code max}. + */ + private short calculatePercentage(long used, long max) { + return max <= 0 ? 0 : (short) ((100d * used) / max); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/ProtobufRestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/ProtobufRestSearchAction.java new file mode 100644 index 0000000000000..5db97a57dbc3f --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/ProtobufRestSearchAction.java @@ -0,0 +1,389 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.search.ProtobufSearchAction; +import org.opensearch.action.search.ProtobufSearchRequest; +import org.opensearch.action.search.SearchContextId; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.client.node.ProtobufNodeClient; +import org.opensearch.common.Booleans; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.rest.ProtobufBaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestActions; +import org.opensearch.rest.action.ProtobufRestCancellableNodeClient; +import org.opensearch.rest.action.RestStatusToXContentListener; +import org.opensearch.search.Scroll; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.fetch.StoredFieldsContext; +import org.opensearch.search.fetch.subphase.FetchSourceContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.search.suggest.SuggestBuilder; +import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.IntConsumer; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.unit.TimeValue.parseTimeValue; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.POST; +import static org.opensearch.search.suggest.SuggestBuilders.termSuggestion; + +/** + * REST action to perform a search + * + * @opensearch.api + */ +public class ProtobufRestSearchAction extends ProtobufBaseRestHandler { + /** + * Indicates whether hits.total should be rendered as an integer or an object + * in the rest search response.
+ */ + public static final String TOTAL_HITS_AS_INT_PARAM = "rest_total_hits_as_int"; + public static final String TYPED_KEYS_PARAM = "typed_keys"; + private static final Set<String> RESPONSE_PARAMS; + + static { + final Set<String> responseParams = new HashSet<>(Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM)); + RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); + } + + @Override + public String getName() { + return "search_action"; + } + + @Override + public List<Route> routes() { + return unmodifiableList( + asList( + new Route(GET, "/_search_protobuf"), + new Route(POST, "/_search_protobuf"), + new Route(GET, "/{index}/_search_protobuf"), + new Route(POST, "/{index}/_search_protobuf") + ) + ); + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final ProtobufNodeClient client) throws IOException { + System.out.println("In protobuf search API"); + ProtobufSearchRequest searchRequest = new ProtobufSearchRequest(); + /* + * We have to pull out the call to `source().size(size)` because + * _update_by_query and _delete_by_query use this same parsing + * path but set a different variable when they see the `size` + * url parameter. + * + * Note that we can't use `searchRequest.source()::size` because + * `searchRequest.source()` is null right now. We don't have to + * guard against it being null in the IntConsumer because it can't + * be null later. If that is confusing to you then you are in good + * company. + */ + IntConsumer setSize = size -> searchRequest.source().size(size); + request.withContentOrSourceParamParserOrNull( + parser -> parseSearchRequest(searchRequest, request, parser, client.getNamedWriteableRegistry(), setSize) + ); + + return channel -> { + ProtobufRestCancellableNodeClient cancelClient = new ProtobufRestCancellableNodeClient(client, request.getHttpChannel()); + System.out.println("Cancel client execute"); + cancelClient.execute(ProtobufSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel)); + }; + } + + /** + * Parses the rest request on top of the ProtobufSearchRequest, preserving values that are not overridden by the rest request. + * + * @param requestContentParser body of the request to read. This method does not attempt to read the body from the {@code request} + * parameter + * @param setSize how the size url parameter is handled. {@code update_by_query} and regular search differ here.
+ */ + public static void parseSearchRequest( + ProtobufSearchRequest searchRequest, + RestRequest request, + XContentParser requestContentParser, + NamedWriteableRegistry namedWriteableRegistry, + IntConsumer setSize + ) throws IOException { + + if (searchRequest.source() == null) { + searchRequest.source(new SearchSourceBuilder()); + } + searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); + if (requestContentParser != null) { + searchRequest.source().parseXContent(requestContentParser, true); + } + + final int batchedReduceSize = request.paramAsInt("batched_reduce_size", searchRequest.getBatchedReduceSize()); + searchRequest.setBatchedReduceSize(batchedReduceSize); + if (request.hasParam("pre_filter_shard_size")) { + searchRequest.setPreFilterShardSize(request.paramAsInt("pre_filter_shard_size", ProtobufSearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE)); + } + + if (request.hasParam("max_concurrent_shard_requests")) { + // only set if we have the parameter since we auto adjust the max concurrency on the coordinator + // based on the number of nodes in the cluster + final int maxConcurrentShardRequests = request.paramAsInt( + "max_concurrent_shard_requests", + searchRequest.getMaxConcurrentShardRequests() + ); + searchRequest.setMaxConcurrentShardRequests(maxConcurrentShardRequests); + } + + if (request.hasParam("allow_partial_search_results")) { + // only set if we have the parameter passed to override the cluster-level default + searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); + } + + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types + // from the REST layer. these modes are an internal optimization and should + // not be specified explicitly by the user. + String searchType = request.param("search_type"); + if ("query_and_fetch".equals(searchType) || "dfs_query_and_fetch".equals(searchType)) { + throw new IllegalArgumentException("Unsupported search type [" + searchType + "]"); + } else { + searchRequest.searchType(searchType); + } + parseSearchSource(searchRequest.source(), request, setSize); + searchRequest.requestCache(request.paramAsBoolean("request_cache", searchRequest.requestCache())); + + String scroll = request.param("scroll"); + if (scroll != null) { + searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); + } + + searchRequest.routing(request.param("routing")); + searchRequest.preference(request.param("preference")); + searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); + searchRequest.pipeline(request.param("search_pipeline")); + + checkRestTotalHits(request, searchRequest); + + if (searchRequest.pointInTimeBuilder() != null) { + preparePointInTime(searchRequest, request, namedWriteableRegistry); + } else { + searchRequest.setCcsMinimizeRoundtrips( + request.paramAsBoolean("ccs_minimize_roundtrips", searchRequest.isCcsMinimizeRoundtrips()) + ); + } + + searchRequest.setCancelAfterTimeInterval(request.paramAsTime("cancel_after_time_interval", null)); + System.out.println("Search request is: " + searchRequest.toString()); + } + + /** + * Parses the rest request on top of the SearchSourceBuilder, preserving + * values that are not overridden by the rest request. 
+ */ + private static void parseSearchSource(final SearchSourceBuilder searchSourceBuilder, RestRequest request, IntConsumer setSize) { + QueryBuilder queryBuilder = RestActions.urlParamsToQueryBuilder(request); + if (queryBuilder != null) { + searchSourceBuilder.query(queryBuilder); + } + + int from = request.paramAsInt("from", -1); + if (from != -1) { + searchSourceBuilder.from(from); + } + int size = request.paramAsInt("size", -1); + if (size != -1) { + setSize.accept(size); + } + + if (request.hasParam("explain")) { + searchSourceBuilder.explain(request.paramAsBoolean("explain", null)); + } + if (request.hasParam("version")) { + searchSourceBuilder.version(request.paramAsBoolean("version", null)); + } + if (request.hasParam("seq_no_primary_term")) { + searchSourceBuilder.seqNoAndPrimaryTerm(request.paramAsBoolean("seq_no_primary_term", null)); + } + if (request.hasParam("timeout")) { + searchSourceBuilder.timeout(request.paramAsTime("timeout", null)); + } + if (request.hasParam("terminate_after")) { + int terminateAfter = request.paramAsInt("terminate_after", SearchContext.DEFAULT_TERMINATE_AFTER); + if (terminateAfter < 0) { + throw new IllegalArgumentException("terminateAfter must not be negative"); + } else if (terminateAfter > 0) { + searchSourceBuilder.terminateAfter(terminateAfter); + } + } + + StoredFieldsContext storedFieldsContext = StoredFieldsContext.fromRestRequest( + SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), + request + ); + if (storedFieldsContext != null) { + searchSourceBuilder.storedFields(storedFieldsContext); + } + String sDocValueFields = request.param("docvalue_fields"); + if (sDocValueFields != null) { + if (Strings.hasText(sDocValueFields)) { + String[] sFields = Strings.splitStringByCommaToArray(sDocValueFields); + for (String field : sFields) { + searchSourceBuilder.docValueField(field, null); + } + } + } + FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request); + if (fetchSourceContext != null) { + searchSourceBuilder.fetchSource(fetchSourceContext); + } + + if (request.hasParam("track_scores")) { + searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); + } + + if (request.hasParam("track_total_hits")) { + if (Booleans.isBoolean(request.param("track_total_hits"))) { + searchSourceBuilder.trackTotalHits(request.paramAsBoolean("track_total_hits", true)); + } else { + searchSourceBuilder.trackTotalHitsUpTo( + request.paramAsInt("track_total_hits", SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO) + ); + } + } + + String sSorts = request.param("sort"); + if (sSorts != null) { + String[] sorts = Strings.splitStringByCommaToArray(sSorts); + for (String sort : sorts) { + int delimiter = sort.lastIndexOf(":"); + if (delimiter != -1) { + String sortField = sort.substring(0, delimiter); + String reverse = sort.substring(delimiter + 1); + if ("asc".equals(reverse)) { + searchSourceBuilder.sort(sortField, SortOrder.ASC); + } else if ("desc".equals(reverse)) { + searchSourceBuilder.sort(sortField, SortOrder.DESC); + } + } else { + searchSourceBuilder.sort(sort); + } + } + } + + String sStats = request.param("stats"); + if (sStats != null) { + searchSourceBuilder.stats(Arrays.asList(Strings.splitStringByCommaToArray(sStats))); + } + + String suggestField = request.param("suggest_field"); + if (suggestField != null) { + String suggestText = request.param("suggest_text", request.param("q")); + int suggestSize = request.paramAsInt("suggest_size", 5); + String suggestMode = request.param("suggest_mode"); +
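// Build a term suggester from the legacy suggest_field/suggest_text/suggest_size/suggest_mode URL parameters; suggest_text falls back to the q parameter when absent. +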
searchSourceBuilder.suggest( + new SuggestBuilder().addSuggestion( + suggestField, + termSuggestion(suggestField).text(suggestText).size(suggestSize).suggestMode(SuggestMode.resolve(suggestMode)) + ) + ); + } + } + + static void preparePointInTime(ProtobufSearchRequest request, RestRequest restRequest, NamedWriteableRegistry namedWriteableRegistry) { + assert request.pointInTimeBuilder() != null; + ActionRequestValidationException validationException = null; + if (request.indices().length > 0) { + validationException = addValidationError("[indices] cannot be used with point in time", validationException); + } + if (request.indicesOptions() != ProtobufSearchRequest.DEFAULT_INDICES_OPTIONS) { + validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException); + } + if (request.routing() != null) { + validationException = addValidationError("[routing] cannot be used with point in time", validationException); + } + if (request.preference() != null) { + validationException = addValidationError("[preference] cannot be used with point in time", validationException); + } + if (restRequest.paramAsBoolean("ccs_minimize_roundtrips", false)) { + validationException = addValidationError("[ccs_minimize_roundtrips] cannot be used with point in time", validationException); + request.setCcsMinimizeRoundtrips(false); + } + ExceptionsHelper.reThrowIfNotNull(validationException); + + final IndicesOptions indicesOptions = request.indicesOptions(); + final IndicesOptions stricterIndicesOptions = IndicesOptions.fromOptions( + indicesOptions.ignoreUnavailable(), + indicesOptions.allowNoIndices(), + false, + false, + false, + true, + true, + indicesOptions.ignoreThrottled() + ); + request.indicesOptions(stricterIndicesOptions); + final SearchContextId searchContextId = SearchContextId.decode(namedWriteableRegistry, request.pointInTimeBuilder().getId()); + request.indices(searchContextId.getActualIndices()); + } + + /** + * Modify the search request to accurately count the total hits that match the query + * if {@link #TOTAL_HITS_AS_INT_PARAM} is set. + * + * @throws IllegalArgumentException if {@link #TOTAL_HITS_AS_INT_PARAM} + * is used in conjunction with a lower bound value (other than {@link SearchContext#DEFAULT_TRACK_TOTAL_HITS_UP_TO}) + * for the track_total_hits option. 
+ */ + public static void checkRestTotalHits(RestRequest restRequest, ProtobufSearchRequest searchRequest) { + boolean totalHitsAsInt = restRequest.paramAsBoolean(TOTAL_HITS_AS_INT_PARAM, false); + if (totalHitsAsInt == false) { + return; + } + if (searchRequest.source() == null) { + searchRequest.source(new SearchSourceBuilder()); + } + Integer trackTotalHitsUpTo = searchRequest.source().trackTotalHitsUpTo(); + if (trackTotalHitsUpTo == null) { + searchRequest.source().trackTotalHits(true); + } else if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE + && trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + throw new IllegalArgumentException( + "[" + + TOTAL_HITS_AS_INT_PARAM + + "] cannot be used " + + "if the tracking of total hits is not accurate, got " + + trackTotalHitsUpTo + ); + } + } + + @Override + protected Set<String> responseParams() { + return RESPONSE_PARAMS; + } + + @Override + public boolean allowsUnsafeBuffers() { + return true; + } +} diff --git a/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchRequest.java b/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchRequest.java new file mode 100644 index 0000000000000..aab3b66427f3b --- /dev/null +++ b/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchRequest.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.fetch; + +import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.ScoreDoc; +import org.opensearch.action.search.ProtobufSearchShardTask; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.search.RescoreDocIds; +import org.opensearch.search.dfs.AggregatedDfs; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.internal.ProtobufShardSearchRequest; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.ProtobufTaskId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Collection; +import java.util.Map; + +/** + * Shard level fetch base request. Holds all the info needed to execute a fetch. + * Used with search scroll as the original request doesn't hold indices.
+ * + * @opensearch.internal + */ +public class ProtobufShardFetchRequest extends TransportRequest { + + // TODO: proto message + private ShardSearchContextId contextId; + + private int[] docIds; + + private int size; + + private ScoreDoc lastEmittedDoc; + + public ProtobufShardFetchRequest(ShardSearchContextId contextId, Collection<Integer> list, ScoreDoc lastEmittedDoc) { + this.contextId = contextId; + this.docIds = list.stream().mapToInt(Integer::intValue).toArray(); + this.size = list.size(); + this.lastEmittedDoc = lastEmittedDoc; + } + + public ProtobufShardFetchRequest(byte[] in) throws IOException { + super(in); + // contextId = new ShardSearchContextId(in); + // size = in.readVInt(); + // docIds = new int[size]; + // for (int i = 0; i < size; i++) { + // docIds[i] = in.readVInt(); + // } + // byte flag = in.readByte(); + // if (flag == 1) { + // lastEmittedDoc = Lucene.readFieldDoc(in); + // } else if (flag == 2) { + // lastEmittedDoc = Lucene.readScoreDoc(in); + // } else if (flag != 0) { + // throw new IOException("Unknown flag: " + flag); + // } + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + // contextId.writeTo(out); + // out.writeVInt(size); + // for (int i = 0; i < size; i++) { + // out.writeVInt(docIds[i]); + // } + // if (lastEmittedDoc == null) { + // out.writeByte((byte) 0); + // } else if (lastEmittedDoc instanceof FieldDoc) { + // out.writeByte((byte) 1); + // Lucene.writeFieldDoc(out, (FieldDoc) lastEmittedDoc); + // } else { + // out.writeByte((byte) 2); + // Lucene.writeScoreDoc(out, lastEmittedDoc); + // } + } + + public ShardSearchContextId contextId() { + return contextId; + } + + public int[] docIds() { + return docIds; + } + + public int docIdsSize() { + return size; + } + + public ScoreDoc lastEmittedDoc() { + return lastEmittedDoc; + } + + @Override + public ProtobufTask createProtobufTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map<String, String> headers) { + return new ProtobufSearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + } + + @Override + public String getDescription() { + return "id[" + contextId + "], size[" + size + "], lastEmittedDoc[" + lastEmittedDoc + "]"; + } + + @Nullable + public ShardSearchRequest getShardSearchRequest() { + return null; + } + + @Nullable + public ProtobufShardSearchRequest getProtobufShardSearchRequest() { + return null; + } + + @Nullable + public RescoreDocIds getRescoreDocIds() { + return null; + } + + @Nullable + public AggregatedDfs getAggregatedDfs() { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchSearchRequest.java b/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchSearchRequest.java new file mode 100644 index 0000000000000..978d22d335d99 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/fetch/ProtobufShardFetchSearchRequest.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.search.fetch; + +import org.apache.lucene.search.ScoreDoc; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.RescoreDocIds; +import org.opensearch.search.dfs.AggregatedDfs; +import org.opensearch.search.internal.ProtobufShardSearchRequest; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; + +/** + * Shard level fetch request used with search. Holds indices taken from the original search request + * and implements {@link org.opensearch.action.IndicesRequest}. + * + * @opensearch.internal + */ +public class ProtobufShardFetchSearchRequest extends ProtobufShardFetchRequest implements IndicesRequest { + + // TODO: proto message + private final OriginalIndices originalIndices; + private final ProtobufShardSearchRequest protobufShardSearchRequest; + private final RescoreDocIds rescoreDocIds; + private final AggregatedDfs aggregatedDfs; + + public ProtobufShardFetchSearchRequest( + OriginalIndices originalIndices, + ShardSearchContextId id, + ProtobufShardSearchRequest protobufShardSearchRequest, + List<Integer> list, + ScoreDoc lastEmittedDoc, + RescoreDocIds rescoreDocIds, + AggregatedDfs aggregatedDfs + ) { + super(id, list, lastEmittedDoc); + this.originalIndices = originalIndices; + this.protobufShardSearchRequest = protobufShardSearchRequest; + this.rescoreDocIds = rescoreDocIds; + this.aggregatedDfs = aggregatedDfs; + } + + public ProtobufShardFetchSearchRequest(byte[] in) throws IOException { + super(in); + originalIndices = null; + protobufShardSearchRequest = null; + rescoreDocIds = null; + aggregatedDfs = null; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + // OriginalIndices.writeOriginalIndices(originalIndices, out); + // out.writeOptionalWriteable(protobufShardSearchRequest); + // rescoreDocIds.writeTo(out); + // out.writeOptionalWriteable(aggregatedDfs); + } + + @Override + public String[] indices() { + if (originalIndices == null) { + return null; + } + return originalIndices.indices(); + } + + @Override + public IndicesOptions indicesOptions() { + if (originalIndices == null) { + return null; + } + return originalIndices.indicesOptions(); + } + + @Override + public ProtobufShardSearchRequest getProtobufShardSearchRequest() { + return protobufShardSearchRequest; + } + + @Override + public RescoreDocIds getRescoreDocIds() { + return rescoreDocIds; + } + + @Override + public AggregatedDfs getAggregatedDfs() { + return aggregatedDfs; + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ProtobufShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ProtobufShardSearchRequest.java new file mode 100644 index 0000000000000..29f9b64364091 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/ProtobufShardSearchRequest.java @@ -0,0 +1,673 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.search.internal; + +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.ProtobufSearchShardTask; +import org.opensearch.action.search.ProtobufSearchRequest; +import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.index.Index; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryRewriteContext; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.Rewriteable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.AliasFilterParsingException; +import org.opensearch.indices.InvalidAliasNameException; +import org.opensearch.search.Scroll; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.search.sort.FieldSortBuilder; +import org.opensearch.server.proto.ShardSearchRequestProto; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.ProtobufTaskId; +import org.opensearch.transport.TransportRequest; + +import com.google.protobuf.ByteString; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.OutputStream; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; + +/** + * Shard level request that represents a search. + * It provides all the methods that the {@link SearchContext} needs. + * Provides a cache key based on its content that can be used to cache shard level response. 
+ * + * @opensearch.internal + */ +public class ProtobufShardSearchRequest extends TransportRequest implements IndicesRequest { + // TODO: proto message + public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + private ShardSearchRequestProto.ShardSearchRequest shardSearchRequestProto; + // private final String clusterAlias; + // private final ShardId shardId; + // private final int numberOfShards; + // private final SearchType searchType; + // private final Scroll scroll; + // private final float indexBoost; + // private final Boolean requestCache; + // private final long nowInMillis; + // private long inboundNetworkTime; + // private long outboundNetworkTime; + // private final boolean allowPartialSearchResults; + // private final String[] indexRoutings; + // private final String preference; + // private final OriginalIndices originalIndices; + + // private boolean canReturnNullResponseIfMatchNoDocs; + // private SearchSortValuesAndFormats bottomSortValues; + + // these are the only mutable fields, as they are subject to rewriting + // private AliasFilter aliasFilter; + // private SearchSourceBuilder source; + // private final ShardSearchContextId readerId; + // private final TimeValue keepAlive; + + public ProtobufShardSearchRequest( + OriginalIndices originalIndices, + ProtobufSearchRequest searchRequest, + ShardId shardId, + int numberOfShards, + AliasFilter aliasFilter, + float indexBoost, + long nowInMillis, + @Nullable String clusterAlias, + String[] indexRoutings + ) { + this( + originalIndices, + searchRequest, + shardId, + numberOfShards, + aliasFilter, + indexBoost, + nowInMillis, + clusterAlias, + indexRoutings, + null, + null + ); + } + + public ProtobufShardSearchRequest( + OriginalIndices originalIndices, + ProtobufSearchRequest searchRequest, + ShardId shardId, + int numberOfShards, + AliasFilter aliasFilter, + float indexBoost, + long nowInMillis, + @Nullable String clusterAlias, + String[] indexRoutings, + ShardSearchContextId readerId, + TimeValue keepAlive + ) { + this( + originalIndices, + shardId, + numberOfShards, + searchRequest.searchType(), + searchRequest.source(), + searchRequest.requestCache(), + aliasFilter, + indexBoost, + searchRequest.allowPartialSearchResults(), + indexRoutings, + searchRequest.preference(), + searchRequest.scroll(), + nowInMillis, + clusterAlias, + readerId, + keepAlive + ); + // If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted + // at this stage. Any NPEs in the above are therefore an error in request preparation logic. 
assert searchRequest.allowPartialSearchResults() != null; + } + + public ProtobufShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { + this( + OriginalIndices.NONE, + shardId, + -1, + SearchType.QUERY_THEN_FETCH, + null, + null, + aliasFilter, + 1.0f, + false, + Strings.EMPTY_ARRAY, + null, + null, + nowInMillis, + null, + null, + null + ); + } + + private ProtobufShardSearchRequest( + OriginalIndices originalIndices, + ShardId shardId, + int numberOfShards, + SearchType searchType, + SearchSourceBuilder source, + Boolean requestCache, + AliasFilter aliasFilter, + float indexBoost, + Boolean allowPartialSearchResults, + String[] indexRoutings, + String preference, + Scroll scroll, + long nowInMillis, + @Nullable String clusterAlias, + ShardSearchContextId readerId, + TimeValue keepAlive + ) { + // this.shardId = shardId; + // this.numberOfShards = numberOfShards; + // this.searchType = searchType; + // this.source = source; + // this.requestCache = requestCache; + // this.aliasFilter = aliasFilter; + // this.indexBoost = indexBoost; + // this.allowPartialSearchResults = allowPartialSearchResults; + // this.indexRoutings = indexRoutings; + // this.preference = preference; + // this.scroll = scroll; + // this.nowInMillis = nowInMillis; + // this.inboundNetworkTime = 0; + // this.outboundNetworkTime = 0; + // this.clusterAlias = clusterAlias; + // this.originalIndices = originalIndices; + // this.readerId = readerId; + // this.keepAlive = keepAlive; + // assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + + ShardSearchRequestProto.OriginalIndices originalIndicesProto = ShardSearchRequestProto.OriginalIndices.newBuilder() + .addAllIndices(Arrays.stream(originalIndices.indices()).collect(Collectors.toList())) + .setIndicesOptions(ShardSearchRequestProto.OriginalIndices.IndicesOptions.newBuilder() + .setIgnoreUnavailable(originalIndices.indicesOptions().ignoreUnavailable()) + .setAllowNoIndices(originalIndices.indicesOptions().allowNoIndices()) + .setExpandWildcardsOpen(originalIndices.indicesOptions().expandWildcardsOpen()) + .setExpandWildcardsClosed(originalIndices.indicesOptions().expandWildcardsClosed()) + .setExpandWildcardsHidden(originalIndices.indicesOptions().expandWildcardsHidden()) + .setAllowAliasesToMultipleIndices(originalIndices.indicesOptions().allowAliasesToMultipleIndices()) + .setForbidClosedIndices(originalIndices.indicesOptions().forbidClosedIndices()) + .setIgnoreAliases(originalIndices.indicesOptions().ignoreAliases()) + .setIgnoreThrottled(originalIndices.indicesOptions().ignoreThrottled()) + .build()) + .build(); + ShardSearchRequestProto.ShardId shardIdProto = ShardSearchRequestProto.ShardId.newBuilder() + .setShardId(shardId.getId()) + .setHashCode(shardId.hashCode()) + .setIndexName(shardId.getIndexName()) + .setIndexUUID(shardId.getIndex().getUUID()) + .build(); + + ShardSearchRequestProto.ShardSearchContextId.Builder shardSearchContextId = ShardSearchRequestProto.ShardSearchContextId.newBuilder(); + System.out.println("Reader id: " + readerId); + if (readerId != null) { + shardSearchContextId.setSessionId(readerId.getSessionId()); + shardSearchContextId.setId(readerId.getId()); + } + + ShardSearchRequestProto.ShardSearchRequest.Builder builder = ShardSearchRequestProto.ShardSearchRequest.newBuilder(); + builder.setOriginalIndices(originalIndicesProto); + builder.setShardId(shardIdProto); + builder.setNumberOfShards(numberOfShards); +
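// NOTE: the search type is hardcoded to QUERY_THEN_FETCH here; this partial merge does not yet map other SearchType values onto the proto enum. +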
builder.setSearchType(ShardSearchRequestProto.ShardSearchRequest.SearchType.QUERY_THEN_FETCH); + builder.setSource(ByteString.copyFrom(convertToBytes(source))); + builder.setInboundNetworkTime(0); + builder.setOutboundNetworkTime(0); + + if (requestCache != null) { + builder.setRequestCache(requestCache); + } + + if (aliasFilter != null) { + builder.setAliasFilter(ByteString.copyFrom(convertToBytes(aliasFilter))); + } + builder.setIndexBoost(indexBoost); + + if (allowPartialSearchResults != null) { + builder.setAllowPartialSearchResults(allowPartialSearchResults); + } + + if (indexRoutings != null) { + builder.addAllIndexRoutings(Arrays.stream(indexRoutings).collect(Collectors.toList())); + } + + if (preference != null) { + builder.setPreference(preference); + } + + if (scroll != null) { + builder.setScroll(ByteString.copyFrom(convertToBytes(scroll))); + } + builder.setNowInMillis(nowInMillis); + + if (clusterAlias != null) { + builder.setClusterAlias(clusterAlias); + } + if (readerId != null) { + builder.setReaderId(shardSearchContextId.build()); + } + + System.out.println("Keep alive: " + keepAlive); + if (keepAlive != null) { + builder.setTimeValue(keepAlive.getStringRep()); + } + + this.shardSearchRequestProto = builder.build(); + } + + public ProtobufShardSearchRequest(byte[] in) throws IOException { + super(in); + this.shardSearchRequestProto = ShardSearchRequestProto.ShardSearchRequest.parseFrom(in); + } + + public ProtobufShardSearchRequest(ShardSearchRequestProto.ShardSearchRequest shardSearchRequest) { + this.shardSearchRequestProto = shardSearchRequest; + } + + public ProtobufShardSearchRequest(ProtobufShardSearchRequest clone) { + this.shardSearchRequestProto = clone.shardSearchRequestProto; + } + + private byte[] convertToBytes(Object obj) { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + try { + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(obj); + oos.flush(); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return bos.toByteArray(); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + out.write(this.shardSearchRequestProto.toByteArray()); + } + + // protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { + // shardId.writeTo(out); + // out.writeByte(searchType.id()); + // if (!asKey) { + // out.writeVInt(numberOfShards); + // } + // out.writeOptionalWriteable(scroll); + // out.writeOptionalWriteable(source); + // if (out.getVersion().before(Version.V_2_0_0)) { + // // types not supported so send an empty array to previous versions + // out.writeStringArray(Strings.EMPTY_ARRAY); + // } + // aliasFilter.writeTo(out); + // out.writeFloat(indexBoost); + // if (asKey == false) { + // out.writeVLong(nowInMillis); + // } + // out.writeOptionalBoolean(requestCache); + // if (asKey == false && out.getVersion().onOrAfter(Version.V_2_0_0)) { + // out.writeVLong(inboundNetworkTime); + // out.writeVLong(outboundNetworkTime); + // } + // out.writeOptionalString(clusterAlias); + // out.writeBoolean(allowPartialSearchResults); + // if (asKey == false) { + // out.writeStringArray(indexRoutings); + // out.writeOptionalString(preference); + // } + // if (asKey == false) { + // out.writeBoolean(canReturnNullResponseIfMatchNoDocs); + // out.writeOptionalWriteable(bottomSortValues); + // } + // if (asKey == false) { + // out.writeOptionalWriteable(readerId); + // out.writeOptionalTimeValue(keepAlive); + // } + // } + + @Override + 
public String[] indices() { + if (this.shardSearchRequestProto.getOriginalIndices() == null) { + return null; + } + return this.shardSearchRequestProto.getOriginalIndices().getIndicesList().toArray(new String[0]); + } + + @Override + public IndicesOptions indicesOptions() { + if (this.shardSearchRequestProto.getOriginalIndices() == null) { + return null; + } + IndicesOptions indicesOptions = new IndicesOptions(EnumSet.of(IndicesOptions.Option.ALLOW_NO_INDICES, IndicesOptions.Option.FORBID_CLOSED_INDICES, IndicesOptions.Option.IGNORE_THROTTLED), + EnumSet.of(IndicesOptions.WildcardStates.OPEN)); + return indicesOptions; + } + + public ShardId shardId() { + return new ShardId(this.shardSearchRequestProto.getShardId().getIndexName(), this.shardSearchRequestProto.getShardId().getIndexUUID(), this.shardSearchRequestProto.getShardId().getShardId()); + } + + public SearchSourceBuilder source() { + ByteArrayInputStream in = new ByteArrayInputStream(this.shardSearchRequestProto.getSource().toByteArray()); + try (ObjectInputStream is = new ObjectInputStream(in)) { + return (SearchSourceBuilder) is.readObject(); + } catch (ClassNotFoundException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return null; + } + + public AliasFilter getAliasFilter() { + ByteArrayInputStream in = new ByteArrayInputStream(this.shardSearchRequestProto.getAliasFilter().toByteArray()); + try (ObjectInputStream is = new ObjectInputStream(in)) { + return (AliasFilter) is.readObject(); + } catch (ClassNotFoundException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return null; + } + + public void setAliasFilter(AliasFilter aliasFilter) { + this.shardSearchRequestProto = this.shardSearchRequestProto.toBuilder().setAliasFilter(ByteString.copyFrom(convertToBytes(aliasFilter))).build(); + } + + public void source(SearchSourceBuilder source) { + this.shardSearchRequestProto = this.shardSearchRequestProto.toBuilder().setSource(ByteString.copyFrom(convertToBytes(source))).build(); + } + + public int numberOfShards() { + return this.shardSearchRequestProto.getNumberOfShards(); + } + + public SearchType searchType() { + return SearchType.QUERY_THEN_FETCH; + } + + public float indexBoost() { + return this.shardSearchRequestProto.getIndexBoost(); + } + + public long nowInMillis() { + return this.shardSearchRequestProto.getNowInMillis(); + } + + public long getInboundNetworkTime() { + return this.shardSearchRequestProto.getInboundNetworkTime(); + } + + public void setInboundNetworkTime(long newTime) { + this.shardSearchRequestProto = this.shardSearchRequestProto.toBuilder().setInboundNetworkTime(newTime).build(); + } + + public long getOutboundNetworkTime() { + return this.shardSearchRequestProto.getOutboundNetworkTime(); + } + + public void setOutboundNetworkTime(long newTime) { + this.shardSearchRequestProto = this.shardSearchRequestProto.toBuilder().setOutboundNetworkTime(newTime).build(); + } + + public Boolean requestCache() { + return this.shardSearchRequestProto.getRequestCache(); + } + + public boolean allowPartialSearchResults() { + return this.shardSearchRequestProto.getAllowPartialSearchResults(); + } + + public Scroll scroll() { + ByteArrayInputStream in = new ByteArrayInputStream(this.shardSearchRequestProto.getScroll().toByteArray()); + try (ObjectInputStream is = new ObjectInputStream(in)) { + return (Scroll) is.readObject(); + } catch (ClassNotFoundException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return null; + } + + public String[] indexRoutings() { + return this.shardSearchRequestProto.getIndexRoutingsList().toArray(new String[0]); + } + + public String
preference() { + return this.shardSearchRequestProto.getPreference(); + } + + // /** + // * Sets the bottom sort values that can be used by the searcher to filter documents + // * that are after it. This value is computed by coordinating nodes that throttles the + // * query phase. After a partial merge of successful shards the sort values of the + // * bottom top document are passed as an hint on subsequent shard requests. + // */ + // public void setBottomSortValues(SearchSortValuesAndFormats values) { + // this.bottomSortValues = values; + // } + + // public SearchSortValuesAndFormats getBottomSortValues() { + // return bottomSortValues; + // } + + /** + * Returns true if the caller can handle null response {@link QuerySearchResult#nullInstance()}. + * Defaults to false since the coordinator node needs at least one shard response to build the global + * response. + */ + public boolean canReturnNullResponseIfMatchNoDocs() { + return this.shardSearchRequestProto.getCanReturnNullResponseIfMatchNoDocs(); + } + + public void canReturnNullResponseIfMatchNoDocs(boolean value) { + this.shardSearchRequestProto = this.shardSearchRequestProto.toBuilder().setCanReturnNullResponseIfMatchNoDocs(value).build(); + } + + private static final ThreadLocal<BytesStreamOutput> scratch = ThreadLocal.withInitial(BytesStreamOutput::new); + + /** + * Returns a non-null value if this request should execute using a specific point-in-time reader; + * otherwise, using the most up to date point-in-time reader. + */ + public ShardSearchContextId readerId() { + System.out.println("Getting readerId"); + if (this.shardSearchRequestProto.hasReaderId() == false) { + System.out.println("Returning null since the readerId is null"); + return null; + } + return new ShardSearchContextId(this.shardSearchRequestProto.getReaderId().getSessionId(), this.shardSearchRequestProto.getReaderId().getId()); + } + + /** + * Returns a non-null value to specify the time to live of the point-in-time reader that is used to execute this request.
+ */ + public TimeValue keepAlive() { + if (!this.shardSearchRequestProto.hasTimeValue()) { + return null; + } + return TimeValue.parseTimeValue(this.shardSearchRequestProto.getTimeValue(), null, "keep_alive"); + } + + public String getClusterAlias() { + return this.shardSearchRequestProto.getClusterAlias(); + } + + @Override + public ProtobufTask createProtobufTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map<String, String> headers) { + return new ProtobufSearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); + } + + @Override + public String getDescription() { + // Shard id is enough here, the request itself can be found by looking at the parent task description + return "shardId[" + shardId() + "]"; + } + + public String getMetadataSupplier() { + StringBuilder sb = new StringBuilder(); + if (this.shardSearchRequestProto.getSource() != null) { + ByteArrayInputStream in = new ByteArrayInputStream(this.shardSearchRequestProto.getSource().toByteArray()); + try (ObjectInputStream is = new ObjectInputStream(in)) { + SearchSourceBuilder source = (SearchSourceBuilder) is.readObject(); + sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]"); + } catch (ClassNotFoundException | IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } else { + sb.append("source[]"); + } + return sb.toString(); + } + + public Rewriteable<Rewriteable> getRewriteable() { + return new RequestRewritable(this); + } + + static class RequestRewritable implements Rewriteable<Rewriteable> { + + final ProtobufShardSearchRequest request; + + RequestRewritable(ProtobufShardSearchRequest request) { + this.request = request; + } + + @Override + public Rewriteable rewrite(QueryRewriteContext ctx) throws IOException { + // System.out.println("Rewriting protobuf request source"); + // SearchSourceBuilder newSource = request.source() == null ? null : Rewriteable.rewrite(request.source(), ctx); + // System.out.println("Rewriting protobuf request source done"); + // System.out.println("Rewriting protobuf request alias filter"); + // AliasFilter newAliasFilter = Rewriteable.rewrite(request.getAliasFilter(), ctx); + // System.out.println("Rewriting protobuf request alias filter done"); + + SearchSourceBuilder newSource = request.source(); + AliasFilter newAliasFilter = request.getAliasFilter(); + + QueryShardContext shardContext = ctx.convertToShardContext(); + + FieldSortBuilder primarySort = FieldSortBuilder.getPrimaryFieldSortOrNull(newSource); + if (shardContext != null + && primarySort != null + // && primarySort.isBottomSortShardDisjoint(shardContext, request.getBottomSortValues()) + ) { + assert newSource != null : "source should contain a primary sort field"; + newSource = newSource.shallowCopy(); + int trackTotalHitsUpTo = ProtobufSearchRequest.resolveTrackTotalHitsUpTo(request.scroll(), request.source()); + if (trackTotalHitsUpTo == TRACK_TOTAL_HITS_DISABLED && newSource.suggest() == null && newSource.aggregations() == null) { + newSource.query(new MatchNoneQueryBuilder()); + } else { + newSource.size(0); + } + request.source(newSource); + // request.setBottomSortValues(null); + } + + if (newSource == request.source() && newAliasFilter == request.getAliasFilter()) { + return this; + } else { + request.source(newSource); + request.setAliasFilter(newAliasFilter); + return new RequestRewritable(request); + } + } + } + + /** + * Returns the filter associated with listed filtering aliases. + *

+ * The list of filtering aliases should be obtained by calling Metadata.filteringAliases. + * Returns {@code null} if no filtering is required.

+ */ + public static QueryBuilder parseAliasFilter( + CheckedFunction<BytesReference, QueryBuilder, IOException> filterParser, + IndexMetadata metadata, + String... aliasNames + ) { + if (aliasNames == null || aliasNames.length == 0) { + return null; + } + Index index = metadata.getIndex(); + final Map<String, AliasMetadata> aliases = metadata.getAliases(); + Function<AliasMetadata, QueryBuilder> parserFunction = (alias) -> { + if (alias.filter() == null) { + return null; + } + try { + return filterParser.apply(alias.filter().uncompressed()); + } catch (IOException ex) { + throw new AliasFilterParsingException(index, alias.getAlias(), "Invalid alias filter", ex); + } + }; + if (aliasNames.length == 1) { + AliasMetadata alias = aliases.get(aliasNames[0]); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasNames[0], "Unknown alias name was passed to alias Filter"); + } + return parserFunction.apply(alias); + } else { + // we need to benchmark this a bit, to see whether it makes sense to use an OrFilter + BoolQueryBuilder combined = new BoolQueryBuilder(); + for (String aliasName : aliasNames) { + AliasMetadata alias = aliases.get(aliasName); + if (alias == null) { + // This shouldn't happen unless alias disappeared after filteringAliases was called. + throw new InvalidAliasNameException(index, aliasName, "Unknown alias name was passed to alias Filter"); + } + QueryBuilder parsedFilter = parserFunction.apply(alias); + if (parsedFilter != null) { + combined.should(parsedFilter); + } else { + // The filter might be null only if filter was removed after filteringAliases was called + return null; + } + } + return combined; + } + } + + public ShardSearchRequestProto.ShardSearchRequest request() { + return this.shardSearchRequestProto; + } + +} diff --git a/server/src/main/java/org/opensearch/search/query/ProtobufQuerySearchRequest.java b/server/src/main/java/org/opensearch/search/query/ProtobufQuerySearchRequest.java new file mode 100644 index 0000000000000..40b800776cd5d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/ProtobufQuerySearchRequest.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.search.query; + +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.ProtobufSearchShardTask; +import org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.Strings; +import org.opensearch.search.dfs.AggregatedDfs; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.internal.ProtobufShardSearchRequest; +import org.opensearch.tasks.ProtobufTask; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.ProtobufTaskId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; + +/** + * Transport request for query search + * + * @opensearch.internal + */ +public class ProtobufQuerySearchRequest extends TransportRequest implements IndicesRequest { + + // TODO: proto message + private final ShardSearchContextId contextId; + private final AggregatedDfs dfs; + private final OriginalIndices originalIndices; + private final ProtobufShardSearchRequest shardSearchRequest; + + public ProtobufQuerySearchRequest( + OriginalIndices originalIndices, + ShardSearchContextId contextId, + ProtobufShardSearchRequest shardSearchRequest, + AggregatedDfs dfs + ) { + this.contextId = contextId; + this.dfs = dfs; + this.shardSearchRequest = shardSearchRequest; + this.originalIndices = originalIndices; + } + + public ProtobufQuerySearchRequest(byte[] in) throws IOException { + super(in); + contextId = null; + dfs = null; + originalIndices = null; + shardSearchRequest = null; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + // contextId.writeTo(out); + // dfs.writeTo(out); + // OriginalIndices.writeOriginalIndices(originalIndices, out); + // out.writeOptionalWriteable(shardSearchRequest); + } + + public ShardSearchContextId contextId() { + return contextId; + } + + public AggregatedDfs dfs() { + return dfs; + } + + @Nullable + public ProtobufShardSearchRequest shardSearchRequest() { + return shardSearchRequest; + } + + @Override + public String[] indices() { + return originalIndices.indices(); + } + + @Override + public IndicesOptions indicesOptions() { + return originalIndices.indicesOptions(); + } + + @Override + public ProtobufTask createProtobufTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map<String, String> headers) { + return new ProtobufSearchShardTask(id, type, action, getDescription(), parentTaskId, headers, this::getMetadataSupplier); + } + + public String getDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("id["); + sb.append(contextId); + sb.append("], "); + sb.append("indices["); + Strings.arrayToDelimitedString(originalIndices.indices(), ",", sb); + sb.append("]"); + return sb.toString(); + } + + public String getMetadataSupplier() { + return shardSearchRequest().getMetadataSupplier(); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java new file mode 100644 index 0000000000000..57245f8a13d7b --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufCancellableTask.java @@ -0,0 +1,99 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The
OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*
+*/
+
+package org.opensearch.tasks;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.unit.TimeValue;
+
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.opensearch.search.SearchService.NO_TIMEOUT;
+
+/**
+ * A protobuf task that can be cancelled
+*
+* @opensearch.internal
+*/
+public abstract class ProtobufCancellableTask extends ProtobufTask {
+
+    private volatile String reason;
+    private final AtomicBoolean cancelled = new AtomicBoolean(false);
+    private final TimeValue cancelAfterTimeInterval;
+
+    public ProtobufCancellableTask(
+        long id,
+        String type,
+        String action,
+        String description,
+        ProtobufTaskId parentTaskId,
+        Map<String, String> headers
+    ) {
+        this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT);
+    }
+
+    public ProtobufCancellableTask(
+        long id,
+        String type,
+        String action,
+        String description,
+        ProtobufTaskId parentTaskId,
+        Map<String, String> headers,
+        TimeValue cancelAfterTimeInterval
+    ) {
+        super(id, type, action, description, parentTaskId, headers);
+        this.cancelAfterTimeInterval = cancelAfterTimeInterval;
+    }
+
+    /**
+     * This method is called by the task manager when this task is cancelled.
+     */
+    public void cancel(String reason) {
+        assert reason != null;
+        if (cancelled.compareAndSet(false, true)) {
+            this.reason = reason;
+            onCancelled();
+        }
+    }
+
+    /**
+     * Returns true if this task should be automatically cancelled if the coordinating node that
+     * requested this task left the cluster.
+     */
+    public boolean cancelOnParentLeaving() {
+        return true;
+    }
+
+    /**
+     * Returns true if this task can potentially have children that need to be cancelled when its parent is cancelled.
+     */
+    public abstract boolean shouldCancelChildrenOnCancellation();
+
+    public boolean isCancelled() {
+        return cancelled.get();
+    }
+
+    public TimeValue getCancellationTimeout() {
+        return cancelAfterTimeInterval;
+    }
+
+    /**
+     * The reason the task was cancelled or null if it hasn't been cancelled.
+     */
+    @Nullable
+    public final String getReasonCancelled() {
+        return reason;
+    }
+
+    /**
+     * Called after the task is cancelled so that it can take any actions that it has to take.
+     */
+    protected void onCancelled() {}
+}
diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTask.java b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java
new file mode 100644
index 0000000000000..857abb26febc1
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ProtobufTask.java
@@ -0,0 +1,456 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+* +*/ + +package org.opensearch.tasks; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ProtobufActionResponse; +import org.opensearch.action.NotifyOnceListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.tasks.proto.TaskResourceStatsProto; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Current protobuf task information +* +* @opensearch.internal +*/ +public class ProtobufTask { + + private static final Logger logger = LogManager.getLogger(ProtobufTask.class); + + /** + * The request header to mark tasks with specific ids + */ + public static final String X_OPAQUE_ID = "X-Opaque-Id"; + + private static final String TOTAL = "total"; + + private final long id; + + private final String type; + + private final String action; + + private final String description; + + private final ProtobufTaskId parentTask; + + private final Map headers; + + private final Map> resourceStats; + + private final List> resourceTrackingCompletionListeners; + + /** + * Keeps track of the number of active resource tracking threads for this task. It is initialized to 1 to track + * the task's own/self thread. When this value becomes 0, all threads have been marked inactive and the resource + * tracking can be stopped for this task. + */ + private final AtomicInteger numActiveResourceTrackingThreads = new AtomicInteger(1); + + /** + * The task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style). + */ + private final long startTime; + + /** + * The task's start time as a relative time ({@link System#nanoTime()} style). + */ + private final long startTimeNanos; + + public ProtobufTask(long id, String type, String action, String description, ProtobufTaskId parentTask, Map headers) { + this( + id, + type, + action, + description, + parentTask, + System.currentTimeMillis(), + System.nanoTime(), + headers, + new ConcurrentHashMap<>(), + new ArrayList<>() + ); + } + + public ProtobufTask( + long id, + String type, + String action, + String description, + ProtobufTaskId parentTask, + long startTime, + long startTimeNanos, + Map headers, + ConcurrentHashMap> resourceStats, + List> resourceTrackingCompletionListeners + ) { + this.id = id; + this.type = type; + this.action = action; + this.description = description; + this.parentTask = parentTask; + this.startTime = startTime; + this.startTimeNanos = startTimeNanos; + this.headers = headers; + this.resourceStats = resourceStats; + this.resourceTrackingCompletionListeners = resourceTrackingCompletionListeners; + } + + /** + * Build a version of the task status you can throw over the wire and back + * to the user. + * + * @param localNodeId + * the id of the node this task is running on + * @param detailed + * should the information include detailed, potentially slow to + * generate data? 
+ */
+    public final ProtobufTaskInfo taskInfo(String localNodeId, boolean detailed) {
+        return taskInfo(localNodeId, detailed, detailed == false);
+    }
+
+    /**
+     * Build a version of the task status you can throw over the wire and back
+     * with the option to include resource stats or not.
+     * This method is only used when creating a TaskResult, to avoid storing resource information in the task index.
+     *
+     * @param excludeStats should the information exclude resource stats.
+     *                     By default, the detailed flag is used to control whether resource information is included.
+     *                     But in order to avoid storing resource stats in the task index (strict mapping is enforced
+     *                     there and breaks when this field is added), this flag excludes them explicitly.
+     *                     In the future, task-index-mapping.json can be modified to add resource stats.
+     */
+    private ProtobufTaskInfo taskInfo(String localNodeId, boolean detailed, boolean excludeStats) {
+        String description = null;
+        ProtobufTask.Status status = null;
+        ProtobufTaskResourceStats resourceStats = null;
+        if (detailed) {
+            description = getDescription();
+            status = getStatus();
+        }
+        if (excludeStats == false) {
+            resourceStats = new ProtobufTaskResourceStats(new HashMap<>() {
+                {
+                    put(TOTAL, getTotalResourceStats());
+                }
+            });
+        }
+        return taskInfo(localNodeId, description, status, resourceStats);
+    }
+
+    /**
+     * Build a {@link ProtobufTaskInfo} for this task without resource stats.
+     */
+    protected final ProtobufTaskInfo taskInfo(String localNodeId, String description, Status status) {
+        return taskInfo(localNodeId, description, status, null);
+    }
+
+    /**
+     * Build a proper {@link ProtobufTaskInfo} for this task.
+     */
+    protected final ProtobufTaskInfo taskInfo(
+        String localNodeId,
+        String description,
+        Status status,
+        ProtobufTaskResourceStats resourceStats
+    ) {
+        return new ProtobufTaskInfo(
+            new ProtobufTaskId(localNodeId, getId()),
+            getType(),
+            getAction(),
+            description,
+            status,
+            startTime,
+            System.nanoTime() - startTimeNanos,
+            this instanceof ProtobufCancellableTask,
+            this instanceof ProtobufCancellableTask && ((ProtobufCancellableTask) this).isCancelled(),
+            parentTask,
+            headers,
+            resourceStats
+        );
+    }
+
+    /**
+     * Returns the task id
+     */
+    public long getId() {
+        return id;
+    }
+
+    /**
+     * Returns the task channel type (netty, transport, direct)
+     */
+    public String getType() {
+        return type;
+    }
+
+    /**
+     * Returns the task action
+     */
+    public String getAction() {
+        return action;
+    }
+
+    /**
+     * Generates the task description
+     */
+    public String getDescription() {
+        return description;
+    }
+
+    /**
+     * Returns the task's start time as a wall clock time since epoch ({@link System#currentTimeMillis()} style).
+     */
+    public long getStartTime() {
+        return startTime;
+    }
+
+    /**
+     * Returns the task's start time in nanoseconds ({@link System#nanoTime()} style).
+     */
+    public long getStartTimeNanos() {
+        return startTimeNanos;
+    }
+
+    /**
+     * Returns the id of the parent task, or NO_PARENT_ID if the task doesn't have any parent tasks
+     */
+    public ProtobufTaskId getParentTaskId() {
+        return parentTask;
+    }
+
+    /**
+     * Build a status for this task or null if this task doesn't have status.
+     * Since most tasks don't have status this defaults to returning null. While
+     * this can never perform IO it might be a costly operation, requiring
+     * collating lists of results, etc. So only use it if you need the value.
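+     * (A long-running reindex-style task, for example, could collate per-batch progress into its status here.)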
+ */ + public Status getStatus() { + return null; + } + + /** + * Returns thread level resource consumption of the task + */ + public Map> getResourceStats() { + return Collections.unmodifiableMap(resourceStats); + } + + /** + * Returns current total resource usage of the task. + * Currently, this method is only called on demand, during get and listing of tasks. + * In the future, these values can be cached as an optimization. + */ + public TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage getTotalResourceStats() { + return TaskResourceStatsProto.TaskResourceStats.TaskResourceUsage.newBuilder() + .setCpuTimeInNanos(getTotalResourceUtilization(ResourceStats.CPU)) + .setMemoryInBytes(getTotalResourceUtilization(ResourceStats.MEMORY)) + .build(); + } + + /** + * Returns total resource consumption for a specific task stat. + */ + public long getTotalResourceUtilization(ResourceStats stats) { + long totalResourceConsumption = 0L; + for (List threadResourceInfosList : resourceStats.values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfosList) { + final ResourceUsageInfo.ResourceStatsInfo statsInfo = threadResourceInfo.getResourceUsageInfo().getStatsInfo().get(stats); + if (threadResourceInfo.getStatsType().isOnlyForAnalysis() == false && statsInfo != null) { + totalResourceConsumption += statsInfo.getTotalValue(); + } + } + } + return totalResourceConsumption; + } + + /** + * Adds thread's starting resource consumption information + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException matching active thread entry was found which is not expected. + */ + public void startThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) { + final List threadResourceInfoList = resourceStats.computeIfAbsent(threadId, k -> new ArrayList<>()); + // active thread entry should not be present in the list + for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) { + if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) { + throw new IllegalStateException( + "unexpected active thread resource entry present [" + threadId + "]:[" + threadResourceInfo + "]" + ); + } + } + threadResourceInfoList.add(new ThreadResourceInfo(threadId, statsType, resourceUsageMetrics)); + incrementResourceTrackingThreads(); + } + + /** + * This method is used to update the resource consumption stats so that the data isn't too stale for long-running task. + * If active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception. + * @param threadId ID of the thread + * @param statsType stats type + * @param resourceUsageMetrics resource consumption metrics of the thread + * @throws IllegalStateException if no matching active thread entry was found. + */ + public void updateThreadResourceStats(long threadId, ResourceStatsType statsType, ResourceUsageMetric... 
resourceUsageMetrics) {
+        final List<ThreadResourceInfo> threadResourceInfoList = resourceStats.get(threadId);
+        if (threadResourceInfoList != null) {
+            for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) {
+                // the active entry present in the list is updated
+                if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) {
+                    threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics);
+                    return;
+                }
+            }
+        }
+        throw new IllegalStateException("cannot update if active thread resource entry is not present");
+    }
+
+    /**
+     * Record the thread's final resource consumption values.
+     * If an active thread entry is present in the list, the entry is updated. If one is not found, it throws an exception.
+     * @param threadId ID of the thread
+     * @param statsType stats type
+     * @param resourceUsageMetrics resource consumption metrics of the thread
+     * @throws IllegalStateException if no matching active thread entry was found.
+     */
+    public void stopThreadResourceTracking(long threadId, ResourceStatsType statsType, ResourceUsageMetric... resourceUsageMetrics) {
+        final List<ThreadResourceInfo> threadResourceInfoList = resourceStats.get(threadId);
+        if (threadResourceInfoList != null) {
+            for (ThreadResourceInfo threadResourceInfo : threadResourceInfoList) {
+                if (threadResourceInfo.getStatsType() == statsType && threadResourceInfo.isActive()) {
+                    threadResourceInfo.setActive(false);
+                    threadResourceInfo.recordResourceUsageMetrics(resourceUsageMetrics);
+                    decrementResourceTrackingThreads();
+                    return;
+                }
+            }
+        }
+        throw new IllegalStateException("cannot update final values if active thread resource entry is not present");
+    }
+
+    /**
+     * Individual tasks can override this if they want to support task resource tracking. We just need to make sure
+     * that the ThreadPool on which the task runs has a runnable wrapper similar to
+     * {@link org.opensearch.common.util.concurrent.OpenSearchExecutors#newResizable}
+     *
+     * @return true if resource tracking is supported by the task
+     */
+    public boolean supportsResourceTracking() {
+        return false;
+    }
+
+    /**
+     * Report of the internal status of a task. These can vary wildly from task
+     * to task because each task is implemented differently, but we should try
+     * to keep each task consistent from version to version where possible.
+     * That means each implementation of {@linkplain ProtobufTask.Status#toXContent}
+     * should avoid making backwards incompatible changes to the rendered
+     * result. But if we change the way a request is implemented it might not
+     * be possible to preserve backwards compatibility. In that case, we
+     * can change this on version upgrade, but we should be careful
+     * because some statuses (reindex) have become de facto standardized because
+     * they are used by systems like Kibana.
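+     * <p>
+     * A minimal, hypothetical implementation (an illustrative sketch only; the name and fields are not part of
+     * this change) could look like:
+     * <pre>
+     * class SleepStatus implements ProtobufTask.Status {
+     *     public String getWriteableName() { return "sleep_status"; }
+     *     public void writeTo(StreamOutput out) throws IOException {}
+     *     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+     *         return builder.startObject().field("phase", "sleeping").endObject();
+     *     }
+     * }
+     * </pre>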
+ */ + public interface Status extends ToXContentObject, NamedWriteable {} + + /** + * Returns stored task header associated with the task + */ + public String getHeader(String header) { + return headers.get(header); + } + + public ProtobufTaskResult result(DiscoveryNode node, Exception error) throws IOException { + return new ProtobufTaskResult(taskInfo(node.getId(), true, true), error); + } + + public ProtobufTaskResult result(DiscoveryNode node, ProtobufActionResponse response) throws IOException { + if (response instanceof ToXContent) { + return new ProtobufTaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); + } else { + throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); + } + } + + /** + * Registers a task resource tracking completion listener on this task if resource tracking is still active. + * Returns true on successful subscription, false otherwise. + */ + public boolean addResourceTrackingCompletionListener(NotifyOnceListener listener) { + if (numActiveResourceTrackingThreads.get() > 0) { + resourceTrackingCompletionListeners.add(listener); + return true; + } + + return false; + } + + /** + * Increments the number of active resource tracking threads. + * + * @return the number of active resource tracking threads. + */ + public int incrementResourceTrackingThreads() { + return numActiveResourceTrackingThreads.incrementAndGet(); + } + + /** + * Decrements the number of active resource tracking threads. + * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's + * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method + * is called exactly once on all registered listeners. + * + * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce + * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). + * This ensures that the number of active threads doesn't drop to zero pre-maturely. + * + * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed + * with the response, any thread usage info captured after the task is unregistered may be irrelevant. + * + * @return the number of active resource tracking threads. + */ + public int decrementResourceTrackingThreads() { + int count = numActiveResourceTrackingThreads.decrementAndGet(); + + if (count == 0) { + List listenerExceptions = new ArrayList<>(); + resourceTrackingCompletionListeners.forEach(listener -> { + try { + listener.onResponse(this); + } catch (Exception e1) { + try { + listener.onFailure(e1); + } catch (Exception e2) { + listenerExceptions.add(e2); + } + } + }); + ExceptionsHelper.maybeThrowRuntimeAndSuppress(listenerExceptions); + } + + return count; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java new file mode 100644 index 0000000000000..c212120e2a2c2 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskAwareRequest.java @@ -0,0 +1,57 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
+*
+*/
+
+package org.opensearch.tasks;
+
+import java.util.Map;
+
+/**
+ * An interface for a request that can be used to register a task manager task
+*
+* @opensearch.internal
+*/
+public interface ProtobufTaskAwareRequest {
+    /**
+     * Set a reference to the task that caused this task to be run.
+     */
+    default void setProtobufParentTask(String parentTaskNode, long parentTaskId) {
+        setProtobufParentTask(new ProtobufTaskId(parentTaskNode, parentTaskId));
+    }
+
+    /**
+     * Set a reference to the task that created this request.
+     */
+    void setProtobufParentTask(ProtobufTaskId taskId);
+
+    /**
+     * Get a reference to the task that created this request. Implementers should default to
+     * {@link ProtobufTaskId#EMPTY_TASK_ID}, meaning "there is no parent".
+     */
+    ProtobufTaskId getProtobufParentTask();
+
+    /**
+     * Returns the task object that should be used to keep track of the processing of the request.
+     */
+    default ProtobufTask createProtobufTask(long id, String type, String action, ProtobufTaskId parentTaskId, Map<String, String> headers) {
+        return new ProtobufTask(id, type, action, getTaskDescription(), parentTaskId, headers);
+    }
+
+    /**
+     * Returns an optional description of the request to be displayed by the task manager
+     */
+    default String getTaskDescription() {
+        return "";
+    }
+}
diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java
new file mode 100644
index 0000000000000..ece1190d9a4c7
--- /dev/null
+++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskCancellationService.java
@@ -0,0 +1,237 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/ + +package org.opensearch.tasks; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchSecurityException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.ProtobufChannelActionListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ProtobufEmptyTransportResponseHandler; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.ProtobufTransportRequestHandler; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Collection; +import java.util.List; + +/** + * Service used to cancel a task +* +* @opensearch.internal +*/ +public class ProtobufTaskCancellationService { + public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban"; + private static final Logger logger = LogManager.getLogger(ProtobufTaskCancellationService.class); + private final TransportService transportService; + private final TaskManager taskManager; + + public ProtobufTaskCancellationService(TransportService transportService) { + this.transportService = transportService; + this.taskManager = transportService.getTaskManager(); + transportService.registerRequestHandlerProtobuf( + BAN_PARENT_ACTION_NAME, + ThreadPool.Names.SAME, + BanParentTaskRequest::new, + new BanParentRequestHandler() + ); + } + + private String localNodeId() { + return transportService.getLocalNode().getId(); + } + + void cancelTaskAndDescendants(ProtobufCancellableTask task, String reason, boolean waitForCompletion, ActionListener listener) { + final ProtobufTaskId taskId = task.taskInfo(localNodeId(), false).getTaskId(); + if (task.shouldCancelChildrenOnCancellation()) { + logger.trace("cancelling task [{}] and its descendants", taskId); + StepListener completedListener = new StepListener<>(); + GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.map(completedListener, r -> null), 3); + Collection childrenNodes = taskManager.startBanOnChildrenNodesProtobuf(task.getId(), () -> { + logger.trace("child tasks of parent [{}] are completed", taskId); + groupedListener.onResponse(null); + }); + taskManager.cancelProtobufTask(task, reason, () -> { + logger.trace("task [{}] is cancelled", taskId); + groupedListener.onResponse(null); + }); + StepListener banOnNodesListener = new StepListener<>(); + setBanOnNodes(reason, waitForCompletion, task, childrenNodes, banOnNodesListener); + banOnNodesListener.whenComplete(groupedListener::onResponse, groupedListener::onFailure); + // If we start unbanning when the last child task completed and that child task executed with a specific user, then unban + // requests are denied because internal requests can't run with a user. We need to remove bans with the current thread context. + final Runnable removeBansRunnable = transportService.getThreadPool() + .getThreadContext() + .preserveContext(() -> removeBanOnNodes(task, childrenNodes)); + // We remove bans after all child tasks are completed although in theory we can do it on a per-node basis. 
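+            // Hypothetical walk-through (nodes n1/n2 are illustrative): completedListener fires only after the
+            // three grouped callbacks above have run, i.e. bans placed on n1/n2, the local task cancelled, and
+            // the child tasks reported complete; only then does removeBansRunnable lift the bans.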
+ completedListener.whenComplete(r -> removeBansRunnable.run(), e -> removeBansRunnable.run()); + // if wait_for_completion is true, then only return when (1) bans are placed on child nodes, (2) child tasks are + // completed or failed, (3) the main task is cancelled. Otherwise, return after bans are placed on child nodes. + if (waitForCompletion) { + completedListener.whenComplete(r -> listener.onResponse(null), listener::onFailure); + } else { + banOnNodesListener.whenComplete(r -> listener.onResponse(null), listener::onFailure); + } + } else { + logger.trace("task [{}] doesn't have any children that should be cancelled", taskId); + if (waitForCompletion) { + taskManager.cancelProtobufTask(task, reason, () -> listener.onResponse(null)); + } else { + taskManager.cancelProtobufTask(task, reason, () -> {}); + listener.onResponse(null); + } + } + } + + private void setBanOnNodes( + String reason, + boolean waitForCompletion, + ProtobufCancellableTask task, + Collection childNodes, + ActionListener listener + ) { + if (childNodes.isEmpty()) { + listener.onResponse(null); + return; + } + final ProtobufTaskId taskId = new ProtobufTaskId(localNodeId(), task.getId()); + logger.trace("cancelling child tasks of [{}] on child nodes {}", taskId, childNodes); + GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.map(listener, r -> null), + childNodes.size() + ); + final BanParentTaskRequest banRequest = BanParentTaskRequest.createSetBanParentTaskRequest(taskId, reason, waitForCompletion); + for (DiscoveryNode node : childNodes) { + transportService.sendRequest( + node, + BAN_PARENT_ACTION_NAME, + banRequest, + new ProtobufEmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + logger.trace("sent ban for tasks with the parent [{}] to the node [{}]", taskId, node); + groupedListener.onResponse(null); + } + + @Override + public void handleException(TransportException exp) { + assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; + logger.warn("Cannot send ban for tasks with the parent [{}] to the node [{}]", taskId, node); + groupedListener.onFailure(exp); + } + } + ); + } + } + + private void removeBanOnNodes(ProtobufCancellableTask task, Collection childNodes) { + final BanParentTaskRequest request = BanParentTaskRequest.createRemoveBanParentTaskRequest( + new ProtobufTaskId(localNodeId(), task.getId()) + ); + for (DiscoveryNode node : childNodes) { + logger.trace("Sending remove ban for tasks with the parent [{}] to the node [{}]", request.parentTaskId, node); + transportService.sendRequest( + node, + BAN_PARENT_ACTION_NAME, + request, + new ProtobufEmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleException(TransportException exp) { + assert ExceptionsHelper.unwrapCause(exp) instanceof OpenSearchSecurityException == false; + logger.info("failed to remove the parent ban for task {} on node {}", request.parentTaskId, node); + } + } + ); + } + } + + private static class BanParentTaskRequest extends TransportRequest { + + private final ProtobufTaskId parentTaskId; + private final boolean ban; + private final boolean waitForCompletion; + private final String reason; + + static BanParentTaskRequest createSetBanParentTaskRequest(ProtobufTaskId parentTaskId, String reason, boolean waitForCompletion) { + return new BanParentTaskRequest(parentTaskId, reason, waitForCompletion); + } + + static BanParentTaskRequest 
createRemoveBanParentTaskRequest(ProtobufTaskId parentTaskId) { + return new BanParentTaskRequest(parentTaskId); + } + + private BanParentTaskRequest(ProtobufTaskId parentTaskId, String reason, boolean waitForCompletion) { + this.parentTaskId = parentTaskId; + this.ban = true; + this.reason = reason; + this.waitForCompletion = waitForCompletion; + } + + private BanParentTaskRequest(ProtobufTaskId parentTaskId) { + this.parentTaskId = parentTaskId; + this.ban = false; + this.reason = null; + this.waitForCompletion = false; + } + + private BanParentTaskRequest(byte[] in) throws IOException { + super(in); + parentTaskId = null; + ban = false; + reason = null; + waitForCompletion = false; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + super.writeTo(out); + } + } + + private class BanParentRequestHandler implements ProtobufTransportRequestHandler { + @Override + public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel, ProtobufTask task) + throws Exception { + if (request.ban) { + logger.debug( + "Received ban for the parent [{}] on the node [{}], reason: [{}]", + request.parentTaskId, + localNodeId(), + request.reason + ); + final List childTasks = taskManager.setBanProtobuf(request.parentTaskId, request.reason); + final GroupedActionListener listener = new GroupedActionListener<>( + ActionListener.map( + new ProtobufChannelActionListener<>(channel, BAN_PARENT_ACTION_NAME, request), + r -> TransportResponse.Empty.INSTANCE + ), + childTasks.size() + 1 + ); + for (ProtobufCancellableTask childTask : childTasks) { + cancelTaskAndDescendants(childTask, request.reason, request.waitForCompletion, listener); + } + listener.onResponse(null); + } else { + logger.debug("Removing ban for the parent [{}] on the node [{}]", request.parentTaskId, localNodeId()); + taskManager.removeBanProtobuf(request.parentTaskId); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java new file mode 100644 index 0000000000000..ec20aa83ebe12 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskId.java @@ -0,0 +1,83 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +*/ + +package org.opensearch.tasks; + +import org.opensearch.common.Strings; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.tasks.proto.TaskIdProto; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Task id that consists of node id and id of the task on the node +* +* @opensearch.internal +*/ +public final class ProtobufTaskId implements ProtobufWriteable { + + public static final ProtobufTaskId EMPTY_TASK_ID = new ProtobufTaskId(); + + private final TaskIdProto.TaskId taskId; + + public ProtobufTaskId(String nodeId, long id) { + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId(nodeId).setId(id).build(); + } + + /** + * Builds {@link #EMPTY_TASK_ID}. 
+ */ + private ProtobufTaskId() { + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId("").setId(-1L).build(); + } + + public ProtobufTaskId(String taskId) { + if (org.opensearch.core.common.Strings.hasLength(taskId) && "unset".equals(taskId) == false) { + String[] s = Strings.split(taskId, ":"); + if (s == null || s.length != 2) { + throw new IllegalArgumentException("malformed task id " + taskId); + } + String nodeId = s[0]; + try { + long id = Long.parseLong(s[1]); + this.taskId = TaskIdProto.TaskId.newBuilder().setNodeId(nodeId).setId(id).build(); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("malformed task id " + taskId, ex); + } + } else { + this.taskId = EMPTY_TASK_ID.taskId; + } + } + + /** + * Read a {@linkplain ProtobufTaskId} from a stream. {@linkplain ProtobufTaskId} has this rather than the usual constructor that takes a + * {@linkplain byte[]} so we can return the {@link #EMPTY_TASK_ID} without allocating. + */ + public ProtobufTaskId(byte[] in) throws IOException { + this.taskId = TaskIdProto.TaskId.parseFrom(in); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.taskId.toByteArray()); + } + + public String getNodeId() { + return this.taskId.getNodeId(); + } + + public long getId() { + return this.taskId.getId(); + } + + public boolean isSet() { + return this.taskId.getId() != -1L; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java new file mode 100644 index 0000000000000..32ecfb818b156 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskInfo.java @@ -0,0 +1,205 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +*/ + +package org.opensearch.tasks; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Information about a currently running task. +*
<p>
+* Tasks are used for communication with transport actions. As a result, they can contain callback +* references as well as mutable state. That makes it impractical to send tasks over transport channels +* and use in APIs. Instead, immutable and writeable ProtobufTaskInfo objects are used to represent +* snapshot information about currently running tasks. +* +* @opensearch.internal +*/ +public final class ProtobufTaskInfo implements ProtobufWriteable, ToXContentFragment { + private final ProtobufTaskId taskId; + + private final String type; + + private final String action; + + private final String description; + + private final long startTime; + + private final long runningTimeNanos; + + private final ProtobufTask.Status status; + + private final boolean cancellable; + + private final boolean cancelled; + + private final ProtobufTaskId parentTaskId; + + private final Map headers; + + private final ProtobufTaskResourceStats resourceStats; + + public ProtobufTaskInfo( + ProtobufTaskId taskId, + String type, + String action, + String description, + ProtobufTask.Status status, + long startTime, + long runningTimeNanos, + boolean cancellable, + boolean cancelled, + ProtobufTaskId parentTaskId, + Map headers, + ProtobufTaskResourceStats resourceStats + ) { + if (cancellable == false && cancelled == true) { + throw new IllegalArgumentException("task cannot be cancelled"); + } + this.taskId = taskId; + this.type = type; + this.action = action; + this.description = description; + this.status = status; + this.startTime = startTime; + this.runningTimeNanos = runningTimeNanos; + this.cancellable = cancellable; + this.cancelled = cancelled; + this.parentTaskId = parentTaskId; + this.headers = headers; + this.resourceStats = resourceStats; + } + + public ProtobufTaskId getTaskId() { + return taskId; + } + + public long getId() { + return taskId.getId(); + } + + public String getType() { + return type; + } + + public String getAction() { + return action; + } + + public String getDescription() { + return description; + } + + /** + * The status of the running task. Only available if TaskInfos were build + * with the detailed flag. 
+ */ + public ProtobufTask.Status getStatus() { + return status; + } + + /** + * Returns the task start time + */ + public long getStartTime() { + return startTime; + } + + /** + * Returns the task running time + */ + public long getRunningTimeNanos() { + return runningTimeNanos; + } + + /** + * Returns true if the task supports cancellation + */ + public boolean isCancellable() { + return cancellable; + } + + /** + * Returns true if the task has been cancelled + */ + public boolean isCancelled() { + return cancelled; + } + + /** + * Returns the parent task id + */ + public ProtobufTaskId getParentTaskId() { + return parentTaskId; + } + + /** + * Returns the task headers + */ + public Map getHeaders() { + return headers; + } + + /** + * Returns the task resource information + */ + public ProtobufTaskResourceStats getResourceStats() { + return resourceStats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("node", taskId.getNodeId()); + builder.field("id", taskId.getId()); + builder.field("type", type); + builder.field("action", action); + if (status != null) { + builder.field("status", status, params); + } + if (description != null) { + builder.field("description", description); + } + builder.timeField("start_time_in_millis", "start_time", startTime); + if (builder.humanReadable()) { + builder.field("running_time", new TimeValue(runningTimeNanos, TimeUnit.NANOSECONDS).toString()); + } + builder.field("running_time_in_nanos", runningTimeNanos); + builder.field("cancellable", cancellable); + builder.field("cancelled", cancelled); + if (parentTaskId.isSet()) { + builder.field("parent_task_id", parentTaskId.toString()); + } + builder.startObject("headers"); + for (Map.Entry attribute : headers.entrySet()) { + builder.field(attribute.getKey(), attribute.getValue()); + } + builder.endObject(); + if (resourceStats != null) { + builder.startObject("resource_stats"); + resourceStats.toXContent(builder, params); + builder.endObject(); + } + return builder; + } + + @Override + public void writeTo(OutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java new file mode 100644 index 0000000000000..67602b0f90b47 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskListener.java @@ -0,0 +1,40 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +/** + * Listener for Task success or failure. +* +* @opensearch.internal +*/ +public interface ProtobufTaskListener { + /** + * Handle task response. This response may constitute a failure or a success + * but it is up to the listener to make that decision. + * + * @param task + * the task being executed. May be null if the action doesn't + * create a task + * @param response + * the response from the action that executed the task + */ + void onResponse(ProtobufTask task, Response response); + + /** + * A failure caused by an exception at some phase of the task. + * + * @param task + * the task being executed. 
May be null if the action doesn't + * create a task + * @param e + * the failure + */ + void onFailure(ProtobufTask task, Exception e); + +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java new file mode 100644 index 0000000000000..3de0ff44afbe0 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceStats.java @@ -0,0 +1,66 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +*/ + +package org.opensearch.tasks; + +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.tasks.proto.TaskResourceStatsProto; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; + +/** + * Resource information about a currently running task. +*
<p>
+* Writeable TaskResourceStats objects are used to represent resource +* snapshot information about currently running task. +* +* @opensearch.internal +*/ +public class ProtobufTaskResourceStats implements ProtobufWriteable, ToXContentFragment { + private final TaskResourceStatsProto.TaskResourceStats taskResourceStats; + + public ProtobufTaskResourceStats(Map resourceUsage) { + this.taskResourceStats = TaskResourceStatsProto.TaskResourceStats.newBuilder().putAllResourceUsage(resourceUsage).build(); + } + + /** + * Read from a stream. + */ + public ProtobufTaskResourceStats(byte[] in) throws IOException { + this.taskResourceStats = TaskResourceStatsProto.TaskResourceStats.parseFrom(in); + } + + public Map getResourceUsageInfo() { + return this.taskResourceStats.getResourceUsageMap(); + } + + @Override + public void writeTo(OutputStream out) throws IOException { + out.write(this.taskResourceStats.toByteArray()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Map resourceUsage = this.taskResourceStats + .getResourceUsageMap(); + for (Map.Entry resourceUsageEntry : resourceUsage.entrySet()) { + builder.startObject(resourceUsageEntry.getKey()); + if (resourceUsageEntry.getValue() != null) { + builder.field("cpu_time_in_nanos", resourceUsageEntry.getValue().getCpuTimeInNanos()); + builder.field("memory_in_bytes", resourceUsageEntry.getValue().getMemoryInBytes()); + } + builder.endObject(); + } + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java new file mode 100644 index 0000000000000..3e97e5c6c678e --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResourceTrackingService.java @@ -0,0 +1,271 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +*/ + +package org.opensearch.tasks; + +import com.sun.management.ThreadMXBean; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; + +/** + * Service that helps track resource usage of tasks running on a node. 
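+ * <p>
+ * Expected call sequence (a sketch inferred from the methods below, not documented upstream):
+ * {@code startTracking(task)} when the task is registered, then {@code taskExecutionStartedOnThread} and
+ * {@code taskExecutionFinishedOnThread} around each runnable, and finally {@code stopTracking(task)} when
+ * the task is unregistered.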
+*/
+@SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes")
+public class ProtobufTaskResourceTrackingService implements RunnableTaskExecutionListener {
+
+    private static final Logger logger = LogManager.getLogger(ProtobufTaskResourceTrackingService.class);
+
+    public static final Setting<Boolean> TASK_RESOURCE_TRACKING_ENABLED = Setting.boolSetting(
+        "task_resource_tracking.enabled",
+        true,
+        Setting.Property.Dynamic,
+        Setting.Property.NodeScope
+    );
+    public static final String TASK_ID = "TASK_ID";
+
+    private static final ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
+
+    private final ConcurrentMapLong<ProtobufTask> resourceAwareTasks = ConcurrentCollections
+        .newConcurrentMapLongWithAggressiveConcurrency();
+    private final List<TaskCompletionListener> taskCompletionListeners = new ArrayList<>();
+    private final ThreadPool threadPool;
+    private volatile boolean taskResourceTrackingEnabled;
+
+    @Inject
+    public ProtobufTaskResourceTrackingService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
+        this.taskResourceTrackingEnabled = TASK_RESOURCE_TRACKING_ENABLED.get(settings);
+        this.threadPool = threadPool;
+        clusterSettings.addSettingsUpdateConsumer(TASK_RESOURCE_TRACKING_ENABLED, this::setTaskResourceTrackingEnabled);
+    }
+
+    public void setTaskResourceTrackingEnabled(boolean taskResourceTrackingEnabled) {
+        this.taskResourceTrackingEnabled = taskResourceTrackingEnabled;
+    }
+
+    public boolean isTaskResourceTrackingEnabled() {
+        return taskResourceTrackingEnabled;
+    }
+
+    public boolean isTaskResourceTrackingSupported() {
+        return threadMXBean.isThreadAllocatedMemorySupported() && threadMXBean.isThreadAllocatedMemoryEnabled();
+    }
+
+    /**
+     * Executes logic only if the task supports resource tracking and the resource tracking setting is enabled.
+     * <p>
+ * 1. Starts tracking the task in map of resourceAwareTasks. + * 2. Adds ProtobufTask Id in thread context to make sure it's available while task is processed across multiple threads. + * + * @param task for which resources needs to be tracked + * @return Autocloseable stored context to restore ThreadContext to the state before this method changed it. + */ + public ThreadContext.StoredContext startTracking(ProtobufTask task) { + if (task.supportsResourceTracking() == false + || isTaskResourceTrackingEnabled() == false + || isTaskResourceTrackingSupported() == false) { + return () -> {}; + } + + logger.debug("Starting resource tracking for task: {}", task.getId()); + resourceAwareTasks.put(task.getId(), task); + return addTaskIdToThreadContext(task); + } + + /** + * Stops tracking task registered earlier for tracking. + *
<p>
+ * It doesn't have a feature-enabled check, to avoid issues if the setting was disabled while the task was in progress.
+ * <p>
+ * It's also responsible to stop tracking the current thread's resources against this task if not already done. + * This happens when the thread executing the request logic itself calls the unregister method. So in this case unregister + * happens before runnable finishes. + * + * @param task task which has finished and doesn't need resource tracking. + */ + public void stopTracking(ProtobufTask task) { + logger.debug("Stopping resource tracking for task: {}", task.getId()); + try { + if (isCurrentThreadWorkingOnTask(task)) { + taskExecutionFinishedOnThread(task.getId(), Thread.currentThread().getId()); + } + } catch (Exception e) { + logger.warn("Failed while trying to mark the task execution on current thread completed.", e); + assert false; + } finally { + resourceAwareTasks.remove(task.getId()); + } + + List exceptions = new ArrayList<>(); + for (TaskCompletionListener listener : taskCompletionListeners) { + try { + listener.onTaskCompleted(task); + } catch (Exception e) { + exceptions.add(e); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); + } + + /** + * Refreshes the resource stats for the tasks provided by looking into which threads are actively working on these + * and how much resources these have consumed till now. + * + * @param tasks for which resource stats needs to be refreshed. + */ + public void refreshResourceStats(ProtobufTask... tasks) { + if (isTaskResourceTrackingEnabled() == false || isTaskResourceTrackingSupported() == false) { + return; + } + + for (ProtobufTask task : tasks) { + if (task.supportsResourceTracking() && resourceAwareTasks.containsKey(task.getId())) { + refreshResourceStats(task); + } + } + } + + private void refreshResourceStats(ProtobufTask resourceAwareTask) { + try { + logger.debug("Refreshing resource stats for ProtobufTask: {}", resourceAwareTask.getId()); + List threadsWorkingOnTask = getThreadsWorkingOnTask(resourceAwareTask); + threadsWorkingOnTask.forEach( + threadId -> resourceAwareTask.updateThreadResourceStats(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)) + ); + } catch (IllegalStateException e) { + logger.debug("Resource stats already updated."); + } + + } + + /** + * Called when a thread starts working on a task's runnable. + * + * @param taskId of the task for which runnable is starting + * @param threadId of the thread which will be executing the runnable and we need to check resource usage for this + * thread + */ + @Override + public void taskExecutionStartedOnThread(long taskId, long threadId) { + try { + final ProtobufTask task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("ProtobufTask execution started on thread. ProtobufTask: {}, Thread: {}", taskId, threadId); + task.startThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution started for task: [{}]", taskId), e); + assert false; + } + + } + + /** + * Called when a thread finishes working on a task's runnable. + * + * @param taskId of the task for which runnable is complete + * @param threadId of the thread which executed the runnable and we need to check resource usage for this thread + */ + @Override + public void taskExecutionFinishedOnThread(long taskId, long threadId) { + try { + final ProtobufTask task = resourceAwareTasks.get(taskId); + if (task != null) { + logger.debug("ProtobufTask execution finished on thread. 
ProtobufTask: {}, Thread: {}", taskId, threadId); + task.stopThreadResourceTracking(threadId, WORKER_STATS, getResourceUsageMetricsForThread(threadId)); + } + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Failed to mark thread execution finished for task: [{}]", taskId), e); + assert false; + } + } + + public Map getResourceAwareTasks() { + return Collections.unmodifiableMap(resourceAwareTasks); + } + + private ResourceUsageMetric[] getResourceUsageMetricsForThread(long threadId) { + ResourceUsageMetric currentMemoryUsage = new ResourceUsageMetric( + ResourceStats.MEMORY, + threadMXBean.getThreadAllocatedBytes(threadId) + ); + ResourceUsageMetric currentCPUUsage = new ResourceUsageMetric(ResourceStats.CPU, threadMXBean.getThreadCpuTime(threadId)); + return new ResourceUsageMetric[] { currentMemoryUsage, currentCPUUsage }; + } + + private boolean isCurrentThreadWorkingOnTask(ProtobufTask task) { + long threadId = Thread.currentThread().getId(); + List threadResourceInfos = task.getResourceStats().getOrDefault(threadId, Collections.emptyList()); + + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + return true; + } + } + return false; + } + + private List getThreadsWorkingOnTask(ProtobufTask task) { + List activeThreads = new ArrayList<>(); + for (List threadResourceInfos : task.getResourceStats().values()) { + for (ThreadResourceInfo threadResourceInfo : threadResourceInfos) { + if (threadResourceInfo.isActive()) { + activeThreads.add(threadResourceInfo.getThreadId()); + } + } + } + return activeThreads; + } + + /** + * Adds ProtobufTask Id in the ThreadContext. + *
<p>
+ * Stashes the existing ThreadContext and preserves all the existing ThreadContext's data in the new ThreadContext + * as well. + * + * @param task for which ProtobufTask Id needs to be added in ThreadContext. + * @return StoredContext reference to restore the ThreadContext from which we created a new one. + * Caller can call context.restore() to get the existing ThreadContext back. + */ + private ThreadContext.StoredContext addTaskIdToThreadContext(ProtobufTask task) { + ThreadContext threadContext = threadPool.getThreadContext(); + ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true, Collections.singletonList(TASK_ID)); + threadContext.putTransient(TASK_ID, task.getId()); + return storedContext; + } + + /** + * Listener that gets invoked when a task execution completes. + */ + public interface TaskCompletionListener { + void onTaskCompleted(ProtobufTask task); + } + + public void addTaskCompletionListener(TaskCompletionListener listener) { + this.taskCompletionListeners.add(listener); + } +} diff --git a/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java new file mode 100644 index 0000000000000..633eaba2364c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/ProtobufTaskResult.java @@ -0,0 +1,158 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +*/ + +package org.opensearch.tasks; + +import org.opensearch.OpenSearchException; +import org.opensearch.client.Requests; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.ProtobufWriteable; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentHelper; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Objects.requireNonNull; +import static org.opensearch.common.xcontent.XContentHelper.convertToMap; + +/** +* Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while +* tasks with stored result will have either a {@link #getError()} or {@link #getResponse()}. +* +* @opensearch.internal +*/ +public final class ProtobufTaskResult implements ProtobufWriteable, ToXContentObject { + private final boolean completed; + private final ProtobufTaskInfo task; + @Nullable + private final BytesReference error; + @Nullable + private final BytesReference response; + + /** + * Construct a {@linkplain TaskResult} for a task for which we don't have a result or error. That usually means that the task + * is incomplete, but it could also mean that we waited for the task to complete but it didn't save any error information. + */ + public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task) { + this(completed, task, null, null); + } + + /** + * Construct a {@linkplain TaskResult} for a task that completed with an error. 
+ */ + public ProtobufTaskResult(ProtobufTaskInfo task, Exception error) throws IOException { + this(true, task, toXContent(error), null); + } + + /** + * Construct a {@linkplain ProtobufTaskResult} for a task that completed successfully. + */ + public ProtobufTaskResult(ProtobufTaskInfo task, ToXContent response) throws IOException { + this(true, task, null, XContentHelper.toXContent(response, Requests.INDEX_CONTENT_TYPE, true)); + } + + public ProtobufTaskResult(boolean completed, ProtobufTaskInfo task, @Nullable BytesReference error, @Nullable BytesReference result) { + this.completed = completed; + this.task = requireNonNull(task, "task is required"); + this.error = error; + this.response = result; + } + + /** + * Get the task that this wraps. + */ + public ProtobufTaskInfo getTask() { + return task; + } + + /** + * Get the error that finished this task. Will return null if the task didn't finish with an error, it hasn't yet finished, or didn't + * store its result. + */ + public BytesReference getError() { + return error; + } + + /** + * Convert {@link #getError()} from XContent to a Map for easy processing. Will return an empty map if the task didn't finish with an + * error, hasn't yet finished, or didn't store its result. + */ + public Map getErrorAsMap() { + if (error == null) { + return emptyMap(); + } + return convertToMap(error, false).v2(); + } + + /** + * Get the response that this task finished with. Will return null if the task was finished by an error, it hasn't yet finished, or + * didn't store its result. + */ + public BytesReference getResponse() { + return response; + } + + /** + * Convert {@link #getResponse()} from XContent to a Map for easy processing. Will return an empty map if the task was finished with an + * error, hasn't yet finished, or didn't store its result. 
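+     * For instance, a stored response of {@code {"acknowledged": true}} (a hypothetical stored result) comes
+     * back as a single-entry map.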
+ */ + public Map getResponseAsMap() { + if (response == null) { + return emptyMap(); + } + return convertToMap(response, false).v2(); + } + + public boolean isCompleted() { + return completed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + return builder.endObject(); + } + + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("completed", completed); + builder.startObject("task"); + task.toXContent(builder, params); + builder.endObject(); + if (error != null) { + XContentHelper.writeRawField("error", error, builder, params); + } + if (response != null) { + XContentHelper.writeRawField("response", response, builder, params); + } + return builder; + } + + private static BytesReference toXContent(Exception error) throws IOException { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { + builder.startObject(); + OpenSearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, error); + builder.endObject(); + return BytesReference.bytes(builder); + } + } + + @Override + public void writeTo(OutputStream out) throws IOException { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'writeTo'"); + } +} diff --git a/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java new file mode 100644 index 0000000000000..2e20b818401be --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java @@ -0,0 +1,58 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. 
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java
new file mode 100644
index 0000000000000..2e20b818401be
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufEmptyTransportResponseHandler.java
@@ -0,0 +1,58 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import java.io.IOException;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.threadpool.ThreadPool;
+
+/**
+ * Handler for an empty transport response.
+ *
+ * @opensearch.internal
+ */
+public class ProtobufEmptyTransportResponseHandler implements TransportResponseHandler<TransportResponse.Empty> {
+
+    public static final ProtobufEmptyTransportResponseHandler INSTANCE_SAME = new ProtobufEmptyTransportResponseHandler(
+        ThreadPool.Names.SAME
+    );
+
+    private final String executor;
+
+    public ProtobufEmptyTransportResponseHandler(String executor) {
+        this.executor = executor;
+    }
+
+    @Override
+    public void handleResponse(TransportResponse.Empty response) {}
+
+    @Override
+    public String executor() {
+        return executor;
+    }
+
+    @Override
+    public TransportResponse.Empty read(StreamInput in) throws IOException {
+        // Stream-based reads are not used on the protobuf path; only read(byte[]) is supported.
+        throw new UnsupportedOperationException("Unimplemented method 'read'");
+    }
+
+    @Override
+    public void handleException(TransportException exp) {
+        // Not yet wired up in this partial merge.
+        throw new UnsupportedOperationException("Unimplemented method 'handleException'");
+    }
+
+    @Override
+    public TransportResponse.Empty read(byte[] in) throws IOException {
+        return TransportResponse.Empty.INSTANCE;
+    }
+}
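INSTANCE_SAME gives callers a shared handler for actions whose only acknowledgement is TransportResponse.Empty. A sketch of the intended call site (not part of the patch; the action name and the `transportService`, `node`, and `request` variables are hypothetical):

    transportService.sendRequest(
        node,
        "internal:example/ack",                              // hypothetical action
        request,
        ProtobufEmptyTransportResponseHandler.INSTANCE_SAME  // discard the empty ack on the calling thread
    );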
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java
new file mode 100644
index 0000000000000..ebdb1c5102fa2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufOutboundMessage.java
@@ -0,0 +1,398 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.opensearch.Version;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.server.proto.ClusterStateRequestProto.ClusterStateRequest;
+import org.opensearch.server.proto.ClusterStateResponseProto.ClusterStateResponse;
+import org.opensearch.server.proto.NodesInfoProto.NodesInfo;
+import org.opensearch.server.proto.NodesInfoRequestProto.NodesInfoRequest;
+import org.opensearch.server.proto.NodesStatsProto.NodesStats;
+import org.opensearch.server.proto.NodesStatsRequestProto.NodesStatsRequest;
+import org.opensearch.server.proto.QueryFetchSearchResultProto.QueryFetchSearchResult;
+import org.opensearch.server.proto.ShardSearchRequestProto.ShardSearchRequest;
+import org.opensearch.server.proto.MessageProto.OutboundInboundMessage;
+import org.opensearch.server.proto.MessageProto.OutboundInboundMessage.Header;
+import org.opensearch.server.proto.MessageProto.OutboundInboundMessage.ResponseHandlersList;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Outbound data as a protobuf message. One constructor per payload type carried in the
+ * {@code OutboundInboundMessage} oneof; all of them share the same header and thread-context plumbing.
+ *
+ * @opensearch.internal
+ */
+public class ProtobufOutboundMessage {
+
+    private static final byte[] PREFIX = { (byte) 'E', (byte) 'S' };
+
+    private final OutboundInboundMessage message;
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        ClusterStateRequest clusterStateReq,
+        String[] features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, Arrays.asList(features), action).setClusterStateRequest(
+            clusterStateReq
+        ).build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        ClusterStateResponse clusterStateRes,
+        Set<String> features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, features, action).setClusterStateResponse(clusterStateRes)
+            .build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        NodesInfoRequest nodesInfoReq,
+        String[] features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, Arrays.asList(features), action).setNodesInfoRequest(
+            nodesInfoReq
+        ).build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        NodesInfo nodesInfoRes,
+        Set<String> features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, features, action).setNodesInfoResponse(nodesInfoRes).build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        NodesStatsRequest nodesStatsReq,
+        String[] features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, Arrays.asList(features), action).setNodesStatsRequest(
+            nodesStatsReq
+        ).build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        NodesStats nodesStatsRes,
+        Set<String> features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, features, action).setNodesStatsResponse(nodesStatsRes)
+            .build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        ShardSearchRequest shardSearchReq,
+        String[] features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, Arrays.asList(features), action).setShardSearchRequest(
+            shardSearchReq
+        ).build();
+    }
+
+    public ProtobufOutboundMessage(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        QueryFetchSearchResult queryFetchSearchResult,
+        Set<String> features,
+        String action
+    ) {
+        this.message = baseBuilder(requestId, status, version, threadContext, features, action).setQueryFetchSearchResult(
+            queryFetchSearchResult
+        ).build();
+    }
+
+    /**
+     * Common header, thread-context and identity fields shared by every payload-specific constructor.
+     */
+    private static OutboundInboundMessage.Builder baseBuilder(
+        long requestId,
+        byte[] status,
+        Version version,
+        ThreadContext threadContext,
+        Iterable<String> features,
+        String action
+    ) {
+        Header header = Header.newBuilder()
+            .addAllPrefix(Arrays.asList(ByteString.copyFrom(PREFIX)))
+            .setRequestId(requestId)
+            .setStatus(ByteString.copyFrom(status))
+            .setVersionId(version.id)
+            .build();
+        Map<String, String> requestHeaders = threadContext.getHeaders();
+        Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
+        Map<String, ResponseHandlersList> responseHandlers = new HashMap<>();
+        for (Map.Entry<String, List<String>> entry : responseHeaders.entrySet()) {
+            responseHandlers.put(entry.getKey(), ResponseHandlersList.newBuilder().addAllSetOfResponseHandlers(entry.getValue()).build());
+        }
+        return OutboundInboundMessage.newBuilder()
+            .setHeader(header)
+            .putAllRequestHeaders(requestHeaders)
+            .putAllResponseHandlers(responseHandlers)
+            .setVersion(version.toString())
+            .setStatus(ByteString.copyFrom(status))
+            .setRequestId(requestId)
+            .setAction(action)
+            .addAllFeatures(features)
+            .setIsProtobuf(true);
+    }
+
+    public ProtobufOutboundMessage(byte[] data) throws InvalidProtocolBufferException {
+        this.message = OutboundInboundMessage.parseFrom(data);
+    }
+
+    public void writeTo(OutputStream out) throws IOException {
+        out.write(this.message.toByteArray());
+    }
+
+    public OutboundInboundMessage getMessage() {
+        return this.message;
+    }
+
+    @Override
+    public String toString() {
+        return "ProtobufOutboundMessage [message=" + message + "]";
+    }
+
+    public Header getHeader() {
+        return this.message.getHeader();
+    }
+
+    public Map<String, String> getRequestHeaders() {
+        return this.message.getRequestHeadersMap();
+    }
+
+    public Map<String, Set<String>> getResponseHandlers() {
+        Map<String, ResponseHandlersList> responseHandlers = this.message.getResponseHandlersMap();
+        Map<String, Set<String>> responseHandlersMap = new HashMap<>();
+        for (Map.Entry<String, ResponseHandlersList> entry : responseHandlers.entrySet()) {
+            Set<String> setOfResponseHandlers = entry.getValue().getSetOfResponseHandlersList().stream().collect(Collectors.toSet());
+            responseHandlersMap.put(entry.getKey(), setOfResponseHandlers);
+        }
+        return responseHandlersMap;
+    }
+
+    public boolean isProtobuf() {
+        return this.message.getIsProtobuf();
+    }
+}
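Because the frame is a single protobuf message, a round trip is serialize-then-parse. A sketch (not part of the patch; assumes it runs where IOException may propagate, with `Settings` and `ByteArrayOutputStream` imported):

    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    ClusterStateRequest payload = ClusterStateRequest.newBuilder().setMetadata(true).setNodes(true).build();
    ProtobufOutboundMessage outbound = new ProtobufOutboundMessage(
        42L, new byte[] { 0 }, Version.CURRENT, threadContext, payload, new String[0], "cluster:monitor/state"
    );
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    outbound.writeTo(bytes);
    ProtobufOutboundMessage inbound = new ProtobufOutboundMessage(bytes.toByteArray());
    assert inbound.isProtobuf() && inbound.getHeader().getRequestId() == 42L;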
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.lease.Releasables;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.common.io.stream.ProtobufWriteable;
+import org.opensearch.tasks.ProtobufCancellableTask;
+import org.opensearch.tasks.ProtobufTask;
+import org.opensearch.tasks.TaskManager;
+
+import java.io.IOException;
+
+/**
+ * Registry for OpenSearch protobuf request handlers.
+ *
+ * @opensearch.internal
+ */
+public final class ProtobufRequestHandlerRegistry<Request extends TransportRequest> {
+
+    private final String action;
+    private final ProtobufTransportRequestHandler<Request> handler;
+    private final boolean forceExecution;
+    private final boolean canTripCircuitBreaker;
+    private final String executor;
+    private final TaskManager taskManager;
+    private final ProtobufWriteable.Reader<Request> requestReader;
+
+    public ProtobufRequestHandlerRegistry(
+        String action,
+        ProtobufWriteable.Reader<Request> requestReader,
+        TaskManager taskManager,
+        ProtobufTransportRequestHandler<Request> handler,
+        String executor,
+        boolean forceExecution,
+        boolean canTripCircuitBreaker
+    ) {
+        this.action = action;
+        this.requestReader = requestReader;
+        this.handler = handler;
+        this.forceExecution = forceExecution;
+        this.canTripCircuitBreaker = canTripCircuitBreaker;
+        this.executor = executor;
+        this.taskManager = taskManager;
+    }
+
+    public String getAction() {
+        return action;
+    }
+
+    public Request newRequest(byte[] in) throws IOException {
+        return requestReader.read(in);
+    }
+
+    public void processMessageReceived(Request request, TransportChannel channel) throws Exception {
+        final ProtobufTask task = taskManager.registerProtobuf(channel.getChannelType(), action, request);
+        ThreadContext.StoredContext contextToRestore = taskManager.protobufTaskExecutionStarted(task);
+
+        Releasable unregisterTask = () -> taskManager.unregisterProtobufTask(task);
+        try {
+            if (channel instanceof TcpTransportChannel && task instanceof ProtobufCancellableTask) {
+                final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel();
+                final Releasable stopTracking = taskManager.startProtobufTrackingCancellableChannelTask(
+                    tcpChannel,
+                    (ProtobufCancellableTask) task
+                );
+                unregisterTask = Releasables.wrap(unregisterTask, stopTracking);
+            }
+            final TaskTransportChannel taskTransportChannel = new TaskTransportChannel(channel, unregisterTask);
+            handler.messageReceived(request, taskTransportChannel, task);
+            unregisterTask = null;
+        } finally {
+            Releasables.close(unregisterTask);
+            contextToRestore.restore();
+        }
+    }
+
+    public boolean isForceExecution() {
+        return forceExecution;
+    }
+
+    public boolean canTripCircuitBreaker() {
+        return canTripCircuitBreaker;
+    }
+
+    public String getExecutor() {
+        return executor;
+    }
+
+    public ProtobufTransportRequestHandler<Request> getHandler() {
+        return handler;
+    }
+
+    @Override
+    public String toString() {
+        return handler.toString();
+    }
+
+    public static <Request extends TransportRequest> ProtobufRequestHandlerRegistry<Request> replaceHandler(
+        ProtobufRequestHandlerRegistry<Request> registry,
+        ProtobufTransportRequestHandler<Request> handler
+    ) {
+        return new ProtobufRequestHandlerRegistry<>(
+            registry.action,
+            registry.requestReader,
+            registry.taskManager,
+            handler,
+            registry.executor,
+            registry.forceExecution,
+            registry.canTripCircuitBreaker
+        );
+    }
+}
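Wiring a handler through the registry then looks like the sketch below (not part of the patch; `MyRequest`, `taskManager`, and `rawBytes` are hypothetical, and `MyRequest::new` stands in for a byte[]-based reader):

    ProtobufRequestHandlerRegistry<MyRequest> registry = new ProtobufRequestHandlerRegistry<>(
        "internal:example/action",          // hypothetical action name
        MyRequest::new,                     // ProtobufWriteable.Reader<MyRequest> from raw bytes
        taskManager,
        (request, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE),
        ThreadPool.Names.SAME,
        false,                              // forceExecution
        true                                // canTripCircuitBreaker
    );
    MyRequest request = registry.newRequest(rawBytes);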
diff --git a/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java
new file mode 100644
index 0000000000000..3feb88696a51a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/transport/ProtobufTransportRequestHandler.java
@@ -0,0 +1,21 @@
+/*
+* SPDX-License-Identifier: Apache-2.0
+*
+* The OpenSearch Contributors require contributions made to
+* this file be licensed under the Apache-2.0 license or a
+* compatible open source license.
+*/
+
+package org.opensearch.transport;
+
+import org.opensearch.tasks.ProtobufTask;
+
+/**
+ * Handles transport requests on the protobuf path.
+ *
+ * @opensearch.internal
+ */
+public interface ProtobufTransportRequestHandler<T extends TransportRequest> {
+
+    void messageReceived(T request, TransportChannel channel, ProtobufTask task) throws Exception;
+}
diff --git a/server/src/main/proto/server/ClusterStateRequestProto.proto b/server/src/main/proto/server/ClusterStateRequestProto.proto
new file mode 100644
index 0000000000000..7326429ae0fbd
--- /dev/null
+++ b/server/src/main/proto/server/ClusterStateRequestProto.proto
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "ClusterStateRequestProto";
+
+message ClusterStateRequest {
+  bool routingTable = 1;
+  bool nodes = 2;
+  bool metadata = 3;
+  bool blocks = 4;
+  bool customs = 5;
+  int64 waitForMetadataVersion = 6;
+  string waitForTimeout = 7;
+  repeated string indices = 8;
+}
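On the Java side, protoc generates a builder for this message under the ClusterStateRequestProto outer class; a sketch of producing the wire bytes (values are illustrative):

    ClusterStateRequestProto.ClusterStateRequest req = ClusterStateRequestProto.ClusterStateRequest.newBuilder()
        .setMetadata(true)
        .setNodes(true)
        .setWaitForTimeout("30s")   // timeouts travel as strings in this schema
        .addIndices("logs-*")
        .build();
    byte[] wire = req.toByteArray();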
diff --git a/server/src/main/proto/server/ClusterStateResponseProto.proto b/server/src/main/proto/server/ClusterStateResponseProto.proto
new file mode 100644
index 0000000000000..554a03b1975ab
--- /dev/null
+++ b/server/src/main/proto/server/ClusterStateResponseProto.proto
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "ClusterStateResponseProto";
+
+message ClusterStateResponse {
+  string clusterName = 1;
+  ClusterState clusterState = 2;
+  bool waitForTimedOut = 3;
+
+  message ClusterState {
+    string clusterName = 1;
+    int64 version = 2;
+    string stateUUID = 3;
+    DiscoveryNodes nodes = 4;
+
+    message DiscoveryNodes {
+      repeated Node allNodes = 1;
+      string clusterManagerNodeId = 2;
+      string localNodeId = 3;
+      string minNonClientNodeVersion = 4;
+      string maxNonClientNodeVersion = 5;
+      string maxNodeVersion = 6;
+      string minNodeVersion = 7;
+
+      message Node {
+        string nodeName = 1;
+        string nodeId = 2;
+        string ephemeralId = 3;
+        string hostName = 4;
+        string hostAddress = 5;
+        string transportAddress = 6;
+        map<string, string> attributes = 7;
+        repeated NodeRole roles = 8;
+        string version = 9;
+
+        message NodeRole {
+          bool isKnownRole = 1;
+          bool isDynamicRole = 2;
+          string roleName = 3;
+          string roleNameAbbreviation = 4;
+          bool canContainData = 5;
+        }
+      }
+    }
+  }
+}
diff --git a/server/src/main/proto/server/FetchSearchResultProto.proto b/server/src/main/proto/server/FetchSearchResultProto.proto
new file mode 100644
index 0000000000000..91fe87158583c
--- /dev/null
+++ b/server/src/main/proto/server/FetchSearchResultProto.proto
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/ShardSearchRequestProto.proto";
+
+option java_outer_classname = "FetchSearchResultProto";
+
+message FetchSearchResult {
+  ShardSearchContextId contextId = 1;
+  optional bytes hits = 2;
+}
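Reading the nested ClusterStateResponse defined above is plain generated-API traversal; a sketch (not part of the patch; `wire` is assumed to hold a serialized response):

    ClusterStateResponseProto.ClusterStateResponse res = ClusterStateResponseProto.ClusterStateResponse.parseFrom(wire);
    for (ClusterStateResponseProto.ClusterStateResponse.ClusterState.DiscoveryNodes.Node node
            : res.getClusterState().getNodes().getAllNodesList()) {
        System.out.println(node.getNodeId() + " -> " + node.getAttributesMap());
    }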
diff --git a/server/src/main/proto/server/MessageProto.proto b/server/src/main/proto/server/MessageProto.proto
new file mode 100644
index 0000000000000..190af96131a34
--- /dev/null
+++ b/server/src/main/proto/server/MessageProto.proto
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/ClusterStateRequestProto.proto";
+import "server/ClusterStateResponseProto.proto";
+import "server/NodesInfoRequestProto.proto";
+import "server/NodesInfoProto.proto";
+import "server/NodesStatsRequestProto.proto";
+import "server/NodesStatsProto.proto";
+import "server/ShardSearchRequestProto.proto";
+import "server/QueryFetchSearchResultProto.proto";
+
+option java_outer_classname = "MessageProto";
+
+message OutboundInboundMessage {
+  Header header = 1;
+  map<string, string> requestHeaders = 2;
+  map<string, ResponseHandlersList> responseHandlers = 3;
+  string version = 4;
+  repeated string features = 5;
+  bytes status = 6;
+  string action = 7;
+  int64 requestId = 8;
+  oneof message {
+    ClusterStateRequest clusterStateRequest = 9;
+    ClusterStateResponse clusterStateResponse = 10;
+    NodesInfoRequest nodesInfoRequest = 11;
+    NodesInfo nodesInfoResponse = 12;
+    NodesStatsRequest nodesStatsRequest = 13;
+    NodesStats nodesStatsResponse = 14;
+    ShardSearchRequest shardSearchRequest = 15;
+    QueryFetchSearchResult queryFetchSearchResult = 16;
+  }
+  bool isProtobuf = 17;
+
+  message Header {
+    repeated bytes prefix = 1;
+    int64 requestId = 2;
+    bytes status = 3;
+    int32 versionId = 4;
+  }
+
+  message ResponseHandlersList {
+    repeated string setOfResponseHandlers = 1;
+  }
+}
diff --git a/server/src/main/proto/server/NodesInfoProto.proto b/server/src/main/proto/server/NodesInfoProto.proto
new file mode 100644
index 0000000000000..af5f6853760a6
--- /dev/null
+++ b/server/src/main/proto/server/NodesInfoProto.proto
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "NodesInfoProto";
+
+message NodesInfo {
+  string nodeId = 1;
+  int64 processId = 2;
+  string address = 3;
+  string displayName = 4;
+  string hash = 5;
+  string jvmInfoVersion = 6;
+  string jvmHeapMax = 7;
+}
diff --git a/server/src/main/proto/server/NodesInfoRequestProto.proto b/server/src/main/proto/server/NodesInfoRequestProto.proto
new file mode 100644
index 0000000000000..1c02f7d2364d7
--- /dev/null
+++ b/server/src/main/proto/server/NodesInfoRequestProto.proto
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "NodesInfoRequestProto";
+
+message NodesInfoRequest {
+  repeated string requestedMetrics = 1;
+  string timeout = 2;
+}
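The oneof in OutboundInboundMessage above gives the receiver a single dispatch point; protoc generates a MessageCase enum for it. A dispatch sketch (not part of the patch; the `handle` overloads are hypothetical):

    // assumes: import org.opensearch.server.proto.MessageProto.OutboundInboundMessage;
    OutboundInboundMessage msg = OutboundInboundMessage.parseFrom(wireBytes);
    switch (msg.getMessageCase()) {
        case CLUSTERSTATEREQUEST:
            handle(msg.getClusterStateRequest());
            break;
        case NODESINFOREQUEST:
            handle(msg.getNodesInfoRequest());
            break;
        case MESSAGE_NOT_SET:
        default:
            // unknown or absent payload; fall back to the native transport path
    }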
diff --git a/server/src/main/proto/server/NodesInfoResponseProto.proto b/server/src/main/proto/server/NodesInfoResponseProto.proto
new file mode 100644
index 0000000000000..6ba56dbd44040
--- /dev/null
+++ b/server/src/main/proto/server/NodesInfoResponseProto.proto
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/NodesInfoProto.proto";
+
+option java_outer_classname = "NodesInfoResponseProto";
+
+message NodesInfoResponse {
+  string clusterName = 1;
+  repeated NodesInfo nodesInfo = 2;
+}
diff --git a/server/src/main/proto/server/NodesStatsProto.proto b/server/src/main/proto/server/NodesStatsProto.proto
new file mode 100644
index 0000000000000..6c040ccf4a33e
--- /dev/null
+++ b/server/src/main/proto/server/NodesStatsProto.proto
@@ -0,0 +1,147 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "NodesStatsProto";
+
+message NodesStats {
+  string nodeId = 1;
+  int64 jvmHeapUsed = 2;
+  string jvmHeapUsedPercent = 3;
+  int64 jvmUpTime = 4;
+  int64 diskTotal = 5;
+  int64 diskAvailable = 6;
+  int64 osMemUsed = 7;
+  string osMemUsedPercent = 8;
+  int64 osMemTotal = 9;
+  string osCpuPercent = 10;
+  repeated double osCpuLoadAverage = 11;
+  int64 processOpenFileDescriptors = 12;
+  int64 processMaxFileDescriptors = 13;
+  CompletionStats completionStats = 14;
+  FieldDataStats fieldDataStats = 15;
+  QueryCacheStats queryCacheStats = 16;
+  RequestCacheStats requestCacheStats = 17;
+  FlushStats flushStats = 18;
+  GetStats getStats = 19;
+  IndexingStats indexingStats = 20;
+  MergeStats mergeStats = 21;
+  RefreshStats refreshStats = 22;
+  ScriptStats scriptStats = 23;
+  SearchStats searchStats = 24;
+  SegmentStats segmentStats = 25;
+
+  message CompletionStats {
+    int64 size = 1;
+  }
+
+  message FieldDataStats {
+    int64 memSize = 1;
+    int64 evictions = 2;
+  }
+
+  message QueryCacheStats {
+    int64 ramBytesUsed = 1;
+    int64 hitCount = 2;
+    int64 missCount = 3;
+    int64 cacheCount = 4;
+    int64 cacheSize = 5;
+  }
+
+  message RequestCacheStats {
+    int64 memorySize = 1;
+    int64 evictions = 2;
+    int64 hitCount = 3;
+    int64 missCount = 4;
+  }
+
+  message FlushStats {
+    int64 total = 1;
+    int64 periodic = 2;
+    int64 totalTimeInMillis = 3;
+  }
+
+  message GetStats {
+    int64 existsCount = 1;
+    int64 existsTimeInMillis = 2;
+    int64 missingCount = 3;
+    int64 missingTimeInMillis = 4;
+    int64 current = 5;
+    int64 count = 6;
+    int64 time = 7;
+  }
+
+  message IndexingStats {
+    int64 indexCount = 1;
+    int64 indexTimeInMillis = 2;
+    int64 indexCurrent = 3;
+    int64 indexFailedCount = 4;
+    int64 deleteCount = 5;
+    int64 deleteTimeInMillis = 6;
+    int64 deleteCurrent = 7;
+    int64 noopUpdateCount = 8;
+    int64 throttleTimeInMillis = 9;
+    bool isThrottled = 10;
+  }
+
+  message MergeStats {
+    int64 total = 1;
+    int64 totalTimeInMillis = 2;
+    int64 totalNumDocs = 3;
+    int64 totalSizeInBytes = 4;
+    int64 current = 5;
+    int64 currentNumDocs = 6;
+    int64 currentSizeInBytes = 7;
+  }
+
+  message RefreshStats {
+    int64 total = 1;
+    int64 totalTimeInMillis = 2;
+    int64 externalTotal = 3;
+    int64 externalTotalTimeInMillis = 4;
+    int32 listeners = 5;
+  }
+
+  message ScriptStats {
+    int64 compilations = 1;
+    int64 cacheEvictions = 2;
+    int64 compilationLimitTriggered = 3;
+  }
+
+  message SearchStats {
+    int64 queryCount = 1;
+    int64 queryTimeInMillis = 2;
+    int64 queryCurrent = 3;
+    int64 fetchCount = 4;
+    int64 fetchTimeInMillis = 5;
+    int64 fetchCurrent = 6;
+    int64 scrollCount = 7;
+    int64 scrollTimeInMillis = 8;
+    int64 scrollCurrent = 9;
+    int64 suggestCount = 10;
+    int64 suggestTimeInMillis = 11;
+    int64 suggestCurrent = 12;
+    int64 pitCount = 13;
+    int64 pitTimeInMillis = 14;
+    int64 pitCurrent = 15;
+    int64 openContexts = 16;
+  }
+
+  message SegmentStats {
+    int64 count = 1;
+    int64 indexWriterMemoryInBytes = 2;
+    int64 versionMapMemoryInBytes = 3;
+    int64 maxUnsafeAutoIdTimestamp = 4;
+    int64 bitsetMemoryInBytes = 5;
+  }
+}
diff --git a/server/src/main/proto/server/NodesStatsRequestProto.proto b/server/src/main/proto/server/NodesStatsRequestProto.proto
new file mode 100644
index 0000000000000..b3da5cbf60aa0
--- /dev/null
+++ b/server/src/main/proto/server/NodesStatsRequestProto.proto
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "NodesStatsRequestProto";
+
+message NodesStatsRequest {
+  repeated string requestedMetrics = 1;
+  string timeout = 2;
+}
diff --git a/server/src/main/proto/server/NodesStatsResponseProto.proto b/server/src/main/proto/server/NodesStatsResponseProto.proto
new file mode 100644
index 0000000000000..59ba8afad3c33
--- /dev/null
+++ b/server/src/main/proto/server/NodesStatsResponseProto.proto
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/NodesStatsProto.proto";
+
+option java_outer_classname = "NodesStatsResponseProto";
+
+message NodesStatsResponse {
+  string clusterName = 1;
+  repeated NodesStats nodesStats = 2;
+}
diff --git a/server/src/main/proto/server/QueryFetchSearchResultProto.proto b/server/src/main/proto/server/QueryFetchSearchResultProto.proto
new file mode 100644
index 0000000000000..29f384457ae66
--- /dev/null
+++ b/server/src/main/proto/server/QueryFetchSearchResultProto.proto
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/QuerySearchResultProto.proto";
+import "server/FetchSearchResultProto.proto";
+
+option java_outer_classname = "QueryFetchSearchResultProto";
+
+message QueryFetchSearchResult {
+  QuerySearchResult queryResult = 1;
+  FetchSearchResult fetchResult = 2;
+}
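QueryFetchSearchResult above simply pairs the two per-shard phase results; a composition sketch (not part of the patch; `queryResult` and `fetchResult` are assumed to be already-built messages):

    QueryFetchSearchResultProto.QueryFetchSearchResult combined =
        QueryFetchSearchResultProto.QueryFetchSearchResult.newBuilder()
            .setQueryResult(queryResult)   // QuerySearchResult from the query phase
            .setFetchResult(fetchResult)   // FetchSearchResult from the fetch phase
            .build();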
diff --git a/server/src/main/proto/server/QuerySearchResultProto.proto b/server/src/main/proto/server/QuerySearchResultProto.proto
new file mode 100644
index 0000000000000..a7cba5a044d38
--- /dev/null
+++ b/server/src/main/proto/server/QuerySearchResultProto.proto
@@ -0,0 +1,78 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+import "server/ShardSearchRequestProto.proto";
+
+option java_outer_classname = "QuerySearchResultProto";
+
+message QuerySearchResult {
+  ShardSearchContextId contextId = 1;
+  optional int32 from = 2;
+  optional int32 size = 3;
+  optional TopDocsAndMaxScore topDocsAndMaxScore = 4;
+  optional bool hasScoreDocs = 5;
+  optional TotalHits totalHits = 6;
+  optional float maxScore = 7;
+  optional TopDocs topDocs = 8;
+  optional bool hasAggs = 9;
+  optional bool hasSuggest = 10;
+  optional bool searchTimedOut = 11;
+  optional bool terminatedEarly = 12;
+  optional bytes profileShardResults = 13;
+  optional int64 serviceTimeEWMA = 14;
+  optional int32 nodeQueueSize = 15;
+  ShardSearchRequest shardSearchRequest = 16;
+  SearchShardTarget searchShardTarget = 17;
+
+  message TopDocsAndMaxScore {
+    TopDocs topDocs = 1;
+    float maxScore = 2;
+  }
+
+  message TotalHits {
+    int64 value = 1;
+    Relation relation = 2;
+
+    enum Relation {
+      EQUAL_TO = 0;
+      GREATER_THAN_OR_EQUAL_TO = 1;
+    }
+  }
+
+  message TopDocs {
+    TotalHits totalHits = 1;
+    repeated ScoreDoc scoreDocs = 2;
+
+    message ScoreDoc {
+      int32 doc = 1;
+      float score = 2;
+      int32 shardIndex = 3;
+    }
+  }
+
+  message RescoreDocIds {
+    map<int32, setInteger> docIds = 1;
+
+    message setInteger {
+      repeated int32 values = 1;
+    }
+  }
+
+  message SearchShardTarget {
+    string nodeId = 1;
+    ShardId shardId = 2;
+    string clusterAlias = 3;
+  }
+}
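A sketch of encoding query-phase hits with the nested TopDocs/TotalHits messages above (not part of the patch; values are illustrative):

    // assumes: import org.opensearch.server.proto.QuerySearchResultProto.QuerySearchResult;
    QuerySearchResult.TopDocs topDocs = QuerySearchResult.TopDocs.newBuilder()
        .setTotalHits(QuerySearchResult.TotalHits.newBuilder()
            .setValue(42)
            .setRelation(QuerySearchResult.TotalHits.Relation.EQUAL_TO))
        .addScoreDocs(QuerySearchResult.TopDocs.ScoreDoc.newBuilder().setDoc(7).setScore(1.3f).setShardIndex(0))
        .build();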
diff --git a/server/src/main/proto/server/ShardSearchRequestProto.proto b/server/src/main/proto/server/ShardSearchRequestProto.proto
new file mode 100644
index 0000000000000..f230eeb0b7ebf
--- /dev/null
+++ b/server/src/main/proto/server/ShardSearchRequestProto.proto
@@ -0,0 +1,71 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.server.proto;
+
+option java_outer_classname = "ShardSearchRequestProto";
+
+message ShardSearchRequest {
+  OriginalIndices originalIndices = 1;
+  ShardId shardId = 2;
+  int32 numberOfShards = 3;
+  SearchType searchType = 4;
+  bytes source = 5;
+  bool requestCache = 6;
+  bytes aliasFilter = 7;
+  float indexBoost = 8;
+  bool allowPartialSearchResults = 9;
+  repeated string indexRoutings = 10;
+  string preference = 11;
+  bytes scroll = 12;
+  int64 nowInMillis = 13;
+  optional string clusterAlias = 14;
+  optional ShardSearchContextId readerId = 15;
+  optional string timeValue = 16;
+  int64 inboundNetworkTime = 17;
+  int64 outboundNetworkTime = 18;
+  bool canReturnNullResponseIfMatchNoDocs = 19;
+
+  enum SearchType {
+    QUERY_THEN_FETCH = 0;
+    DFS_QUERY_THEN_FETCH = 1;
+  }
+}
+
+message ShardSearchContextId {
+  string sessionId = 1;
+  int64 id = 2;
+}
+
+message ShardId {
+  int32 shardId = 1;
+  int32 hashCode = 2;
+  string indexName = 3;
+  string indexUUID = 4;
+}
+
+message OriginalIndices {
+  repeated string indices = 1;
+  IndicesOptions indicesOptions = 2;
+
+  message IndicesOptions {
+    bool ignoreUnavailable = 1;
+    bool allowNoIndices = 2;
+    bool expandWildcardsOpen = 3;
+    bool expandWildcardsClosed = 4;
+    bool expandWildcardsHidden = 5;
+    bool allowAliasesToMultipleIndices = 6;
+    bool forbidClosedIndices = 7;
+    bool ignoreAliases = 8;
+    bool ignoreThrottled = 9;
+  }
+}
diff --git a/server/src/main/proto/tasks/TaskIdProto.proto b/server/src/main/proto/tasks/TaskIdProto.proto
new file mode 100644
index 0000000000000..c2d34ca235c28
--- /dev/null
+++ b/server/src/main/proto/tasks/TaskIdProto.proto
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.tasks.proto;
+
+option java_outer_classname = "TaskIdProto";
+
+message TaskId {
+  string nodeId = 1;
+  int64 id = 2;
+}
diff --git a/server/src/main/proto/tasks/TaskResourceStatsProto.proto b/server/src/main/proto/tasks/TaskResourceStatsProto.proto
new file mode 100644
index 0000000000000..0e2462ba51a73
--- /dev/null
+++ b/server/src/main/proto/tasks/TaskResourceStatsProto.proto
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+syntax = "proto3";
+package org.opensearch.tasks.proto;
+
+option java_outer_classname = "TaskResourceStatsProto";
+
+message TaskResourceStats {
+  message TaskResourceUsage {
+    int64 cpuTimeInNanos = 1;
+    int64 memoryInBytes = 2;
+  }
+  map<string, TaskResourceUsage> resourceUsage = 1;
+}
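For the map field above, protoc generates put-style accessors; a recording sketch (not part of the patch; the "total" key is illustrative):

    // assumes: import org.opensearch.tasks.proto.TaskResourceStatsProto.TaskResourceStats;
    TaskResourceStats stats = TaskResourceStats.newBuilder()
        .putResourceUsage("total", TaskResourceStats.TaskResourceUsage.newBuilder()
            .setCpuTimeInNanos(1_000_000L)
            .setMemoryInBytes(4096L)
            .build())
        .build();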