Merge branch 'main' into sampling
Signed-off-by: Nishchay Malhotra <114057571+nishchay21@users.noreply.github.com>
nishchay21 committed Mar 28, 2024
2 parents 4f0586a + 8ad0dc0 commit 178e48b
Showing 32 changed files with 1,011 additions and 171 deletions.
17 changes: 2 additions & 15 deletions CHANGELOG.md
@@ -103,22 +103,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),

## [Unreleased 2.x]
### Added
- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874))
- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441))
- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121))
- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268))
- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163))
- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626))
- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063))
- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269))
- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533))
- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835))
- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586))
- [Metrics Framework] Adds support for asynchronous gauge metric type. ([#12642](https://github.com/opensearch-project/OpenSearch/issues/12642))
- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601))
- [Tiered caching] Add policies controlling which values can enter pluggable caches [EXPERIMENTAL] ([#12542](https://github.com/opensearch-project/OpenSearch/pull/12542))
- [Tiered caching] Add Stale keys Management and CacheCleaner to IndicesRequestCache ([#12625](https://github.com/opensearch-project/OpenSearch/pull/12625))
- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818))
- Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768))
- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865))
- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697))
- [Tracing Framework] Adds support for inferred sampling ([#12315](https://github.com/opensearch-project/OpenSearch/issues/12315))

12 changes: 7 additions & 5 deletions distribution/src/bin/opensearch
@@ -36,14 +36,16 @@ fi

# get keystore password before setting java options to avoid
# conflicting GC configurations for the keystore tools
-unset KEYSTORE_PASSWORD
-KEYSTORE_PASSWORD=
 if [[ $CHECK_KEYSTORE = true ]] \
     && bin/opensearch-keystore has-passwd --silent
 then
-  if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then
-    echo "Failed to read keystore password on console" 1>&2
-    exit 1
+  if [[ ! -z "${KEYSTORE_PASSWORD}" ]]; then
+    echo "Using value of KEYSTORE_PASSWORD from the environment"
+  else
+    if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then
+      echo "Failed to read keystore password on console" 1>&2
+      exit 1
+    fi
   fi
fi

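With the unconditional unset/blank assignment removed, the start script now honors a KEYSTORE_PASSWORD value already present in the environment and only falls back to the interactive prompt when the variable is empty. A minimal usage sketch (the password value is a placeholder, not part of this commit):

# Sketch: supply the keystore password non-interactively, e.g. from a
# systemd unit or container entrypoint where no TTY is attached.
export KEYSTORE_PASSWORD='example-keystore-password'   # placeholder value
./bin/opensearch

# Equivalent one-shot form, scoping the variable to a single invocation:
KEYSTORE_PASSWORD='example-keystore-password' ./bin/opensearch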
13 changes: 8 additions & 5 deletions distribution/src/bin/opensearch.bat
@@ -62,14 +62,17 @@ if not exist "%SERVICE_LOG_DIR%" (
mkdir "%SERVICE_LOG_DIR%"
)

-SET KEYSTORE_PASSWORD=
 IF "%checkpassword%"=="Y" (
   CALL "%~dp0opensearch-keystore.bat" has-passwd --silent
   IF !ERRORLEVEL! EQU 0 (
-    SET /P KEYSTORE_PASSWORD=OpenSearch keystore password:
-    IF !ERRORLEVEL! NEQ 0 (
-      ECHO Failed to read keystore password on standard input
-      EXIT /B !ERRORLEVEL!
+    if defined KEYSTORE_PASSWORD (
+      ECHO Using value of KEYSTORE_PASSWORD from the environment
+    ) else (
+      SET /P KEYSTORE_PASSWORD=OpenSearch keystore password:
+      IF !ERRORLEVEL! NEQ 0 (
+        ECHO Failed to read keystore password on standard input
+        EXIT /B !ERRORLEVEL!
+      )
)
)
)
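The Windows launcher gets the same bypass: when KEYSTORE_PASSWORD is already defined, opensearch.bat skips the SET /P prompt. A minimal sketch (placeholder value again):

REM Sketch: pre-set the password so opensearch.bat does not prompt.
SET KEYSTORE_PASSWORD=example-keystore-password
.\bin\opensearch.bat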
modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java
@@ -32,6 +32,7 @@

package org.opensearch.ingest.common;

import org.opensearch.common.network.InetAddresses;
import org.opensearch.ingest.AbstractProcessor;
import org.opensearch.ingest.ConfigurationUtils;
import org.opensearch.ingest.IngestDocument;
@@ -118,6 +119,19 @@ public Object convert(Object value) {
return value.toString();
}
},
IP {
@Override
public Object convert(Object value) {
// If the value is a valid IPv4/IPv6 address, return the original value directly: IpFieldType
// accepts string values, and that is simpler than returning an InetAddress object, which would
// need extra work such as serialization
if (value instanceof String && InetAddresses.isInetAddress(value.toString())) {
return value;
} else {
throw new IllegalArgumentException("[" + value + "] is not a valid ipv4/ipv6 address");
}
}
},
AUTO {
@Override
public Object convert(Object value) {
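The new IP branch delegates validation entirely to InetAddresses.isInetAddress and never materializes an InetAddress object. A standalone sketch of that accept/reject behavior (class name and sample inputs are illustrative):

import org.opensearch.common.network.InetAddresses;

// Illustrative only: exercises the same validity check the IP type relies on.
public class IpCheckSketch {
    public static void main(String[] args) {
        String[] candidates = { "1.2.3.4", "::1", "1.1.1.", "not-an-ip" };
        for (String candidate : candidates) {
            if (InetAddresses.isInetAddress(candidate)) {
                // Valid: the converter returns the original string unchanged.
                System.out.println(candidate + " -> kept");
            } else {
                // Invalid: the converter throws IllegalArgumentException.
                System.out.println(candidate + " -> rejected");
            }
        }
    }
}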
modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java
@@ -550,4 +550,29 @@ public void testTargetField() throws Exception {
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt)));
assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt));
}

public void testConvertIP() throws Exception {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
String validIPString;
if (randomBoolean()) {
validIPString = "1.2.3.4";
} else {
validIPString = "::1";
}
String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, validIPString);

Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false);
processor.execute(ingestDocument);
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(validIPString));

String invalidIPString = randomAlphaOfLength(10);
fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, invalidIPString);
Processor processorWithInvalidIP = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false);
try {
processorWithInvalidIP.execute(ingestDocument);
fail("processor execute should have failed");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("[" + invalidIPString + "] is not a valid ipv4/ipv6 address"));
}
}
}
@@ -0,0 +1,83 @@
---
teardown:
- do:
ingest.delete_pipeline:
id: "1"
ignore: 404

---
"Test convert processor with ip type":
- skip:
version: " - 2.13.99"
reason: "introduced in 2.14.0"
- do:
ingest.put_pipeline:
id: "1"
body: >
{
"processors": [
{
"convert" : {
"field" : "raw_ip",
"type": "ip"
}
}
]
}
- match: { acknowledged: true }

- do:
catch: /\[1.1.1.\] is not a valid ipv4\/ipv6 address/
index:
index: test
id: 1
pipeline: "1"
body: {
raw_ip: "1.1.1."
}

- do:
ingest.put_pipeline:
id: "1"
body: >
{
"processors": [
{
"convert" : {
"field" : "raw_ip",
"target_field" : "ip_field",
"type" : "ip",
"ignore_failure" : true
}
}
]
}
- match: { acknowledged: true }

- do:
index:
index: test
id: 1
pipeline: "1"
body: {
raw_ip: "1.1.1."
}
- do:
get:
index: test
id: 1
- match: { _source: { raw_ip: "1.1.1."} }

- do:
index:
index: test
id: 1
pipeline: "1"
body: {
raw_ip: "1.1.1.1"
}
- do:
get:
index: test
id: 1
- match: { _source: { raw_ip: "1.1.1.1", ip_field: "1.1.1.1"} }
@@ -113,12 +113,12 @@ setup:
index.number_of_replicas: 0
index.number_of_shards: 6

# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845
---
"Returns error if target index's metadata write is blocked":

- skip:
version: " - 2.99.99"
reason: "only available in 3.0+"
version: " - 2.7.99"
reason: "the bug was fixed in 2.8.0"

# block source index's write operations
- do:
@@ -72,12 +72,12 @@ setup:
- match: { _id: "1" }
- match: { _source: { foo: "hello world" } }

# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845
---
"Returns error if target index's metadata write is blocked":

- skip:
version: " - 2.99.99"
reason: "only available in 3.0+"
version: " - 2.7.99"
reason: "the bug was fixed in 2.8.0"

# block source index's write operations
- do:
@@ -219,12 +219,12 @@ setup:
index.number_of_replicas: 0
index.number_of_shards: 6

# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845
---
"Returns error if target index's metadata write is blocked":

- skip:
version: " - 2.99.99"
reason: "only available in 3.0+"
version: " - 2.7.99"
reason: "the bug was fixed in 2.8.0"

# block source index's write operations
- do:
server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java
@@ -84,6 +84,7 @@
import org.opensearch.index.translog.TestTranslog;
import org.opensearch.index.translog.Translog;
import org.opensearch.index.translog.TranslogStats;
import org.opensearch.indices.DefaultRemoteStoreSettings;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
@@ -711,9 +712,9 @@ public static final IndexShard newIndexShard(
SegmentReplicationCheckpointPublisher.EMPTY,
null,
null,
-() -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL,
nodeId,
null,
+DefaultRemoteStoreSettings.INSTANCE,
false
);
}
server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -31,6 +31,7 @@
import org.opensearch.index.shard.IndexShardClosedException;
import org.opensearch.index.translog.Translog.Durability;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.RemoteStoreSettings;
import org.opensearch.indices.recovery.PeerRecoveryTargetService;
import org.opensearch.indices.recovery.RecoverySettings;
import org.opensearch.indices.recovery.RecoveryState;
@@ -56,7 +57,7 @@

import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
-import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
+import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.comparesEqualTo;
@@ -189,7 +190,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");

IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
-int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles();
+int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles();
// Delete is async.
assertBusy(() -> {
int actualFileCount = getFileCount(indexPath);
@@ -224,7 +225,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception {

public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception {
Settings.Builder settings = Settings.builder()
-.put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3");
+.put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3");
internalCluster().startNode(settings);

createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
@@ -243,7 +244,7 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception {

public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception {
Settings.Builder settings = Settings.builder()
-.put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1");
+.put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1");
internalCluster().startNode(settings);

createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
@@ -469,7 +470,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E

private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) {
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode);
-assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval());
+assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval());
}

private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) {
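Both relocated settings stay dynamic, so integration tests (or operators via the cluster settings API) can still change them at runtime. A hedged sketch using the standard test-client helper; the value 5 is an arbitrary example:

// Sketch: dynamically override the relocated retention setting in a test.
client().admin()
    .cluster()
    .prepareUpdateSettings()
    .setTransientSettings(
        Settings.builder()
            .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5)
    )
    .get();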
server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -117,6 +117,7 @@
import org.opensearch.indices.IndicesQueryCache;
import org.opensearch.indices.IndicesRequestCache;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.RemoteStoreSettings;
import org.opensearch.indices.ShardLimitValidator;
import org.opensearch.indices.analysis.HunspellService;
import org.opensearch.indices.breaker.BreakerSettings;
@@ -297,7 +298,6 @@ public void apply(Settings value, Settings current, Settings previous) {
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING,
RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT,
-RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
@@ -706,7 +706,6 @@ public void apply(Settings value, Settings current, Settings previous) {
RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING,
RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING,
RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING,
-IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING,
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING,
IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING,
IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING,
@@ -723,7 +722,10 @@

// Concurrent segment search settings
SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING,
-SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING
+SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING,
+
+RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING,
+RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING
)
)
);
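Both remote-store settings now live in the dedicated RemoteStoreSettings class registered above instead of IndicesService and RecoverySettings. A simplified sketch of the holder pattern this relies on: cache the current value in a volatile field and subscribe to dynamic updates. The default/minimum values and member names below are illustrative, not copied from the class:

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;

public class RemoteStoreSettingsSketch {
    // Assumed shape of a dynamic, node-scoped time setting; the key matches
    // the registration above, the 650ms default and 50ms floor are placeholders.
    public static final Setting<TimeValue> TRANSLOG_BUFFER_INTERVAL = Setting.timeSetting(
        "cluster.remote_store.translog.buffer_interval",
        TimeValue.timeValueMillis(650),
        TimeValue.timeValueMillis(50),
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    private volatile TimeValue clusterRemoteTranslogBufferInterval;

    public RemoteStoreSettingsSketch(Settings settings, ClusterSettings clusterSettings) {
        clusterRemoteTranslogBufferInterval = TRANSLOG_BUFFER_INTERVAL.get(settings);
        // Re-read the field whenever the cluster setting is updated dynamically.
        clusterSettings.addSettingsUpdateConsumer(TRANSLOG_BUFFER_INTERVAL, v -> clusterRemoteTranslogBufferInterval = v);
    }

    public TimeValue getClusterRemoteTranslogBufferInterval() {
        return clusterRemoteTranslogBufferInterval;
    }
}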
9 changes: 5 additions & 4 deletions server/src/main/java/org/opensearch/index/IndexModule.java
@@ -79,6 +79,7 @@
import org.opensearch.index.store.remote.filecache.FileCache;
import org.opensearch.index.translog.TranslogFactory;
import org.opensearch.indices.IndicesQueryCache;
import org.opensearch.indices.RemoteStoreSettings;
import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.opensearch.indices.mapper.MapperRegistry;
import org.opensearch.indices.recovery.RecoverySettings;
@@ -604,8 +605,8 @@ public IndexService newIndexService(
IndexStorePlugin.DirectoryFactory remoteDirectoryFactory,
BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier,
Supplier<TimeValue> clusterDefaultRefreshIntervalSupplier,
-Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier,
-RecoverySettings recoverySettings
+RecoverySettings recoverySettings,
+RemoteStoreSettings remoteStoreSettings
) throws IOException {
final IndexEventListener eventListener = freeze();
Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper
@@ -663,8 +664,8 @@ public IndexService newIndexService(
recoveryStateFactory,
translogFactorySupplier,
clusterDefaultRefreshIntervalSupplier,
-clusterRemoteTranslogBufferIntervalSupplier,
-recoverySettings
+recoverySettings,
+remoteStoreSettings
);
success = true;
return indexService;