diff --git a/.chloggen/fix_elasticsearch-nilpanic.yaml b/.chloggen/fix_elasticsearch-nilpanic.yaml new file mode 100755 index 000000000000..6ed19e36e6ee --- /dev/null +++ b/.chloggen/fix_elasticsearch-nilpanic.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix nil panic on non-linux systems + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [30140] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/receiver/elasticsearchreceiver/client_test.go b/receiver/elasticsearchreceiver/client_test.go index d4d16e8fc5db..aeec239fcbf8 100644 --- a/receiver/elasticsearchreceiver/client_test.go +++ b/receiver/elasticsearchreceiver/client_test.go @@ -9,6 +9,7 @@ import ( "net/http" "net/http/httptest" "os" + "path/filepath" "strings" "testing" @@ -30,13 +31,12 @@ func TestCreateClientInvalidEndpoint(t *testing.T) { } func TestNodeStatsNoPassword(t *testing.T) { - nodeJSON, err := os.ReadFile("./testdata/sample_payloads/nodes_stats_linux.json") - require.NoError(t, err) + nodeJSON := readSamplePayload(t, "nodes_stats_linux.json") actualNodeStats := model.NodeStats{} require.NoError(t, json.Unmarshal(nodeJSON, &actualNodeStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -53,13 +53,35 @@ func TestNodeStatsNoPassword(t *testing.T) { } func TestNodeStatsNilNodes(t *testing.T) { - nodeJSON, err := os.ReadFile("./testdata/sample_payloads/nodes_stats_linux.json") + nodeJSON := readSamplePayload(t, "nodes_stats_linux.json") + + actualNodeStats := model.NodeStats{} + require.NoError(t, json.Unmarshal(nodeJSON, &actualNodeStats)) + + elasticsearchMock := newMockServer(t) + defer elasticsearchMock.Close() + + client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: elasticsearchMock.URL, + }, + }, componenttest.NewNopHost()) require.NoError(t, err) + ctx := context.Background() + nodeStats, err := client.NodeStats(ctx, nil) + require.NoError(t, err) + + require.Equal(t, &actualNodeStats, nodeStats) +} + +func TestNodeStatsNilIOStats(t *testing.T) { + nodeJSON := readSamplePayload(t, "nodes_stats_other.json") + actualNodeStats := model.NodeStats{} require.NoError(t, 
json.Unmarshal(nodeJSON, &actualNodeStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t, withNodes(nodeJSON)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -77,8 +99,7 @@ func TestNodeStatsNilNodes(t *testing.T) { } func TestNodeStatsAuthentication(t *testing.T) { - nodeJSON, err := os.ReadFile("./testdata/sample_payloads/nodes_stats_linux.json") - require.NoError(t, err) + nodeJSON := readSamplePayload(t, "nodes_stats_linux.json") actualNodeStats := model.NodeStats{} require.NoError(t, json.Unmarshal(nodeJSON, &actualNodeStats)) @@ -86,7 +107,7 @@ func TestNodeStatsAuthentication(t *testing.T) { username := "user" password := "pass" - elasticsearchMock := mockServer(t, username, password) + elasticsearchMock := newMockServer(t, withBasicAuth(username, password)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -106,7 +127,7 @@ func TestNodeStatsAuthentication(t *testing.T) { } func TestNodeStatsNoAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -122,7 +143,7 @@ func TestNodeStatsNoAuthentication(t *testing.T) { } func TestNodeStatsBadAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -140,13 +161,12 @@ func TestNodeStatsBadAuthentication(t *testing.T) { } func TestClusterHealthNoPassword(t *testing.T) { - healthJSON, err := os.ReadFile("./testdata/sample_payloads/health.json") - require.NoError(t, err) + healthJSON := 
readSamplePayload(t, "health.json") actualClusterHealth := model.ClusterHealth{} require.NoError(t, json.Unmarshal(healthJSON, &actualClusterHealth)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -164,8 +184,7 @@ func TestClusterHealthNoPassword(t *testing.T) { } func TestClusterHealthAuthentication(t *testing.T) { - healthJSON, err := os.ReadFile("./testdata/sample_payloads/health.json") - require.NoError(t, err) + healthJSON := readSamplePayload(t, "health.json") actualClusterHealth := model.ClusterHealth{} require.NoError(t, json.Unmarshal(healthJSON, &actualClusterHealth)) @@ -173,7 +192,7 @@ func TestClusterHealthAuthentication(t *testing.T) { username := "user" password := "pass" - elasticsearchMock := mockServer(t, username, password) + elasticsearchMock := newMockServer(t, withBasicAuth(username, password)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -193,7 +212,7 @@ func TestClusterHealthAuthentication(t *testing.T) { } func TestClusterHealthNoAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -209,7 +228,7 @@ func TestClusterHealthNoAuthentication(t *testing.T) { } func TestClusterHealthNoAuthorization(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -227,13 +246,12 @@ func TestClusterHealthNoAuthorization(t *testing.T) { } func TestMetadataNoPassword(t *testing.T) { - metadataJSON, err := 
os.ReadFile("./testdata/sample_payloads/metadata.json") - require.NoError(t, err) + metadataJSON := readSamplePayload(t, "metadata.json") actualMetadata := model.ClusterMetadataResponse{} require.NoError(t, json.Unmarshal(metadataJSON, &actualMetadata)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -251,8 +269,7 @@ func TestMetadataNoPassword(t *testing.T) { } func TestMetadataAuthentication(t *testing.T) { - metadataJSON, err := os.ReadFile("./testdata/sample_payloads/metadata.json") - require.NoError(t, err) + metadataJSON := readSamplePayload(t, "metadata.json") actualMetadata := model.ClusterMetadataResponse{} require.NoError(t, json.Unmarshal(metadataJSON, &actualMetadata)) @@ -260,7 +277,7 @@ func TestMetadataAuthentication(t *testing.T) { username := "user" password := "pass" - elasticsearchMock := mockServer(t, username, password) + elasticsearchMock := newMockServer(t, withBasicAuth(username, password)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -280,7 +297,7 @@ func TestMetadataAuthentication(t *testing.T) { } func TestMetadataNoAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -296,7 +313,7 @@ func TestMetadataNoAuthentication(t *testing.T) { } func TestMetadataNoAuthorization(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -341,7 +358,7 @@ func TestDoRequestClientTimeout(t *testing.T) 
{ } func TestDoRequest404(t *testing.T) { - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -357,13 +374,12 @@ func TestDoRequest404(t *testing.T) { } func TestIndexStatsNoPassword(t *testing.T) { - indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json") - require.NoError(t, err) + indexJSON := readSamplePayload(t, "indices.json") actualIndexStats := model.IndexStats{} require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -380,13 +396,12 @@ func TestIndexStatsNoPassword(t *testing.T) { } func TestIndexStatsNilNodes(t *testing.T) { - indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json") - require.NoError(t, err) + indexJSON := readSamplePayload(t, "indices.json") actualIndexStats := model.IndexStats{} require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -404,8 +419,7 @@ func TestIndexStatsNilNodes(t *testing.T) { } func TestIndexStatsAuthentication(t *testing.T) { - indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json") - require.NoError(t, err) + indexJSON := readSamplePayload(t, "indices.json") actualIndexStats := model.IndexStats{} require.NoError(t, json.Unmarshal(indexJSON, &actualIndexStats)) @@ -413,7 +427,7 @@ func TestIndexStatsAuthentication(t *testing.T) { username := "user" password := "pass" - elasticsearchMock := mockServer(t, username, password) + elasticsearchMock := newMockServer(t, withBasicAuth(username, 
password)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -433,7 +447,7 @@ func TestIndexStatsAuthentication(t *testing.T) { } func TestIndexStatsNoAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -449,7 +463,7 @@ func TestIndexStatsNoAuthentication(t *testing.T) { } func TestIndexStatsBadAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -467,13 +481,12 @@ func TestIndexStatsBadAuthentication(t *testing.T) { } func TestClusterStatsNoPassword(t *testing.T) { - clusterJSON, err := os.ReadFile("./testdata/sample_payloads/cluster.json") - require.NoError(t, err) + clusterJSON := readSamplePayload(t, "cluster.json") actualClusterStats := model.ClusterStats{} require.NoError(t, json.Unmarshal(clusterJSON, &actualClusterStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -490,13 +503,12 @@ func TestClusterStatsNoPassword(t *testing.T) { } func TestClusterStatsNilNodes(t *testing.T) { - clusterJSON, err := os.ReadFile("./testdata/sample_payloads/cluster.json") - require.NoError(t, err) + clusterJSON := readSamplePayload(t, "cluster.json") actualClusterStats := model.ClusterStats{} require.NoError(t, json.Unmarshal(clusterJSON, &actualClusterStats)) - elasticsearchMock := mockServer(t, "", "") + elasticsearchMock := newMockServer(t) defer elasticsearchMock.Close() client, err := 
newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -514,8 +526,7 @@ func TestClusterStatsNilNodes(t *testing.T) { } func TestClusterStatsAuthentication(t *testing.T) { - clusterJSON, err := os.ReadFile("./testdata/sample_payloads/cluster.json") - require.NoError(t, err) + clusterJSON := readSamplePayload(t, "cluster.json") actualClusterStats := model.ClusterStats{} require.NoError(t, json.Unmarshal(clusterJSON, &actualClusterStats)) @@ -523,7 +534,7 @@ func TestClusterStatsAuthentication(t *testing.T) { username := "user" password := "pass" - elasticsearchMock := mockServer(t, username, password) + elasticsearchMock := newMockServer(t, withBasicAuth(username, password)) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -543,7 +554,7 @@ func TestClusterStatsAuthentication(t *testing.T) { } func TestClusterStatsNoAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -559,7 +570,7 @@ func TestClusterStatsNoAuthentication(t *testing.T) { } func TestClusterStatsBadAuthentication(t *testing.T) { - elasticsearchMock := mockServer(t, "user", "pass") + elasticsearchMock := newMockServer(t, withBasicAuth("user", "pass")) defer elasticsearchMock.Close() client, err := newElasticsearchClient(componenttest.NewNopTelemetrySettings(), Config{ @@ -576,69 +587,76 @@ func TestClusterStatsBadAuthentication(t *testing.T) { require.ErrorIs(t, err, errUnauthorized) } -// mockServer gives a mock elasticsearch server for testing; if username or password is included, they will be required for the client. -// otherwise, authorization is ignored. 
-func mockServer(t *testing.T, username, password string) *httptest.Server { - nodes, err := os.ReadFile("./testdata/sample_payloads/nodes_stats_linux.json") - require.NoError(t, err) - indices, err := os.ReadFile("./testdata/sample_payloads/indices.json") - require.NoError(t, err) - health, err := os.ReadFile("./testdata/sample_payloads/health.json") - require.NoError(t, err) - metadata, err := os.ReadFile("./testdata/sample_payloads/metadata.json") - require.NoError(t, err) - cluster, err := os.ReadFile("./testdata/sample_payloads/cluster.json") - require.NoError(t, err) +type mockServer struct { + auth func(username, password string) bool + metadata []byte + prefixes map[string][]byte +} + +type mockServerOption func(*mockServer) + +func withBasicAuth(username, password string) mockServerOption { // nolint:unparam + return func(m *mockServer) { + m.auth = func(u, p string) bool { + return u == username && p == password + } + } +} + +func withNodes(payload []byte) mockServerOption { + return func(m *mockServer) { + m.prefixes["/_nodes/_all/stats"] = payload + } +} + +// newMockServer gives a mock elasticsearch server for testing +func newMockServer(t *testing.T, opts ...mockServerOption) *httptest.Server { + mock := mockServer{ + metadata: readSamplePayload(t, "metadata.json"), + prefixes: map[string][]byte{ + "/_nodes/_all/stats": readSamplePayload(t, "nodes_stats_linux.json"), + "/_all/_stats": readSamplePayload(t, "indices.json"), + "/_cluster/health": readSamplePayload(t, "health.json"), + "/_cluster/stats": readSamplePayload(t, "cluster.json"), + }, + } + for _, opt := range opts { + opt(&mock) + } elasticsearchMock := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if username != "" || password != "" { - authUser, authPass, ok := req.BasicAuth() + if mock.auth != nil { + username, password, ok := req.BasicAuth() if !ok { rw.WriteHeader(401) return - } else if authUser != username || authPass != password { + } else if 
!mock.auth(username, password) { rw.WriteHeader(403) return } } - - if strings.HasPrefix(req.URL.Path, "/_nodes/_all/stats") { - rw.WriteHeader(200) - _, err = rw.Write(nodes) - require.NoError(t, err) - return - } - - if strings.HasPrefix(req.URL.Path, "/_all/_stats") { - rw.WriteHeader(200) - _, err = rw.Write(indices) - require.NoError(t, err) - return - } - - if strings.HasPrefix(req.URL.Path, "/_cluster/health") { - rw.WriteHeader(200) - _, err = rw.Write(health) - require.NoError(t, err) - return - } - - if strings.HasPrefix(req.URL.Path, "/_cluster/stats") { - rw.WriteHeader(200) - _, err = rw.Write(cluster) - require.NoError(t, err) - return - } - - // metadata check if req.URL.Path == "/" { rw.WriteHeader(200) - _, err = rw.Write(metadata) + _, err := rw.Write(mock.metadata) require.NoError(t, err) return } + for prefix, payload := range mock.prefixes { + if strings.HasPrefix(req.URL.Path, prefix) { + rw.WriteHeader(200) + _, err := rw.Write(payload) + require.NoError(t, err) + return + } + } rw.WriteHeader(404) })) return elasticsearchMock } + +func readSamplePayload(t *testing.T, file string) []byte { + payload, err := os.ReadFile(filepath.Join("testdata", "sample_payloads", file)) + require.NoError(t, err) + return payload +} diff --git a/receiver/elasticsearchreceiver/documentation.md b/receiver/elasticsearchreceiver/documentation.md index 800a24b5d9da..f35723dd0490 100644 --- a/receiver/elasticsearchreceiver/documentation.md +++ b/receiver/elasticsearchreceiver/documentation.md @@ -331,6 +331,8 @@ The number of bytes sent and received on the network for internal cluster commun The total number of kilobytes read across all file stores for this node. +This metric is available only on Linux systems. 
+ | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | | KiBy | Sum | Int | Cumulative | false | @@ -339,6 +341,8 @@ The total number of kilobytes read across all file stores for this node. The total number of kilobytes written across all file stores for this node. +This metric is available only on Linux systems. + | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | | KiBy | Sum | Int | Cumulative | false | diff --git a/receiver/elasticsearchreceiver/metadata.yaml b/receiver/elasticsearchreceiver/metadata.yaml index 01ebcd18e4c3..36e85e4c6ed0 100644 --- a/receiver/elasticsearchreceiver/metadata.yaml +++ b/receiver/elasticsearchreceiver/metadata.yaml @@ -293,6 +293,7 @@ metrics: value_type: int attributes: [ ] enabled: true + extended_documentation: This metric is available only on Linux systems. # The calculation for node.disk.io.write is actually in KiBy(1024 bytes), not kBy (1000 bytes) # The metric value calculation comes from sectors: https://github.com/elastic/elasticsearch/blob/3c6797f2d2271a30b24f505da55afbb5ea10733e/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java#L293 # The metric value is gathered by reading disk stats files from https://github.com/elastic/elasticsearch/blob/3c6797f2d2271a30b24f505da55afbb5ea10733e/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java#L117 @@ -307,6 +308,7 @@ metrics: value_type: int attributes: [ ] enabled: true + extended_documentation: This metric is available only on Linux systems. elasticsearch.node.cluster.io: description: The number of bytes sent and received on the network for internal cluster communication. 
unit: By diff --git a/receiver/elasticsearchreceiver/scraper.go b/receiver/elasticsearchreceiver/scraper.go index 9ce2c3e4526c..eaca50dcbfb3 100644 --- a/receiver/elasticsearchreceiver/scraper.go +++ b/receiver/elasticsearchreceiver/scraper.go @@ -135,8 +135,10 @@ func (r *elasticsearchScraper) scrapeNodeMetrics(ctx context.Context, now pcommo r.mb.RecordElasticsearchNodeFsDiskFreeDataPoint(now, info.FS.Total.FreeBytes) r.mb.RecordElasticsearchNodeFsDiskTotalDataPoint(now, info.FS.Total.TotalBytes) - r.mb.RecordElasticsearchNodeDiskIoReadDataPoint(now, info.FS.IOStats.Total.ReadBytes) - r.mb.RecordElasticsearchNodeDiskIoWriteDataPoint(now, info.FS.IOStats.Total.WriteBytes) + if info.FS.IOStats != nil { + r.mb.RecordElasticsearchNodeDiskIoReadDataPoint(now, info.FS.IOStats.Total.ReadBytes) + r.mb.RecordElasticsearchNodeDiskIoWriteDataPoint(now, info.FS.IOStats.Total.WriteBytes) + } r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.ReceivedBytes, metadata.AttributeDirectionReceived) r.mb.RecordElasticsearchNodeClusterIoDataPoint(now, info.TransportStats.SentBytes, metadata.AttributeDirectionSent) diff --git a/receiver/elasticsearchreceiver/scraper_test.go b/receiver/elasticsearchreceiver/scraper_test.go index 5a18fdebdae2..06503a32dbc5 100644 --- a/receiver/elasticsearchreceiver/scraper_test.go +++ b/receiver/elasticsearchreceiver/scraper_test.go @@ -7,7 +7,6 @@ import ( "context" "encoding/json" "errors" - "os" "testing" "github.com/stretchr/testify/mock" @@ -24,7 +23,8 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/model" ) -const fullExpectedMetricsPath = "./testdata/expected_metrics/full.yaml" +const fullLinuxExpectedMetricsPath = "./testdata/expected_metrics/full_linux.yaml" +const fullOtherExpectedMetricsPath = "./testdata/expected_metrics/full_other.yaml" const skipClusterExpectedMetricsPath = "./testdata/expected_metrics/clusterSkip.yaml" const noNodesExpectedMetricsPath = 
"./testdata/expected_metrics/noNodes.yaml" @@ -70,12 +70,69 @@ func TestScraper(t *testing.T) { mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil) mockClient.On("ClusterStats", mock.Anything, []string{"_all"}).Return(clusterStats(t), nil) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStatsLinux(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) sc.client = &mockClient - expectedMetrics, err := golden.ReadMetrics(fullExpectedMetricsPath) + expectedMetrics, err := golden.ReadMetrics(fullLinuxExpectedMetricsPath) + require.NoError(t, err) + + actualMetrics, err := sc.scrape(context.Background()) + require.NoError(t, err) + + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) +} + +func TestScraperNoIOStats(t *testing.T) { + t.Parallel() + + config := createDefaultConfig().(*Config) + + config.Metrics.ElasticsearchNodeOperationsGetCompleted.Enabled = true + config.Metrics.ElasticsearchNodeOperationsGetTime.Enabled = true + config.Metrics.ElasticsearchNodeSegmentsMemory.Enabled = true + + config.Metrics.JvmMemoryHeapUtilization.Enabled = true + + config.Metrics.ElasticsearchNodeOperationsCurrent.Enabled = true + + config.Metrics.ElasticsearchIndexOperationsMergeSize.Enabled = true + config.Metrics.ElasticsearchIndexOperationsMergeDocsCount.Enabled = true + config.Metrics.ElasticsearchIndexSegmentsCount.Enabled = true + config.Metrics.ElasticsearchIndexSegmentsSize.Enabled = true + config.Metrics.ElasticsearchIndexSegmentsMemory.Enabled = true + config.Metrics.ElasticsearchIndexTranslogOperations.Enabled = true + 
config.Metrics.ElasticsearchIndexTranslogSize.Enabled = true + config.Metrics.ElasticsearchIndexCacheMemoryUsage.Enabled = true + config.Metrics.ElasticsearchIndexCacheSize.Enabled = true + config.Metrics.ElasticsearchIndexCacheEvictions.Enabled = true + config.Metrics.ElasticsearchIndexDocuments.Enabled = true + + config.Metrics.ElasticsearchClusterIndicesCacheEvictions.Enabled = true + + config.Metrics.ElasticsearchNodeCacheSize.Enabled = true + config.Metrics.ElasticsearchProcessCPUUsage.Enabled = true + config.Metrics.ElasticsearchProcessCPUTime.Enabled = true + config.Metrics.ElasticsearchProcessMemoryVirtual.Enabled = true + + sc := newElasticSearchScraper(receivertest.NewNopCreateSettings(), config) + + err := sc.start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + + mockClient := mocks.MockElasticsearchClient{} + mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil) + mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil) + mockClient.On("ClusterStats", mock.Anything, []string{"_all"}).Return(clusterStats(t), nil) + mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStatsOther(t), nil) + mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) + + sc.client = &mockClient + + expectedMetrics, err := golden.ReadMetrics(fullOtherExpectedMetricsPath) require.NoError(t, err) actualMetrics, err := sc.scrape(context.Background()) @@ -101,7 +158,7 @@ func TestScraperSkipClusterMetrics(t *testing.T) { mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil) mockClient.On("ClusterStats", mock.Anything, []string{}).Return(clusterStats(t), nil) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", 
mock.Anything, []string{"_all"}).Return(nodeStatsLinux(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) sc.client = &mockClient @@ -132,7 +189,7 @@ func TestScraperNoNodesMetrics(t *testing.T) { mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil) mockClient.On("ClusterStats", mock.Anything, []string{}).Return(clusterStats(t), nil) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{}).Return(nodeStatsLinux(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) sc.client = &mockClient @@ -212,7 +269,7 @@ func TestScrapingError(t *testing.T) { mockClient := mocks.MockElasticsearchClient{} mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStatsLinux(t), nil) mockClient.On("ClusterHealth", mock.Anything).Return(nil, err404) mockClient.On("ClusterStats", mock.Anything, []string{"_all"}).Return(clusterStats(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) @@ -268,7 +325,7 @@ func TestScrapingError(t *testing.T) { mockClient := mocks.MockElasticsearchClient{} mockClient.On("ClusterMetadata", mock.Anything).Return(nil, err404) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStatsLinux(t), nil) mockClient.On("ClusterHealth", mock.Anything).Return(clusterHealth(t), nil) mockClient.On("ClusterStats", 
mock.Anything, []string{"_all"}).Return(clusterStats(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) @@ -324,7 +381,7 @@ func TestScrapingError(t *testing.T) { mockClient := mocks.MockElasticsearchClient{} mockClient.On("ClusterMetadata", mock.Anything).Return(clusterMetadata(t), nil) mockClient.On("Nodes", mock.Anything, []string{"_all"}).Return(nodes(t), nil) - mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStats(t), nil) + mockClient.On("NodeStats", mock.Anything, []string{"_all"}).Return(nodeStatsLinux(t), nil) mockClient.On("ClusterHealth", mock.Anything).Return(ch, nil) mockClient.On("ClusterStats", mock.Anything, []string{"_all"}).Return(clusterStats(t), nil) mockClient.On("IndexStats", mock.Anything, []string{"_all"}).Return(indexStats(t), nil) @@ -348,57 +405,43 @@ func TestScrapingError(t *testing.T) { } func clusterHealth(t *testing.T) *model.ClusterHealth { - healthJSON, err := os.ReadFile("./testdata/sample_payloads/health.json") - require.NoError(t, err) - clusterHealth := model.ClusterHealth{} - require.NoError(t, json.Unmarshal(healthJSON, &clusterHealth)) - + require.NoError(t, json.Unmarshal(readSamplePayload(t, "health.json"), &clusterHealth)) return &clusterHealth } func clusterStats(t *testing.T) *model.ClusterStats { - statsJSON, err := os.ReadFile("./testdata/sample_payloads/cluster.json") - require.NoError(t, err) - clusterStats := model.ClusterStats{} - require.NoError(t, json.Unmarshal(statsJSON, &clusterStats)) - + require.NoError(t, json.Unmarshal(readSamplePayload(t, "cluster.json"), &clusterStats)) return &clusterStats } func nodes(t *testing.T) *model.Nodes { - nodeJSON, err := os.ReadFile("./testdata/sample_payloads/nodes_linux.json") - require.NoError(t, err) - nodes := model.Nodes{} - require.NoError(t, json.Unmarshal(nodeJSON, &nodes)) + require.NoError(t, json.Unmarshal(readSamplePayload(t, "nodes_linux.json"), &nodes)) return &nodes } -func nodeStats(t 
*testing.T) *model.NodeStats { - nodeJSON, err := os.ReadFile("./testdata/sample_payloads/nodes_stats_linux.json") - require.NoError(t, err) +func nodeStatsLinux(t *testing.T) *model.NodeStats { + nodeStats := model.NodeStats{} + require.NoError(t, json.Unmarshal(readSamplePayload(t, "nodes_stats_linux.json"), &nodeStats)) + return &nodeStats +} +func nodeStatsOther(t *testing.T) *model.NodeStats { nodeStats := model.NodeStats{} - require.NoError(t, json.Unmarshal(nodeJSON, &nodeStats)) + require.NoError(t, json.Unmarshal(readSamplePayload(t, "nodes_stats_other.json"), &nodeStats)) return &nodeStats } func indexStats(t *testing.T) *model.IndexStats { - indexJSON, err := os.ReadFile("./testdata/sample_payloads/indices.json") - require.NoError(t, err) - indexStats := model.IndexStats{} - require.NoError(t, json.Unmarshal(indexJSON, &indexStats)) + require.NoError(t, json.Unmarshal(readSamplePayload(t, "indices.json"), &indexStats)) return &indexStats } func clusterMetadata(t *testing.T) *model.ClusterMetadataResponse { - metadataJSON, err := os.ReadFile("./testdata/sample_payloads/metadata.json") - require.NoError(t, err) - metadataResponse := model.ClusterMetadataResponse{} - require.NoError(t, json.Unmarshal(metadataJSON, &metadataResponse)) + require.NoError(t, json.Unmarshal(readSamplePayload(t, "metadata.json"), &metadataResponse)) return &metadataResponse } diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/full.yaml b/receiver/elasticsearchreceiver/testdata/expected_metrics/full_linux.yaml similarity index 100% rename from receiver/elasticsearchreceiver/testdata/expected_metrics/full.yaml rename to receiver/elasticsearchreceiver/testdata/expected_metrics/full_linux.yaml diff --git a/receiver/elasticsearchreceiver/testdata/expected_metrics/full_other.yaml b/receiver/elasticsearchreceiver/testdata/expected_metrics/full_other.yaml new file mode 100644 index 000000000000..5c1f5d380225 --- /dev/null +++ 
b/receiver/elasticsearchreceiver/testdata/expected_metrics/full_other.yaml @@ -0,0 +1,3047 @@ +resourceMetrics: + - resource: + attributes: + - key: elasticsearch.cluster.name + value: + stringValue: docker-cluster + scopeMetrics: + - metrics: + - description: The number of data nodes in the cluster. + name: elasticsearch.cluster.data_nodes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "25" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{nodes}' + - description: The health status of the cluster. + name: elasticsearch.cluster.health + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: status + value: + stringValue: green + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: status + value: + stringValue: red + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: status + value: + stringValue: yellow + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{status}' + - description: The number of unfinished fetches. + name: elasticsearch.cluster.in_flight_fetch + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{fetches}' + - description: The number of evictions from the cache for indices in cluster. + name: elasticsearch.cluster.indices.cache.evictions + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{evictions}' + - description: The total number of nodes in the cluster. 
+ name: elasticsearch.cluster.nodes + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "46" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{nodes}' + - description: The number of cluster-level changes that have not yet been executed. + name: elasticsearch.cluster.pending_tasks + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{tasks}' + - description: The number of shards in the cluster. + name: elasticsearch.cluster.shards + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "45" + attributes: + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "23" + attributes: + - key: state + value: + stringValue: active_primary + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: state + value: + stringValue: initializing + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "10" + attributes: + - key: state + value: + stringValue: relocating + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: state + value: + stringValue: unassigned + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: state + value: + stringValue: unassigned_delayed + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{shards}' + - description: The current heap memory usage + gauge: + dataPoints: + - asInt: "285158912" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.heap.used + unit: By + scope: + name: otelcol/elasticsearchreceiver + version: latest + - resource: + attributes: + - key: elasticsearch.cluster.name + value: + stringValue: docker-cluster + - key: elasticsearch.index.name + value: + stringValue: .geoip_databases + scopeMetrics: + - metrics: + - description: The number of evictions from the cache for an index. 
+ name: elasticsearch.index.cache.evictions + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{evictions}' + - description: The size in bytes of the cache for an index. + name: elasticsearch.index.cache.memory.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of elements of the query cache for an index. + name: elasticsearch.index.cache.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: The number of documents for an index. 
+ name: elasticsearch.index.documents + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: total + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: The number of operations completed for an index. + name: elasticsearch.index.operations.completed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "13" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + 
timeUnixNano: "2000000" + - asInt: "10" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "6" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "13" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + 
stringValue: total + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "10" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "6" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: The total number of documents in merge operations for an index. + name: elasticsearch.index.operations.merge.docs_count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{documents}' + - description: The total size of merged segments for an index. + name: elasticsearch.index.operations.merge.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "64" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Time spent on operations for an index. 
+ name: elasticsearch.index.operations.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "82" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "192" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "938" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "52" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "169" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: suggest 
+ startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "82" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "192" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "938" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "52" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "169" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: aggregation + value: + 
stringValue: total + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Number of segments of an index. + name: elasticsearch.index.segments.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{segments}' + - description: Size of memory for segment object of an index. + name: elasticsearch.index.segments.memory + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "380" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: doc_value + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "21" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: fixed_bit_set + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "37" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: index_writer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2560" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: term + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "380" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: doc_value + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "21" + attributes: + - key: aggregation + value: + 
stringValue: total + - key: object + value: + stringValue: fixed_bit_set + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "37" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: index_writer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2560" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: term + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Size of segments of an index. + name: elasticsearch.index.segments.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5460" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5460" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The size of the shards assigned to this index. + name: elasticsearch.index.shards.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "40230884" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of transaction log operations for an index. + name: elasticsearch.index.translog.operations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Size of the transaction log for an index. 
+ name: elasticsearch.index.translog.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "55" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + scope: + name: otelcol/elasticsearchreceiver + version: latest + - resource: + attributes: + - key: elasticsearch.cluster.name + value: + stringValue: docker-cluster + - key: elasticsearch.index.name + value: + stringValue: _all + scopeMetrics: + - metrics: + - description: The number of evictions from the cache for an index. + name: elasticsearch.index.cache.evictions + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{evictions}' + - description: The size in bytes of the cache for an index. 
+ name: elasticsearch.index.cache.memory.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of elements of the query cache for an index. + name: elasticsearch.index.cache.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: The number of documents for an index. + name: elasticsearch.index.documents + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: total + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: The number of operations completed for an index. 
+ name: elasticsearch.index.operations.completed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "13" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "10" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: 
suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "6" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "13" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "8" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "43" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "10" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + 
value: + stringValue: total + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "6" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: The total number of documents in merge operations for an index. + name: elasticsearch.index.operations.merge.docs_count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{documents}' + - description: The total size of merged segments for an index. + name: elasticsearch.index.operations.merge.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "64" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: Time spent on operations for an index. 
+ name: elasticsearch.index.operations.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "82" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "192" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "938" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "52" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "169" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: suggest 
+ startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "82" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "192" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "3" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "938" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "12" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "52" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "169" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "30" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: aggregation + value: + 
stringValue: total + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: aggregation + value: + stringValue: total + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Number of segments of an index. + name: elasticsearch.index.segments.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{segments}' + - description: Size of memory for segment object of an index. + name: elasticsearch.index.segments.memory + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "380" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: doc_value + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "21" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: fixed_bit_set + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "37" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: index_writer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2560" + attributes: + - key: aggregation + value: + stringValue: primary_shards + - key: object + value: + stringValue: term + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "380" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: doc_value + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "21" + attributes: + - key: aggregation + value: + 
stringValue: total + - key: object + value: + stringValue: fixed_bit_set + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "37" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: index_writer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2560" + attributes: + - key: aggregation + value: + stringValue: total + - key: object + value: + stringValue: term + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Size of segments of an index. + name: elasticsearch.index.segments.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5460" + attributes: + - key: aggregation + value: + stringValue: primary_shards + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5460" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The size of the shards assigned to this index. + name: elasticsearch.index.shards.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "40230884" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Number of transaction log operations for an index. + name: elasticsearch.index.translog.operations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Size of the transaction log for an index. 
+ name: elasticsearch.index.translog.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "55" + attributes: + - key: aggregation + value: + stringValue: total + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + scope: + name: otelcol/elasticsearchreceiver + version: latest + - resource: + attributes: + - key: elasticsearch.cluster.name + value: + stringValue: docker-cluster + - key: elasticsearch.node.name + value: + stringValue: 917e13e55eed + - key: elasticsearch.node.version + value: + stringValue: 7.17.7 + scopeMetrics: + - metrics: + - description: Estimated memory used for the operation. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: name + value: + stringValue: accounting + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: in_flight_requests + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: model_inference + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "305152000" + attributes: + - key: name + value: + stringValue: parent + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: request + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.breaker.memory.estimated + unit: By + - description: Memory limit for the circuit breaker. 
+ name: elasticsearch.breaker.memory.limit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "536870912" + attributes: + - key: name + value: + stringValue: accounting + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "214748364" + attributes: + - key: name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "536870912" + attributes: + - key: name + value: + stringValue: in_flight_requests + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "268435456" + attributes: + - key: name + value: + stringValue: model_inference + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "510027366" + attributes: + - key: name + value: + stringValue: parent + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "322122547" + attributes: + - key: name + value: + stringValue: request + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total number of times the circuit breaker has been triggered and prevented an out of memory error. 
+ name: elasticsearch.breaker.tripped + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: name + value: + stringValue: accounting + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: in_flight_requests + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: model_inference + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: parent + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: request + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Number of differences between published cluster states. + name: elasticsearch.cluster.published_states.differences + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + attributes: + - key: state + value: + stringValue: compatible + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: incompatible + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: Number of published cluster states. + name: elasticsearch.cluster.published_states.full + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: Number of cluster states in queue. 
+ name: elasticsearch.cluster.state_queue + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: state + value: + stringValue: committed + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: pending + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: "1" + - description: The number of cluster state update attempts that changed the cluster state since the node started. + name: elasticsearch.cluster.state_update.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "7" + attributes: + - key: state + value: + stringValue: success + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: state + value: + stringValue: unchanged + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The cumulative amount of time updating the cluster state since the node started. 
+ name: elasticsearch.cluster.state_update.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: commit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: completion + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: computation + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: context_construction + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: master_apply + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: state + value: + stringValue: failure + - key: type + value: + stringValue: notification + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "113" + attributes: + - key: state + value: + stringValue: success + - key: type + value: + stringValue: commit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "117" + attributes: + - key: state + value: + stringValue: success + - key: type + value: + stringValue: completion + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "40" + attributes: + - key: state + value: + stringValue: success + - key: type + value: + stringValue: computation + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "17" + attributes: + - key: state + value: + stringValue: success + - key: type + value: + stringValue: context_construction + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "484" + attributes: + - key: state + value: + 
stringValue: success + - key: type + value: + stringValue: master_apply + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2" + attributes: + - key: state + value: + stringValue: success + - key: type + value: + stringValue: notification + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "7" + attributes: + - key: state + value: + stringValue: unchanged + - key: type + value: + stringValue: computation + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "1" + attributes: + - key: state + value: + stringValue: unchanged + - key: type + value: + stringValue: notification + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Configured memory limit, in bytes, for the indexing requests. + gauge: + dataPoints: + - asInt: "53687091" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.indexing_pressure.memory.limit + unit: By + - description: Cumulative number of indexing requests rejected in the primary stage. + name: elasticsearch.indexing_pressure.memory.total.primary_rejections + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Number of indexing requests rejected in the replica stage. + name: elasticsearch.indexing_pressure.memory.total.replica_rejections + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Memory consumed, in bytes, by indexing requests in the specified stage. 
+ name: elasticsearch.memory.indexing_pressure + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: stage + value: + stringValue: coordinating + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: stage + value: + stringValue: primary + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: stage + value: + stringValue: replica + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total count of query cache misses across all shards assigned to selected nodes. + name: elasticsearch.node.cache.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "333" + attributes: + - key: type + value: + stringValue: hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5324" + attributes: + - key: type + value: + stringValue: miss + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{count}' + - description: The number of evictions from the cache on a node. + name: elasticsearch.node.cache.evictions + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13212" + attributes: + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "938" + attributes: + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{evictions}' + - description: The size in bytes of the cache on a node. 
+ name: elasticsearch.node.cache.memory.usage + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "32" + attributes: + - key: cache_name + value: + stringValue: fielddata + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "394" + attributes: + - key: cache_name + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total amount of memory used for the query cache across all shards assigned to the node. + name: elasticsearch.node.cache.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "394" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of open tcp connections for internal cluster communication. + name: elasticsearch.node.cluster.connections + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{connections}' + - description: The number of bytes sent and received on the network for internal cluster communication. + name: elasticsearch.node.cluster.io + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "129384" + attributes: + - key: direction + value: + stringValue: received + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "157732" + attributes: + - key: direction + value: + stringValue: sent + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: By + - description: The number of documents on the node. + name: elasticsearch.node.documents + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + attributes: + - key: state + value: + stringValue: active + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "200" + attributes: + - key: state + value: + stringValue: deleted + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: The amount of disk space available to the JVM across all file stores for this node. 
Depending on OS or process level restrictions, this might appear less than free. This is the actual amount of free disk space the Elasticsearch node can utilise. + name: elasticsearch.node.fs.disk.available + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12293464064" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of unallocated disk space across all file stores for this node. + name: elasticsearch.node.fs.disk.free + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "15746158592" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The amount of disk space across all file stores for this node. + name: elasticsearch.node.fs.disk.total + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "67371577344" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of HTTP connections to the node. + name: elasticsearch.node.http.connections + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{connections}' + - description: Total number of documents ingested during the lifetime of this node. + name: elasticsearch.node.ingest.documents + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{documents}' + - description: Total number of documents currently being ingested. + name: elasticsearch.node.ingest.documents.current + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: Total number of failed ingest operations during the lifetime of this node. 
+ name: elasticsearch.node.ingest.operations.failed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operation}' + - description: The number of open file descriptors held by the node. + name: elasticsearch.node.open_files + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "270" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{files}' + - description: The number of operations completed by a node. + name: elasticsearch.node.operations.completed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "400" + attributes: + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "234" + attributes: + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "345" + attributes: + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "600" + attributes: + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "200" + attributes: + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5234" + attributes: + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "124" + attributes: + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "958" + attributes: + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "235" + attributes: + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5234" + attributes: + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: 
"2000000" + - asInt: "0" + attributes: + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Number of query operations currently running. + gauge: + dataPoints: + - asInt: "6723" + attributes: + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.node.operations.current + unit: '{operations}' + - description: The number of hits and misses resulting from GET operations. + name: elasticsearch.node.operations.get.completed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "512" + attributes: + - key: result + value: + stringValue: hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "512" + attributes: + - key: result + value: + stringValue: miss + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: The time spent on hits and misses resulting from GET operations. + name: elasticsearch.node.operations.get.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "209" + attributes: + - key: result + value: + stringValue: hit + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "124" + attributes: + - key: result + value: + stringValue: miss + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Time spent on operations by a node. 
+ name: elasticsearch.node.operations.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "500" + attributes: + - key: operation + value: + stringValue: delete + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "256" + attributes: + - key: operation + value: + stringValue: fetch + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "995" + attributes: + - key: operation + value: + stringValue: flush + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "500" + attributes: + - key: operation + value: + stringValue: get + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "300" + attributes: + - key: operation + value: + stringValue: index + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "25345" + attributes: + - key: operation + value: + stringValue: merge + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2354" + attributes: + - key: operation + value: + stringValue: query + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "544" + attributes: + - key: operation + value: + stringValue: refresh + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "5234" + attributes: + - key: operation + value: + stringValue: scroll + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "2342" + attributes: + - key: operation + value: + stringValue: suggest + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "664" + attributes: + - key: operation + value: + stringValue: warmer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: Total number of documents currently being ingested by a pipeline. 
+ name: elasticsearch.node.pipeline.ingest.documents.current + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: Number of documents preprocessed by the ingest pipeline. + name: elasticsearch.node.pipeline.ingest.documents.preprocessed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{documents}' + - description: Total number of failed operations for the ingest pipeline. + name: elasticsearch.node.pipeline.ingest.operations.failed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_6 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "0" + attributes: + - key: name + value: + stringValue: xpack_monitoring_7 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operation}' + - description: Total number of times the script cache has evicted old data. + name: elasticsearch.node.script.cache_evictions + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Total number of times the script compilation circuit breaker has limited inline script compilations. 
+ name: elasticsearch.node.script.compilation_limit_triggered + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: Total number of inline script compilations performed by the node. + name: elasticsearch.node.script.compilations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{compilations}' + - description: Size of memory for segment object of a node. + name: elasticsearch.node.segments.memory + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "100" + attributes: + - key: object + value: + stringValue: doc_value + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "200" + attributes: + - key: object + value: + stringValue: fixed_bit_set + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "300" + attributes: + - key: object + value: + stringValue: index_writer + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "400" + attributes: + - key: object + value: + stringValue: term + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Total data set size of all shards assigned to the node. This includes the size of shards not stored fully on the node, such as the cache for partially mounted indices. + name: elasticsearch.node.shards.data_set.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: A prediction of how much larger the shard stores on this node will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. A value of -1 indicates that this is not available. 
+ name: elasticsearch.node.shards.reserved.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The size of the shards assigned to this node. + name: elasticsearch.node.shards.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "300" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of tasks finished by the thread pool. + name: elasticsearch.node.thread_pool.tasks.finished + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + attributes: + - key: state + value: + stringValue: completed + - key: thread_pool_name + value: + stringValue: analyze + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "4" + attributes: + - key: state + value: + stringValue: rejected + - key: thread_pool_name + value: + stringValue: analyze + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tasks}' + - description: The number of queued tasks in the thread pool. + name: elasticsearch.node.thread_pool.tasks.queued + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "2" + attributes: + - key: thread_pool_name + value: + stringValue: analyze + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{tasks}' + - description: The number of threads in the thread pool. + name: elasticsearch.node.thread_pool.threads + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "3" + attributes: + - key: state + value: + stringValue: active + - key: thread_pool_name + value: + stringValue: analyze + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "-2" + attributes: + - key: state + value: + stringValue: idle + - key: thread_pool_name + value: + stringValue: analyze + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: '{threads}' + - description: Number of transaction log operations. 
+ name: elasticsearch.node.translog.operations + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{operations}' + - description: Size of the transaction log. + name: elasticsearch.node.translog.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Size of uncommitted transaction log operations. + name: elasticsearch.node.translog.uncommitted.size + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: Fifteen-minute load average on the system (field is not present if fifteen-minute load average is not available). + gauge: + dataPoints: + - asDouble: 0.02 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.os.cpu.load_avg.15m + unit: "1" + - description: One-minute load average on the system (field is not present if one-minute load average is not available). + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.os.cpu.load_avg.1m + unit: "1" + - description: Five-minute load average on the system (field is not present if five-minute load average is not available). + gauge: + dataPoints: + - asDouble: 0.02 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.os.cpu.load_avg.5m + unit: "1" + - description: Recent CPU usage for the whole system, or -1 if not supported. + gauge: + dataPoints: + - asInt: "3" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.os.cpu.usage + unit: '%' + - description: Amount of physical memory. 
+ gauge: + dataPoints: + - asInt: "294109184" + attributes: + - key: state + value: + stringValue: free + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "779632640" + attributes: + - key: state + value: + stringValue: used + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.os.memory + unit: By + - description: CPU time used by the process on which the Java virtual machine is running. + name: elasticsearch.process.cpu.time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "42970" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: CPU usage in percent. + gauge: + dataPoints: + - asDouble: 0 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: elasticsearch.process.cpu.usage + unit: "1" + - description: Size of virtual memory that is guaranteed to be available to the running process. + name: elasticsearch.process.memory.virtual + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "4961767424" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + unit: By + - description: The number of loaded classes + gauge: + dataPoints: + - asInt: "20695" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.classes.loaded + unit: "1" + - description: The total number of garbage collections that have occurred + name: jvm.gc.collections.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + attributes: + - key: name + value: + stringValue: old + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "20" + attributes: + - key: name + value: + stringValue: young + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: "1" + - description: The approximate accumulated collection elapsed time + name: jvm.gc.collections.elapsed + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + attributes: + - key: name + value: + stringValue: old + startTimeUnixNano: "1000000" + 
timeUnixNano: "2000000" + - asInt: "930" + attributes: + - key: name + value: + stringValue: young + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: ms + - description: The amount of memory that is guaranteed to be available for the heap + gauge: + dataPoints: + - asInt: "536870912" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.heap.committed + unit: By + - description: The maximum amount of memory can be used for the heap + gauge: + dataPoints: + - asInt: "536870912" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.heap.max + unit: By + - description: The current heap memory usage + gauge: + dataPoints: + - asInt: "305152000" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.heap.used + unit: By + - description: Fraction of heap memory usage + gauge: + dataPoints: + - asDouble: 0.56 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.heap.utilization + unit: "1" + - description: The amount of memory that is guaranteed to be available for non-heap purposes + gauge: + dataPoints: + - asInt: "131792896" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.nonheap.committed + unit: By + - description: The current non-heap memory usage + gauge: + dataPoints: + - asInt: "128825192" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.nonheap.used + unit: By + - description: The maximum amount of memory can be used for the memory pool + gauge: + dataPoints: + - asInt: "536870912" + attributes: + - key: name + value: + stringValue: old + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "736870912" + attributes: + - key: name + value: + stringValue: survivor + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "636870912" + attributes: + - key: name + value: + stringValue: young + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: 
jvm.memory.pool.max + unit: By + - description: The current memory pool memory usage + gauge: + dataPoints: + - asInt: "76562432" + attributes: + - key: name + value: + stringValue: old + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "10485760" + attributes: + - key: name + value: + stringValue: survivor + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + - asInt: "218103808" + attributes: + - key: name + value: + stringValue: young + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.memory.pool.used + unit: By + - description: The current number of threads + gauge: + dataPoints: + - asInt: "27" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: jvm.threads.count + unit: "1" + scope: + name: otelcol/elasticsearchreceiver + version: latest diff --git a/receiver/elasticsearchreceiver/testdata/sample_payloads/nodes_stats_other.json b/receiver/elasticsearchreceiver/testdata/sample_payloads/nodes_stats_other.json new file mode 100644 index 000000000000..3ce78cdf81ff --- /dev/null +++ b/receiver/elasticsearchreceiver/testdata/sample_payloads/nodes_stats_other.json @@ -0,0 +1,743 @@ +{ + "_nodes": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "cluster_name": "docker-cluster", + "nodes": { + "szaFXm55RIeu8X-PTv5unQ": { + "timestamp": 1627669701946, + "name": "917e13e55eed", + "transport_address": "172.22.0.2:9300", + "host": "172.22.0.2", + "ip": "172.22.0.2:9300", + "roles": [ + "data", + "data_cold", + "data_content", + "data_frozen", + "data_hot", + "data_warm", + "ingest", + "master", + "ml", + "remote_cluster_client", + "transform" + ], + "attributes": { + "ml.machine_memory": "1073741824", + "xpack.installed": "true", + "transform.node": "true", + "ml.max_open_jobs": "512", + "ml.max_jvm_size": "536870912" + }, + "indices": { + "docs": { + "count": 100, + "deleted": 200 + }, + "store": { + "size_in_bytes": 300, + "total_data_set_size_in_bytes": 0, + "reserved_in_bytes": 0 + }, + "indexing": { + 
"index_total": 200, + "index_time_in_millis": 300, + "index_current": 0, + "index_failed": 0, + "delete_total": 400, + "delete_time_in_millis": 500, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 600, + "time_in_millis": 500, + "exists_total": 512, + "exists_time_in_millis": 209, + "missing_total": 512, + "missing_time_in_millis": 124, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 124, + "query_time_in_millis": 2354, + "query_current": 6723, + "fetch_total": 234, + "fetch_time_in_millis": 256, + "fetch_current": 234, + "scroll_total": 235, + "scroll_time_in_millis": 5234, + "scroll_current": 234, + "suggest_total": 5234, + "suggest_time_in_millis": 2342, + "suggest_current": 0 + }, + "merges": { + "current": 123, + "current_docs": 5123, + "current_size_in_bytes": 5123, + "total": 5234, + "total_time_in_millis": 25345, + "total_docs": 21, + "total_size_in_bytes": 423, + "total_stopped_time_in_millis": 283, + "total_throttled_time_in_millis": 213, + "total_auto_throttle_in_bytes": 1234 + }, + "refresh": { + "total": 958, + "total_time_in_millis": 544, + "external_total": 0, + "external_total_time_in_millis": 0, + "listeners": 0 + }, + "flush": { + "total": 345, + "periodic": 0, + "total_time_in_millis": 995 + }, + "warmer": { + "current": 6435, + "total": 0, + "total_time_in_millis": 664 + }, + "query_cache": { + "memory_size_in_bytes": 394, + "total_count": 983, + "hit_count": 333, + "miss_count": 5324, + "cache_size": 555, + "cache_count": 223, + "evictions": 938 + }, + "fielddata": { + "memory_size_in_bytes": 32, + "evictions": 13212 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 0, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 400, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 100, + 
"index_writer_memory_in_bytes": 300, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 200, + "max_unsafe_auto_id_timestamp": -9223372036854775808, + "file_sizes": {} + }, + "translog": { + "operations": 0, + "size_in_bytes": 0, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 0, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "os": { + "timestamp": 1627669701947, + "cpu": { + "percent": 3, + "load_average": { + "1m": 0.0, + "5m": 0.02, + "15m": 0.02 + } + }, + "mem": { + "total_in_bytes": 1073741824, + "free_in_bytes": 294109184, + "used_in_bytes": 779632640, + "free_percent": 27, + "used_percent": 73 + }, + "swap": { + "total_in_bytes": 1073741824, + "free_in_bytes": 1073741824, + "used_in_bytes": 0 + }, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 45612972897 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": 100000, + "stat": { + "number_of_elapsed_periods": 12406, + "number_of_times_throttled": 298, + "time_throttled_nanos": 34855164850 + } + }, + "memory": { + "control_group": "/", + "limit_in_bytes": "1073741824", + "usage_in_bytes": "779632640" + } + } + }, + "process": { + "timestamp": 1627669701948, + "open_file_descriptors": 270, + "max_file_descriptors": 1048576, + "cpu": { + "percent": 0, + "total_in_millis": 42970 + }, + "mem": { + "total_virtual_in_bytes": 4961767424 + } + }, + "jvm": { + "timestamp": 1627669701948, + "uptime_in_millis": 2059021, + "mem": { + "heap_used_in_bytes": 305152000, + "heap_used_percent": 56, + "heap_committed_in_bytes": 536870912, + "heap_max_in_bytes": 536870912, + "non_heap_used_in_bytes": 128825192, + "non_heap_committed_in_bytes": 131792896, + "pools": { + "young": { + "used_in_bytes": 218103808, + "max_in_bytes": 
636870912, + "peak_used_in_bytes": 314572800, + "peak_max_in_bytes": 0 + }, + "old": { + "used_in_bytes": 76562432, + "max_in_bytes": 536870912, + "peak_used_in_bytes": 76562432, + "peak_max_in_bytes": 536870912 + }, + "survivor": { + "used_in_bytes": 10485760, + "max_in_bytes": 736870912, + "peak_used_in_bytes": 41943040, + "peak_max_in_bytes": 0 + } + } + }, + "threads": { + "count": 27, + "peak_count": 28 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 20, + "collection_time_in_millis": 930 + }, + "old": { + "collection_count": 10, + "collection_time_in_millis": 5 + } + } + }, + "buffer_pools": { + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + }, + "direct": { + "count": 9, + "used_in_bytes": 1070323, + "total_capacity_in_bytes": 1070322 + }, + "mapped - 'non-volatile memory'": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + }, + "classes": { + "current_loaded_count": 20695, + "total_loaded_count": 20695, + "total_unloaded_count": 0 + } + }, + "thread_pool": { + "analyze": { + "threads": 1, + "queue": 2, + "active": 3, + "rejected": 4, + "largest": 5, + "completed": 6 + } + }, + "fs": { + "timestamp": 1627669701948, + "total": { + "total_in_bytes": 67371577344, + "free_in_bytes": 15746158592, + "available_in_bytes": 12293464064 + }, + "data": [ + { + "path": "/usr/share/elasticsearch/data/nodes/0", + "mount": "/ (overlay)", + "type": "overlay", + "total_in_bytes": 67371577344, + "free_in_bytes": 15746158592, + "available_in_bytes": 12293464064 + } + ] + }, + "transport": { + "server_open": 100, + "total_outbound_connections": 200, + "rx_count": 0, + "rx_size_in_bytes": 129384, + "tx_count": 0, + "tx_size_in_bytes": 157732 + }, + "http": { + "current_open": 2, + "total_opened": 3, + "clients": [ + { + "id": 1644878830, + "opened_time_millis": 1627669701929, + "closed_time_millis": 1627669701929, + "last_request_time_millis": -1, + "request_count": 0, + "request_size_bytes": 0 + }, + 
{ + "id": 2001891351, + "agent": "Go-http-client/1.1", + "local_address": "172.22.0.2:9200", + "remote_address": "172.22.0.1:57136", + "last_uri": "/_cluster/health", + "opened_time_millis": 1627667715500, + "last_request_time_millis": 1627669695490, + "request_count": 399, + "request_size_bytes": 0 + }, + { + "id": 103547676, + "agent": "PostmanRuntime/7.28.2", + "local_address": "172.22.0.2:9200", + "remote_address": "172.22.0.1:57276", + "last_uri": "/_nodes/*/stats/", + "opened_time_millis": 1627669701929, + "last_request_time_millis": 1627669701929, + "request_count": 1, + "request_size_bytes": 0 + } + ] + }, + "breakers": { + "request": { + "limit_size_in_bytes": 322122547, + "limit_size": "307.1mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "fielddata": { + "limit_size_in_bytes": 214748364, + "limit_size": "204.7mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "in_flight_requests": { + "limit_size_in_bytes": 536870912, + "limit_size": "512mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 2.0, + "tripped": 0 + }, + "model_inference": { + "limit_size_in_bytes": 268435456, + "limit_size": "256mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "accounting": { + "limit_size_in_bytes": 536870912, + "limit_size": "512mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 510027366, + "limit_size": "486.3mb", + "estimated_size_in_bytes": 305152000, + "estimated_size": "291mb", + "overhead": 1.0, + "tripped": 0 + } + }, + "script": { + "compilations": 1, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + "discovery": { + "cluster_state_queue": { + "total": 0, + "pending": 0, + "committed": 0 + }, + "published_cluster_states": { + "full_states": 2, + "incompatible_diffs": 0, + 
"compatible_diffs": 1 + }, + "cluster_state_update": { + "unchanged": { + "count": 4, + "computation_time_millis": 7, + "notification_time_millis": 1 + }, + "success": { + "count": 7, + "computation_time_millis": 40, + "publication_time_millis": 627, + "context_construction_time_millis": 17, + "commit_time_millis": 113, + "completion_time_millis": 117, + "master_apply_time_millis": 484, + "notification_time_millis": 2 + }, + "failure": { + "count": 0, + "computation_time_millis": 0, + "publication_time_millis": 0, + "context_construction_time_millis": 0, + "commit_time_millis": 0, + "completion_time_millis": 0, + "master_apply_time_millis": 0, + "notification_time_millis": 0 + } + } + }, + "ingest": { + "total": { + "count": 0, + "time_in_millis": 0, + "current": 0, + "failed": 0 + }, + "pipelines": { + "xpack_monitoring_6": { + "count": 0, + "time_in_millis": 0, + "current": 0, + "failed": 0, + "processors": [ + { + "script": { + "type": "script", + "stats": { + "count": 0, + "time_in_millis": 0, + "current": 0, + "failed": 0 + } + } + }, + { + "gsub": { + "type": "gsub", + "stats": { + "count": 0, + "time_in_millis": 0, + "current": 0, + "failed": 0 + } + } + } + ] + }, + "xpack_monitoring_7": { + "count": 0, + "time_in_millis": 0, + "current": 0, + "failed": 0, + "processors": [] + } + } + }, + "adaptive_selection": {}, + "script_cache": { + "sum": { + "compilations": 1, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + "contexts": [ + { + "context": "aggregation_selector", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "aggs", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "aggs_combine", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "aggs_init", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "aggs_map", + "compilations": 0, + 
"cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "aggs_reduce", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "analysis", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "boolean_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "bucket_aggregation", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "date_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "double_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "filter", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "geo_point_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "ingest", + "compilations": 1, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "ingest_template", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "interval", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "ip_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "keyword_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "long_field", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "moving-function", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "number_sort", + "compilations": 0, + 
"cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "painless_test", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "processor_conditional", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "score", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "script_heuristic", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "similarity", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "similarity_weight", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "string_sort", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "template", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "terms_set", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "update", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "watcher_condition", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "watcher_transform", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + }, + { + "context": "xpack_template", + "compilations": 0, + "cache_evictions": 0, + "compilation_limit_triggered": 0 + } + ] + }, + "indexing_pressure": { + "memory": { + "current": { + "combined_coordinating_and_primary_in_bytes": 0, + "coordinating_in_bytes": 0, + "primary_in_bytes": 0, + "replica_in_bytes": 0, + "all_in_bytes": 0 + }, + "total": { + "combined_coordinating_and_primary_in_bytes": 0, + "coordinating_in_bytes": 0, + "primary_in_bytes": 0, + "replica_in_bytes": 0, + "all_in_bytes": 0, + 
"coordinating_rejections": 0, + "primary_rejections": 0, + "replica_rejections": 0 + }, + "limit_in_bytes": 53687091 + } + } + } + } +}