diff --git a/auditbeat/magefile.go b/auditbeat/magefile.go
index f755fef8a0c..3e5577fd394 100644
--- a/auditbeat/magefile.go
+++ b/auditbeat/magefile.go
@@ -98,11 +98,22 @@ func Update() error {
 	return sh.Run("make", "update")
 }
 
-// Fields generates a fields.yml for the Beat.
-func Fields() error {
+// Fields generates a fields.yml and include/fields.go for the Beat.
+func Fields() {
+	mg.SerialDeps(fieldsYML, mage.GenerateAllInOneFieldsGo)
+}
+
+// fieldsYML generates a fields.yml.
+func fieldsYML() error {
 	return mage.GenerateFieldsYAML("module")
 }
 
+// Dashboards collects all the dashboards and generates index patterns.
+func Dashboards() error {
+	mg.Deps(Fields)
+	return mage.KibanaDashboards("module")
+}
+
 // GoTestUnit executes the Go unit tests.
 // Use TEST_COVERAGE=true to enable code coverage profiling.
 // Use RACE_DETECTOR=true to enable the race detector.
diff --git a/dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go b/dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go
index 9841a4f69f6..cf024ebe4c5 100644
--- a/dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go
+++ b/dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go
@@ -19,51 +19,73 @@ package main
 
 import (
 	"flag"
-	"fmt"
-	"os"
+	"log"
+	"path/filepath"
 
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/kibana"
 	"github.com/elastic/beats/libbeat/version"
 )
 
+var usageText = `
+Usage: kibana_index_pattern [flags]
+
+  kibana_index_pattern generates Kibana index patterns from the Beat's
+  fields.yml file. It will create an index pattern file that is usable with
+  both Kibana 5.x and 6.x.
+
+Options:
+`[1:]
+
+var (
+	beatName       string
+	beatVersion    string
+	indexPattern   string
+	fieldsYAMLFile string
+	outputDir      string
+)
+
+func init() {
+	flag.StringVar(&beatName, "beat", "", "Name of the beat. (Required)")
+	flag.StringVar(&beatVersion, "version", version.GetDefaultVersion(), "Beat version. (Required)")
+	flag.StringVar(&indexPattern, "index", "", "Kibana index pattern. (Required)")
+	flag.StringVar(&fieldsYAMLFile, "fields", "fields.yml", "fields.yml file containing all fields used by the Beat.")
+	flag.StringVar(&outputDir, "out", "build/kibana", "Output dir.")
+}
+
 func main() {
-	index := flag.String("index", "", "The name of the index pattern. (required)")
-	beatName := flag.String("beat-name", "", "The name of the beat. (required)")
-	beatDir := flag.String("beat-dir", "", "The local beat directory. (required)")
-	beatVersion := flag.String("version", version.GetDefaultVersion(), "The beat version.")
+	log.SetFlags(0)
 	flag.Parse()
 
-	if *index == "" {
-		fmt.Fprint(os.Stderr, "The name of the index pattern must be set.")
-		os.Exit(1)
+	if beatName == "" {
+		log.Fatal("Name of the Beat must be set (-beat).")
 	}
-	if *beatName == "" {
-		fmt.Fprint(os.Stderr, "The name of the beat must be set.")
-		os.Exit(1)
+	if beatVersion == "" {
+		log.Fatal("Beat version must be set (-version).")
 	}
-	if *beatDir == "" {
-		fmt.Fprint(os.Stderr, "The beat directory must be set.")
-		os.Exit(1)
+	if indexPattern == "" {
+		log.Fatal("Index pattern must be set (-index).")
 	}
 
 	version5, _ := common.NewVersion("5.0.0")
 	version6, _ := common.NewVersion("6.0.0")
-	versions := []*common.Version{version5, version6}
+	versions := []common.Version{*version5, *version6}
 	for _, version := range versions {
+		indexPattern, err := kibana.NewGenerator(indexPattern, beatName, fieldsYAMLFile, outputDir, beatVersion, version)
+		if err != nil {
+			log.Fatal(err)
+		}
 
-		indexPatternGenerator, err := kibana.NewGenerator(*index, *beatName, *beatDir, *beatVersion, *version)
+		file, err := indexPattern.Generate()
 		if err != nil {
-			fmt.Fprintf(os.Stderr, err.Error())
-			os.Exit(1)
+			log.Fatal(err)
 		}
-		pattern, err := indexPatternGenerator.Generate()
+
+		// Log output file location.
+		absFile, err := filepath.Abs(file)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, err.Error())
-			os.Exit(1)
+			absFile = file
 		}
-		fmt.Fprintf(os.Stdout, "-- The index pattern was created under %v\n", pattern)
+		log.Printf(">> The index pattern was created under %v", absFile)
 	}
 }
diff --git a/dev-tools/cmd/module_fields/main.go b/dev-tools/cmd/module_fields/main.go
new file mode 100644
index 00000000000..e4275f5e275
--- /dev/null
+++ b/dev-tools/cmd/module_fields/main.go
@@ -0,0 +1,118 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+
+	"github.com/elastic/beats/libbeat/asset"
+	"github.com/elastic/beats/libbeat/generator/fields"
+	"github.com/elastic/beats/licenses"
+)
+
+var usageText = `
+Usage: module_fields [flags] [module-dir]
+
+  module_fields generates a fields.go file containing a copy of the module's
+  fields.yml data in a format that can be embedded in the Beat's binary.
+  module-dir should be the directory containing modules (e.g. filebeat/module).
+
+Options:
+`[1:]
+
+var (
+	beatName string
+	license  string
+)
+
+func init() {
+	flag.StringVar(&beatName, "beat", "", "Name of the beat. (Required)")
+	flag.StringVar(&license, "license", "ASL2", "License header for generated file.")
+	flag.Usage = usageFlag
+}
+
+func main() {
+	log.SetFlags(0)
+	flag.Parse()
+
+	if beatName == "" {
+		log.Fatal("You must use -beat to specify the beat name.")
+	}
+
+	license, err := licenses.Find(license)
+	if err != nil {
+		log.Fatalf("Invalid license specifier: %v", err)
+	}
+
+	args := flag.Args()
+	if len(args) != 1 {
+		log.Fatal("module-dir must be passed as an argument.")
+	}
+	dir := args[0]
+
+	modules, err := fields.GetModules(dir)
+	if err != nil {
+		log.Fatalf("Error fetching modules: %v", err)
+	}
+
+	for _, module := range modules {
+		files, err := fields.CollectFiles(module, dir)
+		if err != nil {
+			log.Fatalf("Error fetching files for module %v: %v", module, err)
+		}
+
+		data, err := fields.GenerateFieldsYml(files)
+		if err != nil {
+			log.Fatalf("Error generating fields.yml for module %v: %v", module, err)
+		}
+
+		encData, err := asset.EncodeData(string(data))
+		if err != nil {
+			log.Fatalf("Error encoding the data: %v", err)
+		}
+
+		var buf bytes.Buffer
+		asset.Template.Execute(&buf, asset.Data{
+			License: license,
+			Beat:    beatName,
+			Name:    module,
+			Data:    encData,
+			Package: module,
+		})
+
+		bs, err := format.Source(buf.Bytes())
+		if err != nil {
+			log.Fatalf("Error creating golang file from template: %v", err)
+		}
+
+		err = ioutil.WriteFile(path.Join(dir, module, "fields.go"), bs, 0644)
+		if err != nil {
+			log.Fatalf("Error writing fields.go: %v", err)
+		}
+	}
+}
+
+func usageFlag() {
+	fmt.Fprintf(os.Stderr, usageText)
+	flag.PrintDefaults()
+}
diff --git a/dev-tools/mage/clean.go b/dev-tools/mage/clean.go
index 2f170e9e505..912d372e1df 100644
--- a/dev-tools/mage/clean.go
+++ b/dev-tools/mage/clean.go
@@ -35,10 +35,6 @@ var DefaultCleanPaths = []string{
 	"_meta/kibana.generated",
 	"_meta/kibana/5/index-pattern/{{.BeatName}}.json",
 	"_meta/kibana/6/index-pattern/{{.BeatName}}.json",
-
-	"../x-pack/{{.BeatName}}/build",
-	"../x-pack/{{.BeatName}}/{{.BeatName}}",
-	"../x-pack/{{.BeatName}}/{{.BeatName}}.exe",
 }
 
 // Clean clean generated build artifacts.
diff --git a/dev-tools/mage/config.go b/dev-tools/mage/config.go
new file mode 100644
index 00000000000..a63176724cb
--- /dev/null
+++ b/dev-tools/mage/config.go
@@ -0,0 +1,152 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package mage + +import ( + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +const moduleConfigTemplate = ` +#========================== Modules configuration ============================= +{{.BeatName}}.modules: +{{range $mod := .Modules}} +#{{$mod.Dashes}} {{$mod.Title | title}} Module {{$mod.Dashes}} +{{$mod.Config}} +{{- end}} + +` + +type moduleConfigTemplateData struct { + ID string + Title string + Dashes string + Config string +} + +type moduleFieldsYmlData []struct { + Title string `json:"title"` + ShortConfig bool `json:"short_config"` +} + +func readModuleFieldsYml(path string) (title string, useShort bool, err error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return "", false, err + } + + var fd moduleFieldsYmlData + if err = yaml.Unmarshal(data, &fd); err != nil { + return "", false, err + } + + if len(fd) == 0 { + return "", false, errors.New("module not found in fields.yml") + } + + return fd[0].Title, fd[0].ShortConfig, nil +} + +// moduleDashes returns a string containing the correct number of dashes '-' to +// center the modules title in the middle of the line surrounded by an equal +// number of dashes on each side. +func moduleDashes(name string) string { + const ( + lineLen = 80 + headerLen = len("#") + titleSuffixLen = len(" Module ") + ) + + numDashes := lineLen - headerLen - titleSuffixLen - len(name) + numDashes /= 2 + return strings.Repeat("-", numDashes) +} +func GenerateModuleReferenceConfig(out string, moduleDirs ...string) error { + var moduleConfigs []moduleConfigTemplateData + for _, dir := range moduleDirs { + modules, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + + for _, modDirInfo := range modules { + if !modDirInfo.IsDir() { + continue + } + name := modDirInfo.Name() + + // Get title from fields.yml. + title, _, err := readModuleFieldsYml(filepath.Join(dir, name, "_meta/fields.yml")) + if err != nil { + title = strings.Title(name) + } + + // Prioritize config.reference.yml, but fallback to config.yml. + files := []string{ + filepath.Join(dir, name, "_meta/config.reference.yml"), + filepath.Join(dir, name, "_meta/config.yml"), + } + + var data []byte + for _, f := range files { + data, err = ioutil.ReadFile(f) + if err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + + break + } + if data == nil { + continue + } + + moduleConfigs = append(moduleConfigs, moduleConfigTemplateData{ + ID: name, + Title: title, + Dashes: moduleDashes(title), + Config: string(data), + }) + } + } + + // Sort them by their module dir name, but put system first. + sort.Slice(moduleConfigs, func(i, j int) bool { + // Bubble system to the top of the list. + if moduleConfigs[i].ID == "system" { + return true + } else if moduleConfigs[j].ID == "system" { + return false + } + return moduleConfigs[i].ID < moduleConfigs[j].ID + }) + + config := MustExpand(moduleConfigTemplate, map[string]interface{}{ + "Modules": moduleConfigs, + }) + + return ioutil.WriteFile(out, []byte(config), 0644) +} diff --git a/dev-tools/mage/fields.go b/dev-tools/mage/fields.go index f709e16a39f..76d2a20422e 100644 --- a/dev-tools/mage/fields.go +++ b/dev-tools/mage/fields.go @@ -27,10 +27,24 @@ import ( // the common fields specified by libbeat, the common fields for the Beat, // and any additional fields.yml files you specify. // -// fieldsFiles specifies additional directories to search recursively for files -// named fields.yml. 
The contents of each fields.yml will be included in the -// generated file. -func GenerateFieldsYAML(fieldsFiles ...string) error { +// moduleDirs specifies additional directories to search for modules. The +// contents of each fields.yml will be included in the generated file. +func GenerateFieldsYAML(moduleDirs ...string) error { + return generateFieldsYAML(OSSBeatDir(), moduleDirs...) +} + +func OSSBeatDir(path ...string) string { + ossDir := CWD() + + // Check if we need to correct ossDir because it's in x-pack. + if parentDir := filepath.Base(filepath.Dir(ossDir)); parentDir == "x-pack" { + ossDir = filepath.Join(ossDir, "../..", BeatName) + } + + return filepath.Join(append([]string{ossDir}, path...)...) +} + +func generateFieldsYAML(baseDir string, moduleDirs ...string) error { const globalFieldsCmdPath = "libbeat/scripts/cmd/global_fields/main.go" beatsDir, err := ElasticBeatsDir() @@ -41,9 +55,57 @@ func GenerateFieldsYAML(fieldsFiles ...string) error { globalFieldsCmd := sh.RunCmd("go", "run", filepath.Join(beatsDir, globalFieldsCmdPath), "-es_beats_path", beatsDir, - "-beat_path", CWD(), + "-beat_path", baseDir, "-out", "fields.yml", ) - return globalFieldsCmd(fieldsFiles...) + return globalFieldsCmd(moduleDirs...) +} + +// GenerateAllInOneFieldsGo generates an all-in-one fields.go file. +func GenerateAllInOneFieldsGo() error { + return GenerateFieldsGo("fields.yml", "include/fields.go") +} + +// GenerateFieldsGo generates a .go file containing the fields.yml data. +func GenerateFieldsGo(fieldsYML, out string) error { + const assetCmdPath = "dev-tools/cmd/asset/asset.go" + + beatsDir, err := ElasticBeatsDir() + if err != nil { + return err + } + + assetCmd := sh.RunCmd("go", "run", + filepath.Join(beatsDir, assetCmdPath), + "-pkg", "include", + "-in", fieldsYML, + "-out", createDir(out), + BeatName, + ) + + return assetCmd() +} + +func GenerateModuleFieldsGo() error { + const moduleFieldsCmdPath = "dev-tools/cmd/module_fields/main.go" + + beatsDir, err := ElasticBeatsDir() + if err != nil { + return err + } + + licenseType := BeatLicense + if licenseType == "ASL 2.0" { + licenseType = "ASL2" + } + + moduleFieldsCmd := sh.RunCmd("go", "run", + filepath.Join(beatsDir, moduleFieldsCmdPath), + "-beat", BeatName, + "-license", licenseType, + filepath.Join(CWD(), "module"), + ) + + return moduleFieldsCmd() } diff --git a/dev-tools/mage/kibana.go b/dev-tools/mage/kibana.go new file mode 100644 index 00000000000..d9cda7892a6 --- /dev/null +++ b/dev-tools/mage/kibana.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
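
The KibanaDashboards helper defined just below copies the OSS Beat's _meta/kibana directory but tolerates it being absent, which requires unwrapping the wrapped copy error before testing it with os.IsNotExist. A self-contained sketch of that unwrap-then-check pattern; copyDir here is a hypothetical stand-in for mage.CopyTask.Execute:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// copyDir fails with a wrapped *PathError when the source is missing,
// the same shape of error that CopyTask.Execute returns.
func copyDir(src string) error {
	if _, err := os.Stat(src); err != nil {
		return errors.Wrapf(err, "failed to copy source dir %v", src)
	}
	return nil // actual copying elided
}

func main() {
	err := copyDir("_meta/kibana")
	// os.IsNotExist sees through the wrapper only via errors.Cause.
	if err != nil && !os.IsNotExist(errors.Cause(err)) {
		fmt.Println("fatal:", err)
		return
	}
	fmt.Println("missing dashboards dir is tolerated; continuing")
}
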
+ +package mage + +import ( + "os" + "path/filepath" + + "github.com/magefile/mage/sh" + "github.com/pkg/errors" +) + +// KibanaDashboards collects the Kibana dashboards files and generates the +// index patterns based on the fields.yml file. It outputs to build/kibana. +func KibanaDashboards(moduleDirs ...string) error { + var kibanaBuildDir = "build/kibana" + + if err := os.MkdirAll(kibanaBuildDir, 0755); err != nil { + return err + } + + // Copy the OSS Beat's common dashboards if they exist. This assumes that + // X-Pack Beats only add dashboards with modules (this will require a + // change if we have X-Pack only Beats). + cp := &CopyTask{Source: OSSBeatDir("_meta/kibana"), Dest: kibanaBuildDir} + if err := cp.Execute(); err != nil && !os.IsNotExist(errors.Cause(err)) { + return err + } + + // Copy dashboards from modules. + for _, dir := range moduleDirs { + kibanaDirs, err := filepath.Glob(filepath.Join(dir, "*/_meta/kibana")) + if err != nil { + return err + } + + for _, kibanaDir := range kibanaDirs { + cp := &CopyTask{Source: kibanaDir, Dest: kibanaBuildDir} + if err = cp.Execute(); err != nil { + return err + } + } + } + + esBeatsDir, err := ElasticBeatsDir() + if err != nil { + return err + } + + // Convert 6.x dashboards to strings. + err = sh.Run("python", + filepath.Join(esBeatsDir, "libbeat/scripts/unpack_dashboards.py"), + "--glob="+filepath.Join(kibanaBuildDir, "6/dashboard/*.json")) + if err != nil { + return err + } + + beatVersion, err := BeatQualifiedVersion() + if err != nil { + return err + } + + // Generate Kibana index pattern files from fields.yml. + indexPatternCmd := sh.RunCmd("go", "run", + filepath.Join(esBeatsDir, "dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go"), + "-beat", BeatName, + "-version", beatVersion, + "-index", BeatIndexPrefix+"-*", + "-fields", "fields.yml", + "-out", kibanaBuildDir, + ) + + return indexPatternCmd() +} diff --git a/dev-tools/mage/pkgspecs.go b/dev-tools/mage/pkgspecs.go index 20767d9a429..432b508d0b5 100644 --- a/dev-tools/mage/pkgspecs.go +++ b/dev-tools/mage/pkgspecs.go @@ -18,6 +18,7 @@ package mage import ( + "bytes" "io/ioutil" "log" "path/filepath" @@ -82,10 +83,22 @@ func UseElasticBeatWithoutXPackPackaging() { } } +func LoadLocalNamedSpec(name string) { + beatsDir, err := ElasticBeatsDir() + if err != nil { + panic(err) + } + + err = LoadNamedSpec(name, filepath.Join(beatsDir, packageSpecFile), "packages.yml") + if err != nil { + panic(err) + } +} + // LoadNamedSpec loads a packaging specification with the given name from the // specified YAML file. name should be a sub-key of 'specs'. -func LoadNamedSpec(name, file string) error { - specs, err := LoadSpecs(file) +func LoadNamedSpec(name string, files ...string) error { + specs, err := LoadSpecs(files...) if err != nil { return errors.Wrap(err, "failed to load spec file") } @@ -95,16 +108,20 @@ func LoadNamedSpec(name, file string) error { return errors.Errorf("%v not found in package specs", name) } - log.Printf("%v package spec loaded from %v", name, file) + log.Printf("%v package spec loaded from %v", name, files) Packages = packages return nil } // LoadSpecs loads the packaging specifications from the specified YAML file. 
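
LoadSpecs, rewritten just below to accept multiple files, concatenates the raw YAML with bytes.Join before a single yaml.Unmarshal. Presumably this is so anchors defined in the shared dev-tools/packaging/packages.yml (the *windows_binary_spec style references visible later in this diff) stay resolvable from a Beat-local spec file. A minimal demonstration with invented file contents:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	shared := []byte("base: &base\n  mode: '0644'\n")
	local := []byte("specs:\n  mybeat:\n    <<: *base\n    config: true\n")

	// Joined into one document, the alias in "local" can resolve the
	// anchor declared in "shared"; unmarshalled separately it could not.
	var out map[string]interface{}
	if err := yaml.Unmarshal(bytes.Join([][]byte{shared, local}, []byte{'\n'}), &out); err != nil {
		panic(err)
	}
	fmt.Println(out["specs"])
}
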
-func LoadSpecs(file string) (map[string][]OSPackageArgs, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, errors.Wrap(err, "failed to read from spec file") +func LoadSpecs(files ...string) (map[string][]OSPackageArgs, error) { + var data [][]byte + for _, file := range files { + d, err := ioutil.ReadFile(file) + if err != nil { + return nil, errors.Wrap(err, "failed to read from spec file") + } + data = append(data, d) } type PackageYAML struct { @@ -112,7 +129,7 @@ func LoadSpecs(file string) (map[string][]OSPackageArgs, error) { } var packages PackageYAML - if err = yaml.Unmarshal(data, &packages); err != nil { + if err := yaml.Unmarshal(bytes.Join(data, []byte{'\n'}), &packages); err != nil { return nil, errors.Wrap(err, "failed to unmarshal spec data") } diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 33504fe59f7..cfe21fc726f 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -48,7 +48,7 @@ shared: mode: 0600 config: true /usr/share/{{.BeatName}}/kibana: - source: _meta/kibana.generated + source: build/kibana mode: 0644 /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} @@ -95,7 +95,7 @@ shared: template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: - source: _meta/kibana.generated + source: build/kibana mode: 0644 /etc/{{.BeatName}}/fields.yml: source: fields.yml @@ -136,7 +136,7 @@ shared: mode: 0600 config: true kibana: - source: _meta/kibana.generated + source: build/kibana mode: 0644 # Binary package spec (tar.gz for linux/darwin) for community beats. @@ -349,8 +349,9 @@ specs: spec: <<: *windows_binary_spec <<: *elastic_license_for_binaries - '{{.BeatName}}{{.BinaryExt}}': - source: ../x-pack/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ../x-pack/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: darwin types: [tgz] diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index fba51aceb04..108816ab7b6 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -8,10 +8,10 @@ # https://www.elastic.co/guide/en/beats/filebeat/index.html -#========================== Modules configuration ============================ +#========================== Modules configuration ============================= filebeat.modules: -#------------------------------- System Module ------------------------------- +#-------------------------------- System Module -------------------------------- #- module: system # Syslog #syslog: @@ -43,7 +43,39 @@ filebeat.modules: # can be added under this section. #input: -#------------------------------- Apache2 Module ------------------------------ +#-------------------------------- System Module -------------------------------- +#- module: system + # Syslog + #syslog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
+ #input: + + # Authorization logs + #auth: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Apache2 Module -------------------------------- #- module: apache2 # Access logs #access: @@ -69,7 +101,46 @@ filebeat.modules: # can be added under this section. #input: -#------------------------------- Auditd Module ------------------------------- +#-------------------------------- Apache2 Module -------------------------------- +#- module: apache2 + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Auditd Module -------------------------------- +#- module: auditd + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Auditd Module -------------------------------- #- module: auditd #log: #enabled: true @@ -82,7 +153,7 @@ filebeat.modules: # can be added under this section. #input: -#---------------------------- elasticsearch Module --------------------------- +#----------------------------- Elasticsearch Module ----------------------------- - module: elasticsearch # Server log server: @@ -116,7 +187,54 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: -#------------------------------- haproxy Module ------------------------------ +#----------------------------- Elasticsearch Module ----------------------------- +- module: elasticsearch + # Server log + server: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + gc: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + audit: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + slowlog: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + deprecation: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Haproxy Module -------------------------------- +- module: haproxy + # All logs + log: + enabled: true + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Haproxy Module -------------------------------- - module: haproxy # All logs log: @@ -129,7 +247,45 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: -#------------------------------- Icinga Module ------------------------------- +#-------------------------------- Icinga Module -------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Startup logs + #startup: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Icinga Module -------------------------------- #- module: icinga # Main logs #main: @@ -167,7 +323,7 @@ filebeat.modules: # can be added under this section. #input: -#--------------------------------- IIS Module -------------------------------- +#---------------------------------- IIS Module ---------------------------------- #- module: iis # Access logs #access: @@ -193,7 +349,50 @@ filebeat.modules: # can be added under this section. #input: -#-------------------------------- Kafka Module ------------------------------- +#---------------------------------- IIS Module ---------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- Kafka Module --------------------------------- +- module: kafka + # All logs + log: + enabled: true + + # Set custom paths for Kafka. If left empty, + # Filebeat will look under /opt. + #var.kafka_home: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + +#--------------------------------- Kafka Module --------------------------------- - module: kafka # All logs log: @@ -210,7 +409,7 @@ filebeat.modules: # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. 
#var.convert_timezone: false -#------------------------------- kibana Module ------------------------------- +#-------------------------------- Kibana Module -------------------------------- - module: kibana # All logs log: @@ -220,7 +419,34 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: -#------------------------------ logstash Module ------------------------------ +#-------------------------------- Kibana Module -------------------------------- +- module: kibana + # All logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- Logstash Module ------------------------------- +#- module: logstash + # logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: + + # Slow logs + #slowlog: + #enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- Logstash Module ------------------------------- #- module: logstash # logs #log: @@ -237,7 +463,7 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: -#------------------------------- mongodb Module ------------------------------ +#-------------------------------- Mongodb Module -------------------------------- #- module: mongodb # Logs #log: @@ -251,7 +477,47 @@ filebeat.modules: # can be added under this section. #input: -#-------------------------------- MySQL Module ------------------------------- +#-------------------------------- Mongodb Module -------------------------------- +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- MySQL Module --------------------------------- +#- module: mysql + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Slow logs + #slowlog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- MySQL Module --------------------------------- #- module: mysql # Error logs #error: @@ -277,7 +543,33 @@ filebeat.modules: # can be added under this section. #input: -#-------------------------------- Nginx Module ------------------------------- +#--------------------------------- Nginx Module --------------------------------- +#- module: nginx + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- Nginx Module --------------------------------- #- module: nginx # Access logs #access: @@ -303,7 +595,7 @@ filebeat.modules: # can be added under this section. #input: -#------------------------------- Osquery Module ------------------------------ +#-------------------------------- Osquery Module -------------------------------- - module: osquery result: enabled: true @@ -317,7 +609,35 @@ filebeat.modules: # of the document. The default is true. #var.use_namespace: true -#----------------------------- PostgreSQL Module ----------------------------- +#-------------------------------- Osquery Module -------------------------------- +- module: osquery + result: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # If true, all fields created by this module are prefixed with + # `osquery.result`. Set to false to copy the fields in the root + # of the document. The default is true. + #var.use_namespace: true + +#------------------------------ PostgreSQL Module ------------------------------ +#- module: postgresql + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#------------------------------ PostgreSQL Module ------------------------------ #- module: postgresql # Logs #log: @@ -331,7 +651,27 @@ filebeat.modules: # can be added under this section. #input: -#-------------------------------- Redis Module ------------------------------- +#--------------------------------- Redis Module --------------------------------- +#- module: redis + # Main logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: ["/var/log/redis/redis-server.log*"] + + # Slow logs, retrieved via the Redis API (SLOWLOG) + #slowlog: + #enabled: true + + # The Redis hosts to connect to. + #var.hosts: ["localhost:6379"] + + # Optional, the password to use when connecting to Redis. + #var.password: + +#--------------------------------- Redis Module --------------------------------- #- module: redis # Main logs #log: @@ -351,7 +691,21 @@ filebeat.modules: # Optional, the password to use when connecting to Redis. #var.password: -#------------------------------- Traefik Module ------------------------------ +#-------------------------------- Traefik Module -------------------------------- +#- module: traefik + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
+ #input: + +#-------------------------------- Traefik Module -------------------------------- #- module: traefik # Access logs #access: diff --git a/filebeat/magefile.go b/filebeat/magefile.go index 50b749746ee..5e0c639877c 100644 --- a/filebeat/magefile.go +++ b/filebeat/magefile.go @@ -22,7 +22,10 @@ package main import ( "context" "fmt" + "os" "path/filepath" + "regexp" + "strings" "time" "github.com/magefile/mage/mg" @@ -57,11 +60,6 @@ func CrossBuild() error { return mage.CrossBuild() } -// CrossBuildXPack cross-builds the beat with XPack for all target platforms. -func CrossBuildXPack() error { - return mage.CrossBuildXPack() -} - // CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. func CrossBuildGoDaemon() error { return mage.CrossBuildGoDaemon() @@ -80,11 +78,11 @@ func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() - mage.UseElasticBeatPackaging() + mage.UseCommunityBeatPackaging() customizePackaging() - mg.Deps(Update, prepareModulePackagingOSS, prepareModulePackagingXPack) - mg.Deps(CrossBuild, CrossBuildXPack, CrossBuildGoDaemon) + mg.Deps(Fields, Dashboards, Config, prepareModulePackaging) + mg.Deps(CrossBuild, CrossBuildGoDaemon) mg.SerialDeps(mage.Package, TestPackages) } @@ -98,11 +96,27 @@ func Update() error { return sh.Run("make", "update") } -// Fields generates a fields.yml for the Beat. -func Fields() error { +// Fields generates a fields.yml and include/fields.go for the Beat. +func Fields() { + mg.SerialDeps(fieldsYML, mage.GenerateAllInOneFieldsGo) +} + +// fieldsYML generates a fields.yml. +func fieldsYML() error { return mage.GenerateFieldsYAML("module") } +// Dashboards collects all the dashboards and generates index patterns. +func Dashboards() error { + mg.Deps(Fields) + return mage.KibanaDashboards("module") +} + +// Config generates both the short and reference configs. +func Config() { + mg.Deps(shortConfig, referenceConfig, createDirModulesD) +} + // GoTestUnit executes the Go unit tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. @@ -122,9 +136,10 @@ func GoTestIntegration(ctx context.Context) error { // - Include modules directory in packages (minus _meta and test files). // - Include modules.d directory in packages. -var modulesDirGeneratedOSS = filepath.Clean("build/packaging/modules-oss") -var modulesDirGeneratedXPack = filepath.Clean("build/packaging/modules-x-pack") -var modules_D_DirGeneratedXpack = filepath.Clean("build/packaging/modules.d-x-pack") +const ( + dirModuleGenerated = "build/package/module" + dirModulesDGenerated = "build/package/modules.d" +) // customizePackaging modifies the package specs to add the modules and // modules.d directory. 
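
For orientation before the hunk below: now that OSS and x-pack share a single staged module tree, the reworked customizePackaging installs the same two file sets for every license type and only the target prefix varies by package format. An illustrative summary (paths copied from this diff; the printout form is mine, and the DMG variant is omitted for brevity):

package main

import "fmt"

func main() {
	// Both file sets come from the build/package/ staging directories.
	targets := []struct{ pkg, module, modulesD string }{
		{"tar.gz/zip", "module", "modules.d"},
		{"deb/rpm", "/usr/share/{{.BeatName}}/module", "/etc/{{.BeatName}}/modules.d"},
	}
	for _, t := range targets {
		fmt.Printf("%-10s module -> %-35s modules.d -> %s\n", t.pkg, t.module, t.modulesD)
	}
}
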
@@ -133,85 +148,52 @@ func customizePackaging() { moduleTarget = "module" module = mage.PackageFile{ Mode: 0644, - Source: modulesDirGeneratedOSS, - } - moduleXPack = mage.PackageFile{ - Mode: 0644, - Source: modulesDirGeneratedXPack, + Source: dirModuleGenerated, } modulesDTarget = "modules.d" modulesD = mage.PackageFile{ Mode: 0644, - Source: "modules.d", - Config: true, - } - modulesDXPack = mage.PackageFile{ - Mode: 0644, - Source: modules_D_DirGeneratedXpack, + Source: dirModulesDGenerated, Config: true, } ) for _, args := range mage.Packages { - mods := module - modsD := modulesD - if args.Spec.License == "Elastic License" { - mods = moduleXPack - modsD = modulesDXPack - } pkgType := args.Types[0] switch pkgType { case mage.TarGz, mage.Zip: - args.Spec.Files[moduleTarget] = mods - args.Spec.Files[modulesDTarget] = modsD + args.Spec.Files[moduleTarget] = module + args.Spec.Files[modulesDTarget] = modulesD case mage.Deb, mage.RPM: - args.Spec.Files["/usr/share/{{.BeatName}}/"+moduleTarget] = mods - args.Spec.Files["/etc/{{.BeatName}}/"+modulesDTarget] = modsD + args.Spec.Files["/usr/share/{{.BeatName}}/"+moduleTarget] = module + args.Spec.Files["/etc/{{.BeatName}}/"+modulesDTarget] = modulesD case mage.DMG: - args.Spec.Files["/Library/Application Support/{{.BeatVendor}}/{{.BeatName}}"+moduleTarget] = mods - args.Spec.Files["/etc/{{.BeatName}}/"+modulesDTarget] = modsD + args.Spec.Files["/Library/Application Support/{{.BeatVendor}}/{{.BeatName}}"+moduleTarget] = module + args.Spec.Files["/etc/{{.BeatName}}/"+modulesDTarget] = modulesD default: panic(errors.Errorf("unhandled package type: %v", pkgType)) } } } -// prepareModulePackagingOSS copies the module dir to the build dir and excludes +// prepareModulePackaging copies the module dir to the build dir and excludes // _meta and test files so that they are not included in packages. -func prepareModulePackagingOSS() error { - if err := sh.Rm(modulesDirGeneratedOSS); err != nil { +func prepareModulePackaging() error { + mg.Deps(createDirModulesD) + + err := mage.Clean([]string{ + dirModuleGenerated, + dirModulesDGenerated, + }) + if err != nil { return err } - copy := &mage.CopyTask{ - Source: "module", - Dest: modulesDirGeneratedOSS, - Mode: 0644, - DirMode: 0755, - Exclude: []string{ - "/_meta", - "/test", - }, - } - return copy.Execute() -} - -// prepareModulePackagingXPack generates modules and modules.d directories -// for an x-pack distribution, excluding _meta and test files so that they are -// not included in packages. 
-func prepareModulePackagingXPack() error { - for _, dir := range []string{modulesDirGeneratedXPack, modules_D_DirGeneratedXpack} { - if err := sh.Rm(dir); err != nil { - return err - } - } for _, copyAction := range []struct { src, dst string }{ - {"module", modulesDirGeneratedXPack}, - {"../x-pack/filebeat/module", modulesDirGeneratedXPack}, - {"modules.d", modules_D_DirGeneratedXpack}, - {"../x-pack/filebeat/modules.d", modules_D_DirGeneratedXpack}, + {mage.OSSBeatDir("module"), dirModuleGenerated}, + {mage.OSSBeatDir("modules.d"), dirModulesDGenerated}, } { err := (&mage.CopyTask{ Source: copyAction.src, @@ -221,6 +203,7 @@ func prepareModulePackagingXPack() error { Exclude: []string{ "/_meta", "/test", + "fields.go", }, }).Execute() if err != nil { @@ -229,3 +212,76 @@ func prepareModulePackagingXPack() error { } return nil } + +func shortConfig() error { + var configParts = []string{ + mage.OSSBeatDir("_meta/common.p1.yml"), + mage.OSSBeatDir("_meta/common.p2.yml"), + "{{ elastic_beats_dir }}/libbeat/_meta/config.yml", + } + + for i, f := range configParts { + configParts[i] = mage.MustExpand(f) + } + + configFile := mage.BeatName + ".yml" + mage.MustFileConcat(configFile, 0640, configParts...) + mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName) + mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix) + return nil +} + +func referenceConfig() error { + const modulesConfigYml = "build/config.modules.yml" + err := mage.GenerateModuleReferenceConfig(modulesConfigYml, mage.OSSBeatDir("module"), "module") + if err != nil { + return err + } + defer os.Remove(modulesConfigYml) + + var configParts = []string{ + mage.OSSBeatDir("_meta/common.reference.p1.yml"), + modulesConfigYml, + mage.OSSBeatDir("_meta/common.reference.p2.yml"), + "{{ elastic_beats_dir }}/libbeat/_meta/config.reference.yml", + } + + for i, f := range configParts { + configParts[i] = mage.MustExpand(f) + } + + configFile := mage.BeatName + ".reference.yml" + mage.MustFileConcat(configFile, 0640, configParts...) + mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName) + mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix) + return nil +} + +func createDirModulesD() error { + if err := os.RemoveAll("modules.d"); err != nil { + return err + } + + shortConfigs, err := filepath.Glob("module/*/_meta/config.yml") + if err != nil { + return err + } + + for _, f := range shortConfigs { + parts := strings.Split(filepath.ToSlash(f), "/") + if len(parts) < 2 { + continue + } + moduleName := parts[1] + + cp := mage.CopyTask{ + Source: f, + Dest: filepath.Join("modules.d", moduleName+".yml.disabled"), + Mode: 0644, + } + if err = cp.Execute(); err != nil { + return err + } + } + return nil +} diff --git a/heartbeat/magefile.go b/heartbeat/magefile.go index 314e74cd9c3..f63cc73a961 100644 --- a/heartbeat/magefile.go +++ b/heartbeat/magefile.go @@ -96,11 +96,22 @@ func Update() error { return sh.Run("make", "update") } -// Fields generates a fields.yml for the Beat. -func Fields() error { +// Fields generates a fields.yml and include/fields.go for the Beat. +func Fields() { + mg.SerialDeps(fieldsYML, mage.GenerateAllInOneFieldsGo) +} + +// fieldsYML generates a fields.yml. +func fieldsYML() error { return mage.GenerateFieldsYAML("monitors/active") } +// Dashboards collects all the dashboards and generates index patterns. 
+func Dashboards() error { + mg.Deps(Fields) + return mage.KibanaDashboards("monitors/active") +} + // GoTestUnit executes the Go unit tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. diff --git a/libbeat/generator/fields/fields.go b/libbeat/generator/fields/fields.go index 83df7102219..36f697b8c39 100644 --- a/libbeat/generator/fields/fields.go +++ b/libbeat/generator/fields/fields.go @@ -76,7 +76,7 @@ func isLibbeat(beatPath string) bool { return filepath.Base(beatPath) == "libbeat" } -func writeGeneratedFieldsYml(beatPath string, fieldFiles []*YmlFile, output string) error { +func writeGeneratedFieldsYml(fieldFiles []*YmlFile, output string) error { data, err := GenerateFieldsYml(fieldFiles) if err != nil { return err @@ -91,8 +91,7 @@ func writeGeneratedFieldsYml(beatPath string, fieldFiles []*YmlFile, output stri return fw.Flush() } - outPath := filepath.Join(beatPath, output) - f, err := os.Create(outPath) + f, err := os.Create(output) if err != nil { return err } @@ -143,5 +142,5 @@ func Generate(esBeatsPath, beatPath string, files []*YmlFile, output string) err return err } - return writeGeneratedFieldsYml(beatPath, files, output) + return writeGeneratedFieldsYml(files, output) } diff --git a/libbeat/kibana/index_pattern_generator.go b/libbeat/kibana/index_pattern_generator.go index 46a1a741751..784fa42f2c3 100644 --- a/libbeat/kibana/index_pattern_generator.go +++ b/libbeat/kibana/index_pattern_generator.go @@ -37,20 +37,19 @@ type IndexPatternGenerator struct { } // Create an instance of the Kibana Index Pattern Generator -func NewGenerator(indexName, beatName, beatDir, beatVersion string, version common.Version) (*IndexPatternGenerator, error) { +func NewGenerator(indexName, beatName, fieldsYAMLFile, outputDir, beatVersion string, version common.Version) (*IndexPatternGenerator, error) { beatName = clean(beatName) - fieldsYaml := filepath.Join(beatDir, "fields.yml") - if _, err := os.Stat(fieldsYaml); err != nil { + if _, err := os.Stat(fieldsYAMLFile); err != nil { return nil, err } return &IndexPatternGenerator{ indexName: indexName, - fieldsYaml: fieldsYaml, + fieldsYaml: fieldsYAMLFile, beatVersion: beatVersion, version: version, - targetDir: createTargetDir(beatDir, version), + targetDir: createTargetDir(outputDir, version), targetFilename: beatName + ".json", }, nil } @@ -169,9 +168,9 @@ func dumpToFile(f string, pattern common.MapStr) error { } func createTargetDir(baseDir string, version common.Version) string { - targetDir := filepath.Join(baseDir, "_meta", "kibana.generated", getVersionPath(version), "index-pattern") + targetDir := filepath.Join(baseDir, getVersionPath(version), "index-pattern") if _, err := os.Stat(targetDir); os.IsNotExist(err) { - os.MkdirAll(targetDir, 0777) + os.MkdirAll(targetDir, 0755) } return targetDir } diff --git a/libbeat/kibana/index_pattern_generator_test.go b/libbeat/kibana/index_pattern_generator_test.go index 3dd81157ede..516afbd57dc 100644 --- a/libbeat/kibana/index_pattern_generator_test.go +++ b/libbeat/kibana/index_pattern_generator_test.go @@ -31,37 +31,47 @@ import ( "github.com/elastic/beats/libbeat/common" ) +const ( + fieldsYml = "testdata/fields.yml" +) + func TestNewGenerator(t *testing.T) { - beatDir := tmpPath() - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) v, _ := common.NewVersion("7.0.0") // checks for fields.yml - generator, err := NewGenerator("beat-index", "mybeat.", filepath.Join(beatDir, "nonexistent"), "7.0", 
*v) + generator, err := NewGenerator("beat-index", "mybeat.", fieldsYml+".missing", tmpDir, "7.0", *v) assert.Error(t, err) - generator, err = NewGenerator("beat-index", "mybeat.", beatDir, "7.0", *v) - assert.NoError(t, err) + generator, err = NewGenerator("beat-index", "mybeat.", fieldsYml, tmpDir, "7.0", *v) + if err != nil { + t.Fatal(err) + } assert.Equal(t, "7.0", generator.beatVersion) assert.Equal(t, "beat-index", generator.indexName) - assert.Equal(t, filepath.Join(beatDir, "fields.yml"), generator.fieldsYaml) // creates file dir and sets name - expectedDir := filepath.Join(beatDir, "_meta/kibana.generated/6/index-pattern") + expectedDir := filepath.Join(tmpDir, "6/index-pattern") assert.Equal(t, expectedDir, generator.targetDir) _, err = os.Stat(generator.targetDir) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } v, _ = common.NewVersion("5.0.0") // checks for fields.yml - generator, err = NewGenerator("beat-index", "mybeat.", beatDir, "7.0", *v) - assert.NoError(t, err) + generator, err = NewGenerator("beat-index", "mybeat.", fieldsYml, tmpDir, "7.0", *v) + if err != nil { + t.Fatal(err) + } - expectedDir = filepath.Join(beatDir, "_meta/kibana.generated/5/index-pattern") + expectedDir = filepath.Join(tmpDir, "5/index-pattern") assert.Equal(t, expectedDir, generator.targetDir) _, err = os.Stat(generator.targetDir) - - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, "mybeat.json", generator.targetFilename) } @@ -83,14 +93,19 @@ func TestCleanName(t *testing.T) { } func TestGenerateFieldsYaml(t *testing.T) { - beatDir := tmpPath() - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) v, _ := common.NewVersion("6.0.0") - generator, err := NewGenerator("metricbeat-*", "metric beat ?!", beatDir, "7.0.0-alpha1", *v) + generator, err := NewGenerator("metricbeat-*", "metric beat ?!", fieldsYml, tmpDir, "7.0.0-alpha1", *v) + if err != nil { + t.Fatal(err) + } _, err = generator.Generate() - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } generator.fieldsYaml = "" _, err = generator.Generate() @@ -98,90 +113,120 @@ func TestGenerateFieldsYaml(t *testing.T) { } func TestDumpToFile5x(t *testing.T) { - beatDir := tmpPath() - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) + v, _ := common.NewVersion("5.0.0") - generator, err := NewGenerator("metricbeat-*", "metric beat ?!", beatDir, "7.0.0-alpha1", *v) + generator, err := NewGenerator("metricbeat-*", "metric beat ?!", fieldsYml, tmpDir, "7.0.0-alpha1", *v) + if err != nil { + t.Fatal(err) + } _, err = generator.Generate() - assert.NoError(t, err) - - generator.targetDir = "./non-existing/something" + if err != nil { + t.Fatal(err) + } + generator.targetDir = filepath.Join(tmpDir, "non-existing/something") _, err = generator.Generate() assert.Error(t, err) } func TestDumpToFileDefault(t *testing.T) { - beatDir := tmpPath() - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) v, _ := common.NewVersion("7.0.0") - generator, err := NewGenerator("metricbeat-*", "metric beat ?!", beatDir, "7.0.0-alpha1", *v) + generator, err := NewGenerator("metricbeat-*", "metric beat ?!", fieldsYml, tmpDir, "7.0.0-alpha1", *v) + if err != nil { + t.Fatal(err) + } _, err = generator.Generate() - assert.NoError(t, err) - - generator.targetDir = "./non-existing/something" + if err != nil { + t.Fatal(err) + } + generator.targetDir = filepath.Join(tmpDir, "./non-existing/something") _, err = generator.Generate() assert.Error(t, err) } func 
TestGenerate(t *testing.T) { - beatDir := tmpPath() - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) v5, _ := common.NewVersion("5.0.0") v6, _ := common.NewVersion("6.0.0") versions := []*common.Version{v5, v6} for _, version := range versions { - generator, err := NewGenerator("beat-*", "b eat ?!", beatDir, "7.0.0-alpha1", *version) - assert.NoError(t, err) + generator, err := NewGenerator("beat-*", "b eat ?!", fieldsYml, tmpDir, "7.0.0-alpha1", *version) + if err != nil { + t.Fatal(err) + } _, err = generator.Generate() - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } } tests := []map[string]string{ - {"existing": "beat-5.json", "created": "_meta/kibana.generated/5/index-pattern/beat.json"}, - {"existing": "beat-6.json", "created": "_meta/kibana.generated/6/index-pattern/beat.json"}, + { + "existing": "testdata/beat-5.json", + "created": filepath.Join(tmpDir, "5/index-pattern/beat.json"), + }, + { + "existing": "testdata/beat-6.json", + "created": filepath.Join(tmpDir, "6/index-pattern/beat.json"), + }, } - testGenerate(t, beatDir, tests, true) + testGenerate(t, tests, true) } func TestGenerateExtensive(t *testing.T) { - beatDir, err := filepath.Abs("./testdata/extensive") - if err != nil { - panic(err) - } - defer teardown(beatDir) + tmpDir := tmpPath(t) + defer os.RemoveAll(tmpDir) version5, _ := common.NewVersion("5.0.0") version6, _ := common.NewVersion("6.0.0") versions := []*common.Version{version5, version6} for _, version := range versions { - generator, err := NewGenerator("metricbeat-*", "metric be at ?!", beatDir, "7.0.0-alpha1", *version) - assert.NoError(t, err) + generator, err := NewGenerator("metricbeat-*", "metric be at ?!", "testdata/extensive/fields.yml", tmpDir, "7.0.0-alpha1", *version) + if err != nil { + t.Fatal(err) + } _, err = generator.Generate() - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } } tests := []map[string]string{ - {"existing": "metricbeat-5.json", "created": "_meta/kibana.generated/5/index-pattern/metricbeat.json"}, - {"existing": "metricbeat-6.json", "created": "_meta/kibana.generated/6/index-pattern/metricbeat.json"}, + { + "existing": "testdata/extensive/metricbeat-5.json", + "created": filepath.Join(tmpDir, "5/index-pattern/metricbeat.json"), + }, + { + "existing": "testdata/extensive/metricbeat-6.json", + "created": filepath.Join(tmpDir, "6/index-pattern/metricbeat.json"), + }, } - testGenerate(t, beatDir, tests, false) + testGenerate(t, tests, false) } -func testGenerate(t *testing.T, beatDir string, tests []map[string]string, sourceFilters bool) { +func testGenerate(t *testing.T, tests []map[string]string, sourceFilters bool) { for _, test := range tests { // compare default - existing, err := readJson(filepath.Join(beatDir, test["existing"])) - assert.NoError(t, err) - created, err := readJson(filepath.Join(beatDir, test["created"])) - assert.NoError(t, err) + existing, err := readJson(test["existing"]) + if err != nil { + t.Fatal(err) + } + created, err := readJson(test["created"]) + if err != nil { + t.Fatal(err) + } var attrExisting, attrCreated common.MapStr @@ -205,17 +250,25 @@ func testGenerate(t *testing.T, beatDir string, tests []map[string]string, sourc // check fieldFormatMap var ffmExisting, ffmCreated map[string]interface{} err = json.Unmarshal([]byte(attrExisting["fieldFormatMap"].(string)), &ffmExisting) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } err = json.Unmarshal([]byte(attrCreated["fieldFormatMap"].(string)), &ffmCreated) - assert.NoError(t, err) 
+ if err != nil { + t.Fatal(err) + } assert.Equal(t, ffmExisting, ffmCreated) // check fields var fieldsExisting, fieldsCreated []map[string]interface{} err = json.Unmarshal([]byte(attrExisting["fields"].(string)), &fieldsExisting) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } err = json.Unmarshal([]byte(attrCreated["fields"].(string)), &fieldsCreated) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, len(fieldsExisting), len(fieldsCreated)) for _, e := range fieldsExisting { idx := find(fieldsCreated, "name", e["name"].(string)) @@ -227,9 +280,13 @@ func testGenerate(t *testing.T, beatDir string, tests []map[string]string, sourc if sourceFilters { var sfExisting, sfCreated []map[string]interface{} err = json.Unmarshal([]byte(attrExisting["sourceFilters"].(string)), &sfExisting) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } err = json.Unmarshal([]byte(attrCreated["sourceFilters"].(string)), &sfCreated) - assert.NoError(t, err) + if err != nil { + t.Fatal(err) + } assert.Equal(t, len(sfExisting), len(sfCreated)) for _, e := range sfExisting { idx := find(sfCreated, "value", e["value"].(string)) @@ -254,6 +311,7 @@ func readJson(path string) (map[string]interface{}, error) { if err != nil { return nil, err } + var data map[string]interface{} err = json.Unmarshal(f, &data) if err != nil { @@ -262,17 +320,10 @@ func readJson(path string) (map[string]interface{}, error) { return data, nil } -func tmpPath() string { - beatDir, err := filepath.Abs("./testdata") +func tmpPath(t testing.TB) string { + tmpDir, err := ioutil.TempDir("", "kibana-tests") if err != nil { - panic(err) - } - return beatDir -} - -func teardown(path string) { - if path == "" { - path = tmpPath() + t.Fatal(err) } - os.RemoveAll(filepath.Join(path, "_meta")) + return tmpDir } diff --git a/libbeat/magefile.go b/libbeat/magefile.go index 61290e0835c..8440ad92446 100644 --- a/libbeat/magefile.go +++ b/libbeat/magefile.go @@ -23,7 +23,7 @@ import ( "context" "github.com/elastic/beats/dev-tools/mage" -) + ) // Build builds the Beat binary. func Build() error { diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index 916cd6e4ee0..ba2458f302a 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -307,9 +307,13 @@ coverage-report: fields: mage @mage fields +.PHONY: dashboards +dashboards: mage + @mage dashboards + .PHONY: update update: ## @build Update expects the most recent version of libbeat in the GOPATH -update: python-env fields collect +update: python-env fields collect dashboards @echo "Updating generated files for ${BEAT_NAME}" @mkdir -p _meta @@ -328,6 +332,7 @@ ifeq ($(BEAT_REF_YAML),true) @chmod 0640 ${BEAT_NAME}.reference.yml endif +# TODO: Remove as this becomes part of mage fields. 
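
The TODO above marks the same ordering constraint that the new mage targets encode with mg.SerialDeps: fields.yml must exist on disk before the asset step embeds it into include/fields.go. An illustrative magefile fragment with the generation steps stubbed out:

// +build mage

package main

import "github.com/magefile/mage/mg"

func fieldsYML() error { return nil } // would write fields.yml
func fieldsGo() error  { return nil } // would embed fields.yml into include/fields.go

// SerialDeps runs its dependencies in the given order, exactly once each.
// Plain mg.Deps only guarantees completion and may run them in parallel,
// which would let fieldsGo read a fields.yml that is not there yet.
func Fields() {
	mg.SerialDeps(fieldsYML, fieldsGo)
}
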
ifneq ($(shell [[ $(BEAT_NAME) == libbeat || $(BEAT_NAME) == metricbeat ]] && echo true ),true) mkdir -p include go run ${ES_BEATS}/dev-tools/cmd/asset/asset.go -pkg include -in fields.yml -out include/fields.go $(BEAT_NAME) @@ -339,17 +344,6 @@ ifneq ($(shell [[ $(BEAT_NAME) == libbeat ]] && echo true ),true) @${PYTHON_ENV}/bin/python ${ES_BEATS}/libbeat/scripts/generate_fields_docs.py $(PWD) ${BEAT_TITLE} ${ES_BEATS} endif - @mkdir -p $(PWD)/_meta/kibana.generated - @# Generate Kibana index pattern and copy dashboard files - if [ -d $(PWD)/_meta/kibana ]; then \ - cp -pr $(PWD)/_meta/kibana/* $(PWD)/_meta/kibana.generated ; \ - fi - @# Convert all dashboards to string - @python ${ES_BEATS}/libbeat/scripts/unpack_dashboards.py --glob="./_meta/kibana.generated/6/dashboard/*.json" - @mkdir -p $(PWD)/_meta/kibana.generated/5/index-pattern - @mkdir -p $(PWD)/_meta/kibana.generated/6/index-pattern - @go run ${ES_BEATS}/dev-tools/cmd/kibana_index_pattern/kibana_index_pattern.go -index '${BEAT_INDEX_PREFIX}-*' -beat-name ${BEAT_NAME} -beat-dir $(PWD) -version ${BEAT_VERSION} - .PHONY: docs docs: ## @build Builds the documents for the beat sh ${ES_BEATS}/script/build_docs.sh ${BEAT_NAME} ${BEAT_PATH}/docs ${BUILD_DIR} diff --git a/libbeat/scripts/cmd/global_fields/main.go b/libbeat/scripts/cmd/global_fields/main.go index 238ce040b8b..0758818e3b2 100644 --- a/libbeat/scripts/cmd/global_fields/main.go +++ b/libbeat/scripts/cmd/global_fields/main.go @@ -78,9 +78,7 @@ func main() { var fieldsFiles []*fields.YmlFile for _, fieldsFilePath := range beatFieldsPaths { - pathToModules := filepath.Join(beatPath, fieldsFilePath) - - fieldsFile, err := fields.CollectModuleFiles(pathToModules) + fieldsFile, err := fields.CollectModuleFiles(fieldsFilePath) if err != nil { fmt.Fprintf(os.Stderr, "Cannot collect fields.yml files: %+v\n", err) os.Exit(2) @@ -95,5 +93,9 @@ func main() { os.Exit(3) } - fmt.Fprintf(os.Stderr, "Generated fields.yml for %s to %s\n", name, filepath.Join(beatPath, output)) + outputPath, err := filepath.Abs(output) + if err != nil { + outputPath = output + } + fmt.Fprintf(os.Stderr, "Generated fields.yml for %s to %s\n", name, outputPath) } diff --git a/metricbeat/magefile.go b/metricbeat/magefile.go index 535d4ae33ad..cab36f12839 100644 --- a/metricbeat/magefile.go +++ b/metricbeat/magefile.go @@ -98,11 +98,35 @@ func Update() error { return sh.Run("make", "update") } -// Fields generates a fields.yml for the Beat. -func Fields() error { +// Fields generates a top-level fields.yml, fields.go for each module, fields.go +// for metricbeat's common fields, and fields.go for libbeat's common fields. +func Fields() { + mg.SerialDeps(fieldsYML, mage.GenerateModuleFieldsGo, + metricbeatFieldsGo, libbeatFieldsGo) +} + +func metricbeatFieldsGo() error { + return mage.GenerateFieldsGo("_meta/fields.common.yml", "include/fields.go") +} + +func libbeatFieldsGo() error { + // TODO: generate the fields.go from libbeat data only. + // go run ${ES_BEATS}/libbeat/scripts/cmd/global_fields/main.go -es_beats_path ${ES_BEATS} -beat_path ${PWD} + // | go run ${ES_BEATS}/dev-tools/cmd/asset/asset.go -license ${LICENSE} -out ./include/fields/fields.go -pkg include ${ES_BEATS}/libbeat/fields.yml $(BEAT_NAME) + return nil +} + +// fieldsYML generates a fields.yml. +func fieldsYML() error { return mage.GenerateFieldsYAML("module") } +// Dashboards collects all the dashboards and generates index patterns.
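+// Usage note (assuming the mage tool is installed): run "mage dashboards", or +// "make dashboards", whose target in libbeat/scripts/Makefile delegates here.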
+func Dashboards() error { + mg.Deps(Fields) + return mage.KibanaDashboards("module") +} + // GoTestUnit executes the Go unit tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. diff --git a/metricbeat/scripts/assets/assets.go b/metricbeat/scripts/assets/assets.go index e103eb70707..21c3f940991 100644 --- a/metricbeat/scripts/assets/assets.go +++ b/metricbeat/scripts/assets/assets.go @@ -32,7 +32,6 @@ import ( ) func main() { - flag.Parse() args := flag.Args() diff --git a/packetbeat/magefile.go b/packetbeat/magefile.go index e577033b818..35e559fcd06 100644 --- a/packetbeat/magefile.go +++ b/packetbeat/magefile.go @@ -141,6 +141,12 @@ func Fields() error { return mage.GenerateFieldsYAML("protos") } +// Dashboards collects all the dashboards and generates index patterns. +func Dashboards() error { + mg.Deps(Fields) + return mage.KibanaDashboards("protos") +} + // GoTestUnit executes the Go unit tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. diff --git a/winlogbeat/magefile.go b/winlogbeat/magefile.go index bc0c349eaf8..38c0d982c94 100644 --- a/winlogbeat/magefile.go +++ b/winlogbeat/magefile.go @@ -101,6 +101,12 @@ func Fields() error { return mage.GenerateFieldsYAML() } +// Dashboards collects all the dashboards and generates index patterns. +func Dashboards() error { + mg.Deps(Fields) + return mage.KibanaDashboards() +} + // GoTestUnit executes the Go unit tests. // Use TEST_COVERAGE=true to enable code coverage profiling. // Use RACE_DETECTOR=true to enable the race detector. diff --git a/x-pack/.gitignore b/x-pack/.gitignore index 8de00a22385..2ceb926778f 100644 --- a/x-pack/.gitignore +++ b/x-pack/.gitignore @@ -1,2 +1,5 @@ # Directories */build +*beat/logs +*beat/data +*beat/fields.yml diff --git a/x-pack/filebeat/cmd/root.go b/x-pack/filebeat/cmd/root.go index dd76fced042..42beaf6eef6 100644 --- a/x-pack/filebeat/cmd/root.go +++ b/x-pack/filebeat/cmd/root.go @@ -4,7 +4,11 @@ package cmd -import "github.com/elastic/beats/filebeat/cmd" +import ( + "github.com/elastic/beats/filebeat/cmd" + + _ "github.com/elastic/beats/x-pack/filebeat/include" +) // RootCmd to handle beats cli var RootCmd = cmd.RootCmd diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml new file mode 100644 index 00000000000..994cd49c89e --- /dev/null +++ b/x-pack/filebeat/filebeat.reference.yml @@ -0,0 +1,1918 @@ +######################## Filebeat Configuration ############################ + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see filebeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + + +#========================== Modules configuration ============================= +filebeat.modules: + +#-------------------------------- System Module -------------------------------- +#- module: system + # Syslog + #syslog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. 
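+  # For example, a sketch (any log-input option can be nested here; close_eof + # is documented under the log input options later in this file): + #  input: + #    close_eof: true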
+ #input: + + # Authorization logs + #auth: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Apache2 Module -------------------------------- +#- module: apache2 + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Auditd Module -------------------------------- +#- module: auditd + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#----------------------------- Elasticsearch Module ----------------------------- +- module: elasticsearch + # Server log + server: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + gc: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + audit: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + slowlog: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + deprecation: + enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Haproxy Module -------------------------------- +- module: haproxy + # All logs + log: + enabled: true + + # Set which input to use between syslog (default) or file. + #var.input: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Icinga Module -------------------------------- +#- module: icinga + # Main logs + #main: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Debug logs + #debug: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Startup logs + #startup: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. 
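+  # For example (a hypothetical path): + #  var.paths: ["/var/log/icinga2/startup.log"]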
+ #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#---------------------------------- IIS Module ---------------------------------- +#- module: iis + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- Kafka Module --------------------------------- +- module: kafka + # All logs + log: + enabled: true + + # Set custom paths for Kafka. If left empty, + # Filebeat will look under /opt. + #var.kafka_home: + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Convert the timestamp to UTC. Requires Elasticsearch >= 6.1. + #var.convert_timezone: false + +#-------------------------------- Kibana Module -------------------------------- +- module: kibana + # All logs + log: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#------------------------------- Logstash Module ------------------------------- +#- module: logstash + # logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + # var.paths: + + # Slow logs + #slowlog: + #enabled: true + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Mongodb Module -------------------------------- +#- module: mongodb + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- MySQL Module --------------------------------- +#- module: mysql + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Slow logs + #slowlog: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- Nginx Module --------------------------------- +#- module: nginx + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + # Error logs + #error: + #enabled: true + + # Set custom paths for the log files. 
If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#-------------------------------- Osquery Module -------------------------------- +- module: osquery + result: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # If true, all fields created by this module are prefixed with + # `osquery.result`. Set to false to copy the fields in the root + # of the document. The default is true. + #var.use_namespace: true + +#------------------------------ PostgreSQL Module ------------------------------ +#- module: postgresql + # Logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + +#--------------------------------- Redis Module --------------------------------- +#- module: redis + # Main logs + #log: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: ["/var/log/redis/redis-server.log*"] + + # Slow logs, retrieved via the Redis API (SLOWLOG) + #slowlog: + #enabled: true + + # The Redis hosts to connect to. + #var.hosts: ["localhost:6379"] + + # Optional, the password to use when connecting to Redis. + #var.password: + +#------------------------------- Suricata Module ------------------------------- +- module: suricata + # All logs + eve: + enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + +#-------------------------------- Traefik Module -------------------------------- +#- module: traefik + # Access logs + #access: + #enabled: true + + # Set custom paths for the log files. If left empty, + # Filebeat will choose the paths depending on your OS. + #var.paths: + + # Input configuration (advanced). Any input configuration option + # can be added under this section. + #input: + + +#=========================== Filebeat inputs ============================= + +# List of inputs to fetch data. +filebeat.inputs: +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +# Type of the files. Based on this the way the file is read is decided. +# The different types cannot be mixed in one input +# +# Possible options are: +# * log: Reads every line of the log file (default) +# * stdin: Reads the standard in + +#------------------------------ Log input -------------------------------- +- type: log + + # Change to true to enable this input configuration. + enabled: false + + # Paths that should be crawled and fetched. Glob based paths. + # To fetch all ".log" files from a specific level of subdirectories + # /var/log/*/*.log can be used. + # For each file found under this path, a harvester is started. + # Make sure not file is defined twice as this can lead to unexpected behaviour. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Configure the file encoding for reading files with international characters + # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding). 
+ # Some sample encodings: + # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk, + # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ... + #encoding: plain + + + # Exclude lines. A list of regular expressions to match. It drops the lines that + # match any regular expression from the list. The include_lines is called before + # exclude_lines. By default, no lines are dropped. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that + # match any regular expression from the list. The include_lines is called before + # exclude_lines. By default, all the lines are exported. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # match any regular expression from the list. By default, no files are dropped. + #exclude_files: ['.gz$'] + + # Optional additional fields. These fields can be freely picked + # to add additional information to the crawled log files for filtering. + #fields: + # level: debug + # review: 1 + + # Set to true to store the additional fields as top level fields instead + # of under the "fields" sub-dictionary. In case of name conflicts with the + # fields added by Filebeat itself, the custom fields overwrite the default + # fields. + #fields_under_root: false + + # Ignore files that were modified more than the defined timespan in the past. + # ignore_older is disabled by default, so no files are ignored by setting it to 0. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #ignore_older: 0 + + # How often the input checks for new files in the paths that are specified + # for harvesting. Specify 1s to scan the directory as frequently as possible + # without causing Filebeat to scan too frequently. Default: 10s. + #scan_frequency: 10s + + # Defines the buffer size every harvester uses when fetching the file. + #harvester_buffer_size: 16384 + + # Maximum number of bytes a single log event can have. + # All bytes after max_bytes are discarded and not sent. The default is 10MB. + # This is especially useful for multiline log messages which can get large. + #max_bytes: 10485760 + + ### Recursive glob configuration + + # Expand "**" patterns into regular glob patterns. + #recursive_glob.enabled: true + + ### JSON configuration + + # Decode JSON options. Enable this if your logs are structured in JSON. + # JSON key on which to apply the line filtering and multiline settings. This key + # must be top level and its value must be a string, otherwise it is ignored. If + # no text key is defined, the line filtering and multiline features cannot be used. + #json.message_key: + + # By default, the decoded JSON is placed under a "json" key in the output document. + # If you enable this setting, the keys are copied top level in the output document. + #json.keys_under_root: false + + # If keys_under_root and this setting are enabled, then the values from the decoded + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # in case of conflicts. + #json.overwrite_keys: false + + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON + # unmarshaling errors or when a text key is defined in the configuration but cannot + # be used. + #json.add_error_key: false + + ### Multiline options + + # Multiline can be used for log messages spanning multiple lines. This is common + # for Java stack traces or C line continuations.
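+ + # For example, a minimal sketch (the pattern is an assumption; adapt it to your logs) + # that appends indented continuation lines, such as Java stack-trace frames, to the + # preceding event: + #multiline.pattern: '^[[:space:]]' + #multiline.negate: false + #multiline.match: after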
+ + # The regexp pattern that has to be matched. The example pattern matches all lines starting with [ + #multiline.pattern: ^\[ + + # Defines whether the pattern set under pattern should be negated or not. Default is false. + #multiline.negate: false + + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern + # that was (not) matched before or after, or as long as a pattern is not matched, based on negate. + # Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash. + #multiline.match: after + + # The maximum number of lines that are combined into one event. + # If there are more than max_lines, the additional lines are discarded. + # Default is 500. + #multiline.max_lines: 500 + + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event. + # Default is 5s. + #multiline.timeout: 5s + + # Setting tail_files to true means filebeat starts reading new files at the end + # instead of the beginning. If this is used in combination with log rotation + # this can mean that the first entries of a new file are skipped. + #tail_files: false + + # The Ingest Node pipeline ID associated with this input. If this is set, it + # overwrites the pipeline option from the Elasticsearch output. + #pipeline: + + # If symlinks is enabled, symlinks are opened and harvested. The harvester opens the + # original file but reports the symlink name as the source. + #symlinks: false + + # Backoff values define how aggressively filebeat crawls new files for updates. + # The default values can be used in most cases. Backoff defines how long Filebeat waits + # before checking a file again after EOF is reached. The default is 1s, which means the file + # is checked every second for new lines. This leads to near real time crawling. + # Every time a new line appears, backoff is reset to the initial value. + #backoff: 1s + + # Max backoff defines what the maximum backoff time is. After having backed off multiple times + # from checking the files, the waiting time will never exceed max_backoff independent of the + # backoff factor. With 10s, in the worst case a new line appended to a log file after multiple + # backoffs is read within a maximum of 10s. + #max_backoff: 10s + + # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor, + # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen. + # The backoff value is multiplied by backoff_factor each time until max_backoff is reached. + #backoff_factor: 2 + + # Max number of harvesters that are started in parallel. + # Default is 0, which means unlimited. + #harvester_limit: 0 + + ### Harvester closing options + + # Close inactive closes the file handler after the predefined period. + # The period starts when the last line of the file was read, not from the file's ModTime. + # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + #close_inactive: 5m + + # Close renamed closes a file handler when the file is renamed or rotated. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_renamed: false + + # When enabling this option, a file handler is closed immediately in case a file can't be found + # any more.
In case the file shows up again later, harvesting will continue at the last known position + # after scan_frequency. + #close_removed: true + + # Closes the file handler as soon as the harvester reaches the end of the file. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_eof: false + + ### State options + + # If the modification time of a file is older than clean_inactive, the file's state is + # removed from the registry. + # By default this is disabled. + #clean_inactive: 0 + + # Immediately removes the state of files which cannot be found on disk anymore. + #clean_removed: true + + # Close timeout closes the harvester after the predefined time. + # This applies whether or not the harvester has finished reading the file. + # By default this option is disabled. + # Note: Potential data loss. Make sure to read and understand the docs for this option. + #close_timeout: 0 + + # Defines whether the input is enabled. + #enabled: true + +#----------------------------- Stdin input ------------------------------- +# Configuration to use stdin input +#- type: stdin + +#------------------------- Redis slowlog input --------------------------- +# Experimental: Config options for the redis slow log input +#- type: redis + #enabled: false + + # List of hosts to poll to retrieve the slow log information. + #hosts: ["localhost:6379"] + + # How often the input checks for redis slow log. + #scan_frequency: 10s + + # Timeout after which the input should return an error. + #timeout: 1s + + # Network type to be used for redis connection. Default: tcp + #network: tcp + + # Max number of concurrent connections. Default: 10 + #maxconn: 10 + + # Redis AUTH password. Empty by default. + #password: foobared + +#------------------------------ Udp input -------------------------------- +# Experimental: Config options for the udp input +#- type: udp + #enabled: false + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +#------------------------------ TCP input -------------------------------- +# Experimental: Config options for the TCP input +#- type: tcp + #enabled: false + + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new messages + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default it is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key. + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. Default is required.
+ #ssl.client_authentication: "required" + +#------------------------------ Syslog input -------------------------------- +# Experimental: Config options for the Syslog input +# Accept RFC3164 formatted syslog event via UDP. +#- type: syslog + #enabled: false + #protocol.udp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Maximum size of the message received over UDP + #max_message_size: 10KiB + +# Accept RFC3164 formatted syslog event via TCP. +#- type: syslog + #enabled: false + + #protocol.tcp: + # The host and port to receive the new event + #host: "localhost:9000" + + # Character used to split new message + #line_delimiter: "\n" + + # Maximum size in bytes of the message received over TCP + #max_message_size: 20MiB + + # The number of seconds of inactivity before a remote connection is closed. + #timeout: 300s + + # Use SSL settings for TCP. + #ssl.enabled: true + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for client verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL server authentication. + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Server Certificate Key, + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections. + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites. + #ssl.curve_types: [] + + # Configure what types of client authentication are supported. Valid options + # are `none`, `optional`, and `required`. Default is required. + #ssl.client_authentication: "required" + +#------------------------------ Docker input -------------------------------- +# Experimental: Docker input reads and parses `json-file` logs from Docker +#- type: docker + #enabled: false + + # Combine partial lines flagged by `json-file` format + #combine_partials: true + + # Use this to read from all containers, replace * with a container id to read from one: + #containers: + # stream: all # can be all, stdout or stderr + # ids: + # - '*' + +#========================== Filebeat autodiscover ============================== + +# Autodiscover allows you to detect changes in the system and spawn new modules +# or inputs as they happen. + +#filebeat.autodiscover: + # List of enabled autodiscover providers +# providers: +# - type: docker +# templates: +# - condition: +# equals.docker.container.image: busybox +# config: +# - type: log +# paths: +# - /var/lib/docker/containers/${data.docker.container.id}/*.log + +#========================= Filebeat global options ============================ + +# Name of the registry file. If a relative path is used, it is considered relative to the +# data path. +#filebeat.registry_file: ${path.data}/registry + +# The permissions mask to apply on registry file. The default value is 0600. +# Must be a valid Unix-style file permissions mask expressed in octal notation. +# This option is not supported on Windows. +#filebeat.registry_file_permissions: 0600 + +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Filebeat overwrites pipelines +# everytime a new Elasticsearch connection is established. 
+#filebeat.overwrite_pipelines: false + +# How long filebeat waits on shutdown for the publisher to finish. +# Default is 0, not waiting. +#filebeat.shutdown_timeout: 0 + +# Enable filebeat config reloading +#filebeat.config: + #inputs: + #enabled: false + #path: inputs.d/*.yml + #reload.enabled: true + #reload.period: 10s + #modules: + #enabled: false + #path: modules.d/*.yml + #reload.enabled: true + #reload.period: 10s + +#================================ General ====================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. 
The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# # To connect to Docker over TLS you must specify a client and CA certificate. 
+# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with Docker metadata; it matches +# the container ID from the log path available in the `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false + +#============================= Elastic Cloud ================================== + +# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `<user>:<pass>`. +#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200). + # If you specify an additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "filebeat" plus date + # and generates [filebeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+ # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optional load balance the events between the Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. 
+ #backoff.max: 60s + + # Optional index name. The default index name is set to filebeat + # in all lowercase. + #index: 'filebeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. 
Password is required if username is set. + #username: '' + #password: '' + + # Kafka version filebeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is filebeat. + #key: filebeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. 
+ #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/filebeat" + + # Name of the generated files. The default is `filebeat` and it generates + # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. + #filename: filebeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every filebeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the filebeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). 
+# If not set by a CLI flag or in the configuration file, the default for the
+# home path is the location of the binary.
+#path.home:
+
+# The configuration path for the filebeat installation. This is the default
+# base path for configuration files, including the main YAML configuration file
+# and the Elasticsearch template file. If not set by a CLI flag or in the
+# configuration file, the default for the configuration path is the home path.
+#path.config: ${path.home}
+
+# The data path for the filebeat installation. This is the default base path
+# for all the files in which filebeat needs to store its data. If not set by a
+# CLI flag or in the configuration file, the default for the data path is a data
+# subdirectory inside the home path.
+#path.data: ${path.home}/data
+
+# The logs path for a filebeat installation. This is the default location for
+# the Beat's log files. If not set by a CLI flag or in the configuration file,
+# the default for the logs path is a logs subdirectory inside the home path.
+#path.logs: ${path.home}/logs
+
+#================================ Keystore ==========================================
+# Location of the Keystore containing the keys and their sensitive values.
+#keystore.path: "${path.config}/beats.keystore"
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The directory from where to read the dashboards. The default is the `kibana`
+# folder in the home path.
+#setup.dashboards.directory: ${path.home}/kibana
+
+# The URL from where to download the dashboards archive. It is used instead of
+# the directory if it has a value.
+#setup.dashboards.url:
+
+# The file archive (zip file) from where to read the dashboards. It is used instead
+# of the directory when it has a value.
+#setup.dashboards.file:
+
+# In case the archive contains the dashboards from multiple Beats, this lets you
+# select which one to load. You can load all the dashboards in the archive by
+# setting this to the empty string.
+#setup.dashboards.beat: filebeat
+
+# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
+#setup.dashboards.kibana_index: .kibana
+
+# The Elasticsearch index name. This overwrites the index name defined in the
+# dashboards and index pattern. Example: testbeat-*
+#setup.dashboards.index:
+
+# Always use the Kibana API for loading the dashboards instead of autodetecting
+# how to install the dashboards by first querying Elasticsearch.
+#setup.dashboards.always_kibana: false
+
+# If true and Kibana is not reachable at the time when dashboards are loaded,
+# it will retry reconnecting to Kibana instead of exiting with an error.
+#setup.dashboards.retry.enabled: false
+
+# Duration interval between Kibana connection retries.
+#setup.dashboards.retry.interval: 1s
+
+# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
+#setup.dashboards.retry.maximum: 0
+
+
+#============================== Template =====================================
+
+# A template is used to set the mapping in Elasticsearch.
+# By default template loading is enabled and the template is loaded.
+# These settings can be adjusted to load your own template or overwrite existing ones.
+
+# Set to false to disable template loading.
+#setup.template.enabled: true
+
+# Template name. By default the template name is "filebeat-%{[beat.version]}".
+# The template name and pattern have to be set in case the Elasticsearch index
+# pattern is modified.
+#setup.template.name: "filebeat-%{[beat.version]}"
+
+# Template pattern. By default the template pattern is "filebeat-%{[beat.version]}-*"
+# to apply to the default index settings. The first part is the name and version
+# of the beat, and -* is used to match all daily indices.
+# The template name and pattern have to be set in case the Elasticsearch index
+# pattern is modified.
+#setup.template.pattern: "filebeat-%{[beat.version]}-*"
+
+# Path to fields.yml file to generate the template
+#setup.template.fields: "${path.config}/fields.yml"
+
+# A list of fields to be added to the template and Kibana index pattern. Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+# This setting is experimental.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
+# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the JSON template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
+# Overwrite existing template
+#setup.template.overwrite: false
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  #index:
+    #number_of_shards: 1
+    #codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601).
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Optional HTTP Path
+  #path: ""
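The host rules above (scheme and port optional, defaulting to http and 5601) can be made concrete with a small sketch. This is an illustrative parser under those stated defaults, not libbeat's actual one; the function name is made up:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// withKibanaDefaults applies the defaults described above: if the host has no
// scheme, http is assumed, and if it has no port, 5601 is appended.
func withKibanaDefaults(host string) (string, error) {
	if !strings.Contains(host, "://") {
		host = "http://" + host
	}
	u, err := url.Parse(host)
	if err != nil {
		return "", err
	}
	if u.Port() == "" {
		u.Host += ":5601"
	}
	return u.String(), nil
}

func main() {
	for _, h := range []string{"localhost", "https://kibana.example.com", "[2001:db8::1]:5601"} {
		s, err := withKibanaDefaults(h)
		fmt.Println(s, err)
	}
}
```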
+
+  # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration is off by default.
+  # List of root certificates for HTTPS server verification
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+
+
+#================================ Logging ======================================
+# There are four options for the log output: file, stderr, syslog, eventlog
+# The file output is the default.
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are "beat", "publish", "service"
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: false
+
+# Send all logging output to Windows Event Logs. The default is false.
+#logging.to_eventlog: false
+
+# If enabled, filebeat periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+#logging.metrics.enabled: true
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/filebeat
+
+  # The name of the files where the logs are written to.
+  #name: filebeat
+
+  # Configure log file size limit. If the limit is reached, the log file will be
+  # automatically rotated.
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+# Set to true to log messages in JSON format.
+#logging.json: false
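The delta logging described for logging.metrics.* amounts to snapshotting the counters at the start of each period and reporting only what changed. A rough Go sketch of the idea (the function and metric names are invented; this is not the libbeat monitoring code):

```go
package main

import (
	"log"
	"sync/atomic"
	"time"
)

var published int64 // a fake counter standing in for a Beat metric

// logMetricDeltas snapshots the metrics at the start of each period and logs
// only the deltas of values that changed, as logging.metrics.* describes.
func logMetricDeltas(current func() map[string]int64, period time.Duration) {
	last := current()
	for range time.Tick(period) {
		now := current()
		for name, v := range now {
			if delta := v - last[name]; delta != 0 {
				log.Printf("metric %s changed by %+d in the last %v", name, delta, period)
			}
		}
		last = now
	}
}

func main() {
	go func() { // simulate event publishing
		for {
			atomic.AddInt64(&published, 1)
			time.Sleep(100 * time.Millisecond)
		}
	}()
	snapshot := func() map[string]int64 {
		return map[string]int64{"pipeline.events.published": atomic.LoadInt64(&published)}
	}
	logMetricDeltas(snapshot, 2*time.Second)
}
```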
+
+
+#============================== Xpack Monitoring =====================================
+# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
+# This requires xpack monitoring to be enabled in Elasticsearch.
+# The reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line, and leave the rest commented out.
+#xpack.monitoring.elasticsearch:
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200).
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "beats_system"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the url with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server url
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted. If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration is off by default.
+  # List of root certificates for HTTPS server verification
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  #metrics.period: 10s
+  #state.period: 1m
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
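Since the stats endpoint described above serves plain JSON over HTTP, any client can read it once http.enabled is set to true. A minimal Go example, assuming the default localhost:5066 shown above:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Requires http.enabled: true in the Beat's configuration.
	resp, err := http.Get("http://localhost:5066/stats?pretty")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```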
diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml
new file mode 100644
index 00000000000..5ae43216b7d
--- /dev/null
+++ b/x-pack/filebeat/filebeat.yml
@@ -0,0 +1,191 @@
+###################### Filebeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The filebeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/filebeat/index.html
+
+# For more available modules and options, please see the filebeat.reference.yml sample
+# configuration file.
+
+#=========================== Filebeat inputs =============================
+
+filebeat.inputs:
+
+# Each - is an input. Most options can be set at the input level, so
+# you can use different inputs for various configurations.
+# Below are the input specific configurations.
+
+- type: log
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines
+  # that match any regular expression from the list.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines
+  # that match any regular expression from the list.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the
+  # files that match any regular expression from the list. By default, no files
+  # are dropped.
+  #exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  ### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is
+  # common for Java stack traces or C line continuations.
+
+  # The regexp pattern that has to be matched. The example pattern matches all
+  # lines starting with [
+  #multiline.pattern: ^\[
+
+  # Defines if the pattern set under pattern should be negated or not. Default is false.
+  #multiline.negate: false
+
+  # Match can be set to "after" or "before". It is used to define if lines
+  # should be appended to a pattern that was (not) matched before or after, or
+  # as long as a pattern is not matched based on negate.
+  # Note: after is equivalent to previous and before is equivalent to next in Logstash.
+  #multiline.match: after
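The three multiline options interact as follows: pattern classifies each line, negate flips that classification, and match: after appends a continuation line to the event that precedes it. A rough Go sketch of the match: after case (illustrative only, not Filebeat's harvester code):

```go
package main

import (
	"fmt"
	"regexp"
)

// joinAfter sketches the multiline.match: after behavior: with negate: false,
// lines that match the pattern are appended to the previous event; with
// negate: true, lines that do NOT match are appended instead.
func joinAfter(lines []string, pattern string, negate bool) []string {
	re := regexp.MustCompile(pattern)

	var events []string
	for _, line := range lines {
		isContinuation := re.MatchString(line) != negate
		if isContinuation && len(events) > 0 {
			events[len(events)-1] += "\n" + line // append to previous event
		} else {
			events = append(events, line) // start a new event
		}
	}
	return events
}

func main() {
	lines := []string{
		"[2018-09-05] exception in thread main",
		"  at example.Frame1",
		"  at example.Frame2",
		"[2018-09-05] all good",
	}
	// pattern ^\[ with negate: true groups stack-trace lines with their header.
	for _, ev := range joinAfter(lines, `^\[`, true) {
		fmt.Printf("event: %q\n", ev)
	}
}
```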
+
+
+#============================= Filebeat modules ===============================
+
+filebeat.config.modules:
+  # Glob pattern for configuration loading
+  path: ${path.config}/modules.d/*.yml
+
+  # Set to true to enable config reloading
+  reload.enabled: false
+
+  # Period on which files under path should be checked for changes
+  #reload.period: 10s
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template.settings:
+  index.number_of_shards: 3
+  #index.codec: best_compression
+  #_source.enabled: false
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601).
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verification
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# filebeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch:
diff --git a/x-pack/filebeat/include/list.go b/x-pack/filebeat/include/list.go
new file mode 100644
index 00000000000..9ced710f59a
--- /dev/null
+++ b/x-pack/filebeat/include/list.go
@@ -0,0 +1,9 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package include
+
+import (
+	// Blank import so the module's init runs and registers its assets.
+	_ "github.com/elastic/beats/x-pack/filebeat/module/suricata"
+)
diff --git a/x-pack/filebeat/magefile.go b/x-pack/filebeat/magefile.go
new file mode 100644
index 00000000000..ca62bf59d41
--- /dev/null
+++ b/x-pack/filebeat/magefile.go
@@ -0,0 +1,233 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build mage
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/elastic/beats/dev-tools/mage"
+	"github.com/magefile/mage/mg"
+)
+
+func init() {
+	mage.BeatDescription = "Filebeat sends log files to Logstash or directly to Elasticsearch."
+	mage.BeatLicense = "Elastic"
+}
+
+// Build builds the Beat binary.
+func Build() error {
+	return mage.Build(mage.DefaultBuildArgs())
+}
+
+// GolangCrossBuild builds the Beat binary inside of the golang-builder.
+// Do not use directly, use crossBuild instead.
+func GolangCrossBuild() error {
+	return mage.GolangCrossBuild(mage.DefaultGolangCrossBuildArgs())
+}
+
+// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
+func BuildGoDaemon() error {
+	return mage.BuildGoDaemon()
+}
+
+// CrossBuild cross-builds the beat for all target platforms.
+func CrossBuild() error {
+	return mage.CrossBuild()
+}
+
+// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
+func CrossBuildGoDaemon() error {
+	return mage.CrossBuildGoDaemon()
+}
+
+// Clean cleans all generated files and build artifacts.
+func Clean() error {
+	return mage.Clean()
+}
+
+// Package packages the Beat for distribution.
+// Use SNAPSHOT=true to build snapshots.
+// Use PLATFORMS to control the target platforms.
+// Use BEAT_VERSION_QUALIFIER to control the version qualifier.
+func Package() {
+	start := time.Now()
+	defer func() { fmt.Println("package ran for", time.Since(start)) }()
+
+	mage.LoadLocalNamedSpec("xpack")
+
+	mg.SerialDeps(Fields, Dashboards, Config, prepareModulePackaging)
+	mg.Deps(CrossBuild, CrossBuildGoDaemon)
+	mg.SerialDeps(mage.Package, TestPackages)
+}
+
+// TestPackages tests the generated packages (i.e. file modes, owners, groups).
+func TestPackages() error {
+	return mage.TestPackages(mage.WithModules(), mage.WithModulesD())
+}
+
+// Fields generates a fields.yml and fields.go for each module.
+func Fields() {
+	mg.Deps(mage.GenerateModuleFieldsGo, fieldsYML)
+}
+
+// fieldsYML generates a fields.yml based on filebeat + x-pack/filebeat/modules.
+func fieldsYML() error {
+	return mage.GenerateFieldsYAML(mage.OSSBeatDir("module"), "module")
+}
+
+// Dashboards collects all the dashboards and generates index patterns.
+func Dashboards() error {
+	return mage.KibanaDashboards(mage.OSSBeatDir("module"), "module")
+}
+
+// Config generates both the short and reference configs.
+func Config() {
+	mg.Deps(shortConfig, referenceConfig, createDirModulesD)
+}
+
+// GoTestUnit executes the Go unit tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling.
+// Use RACE_DETECTOR=true to enable the race detector.
+func GoTestUnit(ctx context.Context) error {
+	return mage.GoTest(ctx, mage.DefaultGoTestUnitArgs())
+}
+
+// GoTestIntegration executes the Go integration tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling.
+// Use RACE_DETECTOR=true to enable the race detector.
+func GoTestIntegration(ctx context.Context) error {
+	return mage.GoTest(ctx, mage.DefaultGoTestIntegrationArgs())
+}
+
+// -----------------------------------------------------------------------------
+// Customizations specific to Filebeat.
+// - Include modules directory in packages (minus _meta and test files).
+// - Include modules.d directory in packages.
+
+const (
+	dirModuleGenerated   = "build/package/module"
+	dirModulesDGenerated = "build/package/modules.d"
+)
+
+// prepareModulePackaging generates modules and modules.d directories
+// for an x-pack distribution, excluding _meta and test files so that they are
+// not included in packages.
+func prepareModulePackaging() error {
+	mg.Deps(createDirModulesD)
+
+	err := mage.Clean([]string{
+		dirModuleGenerated,
+		dirModulesDGenerated,
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, copyAction := range []struct {
+		src, dst string
+	}{
+		{mage.OSSBeatDir("module"), dirModuleGenerated},
+		{"module", dirModuleGenerated},
+		{mage.OSSBeatDir("modules.d"), dirModulesDGenerated},
+		{"modules.d", dirModulesDGenerated},
+	} {
+		err := (&mage.CopyTask{
+			Source:  copyAction.src,
+			Dest:    copyAction.dst,
+			Mode:    0644,
+			DirMode: 0755,
+			Exclude: []string{
+				"/_meta",
+				"/test",
+				"fields.go",
+			},
+		}).Execute()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func shortConfig() error {
+	var configParts = []string{
+		mage.OSSBeatDir("_meta/common.p1.yml"),
+		mage.OSSBeatDir("_meta/common.p2.yml"),
+		"{{ elastic_beats_dir }}/libbeat/_meta/config.yml",
+	}
+
+	for i, f := range configParts {
+		configParts[i] = mage.MustExpand(f)
+	}
+
+	configFile := mage.BeatName + ".yml"
+	mage.MustFileConcat(configFile, 0640, configParts...)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix)
+	return nil
+}
+
+func referenceConfig() error {
+	const modulesConfigYml = "build/config.modules.yml"
+	err := mage.GenerateModuleReferenceConfig(modulesConfigYml, mage.OSSBeatDir("module"), "module")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(modulesConfigYml)
+
+	var configParts = []string{
+		mage.OSSBeatDir("_meta/common.reference.p1.yml"),
+		modulesConfigYml,
+		mage.OSSBeatDir("_meta/common.reference.p2.yml"),
+		"{{ elastic_beats_dir }}/libbeat/_meta/config.reference.yml",
+	}
+
+	for i, f := range configParts {
+		configParts[i] = mage.MustExpand(f)
+	}
+
+	configFile := mage.BeatName + ".reference.yml"
+	mage.MustFileConcat(configFile, 0640, configParts...)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beatname"), mage.BeatName)
+	mage.MustFindReplace(configFile, regexp.MustCompile("beat-index-prefix"), mage.BeatIndexPrefix)
+	return nil
+}
+
+// createDirModulesD regenerates the modules.d directory by copying each
+// module's short config (module/*/_meta/config.yml) to
+// modules.d/<module>.yml.disabled.
+func createDirModulesD() error {
+	if err := os.RemoveAll("modules.d"); err != nil {
+		return err
+	}
+
+	shortConfigs, err := filepath.Glob("module/*/_meta/config.yml")
+	if err != nil {
+		return err
+	}
+
+	for _, f := range shortConfigs {
+		parts := strings.Split(filepath.ToSlash(f), "/")
+		if len(parts) < 2 {
+			continue
+		}
+		moduleName := parts[1]
+
+		cp := mage.CopyTask{
+			Source: f,
+			Dest:   filepath.Join("modules.d", moduleName+".yml.disabled"),
+			Mode:   0644,
+		}
+		if err = cp.Execute(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
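The Asset string in the generated fields.go below is the module's fields.yml, zlib-compressed and then base64-encoded (the leading `eJy` is the base64 form of a zlib header). A standalone sketch of the decoding direction — this mirrors the encoding used by dev-tools/cmd/asset, but decodeAsset is an illustrative helper, not the asset package's API:

```go
package main

import (
	"bytes"
	"compress/zlib"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
)

// decodeAsset reverses the asset encoding: base64 decode, then zlib
// decompress, yielding the original fields.yml text.
func decodeAsset(blob string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(blob)
	if err != nil {
		return "", err
	}
	r, err := zlib.NewReader(bytes.NewReader(raw))
	if err != nil {
		return "", err
	}
	defer r.Close()
	yml, err := ioutil.ReadAll(r)
	return string(yml), err
}

func main() {
	// The blob is the Asset string from module/suricata/fields.go below.
	yml, err := decodeAsset("eJyUkc1qwzAQhO95ioFemzyAD72UtFDoDwR6DYq0kkUUrdBPU799sRO7duIWouNKO/PNaIk9NRVSiVaKLBZAttlRhc3vRFGS0YZs2Vd4WADAK6viCJojauGVs94g14T15xovm/c3ODYJIbIqkhR2zaC3WgDaklOp6pSW8OJAE4L25CZQBRO5hPNkhqI9T50WdORDR9D7dCiODbR1tDo/HxuPzemLhtmc9z/+Iwb6DhzzKe5VGaOFS4pJDVFubZhc9UAX435DUco3rmjHx61Vsyt7ao4cp3d3eCbGB1ufE1LNxSns6JTxcXPOc49dyfCMo2iQGZK9tub0MTXF4Q+u8xrilWMp2mpnmQzxNrTufxdwg8ZPAAAA//9+KNCR")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(yml)
}
```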
diff --git a/x-pack/filebeat/module/suricata/fields.go b/x-pack/filebeat/module/suricata/fields.go
new file mode 100644
index 00000000000..e5b55771241
--- /dev/null
+++ b/x-pack/filebeat/module/suricata/fields.go
@@ -0,0 +1,22 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT.
+
+package suricata
+
+import (
+	"github.com/elastic/beats/libbeat/asset"
+)
+
+func init() {
+	if err := asset.SetFields("filebeat", "suricata", Asset); err != nil {
+		panic(err)
+	}
+}
+
+// Asset returns asset data
+func Asset() string {
+	return "eJyUkc1qwzAQhO95ioFemzyAD72UtFDoDwR6DYq0kkUUrdBPU799sRO7duIWouNKO/PNaIk9NRVSiVaKLBZAttlRhc3vRFGS0YZs2Vd4WADAK6viCJojauGVs94g14T15xovm/c3ODYJIbIqkhR2zaC3WgDaklOp6pSW8OJAE4L25CZQBRO5hPNkhqI9T50WdORDR9D7dCiODbR1tDo/HxuPzemLhtmc9z/+Iwb6DhzzKe5VGaOFS4pJDVFubZhc9UAX435DUco3rmjHx61Vsyt7ao4cp3d3eCbGB1ufE1LNxSns6JTxcXPOc49dyfCMo2iQGZK9tub0MTXF4Q+u8xrilWMp2mpnmQzxNrTufxdwg8ZPAAAA//9+KNCR"
+}
diff --git a/x-pack/filebeat/packages.yml b/x-pack/filebeat/packages.yml
new file mode 100644
index 00000000000..5526e29a472
--- /dev/null
+++ b/x-pack/filebeat/packages.yml
@@ -0,0 +1,71 @@
+specs:
+  xpack:
+    - os: windows
+      types: [zip]
+      spec:
+        <<: *windows_binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          modules.d:
+            mode: 0644
+            source: build/package/modules.d
+            config: true
+          module:
+            mode: 0644
+            source: build/package/module
+
+    - os: darwin
+      types: [tgz]
+      spec:
+        <<: *binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          modules.d:
+            mode: 0644
+            source: build/package/modules.d
+            config: true
+          module:
+            mode: 0644
+            source: build/package/module
+
+    - os: darwin
+      types: [dmg]
+      spec:
+        <<: *macos_beat_pkg_spec
+        <<: *elastic_license_for_macos_pkg
+        files:
+          /etc/{{.BeatName}}/modules.d:
+            mode: 0644
+            source: build/package/modules.d
+            config: true
+          '/Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/module':
+            mode: 0644
+            source: build/package/module
+
+    - os: linux
+      types: [tgz]
+      spec:
+        <<: *binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          modules.d:
+            mode: 0644
+            source: build/package/modules.d
+            config: true
+          module:
+            mode: 0644
+            source: build/package/module
+
+    - os: linux
+      types: [deb, rpm]
+      spec:
+        <<: *deb_rpm_spec
+        <<: *elastic_license_for_deb_rpm
+        files:
+          '/etc/{{.BeatName}}/modules.d':
+            mode: 0644
+            source: build/package/modules.d
+            config: true
+          '/usr/share/{{.BeatName}}/module':
+            mode: 0644
+            source: build/package/module
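A note on packages.yml: the `<<: *windows_binary_spec`-style entries are YAML merge keys that pull in anchored spec fragments (windows_binary_spec, elastic_license_for_binaries, and so on) defined in the shared packaging spec rather than in this file, so the file only parses once it is concatenated with those definitions. A small self-contained Go example of the merge-key mechanism (using gopkg.in/yaml.v2; the anchor and field names here are made up):

```go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

const doc = `
base_spec: &base_spec
  mode: "0644"
  config: true

package:
  <<: *base_spec
  source: build/package/modules.d
`

func main() {
	var out map[string]map[string]interface{}
	if err := yaml.Unmarshal([]byte(doc), &out); err != nil {
		log.Fatal(err)
	}
	// The merged mapping contains the anchored keys plus its own
	// (map key order varies between runs).
	fmt.Println(out["package"])
}
```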