##################################### # pfelk System Information ########## #####################################\n Linux 4.19.0-14-amd64 x86_64 PRETTY_NAME="Debian GNU/Linux 10 (buster)" NAME="Debian GNU/Linux" VERSION_ID="10" VERSION="10 (buster)" VERSION_CODENAME=buster ID=debian HOME_URL="https://www.debian.org/" SUPPORT_URL="https://www.debian.org/support" BUG_REPORT_URL="https://bugs.debian.org/" total used free shared buff/cache available Mem: 7,8Gi 5,8Gi 1,1Gi 16Mi 822Mi 1,7Gi Swap: 975Mi 0B 975Mi \n##################################### # Listing pfelk Directory Structure # #####################################\n /etc/pfelk/ /etc/pfelk/templates /etc/pfelk/scripts /etc/pfelk/scripts/error-data.sh.2 /etc/pfelk/scripts/error-data.sh.3 /etc/pfelk/scripts/error-data.sh /etc/pfelk/scripts/error-data.sh.1 /etc/pfelk/patterns /etc/pfelk/patterns/pfelk.grok.3 /etc/pfelk/patterns/pfelk.grok.1 /etc/pfelk/patterns/pfelk.grok /etc/pfelk/patterns/pfelk.grok.2 /etc/pfelk/logs /etc/pfelk/logs/error.pfelk.log /etc/pfelk/conf.d /etc/pfelk/conf.d/02-types.conf.2 /etc/pfelk/conf.d/35-rules-desc.conf.1 /etc/pfelk/conf.d/30-geoip.conf.2 /etc/pfelk/conf.d/45-cleanup.conf.2 /etc/pfelk/conf.d/50-outputs.conf.3 /etc/pfelk/conf.d/01-inputs.conf /etc/pfelk/conf.d/05-apps.conf.1 /etc/pfelk/conf.d/03-filter.conf /etc/pfelk/conf.d/20-interfaces.conf.2 /etc/pfelk/conf.d/45-cleanup.conf.3 /etc/pfelk/conf.d/01-inputs.conf.1 /etc/pfelk/conf.d/01-inputs.conf.2 /etc/pfelk/conf.d/50-outputs.conf /etc/pfelk/conf.d/02-types.conf.3 /etc/pfelk/conf.d/45-cleanup.conf.1 /etc/pfelk/conf.d/20-interfaces.conf /etc/pfelk/conf.d/36-ports-desc.conf.1 /etc/pfelk/conf.d/05-apps.conf.3 /etc/pfelk/conf.d/35-rules-desc.conf.3 /etc/pfelk/conf.d/36-ports-desc.conf.3 /etc/pfelk/conf.d/03-filter.conf.2 /etc/pfelk/conf.d/05-apps.conf /etc/pfelk/conf.d/20-interfaces.conf.3 /etc/pfelk/conf.d/03-filter.conf.3 /etc/pfelk/conf.d/02-types.conf /etc/pfelk/conf.d/30-geoip.conf /etc/pfelk/conf.d/05-apps.conf.2 
/etc/pfelk/conf.d/35-rules-desc.conf.2 /etc/pfelk/conf.d/36-ports-desc.conf.2 /etc/pfelk/conf.d/01-inputs.conf.3 /etc/pfelk/conf.d/30-geoip.conf.3 /etc/pfelk/conf.d/35-rules-desc.conf /etc/pfelk/conf.d/50-outputs.conf.1 /etc/pfelk/conf.d/45-cleanup.conf /etc/pfelk/conf.d/50-outputs.conf.2 /etc/pfelk/conf.d/36-ports-desc.conf /etc/pfelk/conf.d/30-geoip.conf.1 /etc/pfelk/conf.d/02-types.conf.1 /etc/pfelk/conf.d/03-filter.conf.1 /etc/pfelk/conf.d/20-interfaces.conf.1 /etc/pfelk/config /etc/pfelk/databases /etc/pfelk/databases/rule-names.csv.3 /etc/pfelk/databases/private-hostnames.csv /etc/pfelk/databases/service-names-port-numbers.csv /etc/pfelk/databases/private-hostnames.csv.3 /etc/pfelk/databases/service-names-port-numbers.csv.3 /etc/pfelk/databases/private-hostnames.csv.1 /etc/pfelk/databases/rule-names.csv.1 /etc/pfelk/databases/rule-names.csv /etc/pfelk/databases/service-names-port-numbers.csv.2 /etc/pfelk/databases/private-hostnames.csv.2 /etc/pfelk/databases/service-names-port-numbers.csv.1 /etc/pfelk/databases/rule-names.csv.2 /etc/logstash/ /etc/logstash/logstash.yml /etc/logstash/pipelines.yml /etc/logstash/jvm.options /etc/logstash/conf.d /etc/logstash/log4j2.properties /etc/logstash/logstash-sample.conf /etc/logstash/startup.options \n##################################### # pfelk Config File Details ######### #####################################\n # 01-inputs.conf ################################################################################ # Version: 21.02 # # Required: Yes # # Description: Sets the type and port to listen. User may remove unutilized # # inputs, as desired. 
# ################################################################################ # input { ### Firewall 1 ### udp { id => "pfelk-1" type => "firewall-1" port => 5140 #ssl => true #ssl_certificate_authorities => ["/etc/logstash/ssl/YOURCAHERE.crt"] #ssl_certificate => "/etc/logstash/ssl/SERVER.crt" #ssl_key => "/etc/logstash/ssl/SERVER.key" #ssl_verify_mode => "force_peer" } ### Firewall 2 ### udp { id => "pfelk-2" type => "firewall-2" port => 5141 #ssl => true #ssl_certificate_authorities => ["/etc/logstash/ssl/YOURCAHERE.crt"] #ssl_certificate => "/etc/logstash/ssl/SERVER.crt" #ssl_key => "/etc/logstash/ssl/SERVER.key" #ssl_verify_mode => "force_peer" } ### Suricata ### tcp { id => "pfelk-suricata" type => "suricata" port => 5040 #ssl => true #ssl_certificate_authorities => ["/etc/logstash/ssl/YOURCAHERE.crt"] #ssl_certificate => "/etc/logstash/ssl/SERVER.crt" #ssl_key => "/etc/logstash/ssl/SERVER.key" #ssl_verify_mode => "force_peer" } ### HAProxy ### udp { id => "pfelk-haproxy" type => "haproxy" port => 5190 #ssl => true #ssl_certificate_authorities => ["/etc/logstash/ssl/YOURCAHERE.crt"] #ssl_certificate => "/etc/logstash/ssl/SERVER.crt" #ssl_key => "/etc/logstash/ssl/SERVER.key" #ssl_verify_mode => "force_peer" } ### Beats ### beats { id => "Beats" type => "beats" port => 5044 } } # 02-types.conf ################################################################################ # Version: 21.01 # # Required: Yes # # Description: Adds customized fileds based on type. 
The user may amend the # # observer.name, observer.product and observer.serial_number fields as desired # ################################################################################ # filter { ### PF-Firewall-1 ### if [type] == "firewall-1" { mutate { add_field => [ "[observer][type]", "firewall" ] ### Adjust the name, product and serial_number as desired ### add_field => [ "[observer][name]", "OPNsense" ] add_field => [ "[observer][product]", "Supermicro" ] add_field => [ "[observer][serial_number]", "001" ] rename => { "host" => "[observer][ip]" } } } ### PF-Firewall-2 ### if [type] == "firewall-2" { mutate { add_field => [ "[observer][type]", "firewall" ] ### Adjust the name, product and serial_number as desired ### add_field => [ "[observer][name]", "pfSense" ] add_field => [ "[observer][product]", "Supermicro" ] add_field => [ "[observer][serial_number]", "001" ] rename => { "host" => "[observer][ip]" } } } ### SURICATA ### if [type] == "suricata" { mutate { add_field => [ "[observer][type]", "suricata" ] ### Adjust the name, product and serial_number as desired ### add_field => [ "[observer][name]", "IDS" ] add_field => [ "[observer][product]", "Supermicro" ] add_field => [ "[observer][serial_number]", "001" ] rename => { "host" => "[observer][hostname]" } } } ### HAPROXY ### if [type] == "haproxy" { mutate { add_field => [ "[observer][type]", "haproxy" ] ### Adjust the name, product and serial_number as desired ### add_field => [ "[observer][name]", "haproxy-1" ] add_field => [ "[observer][product]", "Supermicro" ] add_field => [ "[observer][serial_number]", "001" ] rename => { "host" => "[observer][ip]" } } } ### BEATS ### if [type] == "beats" { mutate { add_field => [ "[observer][type]", "beats" ] } } } # 03-filter.conf ################################################################################ # Version: 21.03 # # Required: Yes # # Descritpion: Inital GROK pattern filtering for delinating syslog messages # # Works for pfSense 2.5.0 (RFC 
5424/RFC3164) and OPNsense 21.1+. # ################################################################################ # filter { if [observer][type] == "firewall" or [observer][type] == "haproxy" or [observer][type] == "suricata" { grok { match => {"message" => "%{POSINT:[log][syslog][priority]}?(%{INT:[log][syslog][version]}\s*)?(%{SYSLOGTIMESTAMP:[event][created]}|%{TIMESTAMP_ISO8601:[event][created]})\s(%{SYSLOGHOST:[host][name]}\s+)?%{PROG:[process][name]}\s*?(\[)?%{POSINT:[process][pid]}(\]:)?\s*(\-\s*\-)?\s*%{GREEDYDATA:filter_message}"} } date { match => [ "[event][created]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601" ] target => "[event][created]" } } } # 05-apps.conf ################################################################################ # Version: 21.03 # # Required: Yes # # Description: Parses events based on process.name and further enriches events # # # ################################################################################ # filter { ### captive portal ### # Rename pfSense captive portal log from logportalauth to captiveportal if [process][name] =~ /^logportalauth/ { mutate { replace => { "[process][name]" => "captiveportal" } } } if [process][name] =~ /^captiveportal/ { mutate { add_tag => [ "captive" ] add_field => { "[ecs][version]" => "1.7.0" } add_field => { "[event][dataset]" => "pfelk.captive" } rename => { "filter_message" => "captiveportalmessage" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "captiveportalmessage", "%{CAPTIVEPORTAL}" ] } } ### dhcpd ### if [process][name] =~ /^dhcpd$/ { mutate { add_tag => [ "dhcp", "dhcpdv4", "firewall" ] add_field => { "[event][dataset]" => "pfelk.dhcp" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{DHCPD}"] } } ### dhcp6 ### if [process][name] =~ /^dhcp6c/ { mutate { add_tag => [ "dhcp", "dhcpdv6", "firewall" ] add_field => { "[event][dataset]" => "pfelk.dhcp" } } grok { patterns_dir => ["/etc/pfelk/patterns" ] match => [ 
"filter_message", "%{DHCPDv6}" ] } } ### dpinger ### if [process][name] =~ /^dpinger/ { mutate { add_tag => [ "dpinger", "firewall" ] add_field => { "[event][dataset]" => "pfelk.dpinger" } } } ### filterlog ### if [process][name] =~ /^filterlog$/ { mutate { add_tag => [ "firewall" ] add_field => { "[ecs][version]" => "1.7.0" } add_field => { "[event][dataset]" => "pfelk.firewall" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{PF_LOG_ENTRY}" ] } } ### haproxy ### if [process][name] =~ /^haproxy/ { mutate { add_tag => [ "haproxy" ] add_field => { "[event][dataset]" => "pfelk.haproxy" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{HAPROXY}" ] } } ### openvpn ### if [process][name] =~ /^openvpn/ { mutate { add_tag => [ "openvpn", "firewall" ] add_field => { "[event][dataset]" => "pfelk.openvpn" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{OPENVPN}" ] } } ### named ### if [process][name] =~ /^named/ { mutate { add_tag => [ "bind9", "firewall" ] add_field => { "[event][dataset]" => "pfelk.bind9" } } grok { #patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{BIND9}" ] } } ### ntpd ### if [process][name] =~ /^ntpd/ { mutate { add_tag => [ "ntpd", "firewall" ] add_field => { "[event][dataset]" => "pfelk.ntpd" } } } ### php-fpm ### if [process][name] =~ /^php-fpm/ { mutate { add_tag => [ "web_portal", "firewall" ] add_field => { "[event][dataset]" => "pfelk.webportal" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => { "filter_message" => [ "%{PF_APP}", "%{PF_APP_DATA}" ] } } mutate { lowercase => [ 'pf_ACTION' ] } } ### snort ### if [process][name] =~ /^snort/ { mutate { add_tag => [ "snort" ] add_field => { "[ecs][version]" => "1.7.0" } add_field => { "[event][dataset]" => "pfelk.snort" } add_field => { "[event][category]" => "intrusion_detection" } add_field => { "[agent][type]" => "snort" } } grok { patterns_dir => [ 
"/etc/pfelk/patterns" ] match => [ "filter_message", "%{SNORT}" ] } } ### suricata ### if [process][name] =~ /^suricata$/ { if [filter_message] =~ /^{.*}$/ { json { source => "filter_message" target => "[suricata][eve]" add_tag => "suricata_json" } } if [suricata][eve][src_ip] and ![source][ip] { mutate { add_field => { "[source][ip]" => "%{[suricata][eve][src_ip]}" } } } if [suricata][eve][dest_ip] and ![destination][ip] { mutate { add_field => { "[destination][ip]" => "%{[suricata][eve][dest_ip]}" } } } if [suricata][eve][src_port] and ![source][port] { mutate { add_field => { "[source][port]" => "%{[suricata][eve][src_port]}" } } } if [suricata][eve][dest_port] and ![destination][port] { mutate { add_field => { "[destination][port]" => "%{[suricata][eve][dest_port]}" } } } if "suricata_json" not in [tags] { grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{SURICATA}" ] } } mutate { remove_tag => "suricata_json" add_tag => "suricata" add_field => { "[event][dataset]" => "pfelk.suricata" } } } ### squid ### if [process][name] == "(squid-1)" { mutate { replace => [ "[process][name]", "squid" ] add_field => { "[event][dataset]" => "pfelk.squid" } } if [filter_message] =~ /^{.*}$/ { json { source => "filter_message" add_tag => "squid_json" } } if "squid_json" not in [tags] { grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{SQUID}" ] } } ### squid ECS => Built-in SIEM JSON ### if "squid_json" in [tags] { grok { match => [ "[url][original]", "%{URIPROTO}://%{URIHOST:referer_domain}%{GREEDYDATA:[url][path]}" ] } mutate { rename => { "[http][response][body][status_code]" => "[http][response][status_code]" } rename => { "referer_domain" => "[url][domain]" } } } mutate { rename => { "[host][hostname]" => "[client][ip]" } remove_tag => "squid_json" add_tag => "squid" } } ### unbound ### if [process][name] =~ /^unbound/ { mutate { add_tag => "unbound" add_field => { "[ecs][version]" => "1.7.0" } add_field => { 
"[event][dataset]" => "pfelk.unbound" } } grok { patterns_dir => [ "/etc/pfelk/patterns" ] match => [ "filter_message", "%{UNBOUND}" ] } ### unbound ECS => Built-in SIEM ### grok { match => [ "[dns][question][name]", "(\.)?(?<[dns][question][registered_domain]>[^.]+\.[^.]+)$" ] add_tag => "unbound-registered_domain" } if "unbound-registered_domain" not in [tags] { grok { match => [ "[dns][question][name]", "(?<[dns][question][registered_domain]>[^.]+\.[^.]+)$" ] } } mutate { remove_tag => "unbound-registered_domain" } } } # 20-interfaces.conf ################################################################################ # Version: 21.02 # # Required: No - Optional # # Description: Adds interface.alias and network.name based on interface.name # # The interface.alias and network.name fields may be amended as desired # ################################################################################ # ### firewall-1 ### filter { if [type] == "firewall-1" { ### Change interface as desired ### if [interface][name] =~ /^igb0$/ { mutate { add_field => { "[interface][alias]" => "DEV" } add_field => { "[network][name]" => "Lab" } } } ### Change interface as desired ### if [interface][name] =~ /^igb1$/ { mutate { add_field => { "[interface][alias]" => "LAN" } add_field => { "[network][name]" => "Home Network" } } } ### Change interface as desired ### if [interface][name] =~ /^igb2$/ { mutate { add_field => { "[interface][alias]" => "WAN" } add_field => { "[network][name]" => "FiOS" } } } ### Change interface as desired ### if [interface][name] =~ /^igb3$/ { mutate { add_field => { "[interface][alias]" => "DMZ" } add_field => { "[network][name]" => "Exposed Network" } } } ### Change interface as desired ### if [interface][name] =~ /^igb1_vlan2000$/ { mutate { add_field => { "[interface][alias]" => "VLAN" } add_field => { "[network][name]" => "Isolated Network" } } } ### Change interface as desired ### if [interface][name] =~ /^lo0$/ { mutate { add_field => { 
"[interface][alias]" => "Link-Local" } update => { "[network][direction]" => "%{[network][direction]}bound" } update => { "[network][type]" => "ipv%{[network][type]}" } } } ### Fallback interface ### if ![interface][alias] and [interface][name] { mutate { add_field => { "[interface][alias]" => "%{[interface][name]}" } add_field => { "[network][name]" => "%{[interface][name]}" } } } } } ### firewall-2 ### filter { if [type] == "firewall-2" { ### Change interface as desired ### if [interface][name] =~ /^igb0$/ { mutate { add_field => { "[interface][alias]" => "WAN" } add_field => { "[network][name]" => "FiOS" } } } ### Change interface as desired ### if [interface][name] =~ /^igb1$/ { mutate { add_field => { "[interface][alias]" => "LAN" } add_field => { "[network][name]" => "Home Network" } } } ### Change interface as desired ### if [interface][name] =~ /^igb2$/ { mutate { add_field => { "[interface][alias]" => "DEV" } add_field => { "[network][name]" => "Lab" } } } ### Change interface as desired ### if [interface][name] =~ /^igb3$/ { mutate { add_field => { "[interface][alias]" => "DMZ" } add_field => { "[network][name]" => "Exposed Network" } } } ### Change interface as desired ### if [interface][name] =~ /^igb1_vlan2000$/ { mutate { add_field => { "[interface][alias]" => "VLAN" } add_field => { "[network][name]" => "Isolated Network" } } } ### Change interface as desired ### if [interface][name] =~ /^lo0$/ { mutate { add_field => { "[interface][alias]" => "Link-Local" } update => { "[network][direction]" => "%{[network][direction]}bound" } update => { "[network][type]" => "ipv%{[network][type]}" } } } ### Fallback interface ### if ![interface][alias] and [interface][name] { mutate { add_field => { "[interface][alias]" => "%{[interface][name]}" } add_field => { "[network][name]" => "%{[interface][name]}" } } } } } # 30-geoip.conf ################################################################################ # Version: 21.02 # # Required: No - Optional # # 
Description: Enriches source.ip and destination.ip fields with GeoIP data # # For MaxMind, remove all instances of "#MMR#" or leave for built-in GeoIP # ################################################################################ # filter { if [observer][type] == "firewall" or [observer][type] == "suricata" { if [source][ip] { ### Check if source.ip address is private cidr { address => [ "%{[source][ip]}" ] network => [ "0.0.0.0/32", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", "127.0.0.0/8", "::1/128", "169.254.0.0/16", "fe80::/10", "224.0.0.0/4", "ff00::/8", "255.255.255.255/32", "::" ] add_tag => "IP_Private_Source" } if "IP_Private_Source" not in [tags] { geoip { source => "[source][ip]" #MMR# database => "/var/lib/GeoIP/GeoLite2-City.mmdb" target => "[source][geo]" } geoip { default_database_type => 'ASN' #MMR# database => "/var/lib/GeoIP/GeoLite2-ASN.mmdb" source => "[source][ip]" target => "[source][as]" } mutate { rename => { "[source][as][asn]" => "[source][as][number]"} rename => { "[source][as][as_org]" => "[source][as][organization][name]"} rename => { "[source][geo][country_code2]" => "[source][geo][country_iso_code]"} rename => { "[source][geo][region_code]" => "[source][geo][region_iso_code]"} add_tag => "GeoIP_Source" } } } if [destination][ip] { ### Check if destination.ip address is private cidr { address => [ "%{[destination][ip]}" ] network => [ "0.0.0.0/32", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", "127.0.0.0/8", "::1/128", "169.254.0.0/16", "fe80::/10", "224.0.0.0/4", "ff00::/8", "255.255.255.255/32", "::" ] add_tag => "IP_Private_Destination" } if "IP_Private_Destination" not in [tags] { geoip { source => "[destination][ip]" #MMR# database => "/var/lib/GeoIP/GeoLite2-City.mmdb" target => "[destination][geo]" } geoip { default_database_type => 'ASN' #MMR# database => "/var/lib/GeoIP/GeoLite2-ASN.mmdb" source => "[destination][ip]" target => "[destination][as]" } mutate { rename => { 
"[destination][as][asn]" => "[destination][as][number]"} rename => { "[destination][as][as_org]" => "[destination][as][organization][name]"} rename => { "[destination][geo][country_code2]" => "[destination][geo][country_iso_code]"} rename => { "[destination][geo][region_code]" => "[destination][geo][region_iso_code]"} add_tag => "GeoIP_Destination" } } } } ### HAPROXY ### if [type] == "haproxy" { if [client][ip] { # Check if client.ip address is private cidr { address => [ "%{[client][ip]}" ] network => [ "0.0.0.0/32", "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", "127.0.0.0/8", "::1/128", "169.254.0.0/16", "fe80::/10", "224.0.0.0/4", "ff00::/8", "255.255.255.255/32", "::" ] add_tag => "IP_Private_HAProxy" } if "IP_Private_HAProxy" not in [tags] { geoip { source => "[client][ip]" #MMR# database => "/var/lib/GeoIP/GeoLite2-City.mmdb" target => "[source][geo]" } geoip { default_database_type => 'ASN' #MMR# database => "/var/lib/GeoIP/GeoLite2-ASN.mmdb" source => "[client][ip]" target => "[source][as]" } mutate { rename => { "[source][as][asn]" => "[source][as][number]"} rename => { "[source][as][as_org]" => "[source][as][organization][name]"} rename => { "[source][geo][country_code2]" => "[source][geo][country_iso_code]"} rename => { "[source][geo][region_code]" => "[source][geo][region_iso_code]"} add_tag => "GeoIP_Source" } } } } } # 35-rules-desc.conf ################################################################################ # Version: 21.02 # # Required: No - Optional # # Description: Checks for the presense of the rule_number field, if present # # runs translates the rule_number into a referenced description. 
# ################################################################################ # filter { if [observer][type] == "firewall" { if [rule][ruleset] { translate { field => "[rule][ruleset]" destination => "[rule][alias]" dictionary_path => "/etc/pfelk/databases/rule-names.csv" refresh_interval => 60 refresh_behaviour => replace fallback => "%{[rule][ruleset]}" } mutate { add_field => { "[rule][description]" => "%{[interface][alias]}: %{[rule][alias]}" } } } } } # 36-ports-desc.conf ################################################################################ # Version: 21.02 # # Required: No - Optional # # Description: Checks for the presense of the port field, if present runs # # translates the port into a referenced description. # ################################################################################ # filter { if [observer][type] == "firewall" { if [destination][port] { translate { field => "[destination][port]" destination => "[destination][service]" dictionary_path => "/etc/pfelk/databases/service-names-port-numbers.csv" refresh_interval => 300 refresh_behaviour => replace #fallback => "%{[destination][port]}" } } if [source][port] { translate { field => "[source][port]" destination => "[source][service]" dictionary_path => "/etc/pfelk/databases/service-names-port-numbers.csv" refresh_interval => 300 refresh_behaviour => replace #fallback => "%{[source][port]}" } } } } # 45-cleanup.conf ################################################################################ # Version: 21.02 # # Required: No - Optional # # Description: Removed unwanted logs based on the process.pid field and # # additional fields. 
Additionally, pf.tcp.options is split (multiple values) # ################################################################################ # # Update as needed to remove unwanted logs based on the process.pid field filter { # if [process][pid] in ["78", "46", "45", "43"] { # drop { } # } mutate { remove_field => ["pfelk_message"] remove_field => ["filter_message"] remove_field => ["type"] split => { "[pf][tcp][options]" => ";" } rename => { "message" => "[event][original]" } remove_field => [ "host" ] } } # 50-outputs.conf ################################################################################ # Version: 21.03 # # Required: Yes # # Description: Sends enriched logs to Elasticsearch. Remove "" to enable # # ILM # ################################################################################ # output { ################################################################################ ### firewall ### ################################################################################ if "firewall" in [tags] { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-firewall-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-firewall" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### captive portal ### ################################################################################ if "captive" in [tags] { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-captive-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-captive" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### 
dhcp ### ################################################################################ if "dhcp" in [tags] { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-dhcp-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-dhcp" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### unbound ### ################################################################################ if [process][name] == "unbound" { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-unbound-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-unbound" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### suricata ### ################################################################################ if [process][name] == "suricata" { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-suricata-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-suricata" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### snort ### ################################################################################ if [process][name] == "snort" { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-snort-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-snort" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false 
### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### squid ### ################################################################################ if [process][name] == "squid" { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-squid-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-squid" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### HAPROXY ### ################################################################################ if [process][name] == "haproxy" { elasticsearch { hosts => ["http://localhost:9200"] index => "pfelk-haproxy-%{+YYYY.MM}" ilm_enabled => true ilm_rollover_alias => "pfelk-haproxy" ilm_pattern => "000001" ilm_policy => "pfelk-ilm" ecs_compatibility => "v1" manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } ################################################################################ ### BEATS ### ################################################################################ if [type] == "beats" { elasticsearch { hosts => ["http://localhost:9200"] index => "%{[@metadata][beat]}-%{[@metadata][version]}" # ecs_compatibility => "v1" # manage_template => false ### X-Pack Username and Password ### # user => USERNAMEHERE # password => PASSWORDHERE } } } \n##################################### # Logstash Config File Details ######### #####################################\n \n##################################### # Listing Logstash Pipelines.yml # #####################################\n # pipelines.yml ################################################################################ # Version: 21.03 # # 
Required: Yes # # Description: This is a required file for a pfelk installation # # This file is where you define your pipelines. You can define multiple. # # For more information on multiple pipelines, see the documentation: # # https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html # # # ################################################################################ # - pipeline.id: pfelk path.config: "/etc/pfelk/conf.d/*.conf" # - pipeline.id: main # path.config: "/etc/logstash/conf.d/*.conf" \n##################################### # Listing Logstash Logstash.yml Log Path # #####################################\n path.logs: /var/log/logstash \n##################################### # Listing Kibana kibana.yml Log Path # #####################################\n \n##################################### # grok pattern # #####################################\n # pfelk.grok ################################################################################ # Version: 21.03a # # # # # # # ################################################################################ # # CAPTIVE PORTAL (Optional) CAPTIVEPORTAL (%{CP_PFSENSE}|%{CP_OPNSENSE}) CP_OPNSENSE %{WORD:[event][action]}\s%{GREEDYDATA:[client][user][name]}\s\(%{IP:[client][ip]}\)\s%{WORD:[observer][ingress][interface][alias]}\s%{INT:[observer][ingress][zone]} # ToDo - Clean-up pfSense GROK pattern below CP_PFSENSE (%{CAPTIVE1}|%{CAPTIVE2}) CAPTIVE1 %{WORD:[observer][ingress][interface][alias]}:\s%{DATA:[observer][ingress][zone]}\s\-\s%{WORD:[event][action]}\:\s%{GREEDYDATA:[client][user][name]},\s%{MAC:[client][mac]},\s%{IP:[client][ip]}(,\s%{GREEDYDATA:[event][reason]})? CAPTIVE2 %{WORD:[observer][ingress][interface][alias]}:\s%{DATA:[observer][ingress][zone]}\s\-\s%{GREEDYDATA:[event][action]}\:\s%{GREEDYDATA:[client][user][name]},\s%{MAC:[client][mac]},\s%{IP:[client][ip]}(,\s%{GREEDYDATA:[event][reason]})? 
# DHCPv4 (Optional) DHCPD DHCP(%{DHCPD_DISCOVER}|%{DHCPD_DUPLICATE}|%{DHCPD_OFFER_ACK}|%{DHCPD_REQUEST}|%{DHCPD_DECLINE}|%{DHCPD_RELEASE}|%{DHCPD_INFORM}|%{DHCPD_LEASE})(: %{GREEDYDATA:[dhcpv4][option][message]})? DHCPD_DISCOVER (?<[dhcp][operation]>DISCOVER) from %{MAC:[dhcpv4][client][mac]}( \(%{DATA:[dhcpv4][option][hostname]}\))? %{DHCPD_VIA} DHCPD_DECLINE (?<[dhcp][operation]>DECLINE) of %{IP:[dhcpv4][client][ip]} from %{MAC:[dhcpv4][client][mac]}( \(%{DATA:[dhcpv4][option][hostname]}\))? %{DHCPD_VIA} DHCPD_DUPLICATE uid %{WORD:[dhcp][operation]} %{IP:[dhcpv4][client][ip]} for client %{MAC:[dhcpv4][client][mac]} is %{WORD:[error][code]} on %{GREEDYDATA:[dhcpv4][client][address]} DHCPD_INFORM (?<[dhcp][operation]>INFORM) from %{IP:[dhcpv4][client][ip]}? %{DHCPD_VIA} DHCPD_LEASE (?<[dhcp][operation]>LEASE(QUERY|UNKNOWN|ACTIVE|UNASSIGNED)) (from|to) %{IP:[dhcpv4][client][ip]} for (IP %{IP:[dhcpv4][query][ip]}|client-id %{NOTSPACE:[dhcpv4][query][id]}|MAC address %{MAC:[dhcpv4][query][mac]})( \(%{NUMBER:[dhcpv4][query][associated]} associated IPs\))? DHCPD_OFFER_ACK (?<[dhcp][operation]>(OFFER|N?ACK)) on %{IP:[dhcpv4][client][ip]} to %{MAC:[dhcpv4][client][mac]}( \(%{DATA:[dhcpv4][option][hostname]}\))? %{DHCPD_VIA} DHCPD_RELEASE (?<[dhcp][operation]>RELEASE) of %{IP:[dhcpv4][client][ip]} from %{MAC:[dhcpv4][client][mac]}( \(%{DATA:[dhcpv4][option][hostname]}\))? %{DHCPD_VIA} \((?(not )?found)\) DHCPD_REQUEST (?<[dhcp][operation]>REQUEST) for %{IP:[dhcpv4][client][ip]}( \(%{DATA:[dhcpv4][server][ip]}\))? from %{MAC:[dhcpv4][client][mac]}( \(%{DATA:[dhcpv4][option][hostname]}\))? 
%{DHCPD_VIA} DHCPD_VIA via (%{IP:[dhcpv4][relay][ip]}|(?<[interface][name]>[^: ]+)) # DHCPv6 (Optional - In Development) DHCPDv6 %{GREEDYDATA:[dhcpv6][operation]} # HAPROXY HAPROXY %{IP:[client][ip]}:%{INT:[client][port]} \[%{HAPROXYDATE:[haproxy][timestamp]}\] %{NOTSPACE:[haproxy][frontend_name]} %{NOTSPACE:[haproxy][backend_name]}/%{NOTSPACE:[haproxy][server_name]} %{INT:[haproxy][time_request]}/%{INT:[haproxy][time_queue]}/%{INT:[haproxy][time_backend_connect]}/%{INT:[haproxy][time_backend_response]}/%{NOTSPACE:[host][uptime]} %{INT:[http][response][status_code]} %{NOTSPACE:[haproxy][bytes_read]} %{DATA:[haproxy][http][request][captured_cookie]} %{DATA:[haproxy][http][response][captured_cookie]} %{NOTSPACE:[haproxy][termination_state]} %{INT:[haproxy][connections][active]}/%{INT:[haproxy][connections][frontend]}/%{INT:[haproxy][connections][backend]}/%{INT:[haproxy][connections][server]}/%{NOTSPACE:[haproxy][connections][retries]} %{INT:[haproxy][server_queue]}/%{INT:[haproxy][backend_queue]} (\{%{HAPROXYCAPTUREDREQUESTHEADERS}\})?( )?(\{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?( )?"(|(%{WORD:[http][request][method]} (%{URIPROTO:[haproxy][mode]}://)?(?:%{USER:[user][name]}(?::[^@]*)?@)?(?:%{URIHOST:[http][request][referrer]})?(?:%{URIPATHPARAM:[http][mode]})?( HTTP/%{NUMBER:[http][version]})?))?"? # OPENVPN OPENVPN (%{OPENVPNIP}|%{OPENVPNUSER}|%{OPENVPNLOG}) OPENVPNIP %{IP:[vpn][source][ip]}\:%{INT:[vpn][source][port]}%{SPACE}\[%{DATA:[vpn][client]}\]%{SPACE}Peer%{SPACE}Connection%{SPACE}Initiated%{SPACE}with%{GREEDYDATA} OPENVPNUSER (%{WORD:[vpn][domain]}?\\)?(?<[vpn][user]>\b[+\w\.-]+\b)?/?%{IP:[vpn][source][ip]}:%{INT:[vpn][source][port]} peer info: IV_PLAT=%{WORD:[vpn][plat]} OPENVPNLOG %{GREEDYDATA:[vpn][log][message]} # PF PF_LOG_ENTRY %{PF_LOG_DATA}%{PF_IP_SPECIFIC_DATA}%{PF_IP_DATA}%{PF_PROTOCOL_DATA}? 
PF_LOG_DATA %{INT:[rule][ruleset]},%{INT:[rule][id]}?,,%{INT:[rule][uuid]},%{DATA:[interface][name]},(?<[event][reason]>\b[\w\-]+\b),%{WORD:[event][action]},%{WORD:[network][direction]}, PF_IP_SPECIFIC_DATA %{PF_IPv4_SPECIFIC_DATA}|%{PF_IPv6_SPECIFIC_DATA} PF_IPv4_SPECIFIC_DATA (?<[network][type]>(4)),%{BASE16NUM:[pf][ipv4][tos]},%{WORD:[pf][ipv4][ecn]}?,%{INT:[pf][ipv4][ttl]},%{INT:[pf][ipv4][packet][id]},%{INT:[pf][ipv4][offset]},%{WORD:[pf][ipv4][flags]},%{INT:[network][iana_number]},%{WORD:[network][transport]}, PF_IP_DATA %{INT:[pf][packet][length]},%{IP:[source][ip]},%{IP:[destination][ip]}, PF_PROTOCOL_DATA %{PF_TCP_DATA}|%{PF_UDP_DATA}|%{PF_ICMP_DATA}|%{PF_IGMP_DATA}|%{PF_IPv6_VAR}|%{PF_IPv6_ICMP} PF_IPv6_SPECIFIC_DATA (?<[network][type]>(6)),%{BASE16NUM:[pf][ipv6][class]},%{WORD:[pf][ipv6][flow_label]},%{WORD:[pf][ipv6][hop_limit]},%{DATA:[pf][protocol][type]},%{INT:[pf][protocol][id]}, PF_IPv6_VAR %{WORD:type},%{WORD:option},%{WORD:Flags},%{WORD:Flags} PF_IPv6_ICMP # PF PROTOCOL PF_TCP_DATA %{INT:[source][port]},%{INT:[destination][port]},%{INT:[pf][transport][data_length]},(?<[pf][tcp][flags]>(\w*)?),(?<[pf][tcp][sequence_number]>(\d*)?):?\d*,(?<[pf][tcp][ack_number]>(\d*)?),(?<[pf][tcp][window]>(\d*)?),(?<[pf][tcp][urg]>(\w*)?),%{GREEDYDATA:[pf][tcp][options]} PF_UDP_DATA %{INT:[source][port]},%{INT:[destination][port]},%{INT:[pf][transport][data_length]}$ PF_IGMP_DATA datalength=%{INT:[network][packets]} PF_ICMP_DATA %{PF_ICMP_TYPE}%{PF_ICMP_RESPONSE} PF_ICMP_TYPE (?<[pf][icmp][type]>(request|reply|unreachproto|unreachport|unreach|timeexceed|paramprob|redirect|maskreply|needfrag|tstamp|tstampreply)), PF_ICMP_RESPONSE %{PF_ICMP_ECHO_REQ_REPLY}|%{PF_ICMP_UNREACHPORT}|%{PF_ICMP_UNREACHPROTO}|%{PF_ICMP_UNREACHABLE}|%{PF_ICMP_NEED_FLAG}|%{PF_ICMP_TSTAMP}|%{PF_ICMP_TSTAMP_REPLY} PF_ICMP_ECHO_REQ_REPLY %{INT:[pf][icmp][echo][id]},%{INT:[pf][icmp][echo][sequence]} PF_ICMP_UNREACHPORT 
%{IP:[pf][icmp][unreachport][destination][ip]},%{WORD:[pf][icmp][unreachport][protocol]},%{INT:[pf][icmp][unreachport][port]} PF_ICMP_UNREACHPROTO %{IP:[pf][icmp][unreach][destination][ip]},%{WORD:[pf][icmp][unreach][network][transport]} PF_ICMP_UNREACHABLE %{GREEDYDATA:[pf][icmp][unreachable]} PF_ICMP_NEED_FLAG %{IP:[pf][icmp][need_flag][ip]},%{INT:[pf][icmp][need_flag][mtu]} PF_ICMP_TSTAMP %{INT:[pf][icmp][tstamp][id]},%{INT:[pf][icmp][tstamp][sequence]} PF_ICMP_TSTAMP_REPLY %{INT:[pf][icmp][tstamp][reply][id]},%{INT:[pf][icmp][tstamp][reply][sequence]},%{INT:[pf][icmp][tstamp][reply][otime]},%{INT:[pf][icmp][tstamp][reply][rtime]},%{INT:[pf][icmp][tstamp][reply][ttime]} PF_SPEC \+ # PF PF_CARP_DATA (%{WORD:[pf][carp][type]}),(%{INT:[pf][carp][ttl]}),(%{INT:[pf][carp][vhid]}),(%{INT:[pf][carp][version]}),(%{INT:[pf][carp][advbase]}),(%{INT:[pf][carp][advskew]}) PF_APP (%{DATA:[pf][app]}): PF_APP_DATA (%{PF_APP_LOGOUT}|%{PF_APP_LOGIN}|%{PF_APP_ERROR}|%{PF_APP_GEN}) PF_APP_LOGIN (%{DATA:[pf][app][action]}) for user \'(%{DATA:[pf][app][user]})\' from: (%{GREEDYDATA:[pf][remote][ip]}) PF_APP_LOGOUT User (%{DATA:[pf][app][action]}) for user \'(%{DATA:[pf][app][user]})\' from: (%{GREEDYDATA:[pf][remote][ip]}) PF_APP_ERROR webConfigurator (%{DATA:[pf][app][action]}) for \'(%{DATA:[pf][app][user]})\' from (%{GREEDYDATA:[pf][remote][ip]}) PF_APP_GEN (%{GREEDYDATA:[pf][app][action]}) # SURICATA SURICATA \[%{NUMBER:[suricata][rule][uuid]}:%{NUMBER:[suricata][rule][id]}:%{NUMBER:[suricata][rule][version]}\]%{SPACE}%{GREEDYDATA:[suricata][rule][description]}%{SPACE}\[Classification:%{SPACE}%{GREEDYDATA:[suricata][rule][category]}\]%{SPACE}\[Priority:%{SPACE}%{NUMBER:[suricata][priority]}\]%{SPACE}{%{WORD:[network][transport]}}%{SPACE}%{IP:[source][ip]}:%{NUMBER:[source][port]}%{SPACE}->%{SPACE}%{IP:[destination][ip]}:%{NUMBER:[destination][port]} # SNORT SNORT 
\[%{INT:[rule][uuid]}\:%{INT:[rule][reference]}\:%{INT:[rule][version]}\].%{GREEDYDATA:[vulnerability][description]}.\[Classification\: %{DATA:[vulnerability][classification]}\].\[Priority\: %{INT:[event][severity]}\].\{%{DATA:[network][transport]}\}.%{IP:[source][ip]}(\:%{INT:[source][port]})?.->.%{IP:[destination][ip]}(\:%{INT:[destination][port]})? # SQUID SQUID %{IPORHOST:[client][ip]} %{NOTSPACE:[labels][request_status]}/%{NUMBER:[http][response][body][status_code]} %{NUMBER:[http][response][bytes]} %{NOTSPACE:[http][request][method]} (%{URIPROTO:[url][scheme]}://)?(?<[url][domain]>\S+?)(:%{INT:[url][port]})?(/%{NOTSPACE:[url][path]})?\s+%{NOTSPACE:[http][request][referrer]}\s+%{NOTSPACE:[labels][hierarchy_status]}/%{NOTSPACE:[destination][address]}\s+%{NOTSPACE:[http][response][mime_type]} # UNBOUND UNBOUND %{INT:[process][pgid]}:%{INT:[process][thread][id]}] %{LOGLEVEL:[log][level]}: %{IP:[client][ip]} %{GREEDYDATA:[dns][question][name]}\. %{WORD:[dns][question][type]} %{WORD:[dns][question][class]} \n##################################### # Appending Logstash Logs ########### #####################################\n [2021-02-25T17:49:23,893][INFO ][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-ASN.mmdb"} [2021-02-25T17:49:23,943][INFO ][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"} [2021-02-25T17:49:24,062][INFO ][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"} [2021-02-25T17:49:24,067][INFO ][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-ASN.mmdb"} [2021-02-25T17:49:24,193][INFO 
][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"} [2021-02-25T17:49:24,210][INFO ][logstash.filters.geoip ][pfelk] Using geoip database {:path=>"/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-ASN.mmdb"} [2021-02-25T17:49:24,325][INFO ][logstash.javapipeline ][pfelk] Starting pipeline {:pipeline_id=>"pfelk", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50, "pipeline.max_inflight"=>500, "pipeline.sources"=>["/etc/pfelk/conf.d/01-inputs.conf", "/etc/pfelk/conf.d/02-types.conf", "/etc/pfelk/conf.d/03-filter.conf", "/etc/pfelk/conf.d/05-apps.conf", "/etc/pfelk/conf.d/20-interfaces.conf", "/etc/pfelk/conf.d/30-geoip.conf", "/etc/pfelk/conf.d/35-rules-desc.conf", "/etc/pfelk/conf.d/36-ports-desc.conf", "/etc/pfelk/conf.d/45-cleanup.conf", "/etc/pfelk/conf.d/50-outputs.conf"], :thread=>"#"} [2021-02-25T17:49:26,389][INFO ][logstash.javapipeline ][pfelk] Pipeline Java execution initialization time {"seconds"=>2.06} [2021-02-25T17:49:26,409][INFO ][logstash.inputs.beats ][pfelk] Starting input listener {:address=>"0.0.0.0:5044"} [2021-02-25T17:49:26,582][INFO ][logstash.javapipeline ][pfelk] Pipeline started {"pipeline.id"=>"pfelk"} [2021-02-25T17:49:26,582][INFO ][logstash.inputs.tcp ][pfelk][pfelk-suricata] Starting tcp input listener {:address=>"0.0.0.0:5040", :ssl_enable=>"false"} [2021-02-25T17:49:26,633][INFO ][org.logstash.beats.Server][pfelk][Beats] Starting server on port: 5044 [2021-02-25T17:49:26,682][INFO ][logstash.inputs.udp ][pfelk][pfelk-2] Starting UDP listener {:address=>"0.0.0.0:5141"} [2021-02-25T17:49:26,686][INFO ][logstash.inputs.udp ][pfelk][pfelk-haproxy] Starting UDP listener {:address=>"0.0.0.0:5190"} [2021-02-25T17:49:26,708][INFO ][logstash.inputs.udp ][pfelk][pfelk-1] Starting UDP listener {:address=>"0.0.0.0:5140"} 
[2021-02-25T17:49:26,736][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:pfelk], :non_running_pipelines=>[]} [2021-02-25T17:49:26,746][INFO ][logstash.inputs.udp ][pfelk][pfelk-2] UDP listener started {:address=>"0.0.0.0:5141", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} [2021-02-25T17:49:26,747][INFO ][logstash.inputs.udp ][pfelk][pfelk-1] UDP listener started {:address=>"0.0.0.0:5140", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} [2021-02-25T17:49:26,752][INFO ][logstash.inputs.udp ][pfelk][pfelk-haproxy] UDP listener started {:address=>"0.0.0.0:5190", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} [2021-02-25T17:49:26,848][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600} \n##################################### # ELK Services Check ################ #####################################\n \n###Elasticsearch.service:###\n ● elasticsearch.service - Elasticsearch Loaded: loaded (/lib/systemd/system/elasticsearch.service; enabled; vendor preset: enabled) Active: active (running) since Thu 2021-02-25 17:49:04 CET; 7h ago Docs: https://www.elastic.co Main PID: 505 (java) Tasks: 87 (limit: 4915) Memory: 4.6G CGroup: /system.slice/elasticsearch.service ├─505 /usr/share/elasticsearch/jdk/bin/java -Xshare:auto -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -XX:+ShowCodeDetailsInExceptionMessages -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dio.netty.allocator.numDirectArenas=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.locale.providers=SPI,COMPAT -XX:+UseG1GC -Djava.io.tmpdir=/tmp/elasticsearch-5918205715958425328 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/lib/elasticsearch -XX:ErrorFile=/var/log/elasticsearch/hs_err_pid%p.log 
-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/elasticsearch/gc.log:utctime,pid,tags:filecount=32,filesize=64m -Xms3987m -Xmx3987m -XX:MaxDirectMemorySize=2090860544 -XX:G1HeapRegionSize=4m -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=15 -Des.path.home=/usr/share/elasticsearch -Des.path.conf=/etc/elasticsearch -Des.distribution.flavor=default -Des.distribution.type=deb -Des.bundled_jdk=true -cp /usr/share/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -p /var/run/elasticsearch/elasticsearch.pid --quiet └─739 /usr/share/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/bin/controller Feb 25 17:48:55 Deb-Grafana-VM systemd[1]: Starting Elasticsearch... Feb 25 17:49:04 Deb-Grafana-VM systemd[1]: Started Elasticsearch. \n###Logstash.service:###\n ● logstash.service - logstash Loaded: loaded (/etc/systemd/system/logstash.service; enabled; vendor preset: enabled) Active: active (running) since Thu 2021-02-25 17:48:54 CET; 7h ago Main PID: 463 (java) Tasks: 74 (limit: 4915) Memory: 1.0G CGroup: /system.slice/logstash.service └─463 /usr/share/logstash/jdk/bin/java -Xms1g -Xmx1g -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djruby.compile.invokedynamic=true -Djruby.jit.threshold=0 -Djruby.regexp.interruptible=true -XX:+HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/urandom -Dlog4j2.isThreadContextMapInheritable=true -cp 
/usr/share/logstash/logstash-core/lib/jars/animal-sniffer-annotations-1.14.jar:/usr/share/logstash/logstash-core/lib/jars/checker-compat-qual-2.0.0.jar:/usr/share/logstash/logstash-core/lib/jars/commons-codec-1.14.jar:/usr/share/logstash/logstash-core/lib/jars/commons-compiler-3.1.0.jar:/usr/share/logstash/logstash-core/lib/jars/commons-logging-1.2.jar:/usr/share/logstash/logstash-core/lib/jars/error_prone_annotations-2.1.3.jar:/usr/share/logstash/logstash-core/lib/jars/google-java-format-1.1.jar:/usr/share/logstash/logstash-core/lib/jars/gradle-license-report-0.7.1.jar:/usr/share/logstash/logstash-core/lib/jars/guava-24.1.1-jre.jar:/usr/share/logstash/logstash-core/lib/jars/j2objc-annotations-1.1.jar:/usr/share/logstash/logstash-core/lib/jars/jackson-annotations-2.9.10.jar:/usr/share/logstash/logstash-core/lib/jars/jackson-core-2.9.10.jar:/usr/share/logstash/logstash-core/lib/jars/jackson-databind-2.9.10.8.jar:/usr/share/logstash/logstash-core/lib/jars/jackson-dataformat-cbor-2.9.10.jar:/usr/share/logstash/logstash-core/lib/jars/janino-3.1.0.jar:/usr/share/logstash/logstash-core/lib/jars/javassist-3.26.0-GA.jar:/usr/share/logstash/logstash-core/lib/jars/jruby-complete-9.2.13.0.jar:/usr/share/logstash/logstash-core/lib/jars/jsr305-1.3.9.jar:/usr/share/logstash/logstash-core/lib/jars/log4j-api-2.13.3.jar:/usr/share/logstash/logstash-core/lib/jars/log4j-core-2.13.3.jar:/usr/share/logstash/logstash-core/lib/jars/log4j-jcl-2.13.3.jar:/usr/share/logstash/logstash-core/lib/jars/log4j-slf4j-impl-2.13.3.jar:/usr/share/logstash/logstash-core/lib/jars/logstash-core.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.commands-3.6.0.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.contenttype-3.4.100.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.expressions-3.4.300.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.filesystem-1.3.100.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.jobs-3.5.100.jar:/usr/sh
are/logstash/logstash-core/lib/jars/org.eclipse.core.resources-3.7.100.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.core.runtime-3.7.0.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.equinox.app-1.3.100.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.equinox.common-3.6.0.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.equinox.preferences-3.4.1.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.equinox.registry-3.5.101.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.jdt.core-3.10.0.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.osgi-3.7.1.jar:/usr/share/logstash/logstash-core/lib/jars/org.eclipse.text-3.5.101.jar:/usr/share/logstash/logstash-core/lib/jars/reflections-0.9.11.jar:/usr/share/logstash/logstash-core/lib/jars/slf4j-api-1.7.25.jar org.logstash.Logstash --path.settings /etc/logstash Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,582][INFO ][logstash.inputs.tcp ][pfelk][pfelk-suricata] Starting tcp input listener {:address=>"0.0.0.0:5040", :ssl_enable=>"false"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,633][INFO ][org.logstash.beats.Server][pfelk][Beats] Starting server on port: 5044 Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,682][INFO ][logstash.inputs.udp ][pfelk][pfelk-2] Starting UDP listener {:address=>"0.0.0.0:5141"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,686][INFO ][logstash.inputs.udp ][pfelk][pfelk-haproxy] Starting UDP listener {:address=>"0.0.0.0:5190"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,708][INFO ][logstash.inputs.udp ][pfelk][pfelk-1] Starting UDP listener {:address=>"0.0.0.0:5140"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,736][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:pfelk], :non_running_pipelines=>[]} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,746][INFO 
][logstash.inputs.udp ][pfelk][pfelk-2] UDP listener started {:address=>"0.0.0.0:5141", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,747][INFO ][logstash.inputs.udp ][pfelk][pfelk-1] UDP listener started {:address=>"0.0.0.0:5140", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,752][INFO ][logstash.inputs.udp ][pfelk][pfelk-haproxy] UDP listener started {:address=>"0.0.0.0:5190", :receive_buffer_bytes=>"106496", :queue_size=>"2000"} Feb 25 17:49:26 Deb-Grafana-VM logstash[463]: [2021-02-25T17:49:26,848][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600} \n###Kibana.service:###\n ● kibana.service - Kibana Loaded: loaded (/etc/systemd/system/kibana.service; enabled; vendor preset: enabled) Active: active (running) since Thu 2021-02-25 17:48:55 CET; 7h ago Docs: https://www.elastic.co Main PID: 504 (node) Tasks: 11 (limit: 4915) Memory: 509.3M CGroup: /system.slice/kibana.service └─504 /usr/share/kibana/bin/../node/bin/node /usr/share/kibana/bin/../src/cli/dist --logging.dest=/var/log/kibana/kibana.log --pid.file=/run/kibana/kibana.pid Feb 25 17:48:55 Deb-Grafana-VM systemd[1]: Started Kibana.