Monitoring pfsense with Logstash / Elasticsearch / Kibana

Edit: This post is pretty old and Elasticsearch/Logstash/Kibana have evolved a lot since it was written.

I have been on a logging kick (or obsession) lately. See the previous series of posts.

I’ll start with a picture. This is seriously cool. If you’re running pfsense, you want this.
pfsense-kibana

BACKGROUND
My home network is pretty boring. Network is 192.168.1.0/24. Router is 192.168.1.254. Logstash is installed on 192.168.1.126.

I’m running pfsense version 2.1 and keeping current on updates.
pfsense-version

So in my pfsense admin gui, in Status -> System Logs, in the Settings tab, check the box for “Send log messages to remote syslog server”. In Server 1, I point it to my logstash server on port 514, in the form IP:Port.
pfsense-status_system_logs_settings

I’m forwarding everything because even if I don’t parse everything useful right away, it’s still easy to search.

I’m not using a distributed setup for this. I’m just taking the pfsense syslog input and parsing a few things and passing to elasticsearch on the same computer.

I’m sure you will want to add more to this configuration. Check out Grokdebug. I probably wouldn’t have been able to do this without Grokdebug.
http://grokdebug.herokuapp.com

And of course the logstash documentation!
http://logstash.net/docs/1.3.3/

You will want to change if [host] =~ /192\.168\.1\.254/ to reflect the IP address of your pfsense box (the dots are escaped so they match literally).

logstash.conf
[text]
# Listen for pfsense remote syslog on both transports; pfsense sends
# syslog over UDP by default, TCP is accepted as well.
input {
# NOTE(review): binding port 514 (below 1024) requires logstash to run
# with root privileges — confirm, or forward from a local syslog daemon.
tcp {
type => syslog
port => 514
}
udp {
type => syslog
port => 514
}

}

# Parse pfsense syslog events: split the raw syslog line, then extract
# structured fields from pf (packet filter) and dhcpd messages.
# Regex escapes below were restored — the blog markup had stripped every
# backslash (\s, \d, \., \n, \( ), which broke all of the patterns.
filter {
  # Only events from the pfsense box are firewall logs. Change this IP
  # to match your pfsense router; dots are escaped to match literally.
  if [host] =~ /192\.168\.1\.254/ {
    grok {
      add_tag => [ "firewall" ]
      # Split the raw line into event id, timestamp, program and message.
      match => [ "message", "<(?<evtid>.*)>(?<datetime>(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:[0-5][0-9])) (?<prog>.*?): (?<msg>.*)" ]
    }
    mutate {
      # Collapse the double space syslog puts before single-digit days
      # ("Jan  5") so the date filter's "MMM dd" pattern matches.
      gsub => ["datetime","  "," "]
    }
    date {
      # Use the syslog timestamp as the event's @timestamp.
      match => [ "datetime", "MMM dd HH:mm:ss" ]
    }
    mutate {
      # Keep only the message payload, dropping the syslog envelope.
      replace => [ "message", "%{msg}" ]
    }
    mutate {
      remove_field => [ "msg", "datetime" ]
    }
  }
  # pf log lines: extract rule, action, interface, protocol and the
  # source/destination IP:port pairs.
  if [prog] =~ /^pf$/ {
    mutate {
      add_tag => [ "packetfilter" ]
    }
    # pf entries continue on indented lines; join continuations with the
    # previous line so the grok below sees a single event.
    multiline {
      pattern => "^\s+|^\t\s+"
      what => "previous"
    }
    mutate {
      remove_field => [ "msg", "datetime" ]
      remove_tag => [ "multiline" ]
    }
    grok {
      match => [ "message", "rule (?<rule>.*)\(.*\): (?<action>pass|block) .* on (?<iface>.*): .* proto (?<proto>TCP|UDP|IGMP|ICMP) .*\n\s*(?<src_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<src_port>(\d*)) [<|>] (?<dest_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<dest_port>(\d*)):" ]
    }
  }
  # dhcpd log lines: extract the action, lease IP, client MAC and interface.
  if [prog] =~ /^dhcpd$/ {
    if [message] =~ /^DHCPACK|^DHCPREQUEST|^DHCPOFFER/ {
      grok {
        match => [ "message", "(?<action>.*) (on|for|to) (?<src_ip>[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]) .*(?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
      }
    }
    if [message] =~ /^DHCPDISCOVER/ {
      grok {
        match => [ "message", "(?<action>.*) from (?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
      }
    }
    if [message] =~ /^DHCPINFORM/ {
      grok {
        match => [ "message", "(?<action>.*) from (?<src_ip>.*).* via (?<iface>.*)" ]
      }
    }
  }
}

# Ship parsed events to the local Elasticsearch instance.
output {

elasticsearch {
host => "127.0.0.1"
# Must match cluster.name in elasticsearch.yml, or discovery fails.
cluster => "logcatcher"
}
}
[/text]

Edit: Per request, dashboard template.
Note: Had to add a .txt extension to get wordpress to accept the upload.
FirewallActivity-Dashboard

8 thoughts on “Monitoring pfsense with Logstash / Elasticsearch / Kibana

  1. Cool post!

    Could you also attach “Firewall Activity” dashboard configuration from the screenshot? It looks awesome.

    Have a nice day!

    Like

  2. Thanks for the useful config.

    I added a bit of code for the snort package log parsing in pfSense. This will follow the format you have outlined. As long as the option to send the snort logs to the system log in snort is enabled, this will work. One thing to note is that the prog values will actually be prog[pid] for some of the packages (cron and dhcp also) that are more aligned with syslog, and I use a grok filter to separate them. There is no need to turn on barnyard2 and ship the logs to logstash from there because the same information is shipped in both cases.

    # Parse snort alerts forwarded to the system log. Smart quotes from the
    # blog markup were replaced with straight quotes, without which this
    # config does not parse.
    if [prog] =~ /^snort/ {
    mutate {
    add_tag => [ "snort" ]
    }
    # Alert format: [gid:sid:rev] description [Classification: ...]
    # [Priority: n] {proto} src:port -> dst:port
    grok {
    match => [ "message", "\[%{NONNEGINT:generatorID}:%{NONNEGINT:signatureID}:%{NONNEGINT:signatureID_rev}\] %{GREEDYDATA:description} \[Classification: %{GREEDYDATA:class}\] \[Priority: %{POSINT:priority}\] {%{WORD:proto}} %{IP:src_ip}:%{WORD:src_port} -> %{IP:dest_ip}:%{WORD:dest_port}" ]
    }
    # Several programs (snort, cron, dhcpd) log as prog[pid]; split the
    # pid into its own field and keep the bare program name.
    grok {
    match => [ "prog", "%{PROG:prog}\[%{POSINT:pid}\]" ]
    overwrite => [ "prog" ]
    }

    }

    Like

  3. I fixed the Parse for IPV6 Addresses as well. So far it looks good, and works with the dashboard

    # pf grok that also handles IPv6 via %{IP}. The blog markup stripped
    # the grok capture names ("(?<name>" became "(?"); restored here from
    # the field names used elsewhere in this post — verify against your data.
    grok {
    match => [ "message", "rule (?<rule>.*)\(.*\): (?<action>pass|block) .* on (?<iface>\S+) .* %{IP:src_ip}\.(?<src_port>(?:[+-]?(?:[0-9]+))) > %{IP:dest_ip}\.(?<dest_port>(?:[+-]?(?:[0-9]+))).* (?<proto>TCP|UDP|IGMP|ICMP|igmp|icmp)" ]
    }

    Like


  4. input {
    tcp {
    type => syslog
    port => 514
    }
    udp {
    type => syslog
    port => 514
    }

    }

    # Updated filter from comment #4 (pfsense at 192.168.1.1, IPv4+IPv6 pf
    # parsing). The blog markup stripped every grok capture name
    # ("(?<name>" became "(?") and ate the "[<|>]" literal; both restored
    # here from the original config earlier in the post.
    filter {
    if [host] =~ /192\.168\.1\.1/ {
    grok {
    add_tag => [ "firewall" ]
    # Split the raw syslog line into event id, timestamp, program, message.
    match => [ "message", "<(?<evtid>.*)>(?<datetime>(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\s+(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:[0-5][0-9])) (?<prog>.*?): (?<msg>.*)" ]
    }
    mutate {
    # Collapse syslog's double space before single-digit days ("Jan  5").
    gsub => ["datetime","  "," "]
    }
    date {
    match => [ "datetime", "MMM dd HH:mm:ss" ]
    }
    mutate {
    replace => [ "message", "%{msg}" ]
    }
    mutate {
    remove_field => [ "msg", "datetime" ]
    }
    }
    if [prog] =~ /^pf$/ {
    mutate {
    add_tag => [ "packetfilter" ]
    }
    multiline {
    pattern => "^\s+|^\t\s+"
    what => "previous"
    }
    mutate {
    remove_field => [ "msg", "datetime" ]
    remove_tag => [ "multiline" ]
    }
    grok {
    # First pattern: IPv4/IPv6 via %{IP}. Second: original IPv4-only form.
    match => [
    "message", "rule (?<rule>.*)\(.*\): (?<action>pass|block) .* on (?<iface>\S+) .* (?<proto>TCP|UDP|IGMP|ICMP|igmp|icmp) .* %{IP:src_ip}\.(?<src_port>(?:[+-]?(?:[0-9]+))) > %{IP:dest_ip}\.(?<dest_port>(?:[+-]?(?:[0-9]+))).* ",
    "message", "rule (?<rule>.*)\(.*\): (?<action>pass|block) .* on (?<iface>.*): .* proto (?<proto>TCP|UDP|IGMP|ICMP) .*\n\s*(?<src_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<src_port>(\d*)) [<|>] (?<dest_ip>(\d+\.\d+\.\d+\.\d+))\.?(?<dest_port>(\d*)):"
    ]
    }
    }
    if [prog] =~ /^dhcpd$/ {
    if [message] =~ /^DHCPACK|^DHCPREQUEST|^DHCPOFFER/ {
    grok {
    match => [ "message", "(?<action>.*) (on|for|to) (?<src_ip>[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]\.[0-2]?[0-9]?[0-9]) .*(?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
    }
    }
    if [message] =~ /^DHCPDISCOVER/ {
    grok {
    match => [ "message", "(?<action>.*) from (?<mac_address>[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]).* via (?<iface>.*)" ]
    }
    }
    if [message] =~ /^DHCPINFORM/ {
    grok {
    match => [ "message", "(?<action>.*) from (?<src_ip>.*).* via (?<iface>.*)" ]
    }
    }
    }
    }

    output {

    elasticsearch {
    host => "127.0.0.1"
    # cluster => "logcatcher"
    }
    }
    </code>

    Like

  5. Hi,
    By any chance, once the VM gets rebooted and when I try to access the URL, I’m getting “upgrade required: your version of Elasticsearch is too old. Kibana requires Elasticsearch 0.90.9 or above.”

    and another error:

    error could not reach http://192.168.3.199:80/_nodes. If you are using a proxy, ensure it is configured correctly.


    Anyone getting this error?

    Thank you

    Like

Leave a Reply

Fill in your details below or click an icon to log in:

WordPress.com Logo

You are commenting using your WordPress.com account. Log Out /  Change )

Google photo

You are commenting using your Google account. Log Out /  Change )

Twitter picture

You are commenting using your Twitter account. Log Out /  Change )

Facebook photo

You are commenting using your Facebook account. Log Out /  Change )

Connecting to %s

%d bloggers like this: