#!/usr/bin/perl -w
# -*- perl -*-

=head1 NAME

postfix_mailvolume - Plugin to monitor the volume of mails delivered by postfix.

=head1 APPLICABLE SYSTEMS

Any postfix.

=head1 CONFIGURATION

The following shows the default configuration.

  [postfix*]
  env.logdir /var/log
  env.logfile syslog

Note that this variant of the plugin reads the mail log from the systemd
journal via C<journalctl --facility=mail>, so the C<logdir> and C<logfile>
settings are not actually used.

=head1 INTERPRETATION

The plugin shows the number of bytes of mail that has been delivered by the
postfix installation.

=head1 MAGIC MARKERS

  #%# family=auto
  #%# capabilities=autoconf

=head1 BUGS

None known

=head1 VERSION

v1.1 2018-03-24

  * calculate extra field for mail volume that is actually delivered ("volume_delivered")

=head1 AUTHOR

Copyright (C) 2002-2008. No author is documented.

=head1 LICENSE

GPLv2

=cut

use strict;
use warnings;
use Munin::Plugin;

my $pos = undef;
# the volume that was actually delivered
my $volume_delivered = 0;
my %volumes_per_queue_id = ();
my $serialized_volumes_queue;
# Discard old queue IDs after a while (otherwise the state storage grows infinitely). We need to
# store the IDs long enough for the gap between two delivery attempts. Thus multiple hours are
# recommended.
use constant queue_id_expiry => 6 * 3600;


sub parseLogfile {
    my ($start) = @_;

    open(my $LOGFILE, '-|',
         "journalctl --facility=mail --after-cursor='$start' --show-cursor")
        or die "Could not run journalctl: $!\n";

    while (my $line = <$LOGFILE>) {
        chomp $line;

        if ($line =~ /qmgr.*: ([0-9A-Za-z]+): from=.*, size=([0-9]+)/) {
            # The line with queue ID and size may pass along multiple times (every time the mail
            # is moved into the active queue for another delivery attempt). The size should always
            # be the same.
            if (not exists($volumes_per_queue_id{$1})) {
                $volumes_per_queue_id{$1} = { timestamp => time };
            }
            # probably it is the same value as before
            $volumes_per_queue_id{$1}->{size} = $2;
        } elsif ($line =~ / ([0-9A-Za-z]+): to=.*, status=sent /) {
            # The "sent" line is repeated for every successful delivery for each recipient.
            if (exists($volumes_per_queue_id{$1})) {
                $volume_delivered += $volumes_per_queue_id{$1}->{size};
                $volumes_per_queue_id{$1}->{timestamp} = time;
            }
        } elsif ($line =~ /^-- cursor: (.+)$/) {
            # journalctl ends its output with the cursor of the last entry shown
            $start = $1;
        }
    }
    close($LOGFILE);

    # remove all expired queue IDs
    my @expired_queue_ids;
    for my $key (keys %volumes_per_queue_id) {
        if (time > $volumes_per_queue_id{$key}->{timestamp} + queue_id_expiry) {
            push @expired_queue_ids, $key;
        }
    }
    delete(@volumes_per_queue_id{@expired_queue_ids});

    return $start;
}


if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) {
    `which postconf >/dev/null 2>/dev/null`;
    if (!$?) {
        # postconf returned success, so postfix is installed
        `journalctl --facility=mail --show-cursor -n 0 2>/dev/null`;
        if (!$?) {
            # journalctl returned success
            print "yes\n";
        } else {
            print "no (journalctl returned error)\n";
        }
    } else {
        print "no (postfix not found)\n";
    }
    exit 0;
}

if ( $ARGV[0] and $ARGV[0] eq "config" ) {
    print "graph_title Postfix bytes throughput\n";
    print "graph_args --base 1000 -l 0\n";
    print "graph_scale yes\n";
    print "graph_category postfix\n";
    print "graph_vlabel bytes / \${graph_period}\n";
    print "graph_period minute\n";
    print "volume.label delivered volume\n";
    print "volume.type DERIVE\n";
    print "volume.min 0\n";
    exit 0;
}

# load the stored data
($pos, $volume_delivered, $serialized_volumes_queue) = restore_state();

if (!defined($volume_delivered) || !defined($pos) || !$pos) {
    # The values may also be defined but empty if the old (logfile based) plugin was run before.
    # No state file present. Avoid a startup spike: do not parse the journal up to now, but
    # remember the current end of the journal and start reading from there next time.
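    # Note on the cursor handling below: "journalctl --show-cursor -n 0" prints no log
    # entries, but its last output line has the form "-- cursor: <opaque string>" and
    # points at the current end of the journal. We strip the "-- cursor: " prefix and
    # store only the opaque cursor, so the next run can resume from exactly this
    # position via --after-cursor.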
    my $cursor = `journalctl --facility=mail --show-cursor -n 0 | tail -n 1`;
    chomp $cursor;
    $pos = ($cursor =~ s/^-- cursor: //r);
    $volume_delivered = 0;
    %volumes_per_queue_id = ();
} else {
    # decode the serialized hash
    # source format: "$id1=$size1:$timestamp1 $id2=$size2:$timestamp2 ..."
    # The "serialized" value may be undefined, in case we just upgraded from the version before
    # 2018, since that old version stored only two fields in the state file. Tolerate this.
    for my $queue_item_descriptor (split(/ /, $serialized_volumes_queue || "")) {
        my ($queue_item_id, $queue_item_content) = split(/=/, $queue_item_descriptor);
        my ($size, $timestamp) = split(/:/, $queue_item_content);
        $volumes_per_queue_id{$queue_item_id} = { size => int($size), timestamp => int($timestamp) };
    }
    $pos = parseLogfile($pos);
}

print "volume.value $volume_delivered\n";

# serialize the hash to a string (see "source format" above)
$serialized_volumes_queue = join(" ", map {
    sprintf("%s=%d:%d", $_,
            $volumes_per_queue_id{$_}->{size},
            $volumes_per_queue_id{$_}->{timestamp})
} keys %volumes_per_queue_id);

save_state($pos, $volume_delivered, $serialized_volumes_queue);

# vim:syntax=perl
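# Quick manual test, assuming the plugin has been installed for munin-node under
# the (illustrative) name "postfix_mailvolume":
#   munin-run postfix_mailvolume config
#   munin-run postfix_mailvolume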