]> jfr.im git - munin-plugins.git/blame - postfix_mailvolume
add mailstats - stock version
[munin-plugins.git] / postfix_mailvolume
CommitLineData
3decc5d1
JR
1#!/usr/bin/perl -w
2# -*- perl -*-
3
4=head1 NAME
5
6postfix_mailvolume - Plugin to monitor the volume of mails delivered
7 by postfix.
8
9=head1 APPLICABLE SYSTEMS
10
11Any postfix.
12
13=head1 CONFIGURATION
14
15The following shows the default configuration.
16
17 [postfix*]
18 env.logdir /var/log
19 env.logfile syslog
20
21=head1 INTERPRETATION
22
The plugin shows the number of bytes of mail that has been successfully
delivered by the postfix installation.
25
26=head1 MAGIC MARKERS
27
28 #%# family=auto
29 #%# capabilities=autoconf
30
31=head1 BUGS
32
33None known
34
35=head1 VERSION
36
37v1.1 2018-03-24
38* calculate extra field for mail volume that is actually delivered ("volume_delivered")
39
40=head1 AUTHOR
41
42Copyright (C) 2002-2008.
43
44No author is documented.
45
46=head1 LICENSE
47
48GPLv2
49
50=cut
51
52use strict;
53use warnings;
54use Munin::Plugin;
55
# Byte offset into the log file where the previous run stopped reading.
my $pos = undef;
# the volume that was actually delivered (running total, persisted as DERIVE source)
my $volume_delivered = 0;
# Cache of pending mails: queue ID => { size => bytes, timestamp => last activity }.
# Needed because postfix logs the size ("from=") and the delivery ("to=") on
# separate lines, possibly in different plugin invocations.
my %volumes_per_queue_id = ();
# String form of %volumes_per_queue_id as stored in the munin state file.
my $serialized_volumes_queue;
# Discard old queue IDs after a while (otherwise the state storage grows infinitely). We need to
# store the IDs long enough for the gap between two delivery attempts. Thus multiple hours are
# recommended.
use constant queue_id_expiry => 6 * 3600;

my $LOGDIR = $ENV{'logdir'} || '/var/log';
my $LOGFILE = $ENV{'logfile'} || 'syslog';
69
70
sub parseLogfile {
    # Read the log lines appended since the previous offset and update the
    # global delivery statistics.  Returns the offset to resume from next time.
    my ($fname, $start) = @_;

    my ($fh, $rotated) = tail_open($fname, $start || 0);

    while (defined(my $line = <$fh>)) {
        chomp $line;

        if ($line =~ /qmgr.*: ([0-9A-Za-z]+): from=.*, size=([0-9]+)/) {
            my ($queue_id, $size) = ($1, $2);
            # The queue manager logs this line again on every delivery
            # attempt; the size is expected to stay constant, so simply
            # (re)store it while keeping the original timestamp.
            $volumes_per_queue_id{$queue_id} = {timestamp => time}
                unless exists $volumes_per_queue_id{$queue_id};
            $volumes_per_queue_id{$queue_id}{size} = $size;
        } elsif ($line =~ / ([0-9A-Za-z]+): to=.*, status=sent /) {
            # One "sent" line is logged per recipient: count the full message
            # size for each successful delivery and refresh the entry's age.
            my $entry = $volumes_per_queue_id{$1};
            if ($entry) {
                $volume_delivered += $entry->{size};
                $entry->{timestamp} = time;
            }
        }
    }

    # Forget queue IDs without recent activity, otherwise the state
    # storage would grow without bound.
    my @stale = grep { time > $volumes_per_queue_id{$_}{timestamp} + queue_id_expiry }
                keys %volumes_per_queue_id;
    delete @volumes_per_queue_id{@stale};

    return tail_close($fh);
}
106
if ( $ARGV[0] and $ARGV[0] eq "autoconf" ) {
    # "autoconf": report whether this plugin can run here, i.e. whether
    # postfix is installed and its log file is readable.
    `which postconf >/dev/null 2>/dev/null`;
    if ($?) {
        # postconf not on PATH => no postfix installation to monitor.
        print "no (postfix not found)\n";
    } else {
        my $logfile = "$LOGDIR/$LOGFILE";
        if (! -f $logfile) {
            print "no (logfile '$logfile' not found)\n";
        } elsif (! -r $logfile) {
            print "no (logfile '$logfile' not readable)\n";
        } else {
            print "yes\n";
        }
    }

    exit 0;
}
129
130
if ( $ARGV[0] and $ARGV[0] eq "config" ) {
    # "config": emit the graph definition and the single data series.
    # "volume" is a DERIVE source, so munin turns the ever-growing byte
    # counter into a bytes-per-${graph_period} rate.
    my @directives = (
        "graph_title Postfix bytes throughput",
        "graph_args --base 1000 -l 0",
        "graph_vlabel bytes / \${graph_period}",
        "graph_scale yes",
        "graph_category postfix",
        "volume.label delivered volume",
        "volume.type DERIVE",
        "volume.min 0",
    );
    print map { "$_\n" } @directives;
    exit 0;
}
142
143
# Normal fetch run: parse the new portion of the log and print the value.
my $logfile = "$LOGDIR/$LOGFILE";

if (! -f $logfile) {
    # Without a log file there is nothing to measure; report "unknown".
    print "volume.value U\n";
    exit 0;
}

# load the stored data: log offset, accumulated volume, serialized queue cache
($pos, $volume_delivered, $serialized_volumes_queue) = restore_state();


if (!defined($volume_delivered)) {

    # No state file present. Avoid startup spike: Do not read log
    # file up to now, but remember how large it is now, and next
    # time read from there.

    $pos = (stat $logfile)[7]; # File size

    $volume_delivered = 0;
    %volumes_per_queue_id = ();
} else {
    # decode the serialized hash
    # source format: "$id1=$size1:$timestamp1 $id2=$size2:$timestamp2 ..."
    # The "serialized" value may be undefined, in case we just upgraded from the version before
    # 2018, since that old version stored only two fields in the state file. Tolerate this.
    # Additionally skip truncated items (e.g. "ID" or "ID=12"): feeding them to
    # int() would emit undef warnings and store garbage entries.
    for my $queue_item_descriptor (split(/ /, $serialized_volumes_queue || "")) {
        (my $queue_item_id, my $queue_item_content) = split(/=/, $queue_item_descriptor);
        next unless defined($queue_item_id) && defined($queue_item_content);
        (my $size, my $timestamp) = split(/:/, $queue_item_content);
        next unless defined($size) && defined($timestamp);
        $volumes_per_queue_id{$queue_item_id} = { size => int($size), timestamp => int($timestamp) };
    }
    $pos = parseLogfile ($logfile, $pos);
}

print "volume.value $volume_delivered\n";

# serialize the hash to a string (see "source format" above)
$serialized_volumes_queue = join(" ",
    map { sprintf("%s=%d:%d", $_, $volumes_per_queue_id{$_}->{size}, $volumes_per_queue_id{$_}->{timestamp}) }
    keys %volumes_per_queue_id);
save_state($pos, $volume_delivered, $serialized_volumes_queue);
183
184# vim:syntax=perl