# Example configuration file for Munin, generated by 'make build'
# The next four variables specify the location of the RRD
# databases, the HTML output, the logs and the lock/pid files. They
# must all be writable by the user running munin-cron. They all
# default to the values you see here.
#
#dbdir /var/lib/munin
#htmldir /var/cache/munin/www
#logdir /var/log/munin
#rundir /var/run/munin
# Where to look for the HTML templates
#
#tmpldir /etc/munin/templates
# Where to look for the static www files
#
#staticdir /etc/munin/static
# Temporary CGI files are stored here. Note that this directory has to be
# writable by the CGI user (usually nobody or httpd).
#
# cgitmpdir /var/lib/munin/cgi-tmp
# (Exactly one) directory from which to include all files.
includedir /etc/munin/munin-conf.d
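#
# For example, a drop-in file such as /etc/munin/munin-conf.d/example.conf
# (hypothetical name, with a made-up host using an RFC 5737 documentation
# address) could add another node:
#
# [example-node.debian.net]
#     address 192.0.2.10
#     use_node_name yes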
# You can choose the time reference for "DERIVE"-like graphs, and show
# "per minute" or "per hour" values instead of the default "per second".
#
#graph_period second
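#
# For example, to show per-minute rates instead (a sketch only, not part
# of this setup; enable it only if that suits your plugins):
#
#graph_period minute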
# Graphics files are generated either via cron or by a CGI process.
# See http://munin-monitoring.org/wiki/CgiHowto2 for more
# documentation.
# Since 2.0, munin-graph has been rewritten to use the cgi code.
# It is single threaded *by design* now.
#
#graph_strategy cron
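#
# For example, to switch to on-demand CGI graphing (a sketch only; the web
# server must also be set up as described in the CgiHowto2 page above):
#
#graph_strategy cgi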
# munin-cgi-graph may be invoked by the web server many times
# simultaneously. This is not optimal, since it results in high CPU and
# memory consumption, to the degree that the system can thrash. The
# default is 6. Most likely the optimal value for munin_cgi_graph_jobs is
# the same as max_graph_jobs.
#
#munin_cgi_graph_jobs 6
# If the automatic CGI URL is wrong for your system, override it here:
#
#cgiurl_graph /munin-cgi/munin-cgi-graph
# max_size_x and max_size_y are the maximum image sizes in pixels.
# The default is 4000. Do not make them too large, otherwise RRD might use
# all available RAM to generate the images.
#
#max_size_x 4000
#max_size_y 4000
# HTML files are normally generated by munin-html, whether or not the
# files are used. You can change this to on-demand generation
# by following the instructions in http://munin-monitoring.org/wiki/CgiHowto2
#
# Notes:
# - moving to CGI for HTML means you cannot have graphs generated by cron.
# - CGI HTML has some bugs; mostly you still have to launch munin-html by hand.
#
#html_strategy cron
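#
# For example, to generate HTML on demand instead (a sketch only; mind the
# caveats listed above):
#
#html_strategy cgi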
# munin-update runs in parallel.
#
# The default maximum number of processes is 16, which is probably fine for you.
#
# If set too high, it might hit process, RAM, or file-descriptor limits.
# If set too low, munin-update might take more than 5 minutes.
#
# If you do not want munin-update to run in parallel, set it to 0.
#
#max_processes 16
# RRD updates are, by default, performed directly on the RRD files.
# To reduce I/O and enable the use of rrdcached, uncomment this and set it
# to the location of the socket that rrdcached uses.
#
#rrdcached_socket /var/run/rrdcached.sock
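#
# A minimal sketch of starting the daemon to match the setting above
# (assuming rrdcached is installed; adjust paths to your system):
#
# rrdcached -l unix:/var/run/rrdcached.sock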
# Drop somejuser@fnord.comm and anotheruser@blibb.comm an email every time
# something changes (OK -> WARNING, CRITICAL -> OK, etc.)
#contact.someuser.command mail -s "Munin notification" somejuser@fnord.comm
#contact.anotheruser.command mail -s "Munin notification" anotheruser@blibb.comm
#
# For those with Nagios, the following might come in handy. Note that
# the services must be defined on the Nagios server as well.
#contact.nagios.command /usr/bin/send_nsca nagios.host.comm -c /etc/nsca.conf
contacts me
contact.me.command mail -s "Munin notification ${var:host}" root
# A simple host tree
[jenkins.debian.net]
address 127.0.0.1
use_node_name yes
cpu.graph_args --base 1000 -r --lower-limit 0 # also graph values above the current limit (e.g. from when the machine had more cores)
df._srv_workspace.warning 75
df._srv_workspace.critical 85
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build1-amd64.debian.net]
address 78.137.99.97
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build2-amd64.debian.net]
address 78.137.96.192
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build3-amd64.debian.net]
address 78.137.96.53
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build4-amd64.debian.net]
address 46.16.78.222
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build5-amd64.debian.net]
address 185.48.117.167
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[profitbricks-build6-amd64.debian.net]
address 85.184.251.97
use_node_name yes
df._srv_workspace.warning 90
df._srv_workspace.critical 95
diskstats_latency.vda.avgwrwait.warning 0:20
diskstats_latency.vdb.avgwrwait.warning 0:20
diskstats_latency.vda.avgrdwait.warning 0:10
diskstats_latency.vdb.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
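# The armhf nodes below are reached through Munin's ssh:// transport: the
# master opens an SSH connection as the given user on the given port and
# runs the command after the host part (here /bin/nc localhost 4949) to
# reach the munin node listening on the remote loopback interface.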
[bpi0-armhf-rb.debian.net]
address ssh://jenkins@bpi0-armhf-rb.debian.net:2222/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[hb0-armhf-rb.debian.net]
address ssh://jenkins@hb0-armhf-rb.debian.net:2224/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[wbq0-armhf-rb.debian.net]
address ssh://jenkins@wbq0-armhf-rb.debian.net:2225/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[cbxi4pro0-armhf-rb.debian.net]
address ssh://jenkins@cbxi4pro0-armhf-rb.debian.net:2226/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[odxu4-armhf-rb.debian.net]
address ssh://jenkins@odxu4-armhf-rb.debian.net:2229/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[wbd0-armhf-rb.debian.net]
address ssh://jenkins@wbd0-armhf-rb.debian.net:2223/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[rpi2b-armhf-rb.debian.net]
address ssh://jenkins@rpi2b-armhf-rb.debian.net:2230/bin/nc localhost 4949
use_node_name yes
diskstats_latency.sda.avgwrwait.warning 0:20
diskstats_latency.sda.avgrdwait.warning 0:10
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
[freebsd-jenkins.debian.net]
address 46.16.73.236
use_node_name yes
diskstats_latency.graph no
diskstats_throughput.graph no
diskstats_utilization.graph no
diskstats_iops.graph no
#
# A more complex example of a host tree
#
## First our "normal" host.
# [fii.foo.com]
# address foo
#
## Then our other host...
# [fay.foo.com]
# address fay
#
## Then we want totals...
# [foo.com;Totals] #Force it into the "foo.com"-domain...
# update no # Turn off data-fetching for this "host".
#
# # The graph "load1". We want to see the loads of both machines...
# # "fii=fii.foo.com:load.load" means "label=machine:graph.field"
# load1.graph_title Loads side by side
# load1.graph_order fii=fii.foo.com:load.load fay=fay.foo.com:load.load
#
# # The graph "load2". Now we want them stacked on top of each other.
# load2.graph_title Loads on top of each other
# load2.dummy_field.stack fii=fii.foo.com:load.load fay=fay.foo.com:load.load
# load2.dummy_field.draw AREA # We want area instead of the default LINE2.
# load2.dummy_field.label dummy # This is needed. Silly, really.
#
# # The graph "load3". Now we want them summarised into one field
# load3.graph_title Loads summarised
# load3.combined_loads.sum fii.foo.com:load.load fay.foo.com:load.load
# load3.combined_loads.label Combined loads # Must be set, as this is
# # not a dummy field!
#
## ...and on a side note, I want them listed in another order (the default
## is alphabetical)
#
# # Since [foo.com] would be interpreted as a host in the domain "com", we
# # specify that this is a domain by adding a semicolon.
# [foo.com;]
# node_order Totals fii.foo.com fay.foo.com
#
[debian.net;]
node_order jenkins.debian.net profitbricks-build1-amd64.debian.net profitbricks-build2-amd64.debian.net profitbricks-build3-amd64.debian.net profitbricks-build4-amd64.debian.net profitbricks-build5-amd64.debian.net profitbricks-build6-amd64.debian.net wbq0-armhf-rb.debian.net cbxi4pro0-armhf-rb.debian.net bpi0-armhf-rb.debian.net hb0-armhf-rb.debian.net odxu4-armhf-rb.debian.net wbd0-armhf-rb.debian.net rpi2b-armhf-rb.debian.net freebsd-jenkins.debian.net