# Example configuration file for Munin, generated by 'make build'

# The next three variables specifies where the location of the RRD
# databases, the HTML output, logs and the lock/pid files. They all
# must be writable by the user running munin-cron. They are all
# defaulted to the values you see here.
#
#dbdir /var/lib/munin
#htmldir /var/cache/munin/www
#logdir /var/log/munin
#rundir /var/run/munin

# Where to look for the HTML templates
#
#tmpldir /etc/munin/templates

# Where to look for the static www files
#
#staticdir /etc/munin/static

# temporary cgi files are here. note that it has to be writable by
# the cgi user (usually nobody or httpd).
#
# cgitmpdir /var/lib/munin/cgi-tmp

# (Exactly one) directory to include all files from.
includedir /etc/munin/munin-conf.d

# You can choose the time reference for "DERIVE" like graphs, and show
# "per minute", "per hour" values instead of the default "per second"
#
#graph_period second

# Graphics files are generated either via cron or by a CGI process.
# See http://munin-monitoring.org/wiki/CgiHowto2 for more
# documentation.
# Since 2.0, munin-graph has been rewritten to use the cgi code.
# It is single threaded *by design* now.
#
#graph_strategy cron

# munin-cgi-graph is invoked by the web server up to very many times at the
# same time. This is not optimal since it results in high CPU and memory
# consumption to the degree that the system can thrash. Again the default is
# 6. Most likely the optimal number for max_cgi_graph_jobs is the same as
# max_graph_jobs.
#
#munin_cgi_graph_jobs 6

# If the automatic CGI url is wrong for your system override it here:
#
#cgiurl_graph /munin-cgi/munin-cgi-graph

# max_size_x and max_size_y are the max size of images in pixel.
# Default is 4000. Do not make it too large otherwise RRD might use all
# RAM to generate the images.
#
#max_size_x 4000
#max_size_y 4000

# HTML files are normally generated by munin-html, no matter if the
# files are used or not.
# You can change this to on-demand generation
# by following the instructions in http://munin-monitoring.org/wiki/CgiHowto2
#
# Notes:
# - moving to CGI for HTML means you cannot have graph generated by cron.
# - cgi html has some bugs, mostly you still have to launch munin-html by hand
#
#html_strategy cron

# munin-update runs in parallel.
#
# The default max number of processes is 16, and is probably ok for you.
#
# If set too high, it might hit some process/ram/filedesc limits.
# If set too low, munin-update might take more than 5 min.
#
# If you want munin-update to not be parallel set it to 0.
#
#max_processes 16

# RRD updates are per default, performed directly on the rrd files.
# To reduce IO and enable the use of the rrdcached, uncomment it and set it to
# the location of the socket that rrdcached uses.
#
#rrdcached_socket /var/run/rrdcached.sock

# Drop somejuser@fnord.comm and anotheruser@blibb.comm an email everytime
# something changes (OK -> WARNING, CRITICAL -> OK, etc)
#contact.someuser.command mail -s "Munin notification" somejuser@fnord.comm
#contact.anotheruser.command mail -s "Munin notification" anotheruser@blibb.comm
#
# For those with Nagios, the following might come in handy. In addition,
# the services must be defined in the Nagios server as well.
#contact.nagios.command /usr/bin/send_nsca nagios.host.comm -c /etc/nsca.conf

# Active contact list; "me" is defined by contact.me.command below.
contacts me
contact.me.command mail -s "Munin notification ${var:host}" qa-jenkins-scm@lists.alioth.debian.org

# a simple host tree
[jenkins.debian.net]
    address 127.0.0.1
    use_node_name yes
    cpu.graph_args --base 1000 -r --lower-limit 0
    # also graph values about current limit (eg from when the machine had more cores)
    df._srv_workspace.warning 75
    df._srv_workspace.critical 85
    diskstats_latency.vda.avgwrwait.warning 0:20
    diskstats_latency.vdb.avgwrwait.warning 0:20
    diskstats_latency.vda.avgrdwait.warning 0:10
    diskstats_latency.vdb.avgrdwait.warning 0:10
    diskstats_latency.graph no
    diskstats_throughput.graph no
    diskstats_utilization.graph no
    diskstats_iops.graph no

[profitbricks-build1-amd64.debian.net]
    address 78.137.99.97
    use_node_name yes
    df._srv_workspace.warning 90
    df._srv_workspace.critical 95
    diskstats_latency.vda.avgwrwait.warning 0:20
    diskstats_latency.vdb.avgwrwait.warning 0:20
    diskstats_latency.vda.avgrdwait.warning 0:10
    diskstats_latency.vdb.avgrdwait.warning 0:10
    diskstats_latency.graph no
    diskstats_throughput.graph no
    diskstats_utilization.graph no
    diskstats_iops.graph no

[profitbricks-build2-amd64.debian.net]
    address 78.137.96.192
    use_node_name yes
    df._srv_workspace.warning 90
    df._srv_workspace.critical 95
    diskstats_latency.vda.avgwrwait.warning 0:20
    diskstats_latency.vdb.avgwrwait.warning 0:20
    diskstats_latency.vda.avgrdwait.warning 0:10
    diskstats_latency.vdb.avgrdwait.warning 0:10
    diskstats_latency.graph no
    diskstats_throughput.graph no
    diskstats_utilization.graph no
    diskstats_iops.graph no

[profitbricks-build3-amd64.debian.net]
    address 185.48.117.167
    use_node_name yes
    df._srv_workspace.warning 90
    df._srv_workspace.critical 95
    diskstats_latency.vda.avgwrwait.warning 0:20
    diskstats_latency.vdb.avgwrwait.warning 0:20
    diskstats_latency.vda.avgrdwait.warning 0:10
    diskstats_latency.vdb.avgrdwait.warning 0:10
    diskstats_latency.graph no
    diskstats_throughput.graph no
diskstats_utilization.graph no diskstats_iops.graph no [profitbricks-build4-amd64.debian.net] address 46.16.78.222 use_node_name yes df._srv_workspace.warning 90 df._srv_workspace.critical 95 diskstats_latency.vda.avgwrwait.warning 0:20 diskstats_latency.vdb.avgwrwait.warning 0:20 diskstats_latency.vda.avgrdwait.warning 0:10 diskstats_latency.vdb.avgrdwait.warning 0:10 diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no [bpi0-armhf-rb.debian.net] address ssh://jenkins@bpi0-armhf-rb.debian.net:2222/bin/nc localhost 4949 use_node_name yes diskstats_latency.sda.avgwrwait.warning 0:20 diskstats_latency.sda.avgrdwait.warning 0:10 diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no [hb0-armhf-rb.debian.net] address ssh://jenkins@hb0-armhf-rb.debian.net:2224/bin/nc localhost 4949 use_node_name yes diskstats_latency.sda.avgwrwait.warning 0:20 diskstats_latency.sda.avgrdwait.warning 0:10 diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no [wbq0-armhf-rb.debian.net] address ssh://jenkins@wbq0-armhf-rb.debian.net:2225/bin/nc localhost 4949 use_node_name yes diskstats_latency.sda.avgwrwait.warning 0:20 diskstats_latency.sda.avgrdwait.warning 0:10 diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no [cbxi4pro0-armhf-rb.debian.net] address ssh://jenkins@cbxi4pro0-armhf-rb.debian.net:2226/bin/nc localhost 4949 use_node_name yes diskstats_latency.sda.avgwrwait.warning 0:20 diskstats_latency.sda.avgrdwait.warning 0:10 diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no [freebsd-jenkins.debian.net] address 46.16.73.236 use_node_name yes diskstats_latency.graph no diskstats_throughput.graph no diskstats_utilization.graph no diskstats_iops.graph no # # A more complex example of a host tree # 
## First our "normal" host.
# [fii.foo.com]
#       address foo
#
## Then our other host...
# [fay.foo.com]
#       address fay
#
## Then we want totals...
# [foo.com;Totals] #Force it into the "foo.com"-domain...
#       update no # Turn off data-fetching for this "host".
#
#       # The graph "load1". We want to see the loads of both machines...
#       # "fii=fii.foo.com:load.load" means "label=machine:graph.field"
#       load1.graph_title Loads side by side
#       load1.graph_order fii=fii.foo.com:load.load fay=fay.foo.com:load.load
#
#       # The graph "load2". Now we want them stacked on top of each other.
#       load2.graph_title Loads on top of each other
#       load2.dummy_field.stack fii=fii.foo.com:load.load fay=fay.foo.com:load.load
#       load2.dummy_field.draw AREA # We want area instead the default LINE2.
#       load2.dummy_field.label dummy # This is needed. Silly, really.
#
#       # The graph "load3". Now we want them summarised into one field
#       load3.graph_title Loads summarised
#       load3.combined_loads.sum fii.foo.com:load.load fay.foo.com:load.load
#       load3.combined_loads.label Combined loads # Must be set, as this is
#                                                 # not a dummy field!
#
## ...and on a side note, I want them listen in another order (default is
## alphabetically)
#
# # Since [foo.com] would be interpreted as a host in the domain "com", we
# # specify that this is a domain by adding a semicolon.
# [foo.com;]
#       node_order Totals fii.foo.com fay.foo.com
#

# Explicit display order for the debian.net domain overview page.
[debian.net;]
    node_order jenkins.debian.net profitbricks-build1-amd64.debian.net profitbricks-build2-amd64.debian.net profitbricks-build3-amd64.debian.net profitbricks-build4-amd64.debian.net wbq0-armhf-rb.debian.net cbxi4pro0-armhf-rb.debian.net bpi0-armhf-rb.debian.net hb0-armhf-rb.debian.net freebsd-jenkins.debian.net