/usr/lib/check_mk_agent/plugins/mk_logwatch is in check-mk-agent-logwatch 1.2.8p16-1ubuntu0.1.
This file is owned by root:root, with mode 0o755.
The actual contents of the file can be viewed below.
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Call with -d for debug mode: colored output, no saving of status
import sys, os, re, time, glob
# Debug mode (-d / --debug): colourise the output with ANSI escape
# sequences and (further down) skip saving the status file.
if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
    debug = True
    tty_red = '\033[1;31m'
    tty_green = '\033[1;32m'
    tty_yellow = '\033[1;33m'
    tty_blue = '\033[1;34m'
    tty_normal = '\033[0m'
else:
    # Normal agent run: no colouring at all.
    debug = False
    tty_red = tty_green = tty_yellow = tty_blue = tty_normal = ''
# The configuration file and status file are searched
# in the directory named by the environment variable
# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
# If that is not set either, the current directory is
# used.
logwatch_dir = os.getenv("LOGWATCH_DIR")
if logwatch_dir:
    # LOGWATCH_DIR overrides both the config and the state directory.
    mk_confdir = logwatch_dir
    mk_vardir = logwatch_dir
else:
    mk_confdir = os.getenv("MK_CONFDIR") or "."
    mk_vardir = os.getenv("MK_VARDIR") or "."

# Start of the agent section that the logwatch check on the server parses.
print "<<<logwatch>>>"

config_filename = mk_confdir + "/logwatch.cfg"
config_dir = mk_confdir + "/logwatch.d/*.cfg"  # glob pattern for drop-in config files

# Determine the name of the state file
# $REMOTE set                   -> logwatch.state.$REMOTE
# $REMOTE not set and a tty     -> logwatch.state.local
# $REMOTE not set and not a tty -> logwatch.state
# NOTE(review): REMOTE is presumably set by the [x]inetd/agent wrapper to
# the address of the querying host -- verify against the agent setup.
remote_hostname = os.getenv("REMOTE", "")
if remote_hostname != "":
    # Separate state per querying host, so different pollers do not
    # consume each other's offsets.
    status_filename = "%s/logwatch.state.%s" % (mk_vardir, remote_hostname)
else:
    if sys.stdout.isatty():
        # Interactive (manual) run: use a private state file so a test run
        # does not swallow messages meant for the monitoring server.
        status_filename = "%s/logwatch.state.local" % mk_vardir
    else:
        status_filename = "%s/logwatch.state" % mk_vardir

# Copy the last known state from the logwatch.state when there is no status_filename yet.
if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir):
    import shutil
    shutil.copy("%s/logwatch.state" % mk_vardir, status_filename)
def is_not_comment(line):
    """Return True if the configuration line carries content.

    Blank lines and comment lines (first non-blank character '#')
    are filtered out.
    """
    stripped = line.strip()
    return bool(stripped) and not stripped.startswith('#')
def parse_filenames(line):
    # A logfile line is a whitespace separated list of glob patterns,
    # possibly mixed with inline options such as 'maxlines=100'.
    tokens = line.split()
    return tokens
def parse_pattern(level, pattern, line):
    """Validate one classification line and precompile its regex.

    level   -- classification letter: C (critical), W (warning),
               I (ignore) or O (ok)
    pattern -- the regular expression text following the level letter
    line    -- the complete configuration line, used in error messages

    Returns a (level, compiled_regex) tuple.
    Raises Exception if the level letter or the regex is invalid.
    """
    if level not in [ 'C', 'W', 'I', 'O' ]:
        raise Exception("Invalid pattern line '%s'" % line)
    try:
        compiled = re.compile(pattern)
    except re.error:
        # Narrowed from a bare 'except:' so that SystemExit,
        # KeyboardInterrupt etc. are no longer swallowed here.
        raise Exception("Invalid regular expression in line '%s'" % line)
    return (level, compiled)
def read_config():
    """Read logwatch.cfg plus all logwatch.d/*.cfg drop-in files.

    Returns a list of (filenames, patterns) tuples.  'filenames' is the
    token list of a logfile line (glob patterns plus 'key=value'
    options); 'patterns' is the list of
    (level, compiled_regex, cont_patterns, rewrite_patterns) tuples of
    the following indented lines.

    Raises Exception for invalid lines and IOError when logwatch.cfg is
    missing (both handled by the caller).
    """
    def _content_lines(path):
        # Read one config file, dropping comments/blank lines and
        # trailing whitespace.  The handle is closed explicitly instead
        # of being leaked as in the previous version.
        f = open(path)
        try:
            return [ line.rstrip() for line in f.readlines() if is_not_comment(line) ]
        finally:
            f.close()

    config_lines = _content_lines(config_filename)
    # Add config from a logwatch.d folder
    for config_file in glob.glob(config_dir):
        config_lines += _content_lines(config_file)

    have_filenames = False
    config = []
    for line in config_lines:
        if line[0].isspace(): # pattern line (indented)
            if not have_filenames:
                raise Exception("Missing logfile names")
            level, pattern = line.split(None, 1)
            if level == 'A':
                # Continuation pattern: attached to the most recent
                # classification line via the shared cont_list object.
                # NOTE(review): an 'A'/'R' line before the first
                # C/W/I/O line of the file raises UnboundLocalError --
                # behavior kept as in the original.
                cont_list.append(parse_cont_pattern(pattern))
            elif level == 'R':
                # Rewrite pattern, likewise attached to the last rule.
                rewrite_list.append(pattern)
            else:
                level, compiled = parse_pattern(level, pattern, line)
                cont_list = []      # continuation patterns of this rule
                rewrite_list = []   # rewrite patterns of this rule
                patterns.append((level, compiled, cont_list, rewrite_list))
        else: # filename line: starts a new section
            patterns = []
            config.append((parse_filenames(line), patterns))
            have_filenames = True
    return config
def parse_cont_pattern(pattern):
    """Parse the argument of an 'A' (continuation) configuration line.

    The argument is either an integer (append that many following
    lines to the matched line) or a regular expression (append lines
    as long as they match).

    Returns an int or a compiled regex object.
    Raises Exception if the text is neither a number nor a valid regex.
    """
    try:
        return int(pattern)
    except ValueError:
        # Not a number -> must be a regular expression.  Both excepts
        # are narrowed from bare 'except:' so unrelated errors (e.g.
        # KeyboardInterrupt) are not swallowed.
        try:
            return re.compile(pattern)
        except re.error:
            if debug:
                raise
            raise Exception("Invalid regular expression in line '%s'" % pattern)
# structure of statusfile
# # LOGFILE         OFFSET    INODE
# /var/log/messages|7767698|32455445
# /var/test/x12134.log|12345|32444355
def read_status():
    """Parse the status file into a dict {logfile_path: (offset, inode)}.

    In debug mode an empty dict is returned so every logfile is scanned
    from the beginning on each run.  May raise IOError if the status
    file cannot be opened; the caller treats any error as "start with
    an empty status".
    """
    if debug:
        return {}
    status = {}
    for line in file(status_filename):
        # TODO: Remove variants with spaces. rsplit is
        # not portable. split fails if logfilename contains
        # spaces
        inode = -1  # -1 = inode unknown (entry written by an older version)
        try:
            # Current format: 'filename|offset[|inode]'
            parts = line.split('|')
            filename = parts[0]
            offset = parts[1]
            if len(parts) >= 3:
                inode = parts[2]
        except:
            # Fallback for the legacy whitespace separated format
            try:
                filename, offset = line.rsplit(None, 1)
            except:
                filename, offset = line.split(None, 1)
        status[filename] = int(offset), int(inode)
    return status
def save_status(status):
    """Write the per-logfile offsets back to the status file.

    status -- dict mapping logfile path -> (offset, inode)

    Each entry becomes one 'path|offset|inode' line.  The handle is now
    closed explicitly (the original never closed it, relying on
    interpreter shutdown to flush the data).
    """
    f = open(status_filename, "w")
    try:
        for filename, (offset, inode) in status.items():
            f.write("%s|%d|%d\n" % (filename, offset, inode))
    finally:
        f.close()
pushed_back_line = None  # one-line pushback buffer filled by process_logfile()

def next_line(file_handle):
    """Return the next complete line of the logfile, or None.

    A line previously pushed back by the continuation-pattern logic is
    returned first.  None means end of file, a read error, or an
    incomplete trailing line (one still being written by the logging
    application), which is left for the next agent run.
    """
    global pushed_back_line
    if pushed_back_line != None:
        line = pushed_back_line
        pushed_back_line = None
        return line
    else:
        try:
            line = file_handle.next()
            # Avoid parsing of (yet) incomplete lines (when actual application
            # is just in the process of writing)
            if not line.endswith(os.linesep):
                # Rewind the underlying fd to the start of the partial line
                # so the saved offset does not skip it.
                # NOTE(review): tell() on a file object that is being
                # iterated may be skewed by read-ahead buffering -- verify.
                begin_of_line_offset = file_handle.tell() - len(line)
                os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
                return None
            return line
        except:
            # Bare except kept: any read problem is treated as EOF.
            return None
def process_logfile(logfile, patterns):
    """Scan the new part of one logfile and print classified messages.

    logfile  -- path of the logfile to scan
    patterns -- list of (level, compiled_regex, cont_patterns,
                rewrite_patterns); the FIRST matching entry wins

    Prints a '[[[...]]]' header for the file, reads everything after the
    offset remembered in the global 'status' dict, and prints the
    collected lines only if at least one of them matched with level
    C, W or O.  Side effect: status[logfile] is updated with the new
    (offset, inode).
    """
    global pushed_back_line
    # Look at which file offset we have finished scanning
    # the logfile last time. If we have never seen this file
    # before, we set the offset to -1
    offset, prev_inode = status.get(logfile, (-1, -1))
    try:
        file_desc = os.open(logfile, os.O_RDONLY)
        inode = os.fstat(file_desc)[1] # 1 = st_ino
    except:
        if debug:
            raise
        print "[[[%s:cannotopen]]]" % logfile
        return
    print "[[[%s]]]" % logfile
    # Seek to the current end in order to determine file size
    current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4
    status[logfile] = current_end, inode
    # If we have never seen this file before, we just set the
    # current pointer to the file end. We do not want to make
    # a fuss about ancient log messages...
    if offset == -1:
        if not debug:
            return
        else:
            offset = 0
    # If the inode of the logfile has changed it has appearently
    # been started from new (logfile rotation). At least we must
    # assume that. In some rare cases (restore of a backup, etc)
    # we are wrong and resend old log messages
    if prev_inode >= 0 and inode != prev_inode:
        offset = 0
    # Our previously stored offset is the current end ->
    # no new lines in this file
    if offset == current_end:
        return # nothing new
    # If our offset is beyond the current end, the logfile has been
    # truncated or wrapped while keeping the same inode. We assume
    # that it contains all new data in that case and restart from
    # offset 0.
    if offset > current_end:
        offset = 0
    # now seek to offset where interesting data begins
    os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
    file_handle = os.fdopen(file_desc)
    worst = -1        # worst level seen so far: 2=C, 1=W, 0=O, -1=only I/context
    outputtxt = ""    # collected lines, emitted at the end only if worst > -1
    lines_parsed = 0
    start_time = time.time()
    while True:
        line = next_line(file_handle)
        if line == None:
            break # End of file
        # Handle option maxlinesize
        if opt_maxlinesize != None and len(line) > opt_maxlinesize:
            line = line[:opt_maxlinesize] + "[TRUNCATED]\n"
        lines_parsed += 1
        # Check if maximum number of new log messages is exceeded
        if opt_maxlines != None and lines_parsed > opt_maxlines:
            outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
                opt_overflow, opt_maxlines)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break
        # Check if maximum processing time (per file) is exceeded. Check only
        # every 100'th line in order to save system calls
        if opt_maxtime != None and lines_parsed % 100 == 10 \
            and time.time() - start_time > opt_maxtime:
            outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
                opt_overflow, opt_maxtime)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break
        level = "."
        for lev, pattern, cont_patterns, replacements in patterns:
            matches = pattern.search(line[:-1])
            if matches:
                level = lev
                levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
                worst = max(levelint, worst)
                # Check for continuation lines
                for cont_pattern in cont_patterns:
                    if type(cont_pattern) == int: # add that many lines
                        for x in range(cont_pattern):
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            # \1 marks the joint; rendered as "CONT:" in debug mode
                            line = line[:-1] + "\1" + cont_line
                    else: # pattern is regex
                        while True:
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            elif cont_pattern.search(cont_line[:-1]):
                                line = line[:-1] + "\1" + cont_line
                            else:
                                pushed_back_line = cont_line # sorry for stealing this line
                                break
                # Replacement: \0 is the whole line, \1..\n the regex groups
                for replace in replacements:
                    line = replace.replace('\\0', line.rstrip()) + "\n"
                    for nr, group in enumerate(matches.groups()):
                        line = line.replace('\\%d' % (nr+1), group)
                break # matching rule found and executed
        color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
        if debug:
            line = line.replace("\1", "\nCONT:")
        # 'I' (ignore) lines are demoted to plain context for the output
        if level == "I":
            level = "."
        if opt_nocontext and level == '.':
            continue
        outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
    new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4
    status[logfile] = new_offset, inode
    # output all lines if at least one warning, error or ok has been found
    if worst > -1:
        sys.stdout.write(outputtxt)
        sys.stdout.flush()
    # Handle option maxfilesize, regardless of warning or errors that have happened
    # NOTE: '/' is integer division here (Python 2), so the warning fires
    # whenever the offset crosses another multiple of maxfilesize.
    if opt_maxfilesize != None and (offset / opt_maxfilesize) < (new_offset / opt_maxfilesize):
        sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
            (tty_yellow, opt_maxfilesize, new_offset / opt_maxfilesize, tty_normal))
# --- Main part -----------------------------------------------------------

# A broken configuration aborts this agent section with exit code 1.
try:
    config = read_config()
except Exception, e:
    if debug:
        raise
    print "CANNOT READ CONFIG FILE: %s" % e
    sys.exit(1)

# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
# with an empty status. That keeps the monitoring up and running - even if we might lose a
# message in the extreme case of a corrupted status file.
try:
    status = read_status()
except Exception, e:
    status = {}
logfile_patterns = {}
# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
for filenames, patterns in config:
    # Initialize options with default values
    opt_maxlines = None
    opt_maxtime = None
    opt_maxlinesize = None
    opt_maxfilesize = None
    opt_regex = None
    opt_overflow = 'C'       # level letter used for overflow messages
    opt_overflow_level = 2   # numeric counterpart of opt_overflow
    opt_nocontext = False
    try:
        # Tokens containing '=' are options; all others are glob patterns.
        options = [ o.split('=', 1) for o in filenames if '=' in o ]
        for key, value in options:
            if key == 'maxlines':
                opt_maxlines = int(value)
            elif key == 'maxtime':
                opt_maxtime = float(value)
            elif key == 'maxlinesize':
                opt_maxlinesize = int(value)
            elif key == 'maxfilesize':
                opt_maxfilesize = int(value)
            elif key == 'overflow':
                if value not in [ 'C', 'I', 'W', 'O' ]:
                    raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
                opt_overflow = value
                opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
            elif key == 'regex':
                # Only logfiles whose path matches this regex are processed.
                opt_regex = re.compile(value)
            elif key == 'iregex':
                opt_regex = re.compile(value, re.I)
            elif key == 'nocontext':
                opt_nocontext = True
            else:
                raise Exception("Invalid option %s" % key)
    except Exception, e:
        if debug:
            raise
        print "INVALID CONFIGURATION: %s" % e
        sys.exit(1)
    # Expand the glob patterns and collect the patterns per real logfile.
    for glob_pattern in filenames:
        if '=' in glob_pattern:
            continue # skip option tokens
        logfiles = glob.glob(glob_pattern)
        if opt_regex:
            logfiles = [ f for f in logfiles if opt_regex.search(f) ]
        if len(logfiles) == 0:
            print '[[[%s:missing]]]' % glob_pattern
        else:
            for logfile in logfiles:
                logfile_patterns[logfile] = logfile_patterns.get(logfile, []) + patterns

# NOTE(review): the opt_* globals still hold the values of the LAST config
# section when process_logfile() runs below, so options are not applied per
# section -- looks like a known limitation of this version; verify upstream.
for logfile, patterns in logfile_patterns.items():
    process_logfile(logfile, patterns)
# In debug mode offsets are not saved, so repeated runs re-report the
# same messages (see the -d handling at the top of the file).
if not debug:
    save_status(status)
|