/usr/lib/python2.7/dist-packages/carbon/writer.py is in graphite-carbon 0.9.15-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import time
from os.path import exists, dirname
import whisper
from carbon import state
from carbon.cache import MetricCache
from carbon.storage import getFilesystemPath, loadStorageSchemas,\
    loadAggregationSchemas
from carbon.conf import settings
from carbon import log, events, instrumentation
from carbon.util import TokenBucket
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.application.service import Service
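
# Role of this module: the writer drains MetricCache via optimalWriteOrder()
# and persists datapoints to whisper files on disk, creating new files on
# demand subject to the token-bucket rate limits configured below.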

try:
  import signal
except ImportError:
  log.msg("Couldn't import signal module")
SCHEMAS = loadStorageSchemas()
AGGREGATION_SCHEMAS = loadAggregationSchemas()
CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95
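
# Once the cache has been flagged as too full, writes continue until it
# drains below this 95% watermark, at which point cacheSpaceAvailable()
# is fired (see optimalWriteOrder below).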

# Initialize token buckets so that we can enforce rate limits on creates and
# updates if the config wants them.
CREATE_BUCKET = None
UPDATE_BUCKET = None
if settings.MAX_CREATES_PER_MINUTE != float('inf'):
  capacity = settings.MAX_CREATES_PER_MINUTE
  fill_rate = float(settings.MAX_CREATES_PER_MINUTE) / 60
  CREATE_BUCKET = TokenBucket(capacity, fill_rate)

if settings.MAX_UPDATES_PER_SECOND != float('inf'):
  capacity = settings.MAX_UPDATES_PER_SECOND
  fill_rate = settings.MAX_UPDATES_PER_SECOND
  UPDATE_BUCKET = TokenBucket(capacity, fill_rate)
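
# A worked example with a hypothetical config value: MAX_CREATES_PER_MINUTE = 60
# yields CREATE_BUCKET = TokenBucket(60, 1.0), i.e. a burst of up to 60 new
# whisper files, refilled at one create per second thereafter. The TokenBucket
# behavior assumed from its use in this module: drain(1) consumes a token and
# is falsy when the bucket is empty, while drain(1, blocking=True) waits until
# a token is available.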

def optimalWriteOrder():
  """Generates metrics with the most cached values first and applies a soft
  rate limit on new metrics"""
  while MetricCache:
    (metric, datapoints) = MetricCache.pop()
    if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK:
      events.cacheSpaceAvailable()

    dbFilePath = getFilesystemPath(metric)
    dbFileExists = exists(dbFilePath)

    if not dbFileExists and CREATE_BUCKET:
      # If our token bucket has enough tokens available to create a new metric
      # file then yield the metric data to complete that operation. Otherwise
      # we'll just drop the metric on the ground and move on to the next
      # metric.
      # XXX This behavior should probably be configurable to not drop metrics
      # when rate limiting unless our cache is too big or for some other
      # legitimate reason.
      if CREATE_BUCKET.drain(1):
        yield (metric, datapoints, dbFilePath, dbFileExists)
      continue

    yield (metric, datapoints, dbFilePath, dbFileExists)
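
# Each tuple yielded above is (metric, datapoints, dbFilePath, dbFileExists);
# writeCachedDataPoints() consumes them and performs the actual whisper
# create/update calls.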

def writeCachedDataPoints():
  "Write datapoints until the MetricCache is completely empty"

  while MetricCache:
    dataWritten = False

    for (metric, datapoints, dbFilePath, dbFileExists) in optimalWriteOrder():
      dataWritten = True

      if not dbFileExists:
        archiveConfig = None
        xFilesFactor, aggregationMethod = None, None

        for schema in SCHEMAS:
          if schema.matches(metric):
            log.creates('new metric %s matched schema %s' % (metric, schema.name))
            archiveConfig = [archive.getTuple() for archive in schema.archives]
            break

        for schema in AGGREGATION_SCHEMAS:
          if schema.matches(metric):
            log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
            xFilesFactor, aggregationMethod = schema.archives
            break

        if not archiveConfig:
          raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)

        dbDir = dirname(dbFilePath)
        try:
          if not exists(dbDir):
            os.makedirs(dbDir)
        except OSError as e:
          log.err("%s" % e)
        log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
                    (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
        try:
          whisper.create(
            dbFilePath,
            archiveConfig,
            xFilesFactor,
            aggregationMethod,
            settings.WHISPER_SPARSE_CREATE,
            settings.WHISPER_FALLOCATE_CREATE)
          instrumentation.increment('creates')
        except:
          log.err("Error creating %s" % (dbFilePath))
          continue

      # If we've got a rate limit configured let's make sure we enforce it
      if UPDATE_BUCKET:
        UPDATE_BUCKET.drain(1, blocking=True)
      try:
        t1 = time.time()
        whisper.update_many(dbFilePath, datapoints)
        updateTime = time.time() - t1
      except Exception:
        log.msg("Error writing to %s" % (dbFilePath))
        log.err()
        instrumentation.increment('errors')
      else:
        pointCount = len(datapoints)
        instrumentation.increment('committedPoints', pointCount)
        instrumentation.append('updateTimes', updateTime)
        if settings.LOG_UPDATES:
          log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime))

    # Avoid churning CPU when only new metrics are in the cache
    if not dataWritten:
      time.sleep(0.1)
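
# writeForever() is the writer thread's entry point: WriterService launches it
# with reactor.callInThread() and it loops until the reactor stops.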

def writeForever():
  while reactor.running:
    try:
      writeCachedDataPoints()
    except Exception:
      log.err()
    time.sleep(1)  # The writer thread only sleeps when the cache is empty or an error occurs
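
# The two reload functions below run every 60 seconds via LoopingCall (see
# WriterService.startService); on failure the error is logged and the
# previously loaded schemas stay in effect.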

def reloadStorageSchemas():
  global SCHEMAS
  try:
    SCHEMAS = loadStorageSchemas()
  except Exception:
    log.msg("Failed to reload storage SCHEMAS")
    log.err()

def reloadAggregationSchemas():
  global AGGREGATION_SCHEMAS
  try:
    AGGREGATION_SCHEMAS = loadAggregationSchemas()
  except Exception:
    log.msg("Failed to reload aggregation SCHEMAS")
    log.err()

def shutdownModifyUpdateSpeed():
  try:
    shut = settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN
    if UPDATE_BUCKET:
      UPDATE_BUCKET.setCapacityAndFillRate(shut, shut)
    if CREATE_BUCKET:
      CREATE_BUCKET.setCapacityAndFillRate(shut, shut)
    log.msg("Carbon shutting down. Changed the update rate to: " + str(settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN))
  except KeyError:
    log.msg("Carbon shutting down. Update rate not changed")

class WriterService(Service):

  def __init__(self):
    self.storage_reload_task = LoopingCall(reloadStorageSchemas)
    self.aggregation_reload_task = LoopingCall(reloadAggregationSchemas)

  def startService(self):
    if 'signal' in globals().keys():
      log.msg("Installing SIG_IGN for SIGHUP")
      signal.signal(signal.SIGHUP, signal.SIG_IGN)
    self.storage_reload_task.start(60, False)
    self.aggregation_reload_task.start(60, False)
    reactor.addSystemEventTrigger('before', 'shutdown', shutdownModifyUpdateSpeed)
    reactor.callInThread(writeForever)
    Service.startService(self)

  def stopService(self):
    self.storage_reload_task.stop()
    self.aggregation_reload_task.stop()
    Service.stopService(self)