/usr/lib/python2.7/dist-packages/gnocchi/statsd.py is in python-gnocchi 4.2.0-0ubuntu5.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# Copyright (c) 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import uuid

try:
    import asyncio
except ImportError:
    import trollius as asyncio

import daiquiri
from oslo_config import cfg
import six

from gnocchi import incoming
from gnocchi import indexer
from gnocchi import service
from gnocchi import utils

LOG = daiquiri.getLogger(__name__)

class Stats(object):
    def __init__(self, conf):
        self.conf = conf
        self.incoming = incoming.get_driver(self.conf)
        self.indexer = indexer.get_driver(self.conf)
        try:
            self.indexer.create_resource('generic',
                                         self.conf.statsd.resource_id,
                                         self.conf.statsd.creator)
        except indexer.ResourceAlreadyExists:
            LOG.debug("Resource %s already exists",
                      self.conf.statsd.resource_id)
        else:
            LOG.info("Created resource %s", self.conf.statsd.resource_id)

        self.gauges = {}
        self.counters = {}
        self.times = {}
        self.metrics = {
            metric.name: metric
            for metric
            in self.indexer.get_resource('generic',
                                         self.conf.statsd.resource_id,
                                         with_metrics=True).metrics
        }

    def reset(self):
        self.gauges.clear()
        self.counters.clear()
        self.times.clear()
    def treat_metric(self, metric_name, metric_type, value, sampling):
        metric_name += "|" + metric_type
        if metric_type == "ms":
            if sampling is not None:
                raise ValueError(
                    "Invalid sampling for ms: `%d`, should be none"
                    % sampling)
            self.times[metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "g":
            if sampling is not None:
                raise ValueError(
                    "Invalid sampling for g: `%d`, should be none"
                    % sampling)
            self.gauges[metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()), value)
        elif metric_type == "c":
            sampling = 1 if sampling is None else sampling
            if metric_name in self.counters:
                current_value = self.counters[metric_name].value
            else:
                current_value = 0
            self.counters[metric_name] = incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()),
                current_value + (value * (1 / sampling)))
        # TODO(jd) Support "set" type
        # elif metric_type == "s":
        #     pass
        else:
            raise ValueError("Unknown metric type `%s'" % metric_type)
    def flush(self):
        for metric_name, measure in itertools.chain(
                six.iteritems(self.gauges),
                six.iteritems(self.counters),
                six.iteritems(self.times)):
            try:
                # NOTE(jd) We avoid considering any concurrency here as statsd
                # is not designed to run in parallel and we do not envision
                # operators manipulating the resource/metrics using the
                # Gnocchi API at the same time.
                metric = self.metrics.get(metric_name)
                if not metric:
                    ap_name = self._get_archive_policy_name(metric_name)
                    metric = self.indexer.create_metric(
                        uuid.uuid4(),
                        self.conf.statsd.creator,
                        archive_policy_name=ap_name,
                        name=metric_name,
                        resource_id=self.conf.statsd.resource_id)
                    self.metrics[metric_name] = metric
                self.incoming.add_measures(metric.id, (measure,))
            except Exception as e:
                LOG.error("Unable to add measure %s: %s",
                          metric_name, e)

        self.reset()
    def _get_archive_policy_name(self, metric_name):
        if self.conf.statsd.archive_policy_name:
            return self.conf.statsd.archive_policy_name
        # NOTE(sileht): We didn't catch NoArchivePolicyRuleMatch to log it
        ap = self.indexer.get_archive_policy_for_metric(metric_name)
        return ap.name

class StatsdServer(asyncio.Protocol):
    def __init__(self, stats):
        self.stats = stats

    @staticmethod
    def connection_made(transport):
        pass

    def datagram_received(self, data, addr):
        LOG.debug("Received data `%r' from %s", data, addr)
        try:
            messages = [m for m in data.decode().split("\n") if m]
        except Exception as e:
            LOG.error("Unable to decode datagram: %s", e)
            return
        for message in messages:
            metric = message.split("|")
            if len(metric) == 2:
                metric_name, metric_type = metric
                sampling = None
            elif len(metric) == 3:
                metric_name, metric_type, sampling = metric
            else:
                LOG.error("Invalid number of | in `%s'", message)
                continue
            sampling = float(sampling[1:]) if sampling is not None else None
            metric_name, metric_str_val = metric_name.split(':')
            # NOTE(jd): We do not support +/- gauge, and we delete gauge on
            # each flush.
            value = float(metric_str_val)
            try:
                self.stats.treat_metric(metric_name, metric_type,
                                        value, sampling)
            except Exception as e:
                LOG.error("Unable to treat metric %s: %s", message, str(e))

def start():
    conf = service.prepare_service()

    if conf.statsd.resource_id is None:
        raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd"))

    stats = Stats(conf)

    loop = asyncio.get_event_loop()
    # TODO(jd) Add TCP support
    listen = loop.create_datagram_endpoint(
        lambda: StatsdServer(stats),
        local_addr=(conf.statsd.host, conf.statsd.port))

    def _flush():
        loop.call_later(conf.statsd.flush_delay, _flush)
        stats.flush()

    loop.call_later(conf.statsd.flush_delay, _flush)
    transport, protocol = loop.run_until_complete(listen)

    LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port)
    LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

    transport.close()
    loop.close()
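
For reference, the datagram_received() handler above accepts plain statsd messages of the form name:value|type, with an optional |@rate sampling suffix, where type is one of ms (timer), g (gauge) or c (counter). A minimal client sketch, assuming gnocchi-statsd is already running; the host, port and metric names below are made-up placeholders that would have to match your [statsd] configuration:

import socket

# Hypothetical endpoint: use the host/port configured under [statsd].
STATSD_ADDR = ("127.0.0.1", 8125)

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# A single datagram may carry several newline-separated messages, exactly as
# datagram_received() splits them above.
payload = "\n".join([
    "example.response_time:12.5|ms",  # timer, buffered in Stats.times
    "example.queue_depth:42|g",       # gauge, buffered in Stats.gauges
    "example.hits:1|c|@0.1",          # sampled counter, buffered in Stats.counters
])
sock.sendto(payload.encode(), STATSD_ADDR)
sock.close()

Measures are not written immediately: treat_metric() only updates the in-memory buffers, which the periodic _flush() callback pushes to the incoming storage every flush_delay seconds.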
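
The scaling applied to sampled counters in treat_metric() can be checked by hand: a counter received with a @0.1 sample rate contributes value * (1 / 0.1), i.e. ten times its raw value, to the running total. A small standalone sketch of that arithmetic (parse_statsd_line is a hypothetical helper written here only to mirror the string splitting done in datagram_received() above):

def parse_statsd_line(line):
    # Mirrors datagram_received(): "name:value|type" or "name:value|type|@rate".
    parts = line.split("|")
    if len(parts) == 2:
        name_value, metric_type = parts
        sampling = None
    elif len(parts) == 3:
        name_value, metric_type, sampling = parts
        sampling = float(sampling[1:])  # strip the leading "@"
    else:
        raise ValueError("Invalid number of | in `%s'" % line)
    name, value = name_value.split(":")
    return name, float(value), metric_type, sampling


name, value, metric_type, sampling = parse_statsd_line("example.hits:1|c|@0.1")
# treat_metric() accumulates value * (1 / sampling) for "c" metrics:
print(value * (1 / sampling))  # 10.0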