/usr/share/pyshared/glance/store/scrubber.py is in python-glance 2012.1.3+stable~20120821-120fcf-0ubuntu1.5.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import eventlet
import logging
import time
import os
import glance.store.filesystem
import glance.store.http
import glance.store.s3
import glance.store.swift
from glance import registry
from glance import store
from glance.common import cfg
from glance.common import utils
from glance.registry import client
# Module-level logger; the name mirrors the module path so scrubber log
# output can be filtered per-component.
logger = logging.getLogger('glance.store.scrubber')
class Daemon(object):
    """Periodically runs an application on an eventlet green-thread pool.

    ``start()`` triggers the first run; each run reschedules itself via
    ``eventlet.spawn_after`` every ``wakeup_time`` seconds, so the caller
    uses ``wait()`` to block until the shared event fires (or Ctrl-C).
    """

    def __init__(self, wakeup_time=300, threads=1000):
        # wakeup_time: seconds between successive runs of the application.
        # threads: size of the GreenPool handed to the application's run().
        logger.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
                      "threads=%(threads)s") % locals())
        self.wakeup_time = wakeup_time
        self.event = eventlet.event.Event()
        self.pool = eventlet.greenpool.GreenPool(threads)

    def start(self, application):
        """Kick off the first run; subsequent runs self-schedule."""
        self._run(application)

    def wait(self):
        """Block until the shared event is sent or KeyboardInterrupt."""
        try:
            self.event.wait()
        except KeyboardInterrupt:
            msg = _("Daemon Shutdown on KeyboardInterrupt")
            logger.info(msg)

    def _run(self, application):
        # Spawn this run asynchronously, then schedule the next wakeup
        # immediately -- the next run is NOT delayed by the current one.
        # Fixed typo in log message: "Runing" -> "Running".
        logger.debug(_("Running application"))
        self.pool.spawn_n(application.run, self.pool, self.event)
        eventlet.spawn_after(self.wakeup_time, self._run, application)
        logger.debug(_("Next run scheduled in %s seconds") % self.wakeup_time)
class Scrubber(object):
    """Deletes images whose delayed-delete grace period has expired.

    Queue files live in ``datadir``; each is named after an image id and
    holds the image URI plus the earliest deletion timestamp.  The same
    timestamp is mirrored in the file's mtime (see write_queue_file) so a
    cheap stat can skip entries that are not yet ripe.
    """

    CLEANUP_FILE = ".cleanup"

    opts = [
        cfg.BoolOpt('cleanup_scrubber', default=False),
        cfg.IntOpt('cleanup_scrubber_time', default=86400)
    ]

    def __init__(self, conf, **local_conf):
        self.conf = conf
        self.conf.register_opts(self.opts)

        self.datadir = store.get_scrubber_datadir(conf)
        self.cleanup = self.conf.cleanup_scrubber
        self.cleanup_time = self.conf.cleanup_scrubber_time

        host, port = registry.get_registry_addr(conf)

        logger.info(_("Initializing scrubber with conf: %s") %
                    {'datadir': self.datadir, 'cleanup': self.cleanup,
                     'cleanup_time': self.cleanup_time,
                     'registry_host': host, 'registry_port': port})

        self.registry = client.RegistryClient(host, port)

        utils.safe_mkdirs(self.datadir)
        store.create_stores(conf)

    def run(self, pool, event=None):
        """Scan datadir for ripe queue files and delete their images.

        :param pool: eventlet GreenPool used to fan out the deletes
        :param event: unused; accepted to match the Daemon run signature
        """
        now = time.time()

        if not os.path.exists(self.datadir):
            logger.info(_("%s does not exist") % self.datadir)
            return

        delete_work = []
        for root, dirs, files in os.walk(self.datadir):
            for id in files:
                if id == self.CLEANUP_FILE:
                    continue

                file_name = os.path.join(root, id)
                delete_time = os.stat(file_name).st_mtime

                # Cheap mtime pre-check; only read the queue file when the
                # stat suggests the entry may be ripe.
                if delete_time > now:
                    continue

                uri, delete_time = read_queue_file(file_name)

                if delete_time > now:
                    continue

                delete_work.append((id, uri, now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)

        if self.cleanup:
            self._cleanup(pool)

    def _delete(self, id, uri, now):
        """Delete one image from its backend, then mark it deleted.

        On failure the queue file is rewritten so a later run retries.
        """
        file_path = os.path.join(self.datadir, str(id))
        try:
            logger.debug(_("Deleting %(uri)s") % {'uri': uri})
            store.delete_from_backend(uri)
        except store.UnsupportedBackend:
            msg = _("Failed to delete image from store (%(uri)s).")
            logger.error(msg % {'uri': uri})
            write_queue_file(file_path, uri, now)
            # Bug fix: previously the code fell through here, marking the
            # image 'deleted' in the registry and removing the queue file
            # even though the backend delete failed -- so the re-queued
            # entry was destroyed and the retry could never happen.
            return

        self.registry.update_image(id, {'status': 'deleted'})
        utils.safe_remove(file_path)

    def _cleanup(self, pool):
        """Force-delete images stuck in pending_delete past cleanup_time.

        Uses the CLEANUP_FILE queue entry as a persistent "last run"
        timestamp so the sweep happens at most once per cleanup_time.
        """
        now = time.time()
        cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
        if not os.path.exists(cleanup_file):
            # First run: just record the time and wait a full interval.
            write_queue_file(cleanup_file, 'cleanup', now)
            return

        _uri, last_run_time = read_queue_file(cleanup_file)
        cleanup_time = last_run_time + self.cleanup_time
        if cleanup_time > now:
            return

        logger.info(_("Getting images deleted before %s") % self.cleanup_time)
        write_queue_file(cleanup_file, 'cleanup', now)

        filters = {'deleted': True, 'is_public': 'none',
                   'status': 'pending_delete'}
        pending_deletes = self.registry.get_images_detailed(filters=filters)

        delete_work = []
        for pending_delete in pending_deletes:
            deleted_at = pending_delete.get('deleted_at')
            if not deleted_at:
                continue

            time_fmt = "%Y-%m-%dT%H:%M:%S"
            # NOTE: Strip off microseconds which may occur after the last '.,'
            # Example: 2012-07-07T19:14:34.974216
            date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
            delete_time = calendar.timegm(time.strptime(date_str,
                                                        time_fmt))

            if delete_time + self.cleanup_time > now:
                continue

            delete_work.append((pending_delete['id'],
                                pending_delete['location'],
                                now))

        logger.info(_("Deleting %s images") % len(delete_work))
        pool.starmap(self._delete, delete_work)
def read_queue_file(file_path):
    """Parse a scrubber queue file.

    The file holds two lines: the image URI, then the integer timestamp
    at which deletion becomes allowed.

    :returns: (uri, delete_time) tuple
    """
    with open(file_path) as queue_file:
        uri_line = queue_file.readline()
        time_line = queue_file.readline()
    return uri_line.strip(), int(time_line.strip())
def write_queue_file(file_path, uri, delete_time):
with open(file_path, 'w') as f:
f.write('\n'.join([uri, str(int(delete_time))]))
os.chmod(file_path, 0600)
os.utime(file_path, (delete_time, delete_time))