/usr/share/pyshared/radosgw_agent/sync.py is in radosgw-agent 1.1-0ubuntu1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
import logging
import multiprocessing
import time
from radosgw_agent import worker
from radosgw_agent import client
log = logging.getLogger(__name__)
# the replica log api only supports one entry, and updating it
# requires sending a daemon id that matches the existing one. This
# doesn't make a whole lot of sense with the current structure of
# radosgw-agent, so just use a constant value for the daemon id.
DAEMON_ID = 'radosgw-agent'
def prepare_sync(syncer, error_delay):
"""Attempt to prepare a syncer for running a sync.
:param error_delay: seconds to wait before retrying
This will retry forever so the sync agent continues if radosgws
are unavailable temporarily.
"""
while True:
try:
syncer.prepare()
break
except Exception:
log.warn('error preparing for sync, will retry. Traceback:',
exc_info=True)
time.sleep(error_delay)
def incremental_sync(meta_syncer, data_syncer, num_workers, lock_timeout,
incremental_sync_delay, metadata_only, error_delay):
"""Run a continuous incremental sync.
    This will run forever, pausing for incremental_sync_delay
    seconds between syncs.
"""
while True:
try:
meta_syncer.sync(num_workers, lock_timeout)
if not metadata_only:
data_syncer.sync(num_workers, lock_timeout)
except Exception:
log.warn('error doing incremental sync, will try again. Traceback:',
exc_info=True)
# prepare data before sleeping due to rgw_log_bucket_window
if not metadata_only:
prepare_sync(data_syncer, error_delay)
log.info('waiting %d seconds until next sync',
incremental_sync_delay)
time.sleep(incremental_sync_delay)
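        # refresh metadata state right before the next pass; unlike the
        # data log, metadata has no settling window to wait out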
prepare_sync(meta_syncer, error_delay)
class Syncer(object):
def __init__(self, src, dest, max_entries, *args, **kwargs):
self.src = src
self.dest = dest
self.src_conn = client.connection(src)
self.dest_conn = client.connection(dest)
self.daemon_id = DAEMON_ID
self.worker_cls = None # filled in by subclass constructor
self.num_shards = None
self.max_entries = max_entries
self.object_sync_timeout = kwargs.get('object_sync_timeout')
def init_num_shards(self):
if self.num_shards is not None:
return
try:
self.num_shards = client.num_log_shards(self.src_conn, self.type)
log.debug('%d shards to check', self.num_shards)
except Exception:
log.error('finding number of shards failed')
raise
def shard_num_for_key(self, key):
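        # this mirrors the string hash radosgw uses to place keys in
        # log shards (ceph_str_hash_linux in the Ceph tree), so the
        # agent computes the same shard the server logged the key to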
key = key.encode('utf8')
hash_val = 0
for char in key:
c = ord(char)
hash_val = (hash_val + (c << 4) + (c >> 4)) * 11
return hash_val % self.num_shards
def prepare(self):
"""Setup any state required before syncing starts.
This must be called before sync().
"""
pass
def generate_work(self):
"""Generate items to be place in a queue or processing"""
pass
def wait_until_ready(self):
pass
def complete_item(self, shard_num, retries):
"""Called when syncing a single item completes successfully"""
marker = self.shard_info.get(shard_num)
if not marker:
return
try:
data = [dict(name=retry, time=worker.DEFAULT_TIME)
for retry in retries]
client.set_worker_bound(self.dest_conn,
self.type,
marker,
worker.DEFAULT_TIME,
self.daemon_id,
shard_num,
data)
except Exception:
            log.warn('could not set worker bounds, may repeat some work. '
                     'Traceback:', exc_info=True)
def sync(self, num_workers, log_lock_time, max_entries=None):
workQueue = multiprocessing.Queue()
resultQueue = multiprocessing.Queue()
processes = [self.worker_cls(workQueue,
resultQueue,
log_lock_time,
self.src,
self.dest,
daemon_id=self.daemon_id,
max_entries=max_entries,
object_sync_timeout=self.object_sync_timeout,
)
for i in xrange(num_workers)]
for process in processes:
process.daemon = True
process.start()
self.wait_until_ready()
log.info('Starting sync')
# enqueue the shards to be synced
num_items = 0
for item in self.generate_work():
num_items += 1
workQueue.put(item)
# add a poison pill for each worker
for i in xrange(num_workers):
workQueue.put(None)
# pull the results out as they are produced
        errors = []
for i in xrange(num_items):
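            # each worker reports (result, (shard_num, entries_to_retry))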
result, item = resultQueue.get()
shard_num, retries = item
if result == worker.RESULT_SUCCESS:
log.debug('synced item %r successfully', item)
self.complete_item(shard_num, retries)
else:
log.error('error syncing shard %d', shard_num)
                errors.append(shard_num)
log.info('%d/%d items processed', i + 1, num_items)
        if errors:
            log.error('Encountered errors syncing these %d shards: %r',
                      len(errors), errors)
class IncrementalSyncer(Syncer):
def get_worker_bound(self, shard_num):
try:
marker, timestamp, retries = client.get_worker_bound(
self.dest_conn,
self.type,
shard_num)
log.debug('oldest marker and time for shard %d are: %r %r',
shard_num, marker, timestamp)
            log.debug('%d items to retry are: %r', len(retries), retries)
except client.NotFound:
# if no worker bounds have been set, start from the beginning
marker, retries = '', []
return marker, retries
def get_log_entries(self, shard_num, marker):
try:
result = client.get_log(self.src_conn, self.type,
marker, self.max_entries,
shard_num)
last_marker = result['marker']
log_entries = result['entries']
if len(log_entries) == self.max_entries:
                log.warn('shard %d log has fallen behind - log length >= %d',
                         shard_num, self.max_entries)
except client.NotFound:
            # no entries past this marker yet, but we may have retries
last_marker = ''
log_entries = []
return last_marker, log_entries
def prepare(self):
self.init_num_shards()
self.shard_info = {}
self.shard_work = {}
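        # for each shard, resume from the saved bound: fetch the entries
        # that previously failed plus any new log entries past the marker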
for shard_num in xrange(self.num_shards):
marker, retries = self.get_worker_bound(shard_num)
last_marker, log_entries = self.get_log_entries(shard_num, marker)
self.shard_work[shard_num] = log_entries, retries
self.shard_info[shard_num] = last_marker
self.prepared_at = time.time()
def generate_work(self):
return self.shard_work.iteritems()
class MetaSyncerInc(IncrementalSyncer):
def __init__(self, *args, **kwargs):
super(MetaSyncerInc, self).__init__(*args, **kwargs)
self.worker_cls = worker.MetadataWorkerIncremental
self.type = 'metadata'
class DataSyncerInc(IncrementalSyncer):
def __init__(self, *args, **kwargs):
super(DataSyncerInc, self).__init__(*args, **kwargs)
self.worker_cls = worker.DataWorkerIncremental
self.type = 'data'
self.rgw_data_log_window = kwargs.get('rgw_data_log_window', 30)
def wait_until_ready(self):
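        # data log entries can lag behind the actual writes by up to
        # rgw_data_log_window seconds, so give the log time to settle
        # after prepare() before acting on the markers read there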
log.info('waiting to make sure bucket log is consistent')
while time.time() < self.prepared_at + self.rgw_data_log_window:
time.sleep(1)
class DataSyncerFull(Syncer):
def __init__(self, *args, **kwargs):
super(DataSyncerFull, self).__init__(*args, **kwargs)
self.worker_cls = worker.DataWorkerFull
self.type = 'data'
self.rgw_data_log_window = kwargs.get('rgw_data_log_window', 30)
def prepare(self):
self.init_num_shards()
# save data log markers for each shard
self.shard_info = {}
for shard in xrange(self.num_shards):
info = client.get_log_info(self.src_conn, 'data', shard)
# setting an empty marker returns an error
if info['marker']:
self.shard_info[shard] = info['marker']
# get list of buckets after getting any markers to avoid skipping
# entries added before we got the marker info
buckets = client.get_bucket_list(self.src_conn)
self.prepared_at = time.time()
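        # group buckets by the data log shard their names hash to, so
        # each queued work item covers exactly one shard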
self.buckets_by_shard = {}
for bucket in buckets:
shard = self.shard_num_for_key(bucket)
self.buckets_by_shard.setdefault(shard, [])
self.buckets_by_shard[shard].append(bucket)
def generate_work(self):
return self.buckets_by_shard.iteritems()
def wait_until_ready(self):
log.info('waiting to make sure bucket log is consistent')
while time.time() < self.prepared_at + self.rgw_data_log_window:
time.sleep(1)
class MetaSyncerFull(Syncer):
def __init__(self, *args, **kwargs):
super(MetaSyncerFull, self).__init__(*args, **kwargs)
self.worker_cls = worker.MetadataWorkerFull
self.type = 'metadata'
def prepare(self):
try:
self.sections = client.get_metadata_sections(self.src_conn)
except client.HttpError as e:
log.error('Error listing metadata sections: %s', e)
raise
        # grab the latest shard markers and timestamps before we sync
self.shard_info = {}
self.init_num_shards()
for shard_num in xrange(self.num_shards):
info = client.get_log_info(self.src_conn, 'metadata', shard_num)
# setting an empty marker returns an error
if info['marker']:
self.shard_info[shard_num] = info['marker']
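        # group metadata keys by the log shard that 'section:key' hashes to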
self.metadata_by_shard = {}
for section in self.sections:
try:
for key in client.list_metadata_keys(self.src_conn, section):
shard = self.shard_num_for_key(section + ':' + key)
self.metadata_by_shard.setdefault(shard, [])
self.metadata_by_shard[shard].append((section, key))
except client.NotFound:
# no keys of this type exist
continue
except client.HttpError as e:
log.error('Error listing metadata for section %s: %s',
section, e)
raise
def generate_work(self):
return self.metadata_by_shard.iteritems()