This file is indexed.

/usr/share/pyshared/CedarBackup2/extend/amazons3.py is in cedar-backup2 2.27.0-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# -*- coding: iso-8859-1 -*-
# vim: set ft=python ts=3 sw=3 expandtab:
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#              C E D A R
#          S O L U T I O N S       "Software done right."
#           S O F T W A R E
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2014-2015 Kenneth J. Pronovici.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# Version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Copies of the GNU General Public License are available from
# the Free Software Foundation website, http://www.gnu.org/.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Author   : Kenneth J. Pronovici <pronovic@ieee.org>
# Language : Python 2 (>= 2.7)
# Project  : Official Cedar Backup Extensions
# Purpose  : "Store" type extension that writes data to Amazon S3.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

########################################################################
# Module documentation
########################################################################

"""
Store-type extension that writes data to Amazon S3.

This extension requires a new configuration section <amazons3> and is intended
to be run immediately after the standard stage action, replacing the standard
store action.  Aside from its own configuration, it requires the options and
staging configuration sections in the standard Cedar Backup configuration file.
Since it is intended to replace the store action, it does not rely on any store
configuration.

The underlying functionality relies on the U{AWS CLI interface
<http://aws.amazon.com/documentation/cli/>}.  Before you use this extension,
you need to set up your Amazon S3 account and configure the AWS CLI connection
per Amazon's documentation.  The extension assumes that the backup is being
executed as root, and switches over to the configured backup user to
communicate with AWS.  So, make sure you configure AWS CLI as the backup user
and not root.

You can optionally configure Cedar Backup to encrypt data before sending it
to S3.  To do that, provide a complete command line using the C{${input}} and
C{${output}} variables to represent the original input file and the encrypted
output file.  This command will be executed as the backup user.

For instance, you can use something like this with GPG::

   /usr/bin/gpg -c --no-use-agent --batch --yes --passphrase-file /home/backup/.passphrase -o ${output} ${input}

The GPG mechanism depends on a strong passphrase for security.  One way to
generate a strong passphrase is to use your system random number generator, e.g.::

   dd if=/dev/urandom count=20 bs=1 | xxd -ps

(See U{StackExchange <http://security.stackexchange.com/questions/14867/gpg-encryption-security>}
for more details about that advice.) If you decide to use encryption, make sure
you save off the passphrase in a safe place, so you can get at your backup data
later if you need to.  And obviously, make sure to set permissions on the
passphrase file so it can only be read by the backup user.

This extension was written for and tested on Linux.  It will throw an exception
if run on Windows.

@author: Kenneth J. Pronovici <pronovic@ieee.org>
"""

########################################################################
# Imported modules
########################################################################

# System modules
import sys
import os
import logging
import tempfile
import datetime
import json
import shutil

# Cedar Backup modules
from CedarBackup2.filesystem import FilesystemList, BackupFileList
from CedarBackup2.util import resolveCommand, executeCommand, isRunningAsRoot, changeOwnership, isStartOfWeek
from CedarBackup2.util import displayBytes, UNIT_BYTES
from CedarBackup2.xmlutil import createInputDom, addContainerNode, addBooleanNode, addStringNode
from CedarBackup2.xmlutil import readFirstChild, readString, readBoolean
from CedarBackup2.actions.util import writeIndicatorFile
from CedarBackup2.actions.constants import DIR_TIME_FORMAT, STAGE_INDICATOR
from CedarBackup2.config import ByteQuantity, readByteQuantity, addByteQuantityNode


########################################################################
# Module-wide constants and variables
########################################################################

logger = logging.getLogger("CedarBackup2.log.extend.amazons3")

SU_COMMAND    = [ "su" ]
AWS_COMMAND   = [ "aws" ]

STORE_INDICATOR = "cback.amazons3"


########################################################################
# AmazonS3Config class definition
########################################################################

class AmazonS3Config(object):

   """
   Class representing Amazon S3 configuration.

   Amazon S3 configuration is used for storing backup data in Amazon's S3 cloud
   storage using the AWS CLI (C{aws}) tool.

   The following restrictions exist on data in this class:

      - The s3Bucket value must be a non-empty string
      - The encryptCommand value, if set, must be a non-empty string
      - The full backup size limit, if set, must be a ByteQuantity >= 0
      - The incremental backup size limit, if set, must be a ByteQuantity >= 0

   @sort: __init__, __repr__, __str__, __cmp__, warnMidnite, s3Bucket, encryptCommand, fullBackupSizeLimit, incrementalBackupSizeLimit
   """

   def __init__(self, warnMidnite=None, s3Bucket=None, encryptCommand=None,
                fullBackupSizeLimit=None, incrementalBackupSizeLimit=None):
      """
      Constructor for the C{AmazonS3Config} class.

      @param warnMidnite: Whether to generate warnings for crossing midnite.
      @param s3Bucket: Name of the Amazon S3 bucket in which to store the data
      @param encryptCommand: Command used to encrypt backup data before upload to S3
      @param fullBackupSizeLimit: Maximum size of a full backup, a ByteQuantity
      @param incrementalBackupSizeLimit: Maximum size of an incremental backup, a ByteQuantity

      @raise ValueError: If one of the values is invalid.
      """
      self._warnMidnite = None
      self._s3Bucket = None
      self._encryptCommand = None
      self._fullBackupSizeLimit = None
      self._incrementalBackupSizeLimit = None
      self.warnMidnite = warnMidnite
      self.s3Bucket = s3Bucket
      self.encryptCommand = encryptCommand
      self.fullBackupSizeLimit = fullBackupSizeLimit
      self.incrementalBackupSizeLimit = incrementalBackupSizeLimit

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "AmazonS3Config(%s, %s, %s, %s, %s)" % (self.warnMidnite, self.s3Bucket, self.encryptCommand,
                                                     self.fullBackupSizeLimit, self.incrementalBackupSizeLimit)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def __cmp__(self, other):
      """
      Definition of equals operator for this class.
      @param other: Other object to compare to.
      @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
      """
      if other is None:
         return 1
      if self.warnMidnite != other.warnMidnite:
         if self.warnMidnite < other.warnMidnite:
            return -1
         else:
            return 1
      if self.s3Bucket != other.s3Bucket:
         if self.s3Bucket < other.s3Bucket:
            return -1
         else:
            return 1
      if self.encryptCommand != other.encryptCommand:
         if self.encryptCommand < other.encryptCommand:
            return -1
         else:
            return 1
      if self.fullBackupSizeLimit != other.fullBackupSizeLimit:
         if self.fullBackupSizeLimit < other.fullBackupSizeLimit:
            return -1
         else:
            return 1
      if self.incrementalBackupSizeLimit != other.incrementalBackupSizeLimit:
         if self.incrementalBackupSizeLimit < other.incrementalBackupSizeLimit:
            return -1
         else:
            return 1
      return 0

   def _setWarnMidnite(self, value):
      """
      Property target used to set the midnite warning flag.
      No validations, but we normalize the value to C{True} or C{False}.
      """
      if value:
         self._warnMidnite = True
      else:
         self._warnMidnite = False

   def _getWarnMidnite(self):
      """
      Property target used to get the midnite warning flag.
      """
      return self._warnMidnite

   def _setS3Bucket(self, value):
      """
      Property target used to set the S3 bucket.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("S3 bucket must be non-empty string.")
      self._s3Bucket = value

   def _getS3Bucket(self):
      """
      Property target used to get the S3 bucket.
      """
      return self._s3Bucket

   def _setEncryptCommand(self, value):
      """
      Property target used to set the encrypt command.
      """
      if value is not None:
         if len(value) < 1:
            raise ValueError("Encrypt command must be non-empty string.")
      self._encryptCommand = value

   def _getEncryptCommand(self):
      """
      Property target used to get the encrypt command.
      """
      return self._encryptCommand

   def _setFullBackupSizeLimit(self, value):
      """
      Property target used to set the full backup size limit.
      The value must be a C{ByteQuantity}, or a raw byte count >= 0 that will be wrapped in one.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._fullBackupSizeLimit = None
      else:
         if isinstance(value, ByteQuantity):
            self._fullBackupSizeLimit = value
         else:
            self._fullBackupSizeLimit = ByteQuantity(value, UNIT_BYTES)

   def _getFullBackupSizeLimit(self):
      """
      Property target used to get the full backup size limit.
      """
      return self._fullBackupSizeLimit

   def _setIncrementalBackupSizeLimit(self, value):
      """
      Property target used to set the incremental backup size limit.
      The value must be a C{ByteQuantity}, or a raw byte count >= 0 that will be wrapped in one.
      @raise ValueError: If the value is not valid.
      """
      if value is None:
         self._incrementalBackupSizeLimit = None
      else:
         if isinstance(value, ByteQuantity):
            self._incrementalBackupSizeLimit = value
         else:
            self._incrementalBackupSizeLimit = ByteQuantity(value, UNIT_BYTES)

   def _getIncrementalBackupSizeLimit(self):
      """
      Property target used to get the incremental backup size limit.
      """
      return self._incrementalBackupSizeLimit

   warnMidnite = property(_getWarnMidnite, _setWarnMidnite, None, "Whether to generate warnings for crossing midnite.")
   s3Bucket = property(_getS3Bucket, _setS3Bucket, None, doc="Amazon S3 Bucket in which to store data")
   encryptCommand = property(_getEncryptCommand, _setEncryptCommand, None, doc="Command used to encrypt data before upload to S3")
   fullBackupSizeLimit = property(_getFullBackupSizeLimit, _setFullBackupSizeLimit, None,
                                  doc="Maximum size of a full backup, as a ByteQuantity")
   incrementalBackupSizeLimit = property(_getIncrementalBackupSizeLimit, _setIncrementalBackupSizeLimit, None,
                                         doc="Maximum size of an incremental backup, as a ByteQuantity")


########################################################################
# LocalConfig class definition
########################################################################

class LocalConfig(object):

   """
   Class representing this extension's configuration document.

   This is not a general-purpose configuration object like the main Cedar
   Backup configuration object.  Instead, it just knows how to parse and emit
   amazons3-specific configuration values.  Third parties who need to read and
   write configuration related to this extension should access it through the
   constructor, C{validate} and C{addConfig} methods.

   @note: Lists within this class are "unordered" for equality comparisons.

   @sort: __init__, __repr__, __str__, __cmp__, amazons3, validate, addConfig
   """

   def __init__(self, xmlData=None, xmlPath=None, validate=True):
      """
      Initializes a configuration object.

      If you initialize the object without passing either C{xmlData} or
      C{xmlPath} then configuration will be empty and will be invalid until it
      is filled in properly.

      No reference to the original XML data or original path is saved off by
      this class.  Once the data has been parsed (successfully or not) this
      original information is discarded.

      Unless the C{validate} argument is C{False}, the L{LocalConfig.validate}
      method will be called (with its default arguments) against configuration
      after successfully parsing any passed-in XML.  Keep in mind that even if
      C{validate} is C{False}, it might not be possible to parse the passed-in
      XML document if lower-level validations fail.

      @note: It is strongly suggested that the C{validate} option always be set
      to C{True} (the default) unless there is a specific need to read in
      invalid configuration from disk.

      @param xmlData: XML data representing configuration.
      @type xmlData: String data.

      @param xmlPath: Path to an XML file on disk.
      @type xmlPath: Absolute path to a file on disk.

      @param validate: Validate the document after parsing it.
      @type validate: Boolean true/false.

      @raise ValueError: If both C{xmlData} and C{xmlPath} are passed-in.
      @raise ValueError: If the XML data in C{xmlData} or C{xmlPath} cannot be parsed.
      @raise ValueError: If the parsed configuration document is not valid.
      """
      self._amazons3 = None
      self.amazons3 = None
      if xmlData is not None and xmlPath is not None:
         raise ValueError("Use either xmlData or xmlPath, but not both.")
      if xmlData is not None:
         self._parseXmlData(xmlData)
         if validate:
            self.validate()
      elif xmlPath is not None:
         with open(xmlPath) as f:
            xmlData = f.read()
         self._parseXmlData(xmlData)
         if validate:
            self.validate()

   def __repr__(self):
      """
      Official string representation for class instance.
      """
      return "LocalConfig(%s)" % (self.amazons3)

   def __str__(self):
      """
      Informal string representation for class instance.
      """
      return self.__repr__()

   def __cmp__(self, other):
      """
      Definition of equals operator for this class.
      Lists within this class are "unordered" for equality comparisons.
      @param other: Other object to compare to.
      @return: -1/0/1 depending on whether self is C{<}, C{=} or C{>} other.
      """
      if other is None:
         return 1
      if self.amazons3 != other.amazons3:
         if self.amazons3 < other.amazons3:
            return -1
         else:
            return 1
      return 0

   def _setAmazonS3(self, value):
      """
      Property target used to set the amazons3 configuration value.
      If not C{None}, the value must be a C{AmazonS3Config} object.
      @raise ValueError: If the value is not a C{AmazonS3Config}
      """
      if value is None:
         self._amazons3 = None
      else:
         if not isinstance(value, AmazonS3Config):
            raise ValueError("Value must be a C{AmazonS3Config} object.")
         self._amazons3 = value

   def _getAmazonS3(self):
      """
      Property target used to get the amazons3 configuration value.
      """
      return self._amazons3

   amazons3 = property(_getAmazonS3, _setAmazonS3, None, "AmazonS3 configuration in terms of a C{AmazonS3Config} object.")

   def validate(self):
      """
      Validates configuration represented by the object.

      AmazonS3 configuration must be filled in.  Within that, the s3Bucket value must be filled in.

      @raise ValueError: If one of the validations fails.
      """
      if self.amazons3 is None:
         raise ValueError("AmazonS3 section is required.")
      if self.amazons3.s3Bucket is None:
         raise ValueError("AmazonS3 s3Bucket must be set.")

   def addConfig(self, xmlDom, parentNode):
      """
      Adds an <amazons3> configuration section as the next child of a parent.

      Third parties should use this function to write configuration related to
      this extension.

      We add the following fields to the document::

         warnMidnite                 //cb_config/amazons3/warn_midnite
         s3Bucket                    //cb_config/amazons3/s3_bucket
         encryptCommand              //cb_config/amazons3/encrypt
         fullBackupSizeLimit         //cb_config/amazons3/full_size_limit
         incrementalBackupSizeLimit  //cb_config/amazons3/incr_size_limit

      @param xmlDom: DOM tree as from C{impl.createDocument()}.
      @param parentNode: Parent that the section should be appended to.
      """
      if self.amazons3 is not None:
         sectionNode = addContainerNode(xmlDom, parentNode, "amazons3")
         addBooleanNode(xmlDom, sectionNode, "warn_midnite", self.amazons3.warnMidnite)
         addStringNode(xmlDom, sectionNode, "s3_bucket", self.amazons3.s3Bucket)
         addStringNode(xmlDom, sectionNode, "encrypt", self.amazons3.encryptCommand)
         addByteQuantityNode(xmlDom, sectionNode, "full_size_limit", self.amazons3.fullBackupSizeLimit)
         addByteQuantityNode(xmlDom, sectionNode, "incr_size_limit", self.amazons3.incrementalBackupSizeLimit)

   def _parseXmlData(self, xmlData):
      """
      Internal method to parse an XML string into the object.

      This method parses the XML document into a DOM tree (C{xmlDom}) and then
      calls a static method to parse the amazons3 configuration section.

      @param xmlData: XML data to be parsed
      @type xmlData: String data

      @raise ValueError: If the XML cannot be successfully parsed.
      """
      (xmlDom, parentNode) = createInputDom(xmlData)
      self._amazons3 = LocalConfig._parseAmazonS3(parentNode)

   @staticmethod
   def _parseAmazonS3(parent):
      """
      Parses an amazons3 configuration section.

      We read the following individual fields::

         warnMidnite                 //cb_config/amazons3/warn_midnite
         s3Bucket                    //cb_config/amazons3/s3_bucket
         encryptCommand              //cb_config/amazons3/encrypt
         fullBackupSizeLimit         //cb_config/amazons3/full_size_limit
         incrementalBackupSizeLimit  //cb_config/amazons3/incr_size_limit

      @param parent: Parent node to search beneath.

      @return: C{AmazonS3Config} object or C{None} if the section does not exist.
      @raise ValueError: If some filled-in value is invalid.
      """
      amazons3 = None
      section = readFirstChild(parent, "amazons3")
      if section is not None:
         amazons3 = AmazonS3Config()
         amazons3.warnMidnite = readBoolean(section, "warn_midnite")
         amazons3.s3Bucket = readString(section, "s3_bucket")
         amazons3.encryptCommand = readString(section, "encrypt")
         amazons3.fullBackupSizeLimit = readByteQuantity(section, "full_size_limit")
         amazons3.incrementalBackupSizeLimit = readByteQuantity(section, "incr_size_limit")
      return amazons3
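
# A minimal round-trip sketch for LocalConfig (path illustrative).  Parsing
# validates by default, so a missing <amazons3> section or s3_bucket raises
# ValueError; addConfig() emits exactly the fields _parseAmazonS3() reads:
#
#    local = LocalConfig(xmlPath="/etc/cback.conf")
#    print local.amazons3.s3Bucket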


########################################################################
# Public functions
########################################################################

###########################
# executeAction() function
###########################

def executeAction(configPath, options, config):
   """
   Executes the amazons3 backup action.

   @param configPath: Path to configuration file on disk.
   @type configPath: String representing a path on disk.

   @param options: Program command-line options.
   @type options: Options object.

   @param config: Program configuration.
   @type config: Config object.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there are I/O problems reading or writing files
   """
   logger.debug("Executing amazons3 extended action.")
   if not isRunningAsRoot():
      logger.error("Error: the amazons3 extended action must be run as root.")
      raise ValueError("The amazons3 extended action must be run as root.")
   if sys.platform == "win32":
      logger.error("Error: the amazons3 extended action is not supported on Windows.")
      raise ValueError("The amazons3 extended action is not supported on Windows.")
   if config.options is None or config.stage is None:
      raise ValueError("Cedar Backup configuration is not properly filled in.")
   local = LocalConfig(xmlPath=configPath)
   stagingDirs = _findCorrectDailyDir(options, config, local)
   _applySizeLimits(options, config, local, stagingDirs)
   _writeToAmazonS3(config, local, stagingDirs)
   _writeStoreIndicator(config, stagingDirs)
   logger.info("Executed the amazons3 extended action successfully.")


########################################################################
# Private utility functions
########################################################################

#########################
# _findCorrectDailyDir()
#########################

def _findCorrectDailyDir(options, config, local):
   """
   Finds the correct daily staging directory to be written to Amazon S3.

   This is substantially similar to the same function in store.py.  The
   main difference is that it doesn't rely on store configuration at all.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.

   @return: Correct staging dir, as a dict mapping directory to date suffix.
   @raise IOError: If the staging directory cannot be found.
   """
   oneDay = datetime.timedelta(days=1)
   today = datetime.date.today()
   yesterday = today - oneDay
   tomorrow = today + oneDay
   todayDate = today.strftime(DIR_TIME_FORMAT)
   yesterdayDate = yesterday.strftime(DIR_TIME_FORMAT)
   tomorrowDate = tomorrow.strftime(DIR_TIME_FORMAT)
   todayPath = os.path.join(config.stage.targetDir, todayDate)
   yesterdayPath = os.path.join(config.stage.targetDir, yesterdayDate)
   tomorrowPath = os.path.join(config.stage.targetDir, tomorrowDate)
   todayStageInd = os.path.join(todayPath, STAGE_INDICATOR)
   yesterdayStageInd = os.path.join(yesterdayPath, STAGE_INDICATOR)
   tomorrowStageInd = os.path.join(tomorrowPath, STAGE_INDICATOR)
   todayStoreInd = os.path.join(todayPath, STORE_INDICATOR)
   yesterdayStoreInd = os.path.join(yesterdayPath, STORE_INDICATOR)
   tomorrowStoreInd = os.path.join(tomorrowPath, STORE_INDICATOR)
   if options.full:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
         return { todayPath:todayDate }
      raise IOError("Unable to find staging directory to process (only tried today due to full option).")
   else:
      if os.path.isdir(todayPath) and os.path.exists(todayStageInd) and not os.path.exists(todayStoreInd):
         logger.info("Amazon S3 process will use current day's staging directory [%s]", todayPath)
         return { todayPath:todayDate }
      elif os.path.isdir(yesterdayPath) and os.path.exists(yesterdayStageInd) and not os.path.exists(yesterdayStoreInd):
         logger.info("Amazon S3 process will use previous day's staging directory [%s]", yesterdayPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { yesterdayPath:yesterdayDate }
      elif os.path.isdir(tomorrowPath) and os.path.exists(tomorrowStageInd) and not os.path.exists(tomorrowStoreInd):
         logger.info("Amazon S3 process will use next day's staging directory [%s]", tomorrowPath)
         if local.amazons3.warnMidnite:
            logger.warn("Warning: Amazon S3 process crossed midnite boundary to find data.")
         return { tomorrowPath:tomorrowDate }
      raise IOError("Unable to find unused staging directory to process (tried today, yesterday, tomorrow).")


##############################
# _applySizeLimits() function
##############################

def _applySizeLimits(options, config, local, stagingDirs):
   """
   Apply size limits, throwing an exception if any limits are exceeded.

   Size limits are optional.  If a limit is set to None, it does not apply.
   The full size limit applies if the full option is set or if today is the
   start of the week.  The incremental size limit applies otherwise.  Limits
   are applied to the total size of all the relevant staging directories.

   @param options: Options object.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise ValueError: If a size limit has been exceeded
   """
   if options.full or isStartOfWeek(config.options.startingDay):
      logger.debug("Using Amazon S3 size limit for full backups.")
      limit = local.amazons3.fullBackupSizeLimit
   else:
      logger.debug("Using Amazon S3 size limit for incremental backups.")
      limit = local.amazons3.incrementalBackupSizeLimit
   if limit is None:
      logger.debug("No Amazon S3 size limit will be applied.")
   else:
      logger.debug("Amazon S3 size limit is: %s", limit)
      contents = BackupFileList()
      for stagingDir in stagingDirs:
         contents.addDirContents(stagingDir)
      total = contents.totalSize()
      logger.debug("Amazon S3 backup size is: %s", displayBytes(total))
      if total > limit.bytes:
         logger.error("Amazon S3 size limit exceeded: %s > %s", displayBytes(total), limit)
         raise ValueError("Amazon S3 size limit exceeded: %s > %s" % (displayBytes(total), limit))
      else:
         logger.info("Total size does not exceed Amazon S3 size limit, so backup can continue.")


##############################
# _writeToAmazonS3() function
##############################

def _writeToAmazonS3(config, local, stagingDirs):
   """
   Writes the indicated staging directories to an Amazon S3 bucket.

   Each of the staging directories listed in C{stagingDirs} will be written to
   the configured Amazon S3 bucket from local configuration.  The directories
   will be placed into the image at the root by date, so staging directory
   C{/opt/stage/2005/02/10} will be placed into the S3 bucket at C{/2005/02/10}.
   If an encrypt command is provided, the files will be encrypted first.

   @param config: Config object.
   @param local: Local config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.

   @raise ValueError: Under many generic error conditions
   @raise IOError: If there is a problem writing to Amazon S3
   """
   for stagingDir in stagingDirs.keys():
      logger.debug("Storing stage directory to Amazon S3 [%s].", stagingDir)
      dateSuffix = stagingDirs[stagingDir]
      s3BucketUrl = "s3://%s/%s" % (local.amazons3.s3Bucket, dateSuffix)
      logger.debug("S3 bucket URL is [%s]", s3BucketUrl)
      _clearExistingBackup(config, s3BucketUrl)
      if local.amazons3.encryptCommand is None:
         logger.debug("Encryption is disabled; files will be uploaded in cleartext.")
         _uploadStagingDir(config, stagingDir, s3BucketUrl)
         _verifyUpload(config, stagingDir, s3BucketUrl)
      else:
         logger.debug("Encryption is enabled; files will be uploaded after being encrypted.")
         encryptedDir = tempfile.mkdtemp(dir=config.options.workingDir)
         changeOwnership(encryptedDir, config.options.backupUser, config.options.backupGroup)
         try:
            _encryptStagingDir(config, local, stagingDir, encryptedDir)
            _uploadStagingDir(config, encryptedDir, s3BucketUrl)
            _verifyUpload(config, encryptedDir, s3BucketUrl)
         finally:
            if os.path.exists(encryptedDir):
               shutil.rmtree(encryptedDir)


##################################
# _writeStoreIndicator() function
##################################

def _writeStoreIndicator(config, stagingDirs):
   """
   Writes a store indicator file into staging directories.
   @param config: Config object.
   @param stagingDirs: Dictionary mapping directory path to date suffix.
   """
   for stagingDir in stagingDirs.keys():
      writeIndicatorFile(stagingDir, STORE_INDICATOR,
                         config.options.backupUser,
                         config.options.backupGroup)


##################################
# _clearExistingBackup() function
##################################

def _clearExistingBackup(config, s3BucketUrl):
   """
   Clear any existing backup files for an S3 bucket URL.
   @param config: Config object.
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
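   # The removal runs as the backup user, so the effective command looks
   # roughly like: su backup -c "aws s3 rm --recursive s3://mybucket/2005/02/10/"
   # (user, bucket and date illustrative)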
   actualCommand = "%s s3 rm --recursive %s/" % (awsCommand[0], s3BucketUrl)
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to clear existing backup for [%s]." % (result, s3BucketUrl))
   logger.debug("Completed clearing any existing backup in S3 for [%s]", s3BucketUrl)


###############################
# _uploadStagingDir() function
###############################

def _uploadStagingDir(config, stagingDir, s3BucketUrl):
   """
   Upload the contents of a staging directory out to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to upload
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   # The version of awscli in Debian stretch (1.11.13-1) has a problem
   # uploading empty files, due to running with Python 3 rather than Python 2
   # as the upstream maintainers intended.  To work around this, I'm explicitly
   # excluding files like cback.stage, cback.collect, etc. which should be the
   # only empty files we ever try to copy.  See: https://github.com/aws/aws-cli/issues/2403
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   actualCommand = "%s s3 cp --recursive --exclude \"*cback.*\" %s/ %s/" % (awsCommand[0], stagingDir, s3BucketUrl)
   result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI to upload staging directory to [%s]." % (result, s3BucketUrl))
   logger.debug("Completed uploading staging dir [%s] to [%s]", stagingDir, s3BucketUrl)


###########################
# _verifyUpload() function
###########################

def _verifyUpload(config, stagingDir, s3BucketUrl):
   """
   Verify that a staging directory was properly uploaded to the Amazon S3 cloud.
   @param config: Config object.
   @param stagingDir: Staging directory to verify
   @param s3BucketUrl: S3 bucket URL associated with the staging directory
   """
   (bucket, prefix) = s3BucketUrl.replace("s3://", "").split("/", 1)
   suCommand = resolveCommand(SU_COMMAND)
   awsCommand = resolveCommand(AWS_COMMAND)
   query = "Contents[].{Key: Key, Size: Size}"
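   # The JMESPath query yields a JSON list of key/size pairs, roughly:
   #    [ {"Key": "2005/02/10/file.tar.gz", "Size": 1024}, ... ]
   # (values illustrative); the loop below indexes the sizes by key.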
   actualCommand = "%s s3api list-objects --bucket %s --prefix %s --query '%s'" % (awsCommand[0], bucket, prefix, query)
   (result, data) = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand], returnOutput=True)
   if result != 0:
      raise IOError("Error [%d] calling AWS CLI verify upload to [%s]." % (result, s3BucketUrl))
   contents = { }
   for entry in json.loads("".join(data)):
      key = entry["Key"].replace(prefix, "")
      size = long(entry["Size"])
      contents[key] = size
   files = FilesystemList()
   files.excludeBasenamePatterns = [ r"cback\..*", ]  # because these are excluded from the upload
   files.addDirContents(stagingDir)
   for entry in files:
      if os.path.isfile(entry):
         key = entry.replace(stagingDir, "")
         size = long(os.stat(entry).st_size)
         if key not in contents:
            raise IOError("File was apparently not uploaded: [%s]" % entry)
         else:
            if size != contents[key]:
               raise IOError("File size differs [%s], expected %s bytes but got %s bytes" % (entry, size, contents[key]))
   logger.debug("Completed verifying upload from [%s] to [%s].", stagingDir, s3BucketUrl)


################################
# _encryptStagingDir() function
################################

def _encryptStagingDir(config, local, stagingDir, encryptedDir):
   """
   Encrypt a staging directory, creating a new directory in the process.
   @param config: Config object.
   @param local: Local config object.
   @param stagingDir: Staging directory to use as source
   @param encryptedDir: Target directory into which encrypted files should be written
   """
   suCommand = resolveCommand(SU_COMMAND)
   files = FilesystemList()
   files.addDirContents(stagingDir)
   for cleartext in files:
      if os.path.isfile(cleartext):
         encrypted = "%s%s" % (encryptedDir, cleartext.replace(stagingDir, ""))
         if long(os.stat(cleartext).st_size) == 0:
            open(encrypted, 'a').close() # don't bother encrypting empty files
         else:
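            # With the GPG example from the module docstring, the substituted
            # command looks roughly like (paths illustrative):
            #    /usr/bin/gpg ... -o /tmp/tmpXXXX/file.tar.gz.gpg /opt/stage/2005/02/10/file.tar.gz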
            actualCommand = local.amazons3.encryptCommand.replace("${input}", cleartext).replace("${output}", encrypted)
            subdir = os.path.dirname(encrypted)
            if not os.path.isdir(subdir):
               os.makedirs(subdir)
               changeOwnership(subdir, config.options.backupUser, config.options.backupGroup)
            result = executeCommand(suCommand, [config.options.backupUser, "-c", actualCommand])[0]
            if result != 0:
               raise IOError("Error [%d] encrypting [%s]." % (result, cleartext))
   logger.debug("Completed encrypting staging directory [%s] into [%s]", stagingDir, encryptedDir)