This file is indexed.

/usr/share/pyshared/ZODB/blob.py is in python-zodb 1:3.10.5-0ubuntu3.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

##############################################################################
#
# Copyright (c) 2005-2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Blobs
"""

import cPickle
import cStringIO
import base64
import binascii
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import weakref

import zope.interface

import ZODB.interfaces
from ZODB.interfaces import BlobError
from ZODB import utils
from ZODB.POSException import POSKeyError
import persistent

logger = logging.getLogger('ZODB.blob')

BLOB_SUFFIX = ".blob"
SAVEPOINT_SUFFIX = ".spb"

LAYOUT_MARKER = '.layout'
LAYOUTS = {}

valid_modes = 'r', 'w', 'r+', 'a', 'c'

# Threading issues:
# We want to support closing blob files when they are destroyed.
# This introduces a threading issue, since a blob file may be destroyed
# via GC in any thread.


class Blob(persistent.Persistent):
    """A BLOB supports efficient handling of large data within ZODB."""

    zope.interface.implements(ZODB.interfaces.IBlob)

    _p_blob_uncommitted = None  # Filename of the uncommitted (dirty) data
    _p_blob_committed = None    # Filename of the committed data

    readers = writers = None

    def __init__(self, data=None):
        # Raise exception if Blobs are getting subclassed
        # refer to ZODB-Bug No.127182 by Jim Fulton on 2007-07-20
        if (self.__class__ is not Blob):
            raise TypeError('Blobs do not support subclassing.')
        self.__setstate__()
        if data is not None:
            self.open('w').write(data)

    def __setstate__(self, state=None):
        # we use lists here because they allow us to add and remove
        # atomically
        self.readers = []
        self.writers = []

    def __getstate__(self):
        return None

    def _p_deactivate(self):
        # Only ghostify if we are unopened.
        if self.readers or self.writers:
            return
        super(Blob, self)._p_deactivate()

    def _p_invalidate(self):
        # Force-close any open readers or writers.
        # XXX should we warn of this? Maybe?
        if self._p_changed is None:
            return
        for ref in (self.readers or [])+(self.writers or []):
            f = ref()
            if f is not None:
                f.close()

        if (self._p_blob_uncommitted):
            os.remove(self._p_blob_uncommitted)

        super(Blob, self)._p_invalidate()

    def opened(self):
        return bool(self.readers or self.writers)

    def closed(self, f):
        # We use try/except below because another thread might remove
        # the ref after we check it, if the file is GCed.
        for file_refs in (self.readers, self.writers):
            for ref in file_refs:
                if ref() is f:
                    try:
                        file_refs.remove(ref)
                    except ValueError:
                        pass
                    return

    def open(self, mode="r"):
        if mode not in valid_modes:
            raise ValueError("invalid mode", mode)

        if mode == 'c':
            if (self._p_blob_uncommitted
                or
                not self._p_blob_committed
                or
                self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
                ):
                raise BlobError('Uncommitted changes')
            return self._p_jar._storage.openCommittedBlobFile(
                self._p_oid, self._p_serial)

        if self.writers:
            raise BlobError("Already opened for writing.")

        if self.readers is None:
            self.readers = []

        if mode == 'r':
            result = None
            to_open = self._p_blob_uncommitted
            if not to_open:
                to_open = self._p_blob_committed
                if to_open:
                    result = self._p_jar._storage.openCommittedBlobFile(
                        self._p_oid, self._p_serial, self)
                else:
                    self._create_uncommitted_file()
                    to_open = self._p_blob_uncommitted
                    assert to_open

            if result is None:
                result = BlobFile(to_open, mode, self)

            def destroyed(ref, readers=self.readers):
                try:
                    readers.remove(ref)
                except ValueError:
                    pass

            self.readers.append(weakref.ref(result, destroyed))
        else:
            if self.readers:
                raise BlobError("Already opened for reading.")

            if mode == 'w':
                if self._p_blob_uncommitted is None:
                    self._create_uncommitted_file()
                result = BlobFile(self._p_blob_uncommitted, mode, self)
            else: # 'r+' and 'a'
                if self._p_blob_uncommitted is None:
                    # Create a new working copy
                    self._create_uncommitted_file()
                    result = BlobFile(self._p_blob_uncommitted, mode, self)
                    if self._p_blob_committed:
                        utils.cp(open(self._p_blob_committed, 'rb'), result)
                        if mode == 'r+':
                            result.seek(0)
                else:
                    # Re-use existing working copy
                    result = BlobFile(self._p_blob_uncommitted, mode, self)

            def destroyed(ref, writers=self.writers):
                try:
                    writers.remove(ref)
                except ValueError:
                    pass

            self.writers.append(weakref.ref(result, destroyed))

            self._p_changed = True

        return result

    def committed(self):
        if (self._p_blob_uncommitted
            or
            not self._p_blob_committed
            or
            self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
            ):
            raise BlobError('Uncommitted changes')

        result = self._p_blob_committed

        # We do this to make sure we have the file and to let the
        # storage know we're accessing the file.
        n = self._p_jar._storage.loadBlob(self._p_oid, self._p_serial)
        assert result == n, (result, n)

        return result

    def consumeFile(self, filename):
        """Will replace the current data of the blob with the file given under
        filename.
        """
        if self.writers:
            raise BlobError("Already opened for writing.")
        if self.readers:
            raise BlobError("Already opened for reading.")

        previous_uncommitted = bool(self._p_blob_uncommitted)
        if previous_uncommitted:
            # If we have uncommitted data, we move it aside for now
            # in case the consumption doesn't work.
            target = self._p_blob_uncommitted
            target_aside = target+".aside"
            os.rename(target, target_aside)
        else:
            target = self._create_uncommitted_file()
            # We need to unlink the freshly created target again
            # to allow link() to do its job
            os.remove(target)

        try:
            rename_or_copy_blob(filename, target, chmod=False)
        except:
            # Recover from the failed consumption: first remove the file,
            # which might exist, and clear the pointer to the uncommitted
            # file.
            self._p_blob_uncommitted = None
            if os.path.exists(target):
                os.remove(target)

            # If there was a file moved aside, bring it back including the
            # pointer to the uncommitted file.
            if previous_uncommitted:
                os.rename(target_aside, target)
                self._p_blob_uncommitted = target

            # Re-raise the exception to make the application aware of it.
            raise
        else:
            if previous_uncommitted:
                # The relinking worked so we can remove the data that we had
                # set aside.
                os.remove(target_aside)

            # We changed the blob state and have to make sure we join the
            # transaction.
            self._p_changed = True

    # utility methods

    def _create_uncommitted_file(self):
        assert self._p_blob_uncommitted is None, (
            "Uncommitted file already exists.")
        if self._p_jar:
            tempdir = self._p_jar.db()._storage.temporaryDirectory()
        else:
            tempdir = tempfile.gettempdir()
        filename = utils.mktemp(dir=tempdir)
        self._p_blob_uncommitted = filename

        def cleanup(ref):
            if os.path.exists(filename):
                os.remove(filename)

        self._p_blob_ref = weakref.ref(self, cleanup)
        return filename

    def _uncommitted(self):
        # hand uncommitted data to connection, relinquishing responsibility
        # for it.
        filename = self._p_blob_uncommitted
        if filename is None and self._p_blob_committed is None:
            filename = self._create_uncommitted_file()
        self._p_blob_uncommitted = self._p_blob_ref = None
        return filename
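
# An illustrative sketch of typical Blob usage (it assumes an open ZODB
# connection `conn` whose storage supports blobs):
#
#   >>> blob = Blob()
#   >>> f = blob.open('w')          # opening for write joins the transaction
#   >>> f.write('hello, blob')
#   >>> f.close()
#   >>> conn.root()['blob'] = blob
#   >>> import transaction; transaction.commit()
#   >>> blob.open('r').read()       # now served from the committed file
#   'hello, blob'
#
# consumeFile() instead replaces the blob's data with a file already on
# disk, renaming rather than copying when possible (path hypothetical):
#
#   >>> blob.consumeFile('/tmp/already-on-disk.bin')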

class BlobFile(file):
    """A BlobFile that holds a file handle to actual blob data.

    It is a file that can be used within a transaction boundary; a BlobFile is
    just a Python file object, we only override methods which cause a change to
    blob data in order to call methods on our 'parent' persistent blob object
    signifying that the change happened.

    """

    # XXX these files should be created on the same partition as the one
    # the storage later puts them on, to avoid copying them ...

    def __init__(self, name, mode, blob):
        super(BlobFile, self).__init__(name, mode+'b')
        self.blob = blob

    def close(self):
        self.blob.closed(self)
        file.close(self)

_pid = str(os.getpid())

def log(msg, level=logging.INFO, subsys=_pid, exc_info=False):
    message = "(%s) %s" % (subsys, msg)
    logger.log(level, message, exc_info=exc_info)


class FilesystemHelper:
    # Storages that implement IBlobStorage can choose to use this
    # helper class to generate and parse blob filenames.  This is not
    # a set-in-stone interface for all filesystem operations dealing
    # with blobs and storages needn't indirect through this if they
    # want to perform blob storage differently.

    def __init__(self, base_dir, layout_name='automatic'):
        self.base_dir = os.path.abspath(base_dir) + os.path.sep
        self.temp_dir = os.path.join(base_dir, 'tmp')

        if layout_name == 'automatic':
            layout_name = auto_layout_select(base_dir)
        if layout_name == 'lawn':
            log('The `lawn` blob directory layout is deprecated due to '
                'scalability issues on some file systems, please consider '
                'migrating to the `bushy` layout.', level=logging.WARN)
        self.layout_name = layout_name
        self.layout = LAYOUTS[layout_name]

    def create(self):
        if not os.path.exists(self.base_dir):
            os.makedirs(self.base_dir, 0700)
            log("Blob directory '%s' does not exist. "
                "Created new directory." % self.base_dir)
        if not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir, 0700)
            log("Blob temporary directory '%s' does not exist. "
                "Created new directory." % self.temp_dir)

        if not os.path.exists(os.path.join(self.base_dir, LAYOUT_MARKER)):
            layout_marker = open(
                os.path.join(self.base_dir, LAYOUT_MARKER), 'wb')
            layout_marker.write(self.layout_name)
        else:
            layout = open(os.path.join(self.base_dir, LAYOUT_MARKER), 'rb'
                          ).read().strip()
            if layout != self.layout_name:
                raise ValueError(
                    "Directory layout `%s` selected for blob directory %s, but "
                    "marker found for layout `%s`" %
                    (self.layout_name, self.base_dir, layout))

    def isSecure(self, path):
        """Ensure that (POSIX) path mode bits are 0700."""
        return (os.stat(path).st_mode & 077) == 0

    def checkSecure(self):
        if not self.isSecure(self.base_dir):
            log('Blob dir %s has insecure mode setting' % self.base_dir,
                level=logging.WARNING)

    def getPathForOID(self, oid, create=False):
        """Given an OID, return the path on the filesystem where
        the blob data relating to that OID is stored.

        If the create flag is given, the path is also created if it didn't
        exist already.

        """
        # OIDs are numbers and sometimes passed around as integers. For our
        # computations we rely on the 64-bit packed string representation.
        if isinstance(oid, int):
            oid = utils.p64(oid)

        path = self.layout.oid_to_path(oid)
        path = os.path.join(self.base_dir, path)

        if create and not os.path.exists(path):
            try:
                os.makedirs(path, 0700)
            except OSError:
                # We might have lost a race.  If so, the directory
                # must exist now
                assert os.path.exists(path)
        return path

    def getOIDForPath(self, path):
        """Given a path, return an OID, if the path is a valid path for an
        OID. The inverse function to `getPathForOID`.

        Raises ValueError if the path is not valid for an OID.

        """
        path = path[len(self.base_dir):]
        return self.layout.path_to_oid(path)

    def createPathForOID(self, oid):
        """Given an OID, creates a directory on the filesystem where
        the blob data relating to that OID is stored, if it doesn't exist.
        """
        return self.getPathForOID(oid, create=True)

    def getBlobFilename(self, oid, tid):
        """Given an oid and a tid, return the full filename of the
        'committed' blob file related to that oid and tid.

        """
        # TIDs are numbers and sometimes passed around as integers. For our
        # computations we rely on the 64-bit packed string representation
        if isinstance(oid, int):
            oid = utils.p64(oid)
        if isinstance(tid, int):
            tid = utils.p64(tid)
        return os.path.join(self.base_dir,
                            self.layout.getBlobFilePath(oid, tid),
                            )

    def blob_mkstemp(self, oid, tid):
        """Given an oid and a tid, return a temporary file descriptor
        and a related filename.

        The file is guaranteed to exist on the same partition as committed
        data, which is important for being able to rename the file without a
        copy operation.  The directory in which the file will be placed, which
        is the return value of self.getPathForOID(oid), must exist before this
        method may be called successfully.

        """
        oidpath = self.getPathForOID(oid)
        fd, name = tempfile.mkstemp(suffix='.tmp', prefix=utils.tid_repr(tid),
                                    dir=oidpath)
        return fd, name

    def splitBlobFilename(self, filename):
        """Returns the oid and tid for a given blob filename.

        If the filename cannot be recognized as a blob filename, (None, None)
        is returned.

        """
        if not filename.endswith(BLOB_SUFFIX):
            return None, None
        path, filename = os.path.split(filename)
        oid = self.getOIDForPath(path)

        serial = filename[:-len(BLOB_SUFFIX)]
        serial = utils.repr_to_oid(serial)
        return oid, serial

    def getOIDsForSerial(self, search_serial):
        """Return all oids related to a particular tid that exist in
        blob data.

        """
        oids = []
        for oid, oidpath in self.listOIDs():
            for filename in os.listdir(oidpath):
                blob_path = os.path.join(oidpath, filename)
                oid, serial = self.splitBlobFilename(blob_path)
                if search_serial == serial:
                    oids.append(oid)
        return oids

    def listOIDs(self):
        """Iterates over all paths under the base directory that contain blob
        files.
        """
        for path, dirs, files in os.walk(self.base_dir):
            # Make sure we traverse in a stable order. This is mainly to make
            # testing predictable.
            dirs.sort()
            files.sort()
            try:
                oid = self.getOIDForPath(path)
            except ValueError:
                continue
            yield oid, path
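
# Illustrative sketch: a storage using this helper creates its directories
# and maps oid/tid pairs to blob file names (hypothetical POSIX path, with
# the bushy layout selected automatically for a fresh directory):
#
#   >>> from ZODB.utils import p64
#   >>> fshelper = FilesystemHelper('/path/to/blobs')
#   >>> fshelper.create()
#   >>> fshelper.getBlobFilename(p64(1), p64(2))
#   '/path/to/blobs/0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x01/0x02.blob'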


class NoBlobsFileSystemHelper:

    @property
    def temp_dir(self):
        raise TypeError("Blobs are not supported")

    getPathForOID = getBlobFilename = temp_dir


class BlobStorageError(Exception):
    """The blob storage encountered an invalid state."""

def auto_layout_select(path):
    # A heuristic to look at a path and determine which directory layout to
    # use.
    layout_marker = os.path.join(path, LAYOUT_MARKER)
    if os.path.exists(layout_marker):
        layout = open(layout_marker, 'rb').read()
        layout = layout.strip()
        log('Blob directory `%s` has layout marker set. '
            'Selected `%s` layout. ' % (path, layout), level=logging.DEBUG)
    elif not os.path.exists(path):
        log('Blob directory %s does not exist. '
            'Selected `bushy` layout. ' % path)
        layout = 'bushy'
    else:
        # look for a non-hidden file in the directory
        has_files = False
        for name in os.listdir(path):
            if not name.startswith('.'):
                has_files = True
                break
        if not has_files:
            log('Blob directory `%s` is unused and has no layout marker set. '
                'Selected `bushy` layout. ' % path)
            layout = 'bushy'
        else:
            log('Blob directory `%s` is used but has no layout marker set. '
                'Selected `lawn` layout. ' % path)
            layout = 'lawn'
    return layout
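
# For instance (illustrative, path hypothetical), a directory that does not
# exist yet gets the scalable default:
#
#   >>> auto_layout_select('/nonexistent/blob-dir')
#   'bushy'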


class BushyLayout(object):
    """A bushy directory layout for blob directories.

    Creates an 8-level directory structure (one level per byte) in
    big-endian order from the OID of an object.

    """

    blob_path_pattern = re.compile(
        r'(0x[0-9a-f]{1,2}\%s){7,7}0x[0-9a-f]{1,2}$' % os.path.sep)

    def oid_to_path(self, oid):
        directories = []
        # Create the bushy directory structure with the most significant
        # byte first (big-endian, matching the class docstring)
        for byte in str(oid):
            directories.append('0x%s' % binascii.hexlify(byte))
        return os.path.sep.join(directories)

    def path_to_oid(self, path):
        if self.blob_path_pattern.match(path) is None:
            raise ValueError("Not a valid OID path: `%s`" % path)
        path = path.split(os.path.sep)
        # Each path segment stores a byte in hex representation. Turn it into
        # an int and then get the character for our byte string.
        oid = ''.join(binascii.unhexlify(byte[2:]) for byte in path)
        return oid

    def getBlobFilePath(self, oid, tid):
        """Given an oid and a tid, return the full filename of the
        'committed' blob file related to that oid and tid.

        """
        oid_path = self.oid_to_path(oid)
        filename = "%s%s" % (utils.tid_repr(tid), BLOB_SUFFIX)
        return os.path.join(oid_path, filename)
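
# Illustrative round trip (POSIX path separators assumed):
#
#   >>> from ZODB.utils import p64
#   >>> layout = BushyLayout()
#   >>> layout.oid_to_path(p64(255))
#   '0x00/0x00/0x00/0x00/0x00/0x00/0x00/0xff'
#   >>> layout.path_to_oid('0x00/0x00/0x00/0x00/0x00/0x00/0x00/0xff') == p64(255)
#   True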

LAYOUTS['bushy'] = BushyLayout()

class LawnLayout(BushyLayout):
    """A shallow directory layout for blob directories.

    Creates a single level of directories (one for each oid).

    """

    def oid_to_path(self, oid):
        return utils.oid_repr(oid)

    def path_to_oid(self, path):
        try:
            if path == '':
                # This is a special case where repr_to_oid converts '' to the
                # OID z64.
                raise TypeError()
            return utils.repr_to_oid(path)
        except TypeError:
            raise ValueError('Not a valid OID path: `%s`' % path)

LAYOUTS['lawn'] = LawnLayout()
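
# By contrast with the bushy layout, the same OID stays in a single
# directory level (continuing the illustrative sketch above):
#
#   >>> LAYOUTS['lawn'].oid_to_path(p64(255))
#   '0xff'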

class BlobStorageMixin(object):
    """A mix-in to help storages support blobs."""

    def _blob_init(self, blob_dir, layout='automatic'):
        # XXX Log warning if storage is ClientStorage
        self.fshelper = FilesystemHelper(blob_dir, layout)
        self.fshelper.create()
        self.fshelper.checkSecure()
        self.dirty_oids = []

    def _blob_init_no_blobs(self):
        self.fshelper = NoBlobsFileSystemHelper()
        self.dirty_oids = []

    def _blob_tpc_abort(self):
        """Blob cleanup to be called from subclass tpc_abort
        """
        while self.dirty_oids:
            oid, serial = self.dirty_oids.pop()
            clean = self.fshelper.getBlobFilename(oid, serial)
            if os.path.exists(clean):
                remove_committed(clean)

    def _blob_tpc_finish(self):
        """Blob cleanup to be called from subclass tpc_finish
        """
        self.dirty_oids = []

    def registerDB(self, db):
        self.__untransform_record_data = db.untransform_record_data
        try:
            m = super(BlobStorageMixin, self).registerDB
        except AttributeError:
            pass
        else:
            m(db)

    def __untransform_record_data(self, record):
        return record

    def is_blob_record(self, record):
        if record:
            return is_blob_record(self.__untransform_record_data(record))

    def copyTransactionsFrom(self, other):
        copyTransactionsFromTo(other, self)

    def loadBlob(self, oid, serial):
        """Return the filename where the blob file can be found.
        """
        filename = self.fshelper.getBlobFilename(oid, serial)
        if not os.path.exists(filename):
            raise POSKeyError("No blob file", oid, serial)
        return filename

    def openCommittedBlobFile(self, oid, serial, blob=None):
        blob_filename = self.loadBlob(oid, serial)
        if blob is None:
            return open(blob_filename, 'rb')
        else:
            return BlobFile(blob_filename, 'r', blob)

    def restoreBlob(self, oid, serial, data, blobfilename, prev_txn,
                    transaction):
        """Write blob data already committed in a separate database
        """
        self.restore(oid, serial, data, '', prev_txn, transaction)
        self._blob_storeblob(oid, serial, blobfilename)

        return self._tid

    def _blob_storeblob(self, oid, serial, blobfilename):
        self._lock_acquire()
        try:
            self.fshelper.getPathForOID(oid, create=True)
            targetname = self.fshelper.getBlobFilename(oid, serial)
            rename_or_copy_blob(blobfilename, targetname)

            # if oid already in there, something is really hosed.
            # The underlying storage should have complained anyway
            self.dirty_oids.append((oid, serial))
        finally:
            self._lock_release()

    def storeBlob(self, oid, oldserial, data, blobfilename, version,
                  transaction):
        """Stores data that has a BLOB attached."""
        assert not version, "Versions aren't supported."
        serial = self.store(oid, oldserial, data, '', transaction)
        self._blob_storeblob(oid, serial, blobfilename)

        return self._tid

    def temporaryDirectory(self):
        return self.fshelper.temp_dir


class BlobStorage(BlobStorageMixin):
    """A wrapper/proxy storage to support blobs.
    """

    zope.interface.implements(ZODB.interfaces.IBlobStorage)

    def __init__(self, base_directory, storage, layout='automatic'):
        assert not ZODB.interfaces.IBlobStorage.providedBy(storage)
        self.__storage = storage

        self._blob_init(base_directory, layout)
        try:
            supportsUndo = storage.supportsUndo
        except AttributeError:
            supportsUndo = False
        else:
            supportsUndo = supportsUndo()
        self.__supportsUndo = supportsUndo
        self._blobs_pack_is_in_progress = False

        if ZODB.interfaces.IStorageRestoreable.providedBy(storage):
            iblob = ZODB.interfaces.IBlobStorageRestoreable
        else:
            iblob = ZODB.interfaces.IBlobStorage

        zope.interface.directlyProvides(
            self, iblob, zope.interface.providedBy(storage))

    def __getattr__(self, name):
        return getattr(self.__storage, name)

    def __len__(self):
        return len(self.__storage)

    def __repr__(self):
        normal_storage = self.__storage
        return '<BlobStorage proxy for %r at %s>' % (normal_storage,
                                                     hex(id(self)))

    def tpc_finish(self, *arg, **kw):
        # We need to override the base storage's tpc_finish instead of
        # providing a _finish method because methods found on the proxied
        # object aren't rebound to the proxy
        self.__storage.tpc_finish(*arg, **kw)
        self._blob_tpc_finish()

    def tpc_abort(self, *arg, **kw):
        # We need to override the base storage's abort instead of
        # providing an _abort method because methods found on the proxied object
        # aren't rebound to the proxy
        self.__storage.tpc_abort(*arg, **kw)
        self._blob_tpc_abort()

    def _packUndoing(self, packtime, referencesf):
        # Walk over all existing revisions of all blob files and check
        # if they are still needed by attempting to load the revision
        # of that object from the database.  This is maybe the slowest
        # possible way to do this, but it's safe.
        for oid, oid_path in self.fshelper.listOIDs():
            files = os.listdir(oid_path)
            for filename in files:
                filepath = os.path.join(oid_path, filename)
                whatever, serial = self.fshelper.splitBlobFilename(filepath)
                try:
                    self.loadSerial(oid, serial)
                except POSKeyError:
                    remove_committed(filepath)

            if not os.listdir(oid_path):
                shutil.rmtree(oid_path)

    def _packNonUndoing(self, packtime, referencesf):
        for oid, oid_path in self.fshelper.listOIDs():
            exists = True
            try:
                self.load(oid, None) # no version support
            except (POSKeyError, KeyError):
                exists = False

            if exists:
                files = os.listdir(oid_path)
                files.sort()
                latest = files[-1] # depends on ever-increasing tids
                files.remove(latest)
                for file in files:
                    remove_committed(os.path.join(oid_path, file))
            else:
                remove_committed_dir(oid_path)
                continue

            if not os.listdir(oid_path):
                shutil.rmtree(oid_path)

    def pack(self, packtime, referencesf):
        """Remove all unused OID/TID combinations."""
        self._lock_acquire()
        try:
            if self._blobs_pack_is_in_progress:
                raise BlobStorageError('Already packing')
            self._blobs_pack_is_in_progress = True
        finally:
            self._lock_release()

        try:
            # Pack the underlying storage, which will allow us to determine
            # which serials are current.
            unproxied = self.__storage
            result = unproxied.pack(packtime, referencesf)

            # Perform a pack on the blob data.
            if self.__supportsUndo:
                self._packUndoing(packtime, referencesf)
            else:
                self._packNonUndoing(packtime, referencesf)
        finally:
            self._lock_acquire()
            self._blobs_pack_is_in_progress = False
            self._lock_release()

        return result

    def undo(self, serial_id, transaction):
        undo_serial, keys = self.__storage.undo(serial_id, transaction)
        # serial_id is the transaction id of the txn that we wish to undo.
        # "undo_serial" is the transaction id of txn in which the undo is
        # performed.  "keys" is the list of oids that are involved in the
        # undo transaction.

        # The serial_id is assumed to be given to us base-64 encoded
        # (belying the web UI legacy of the ZODB code :-()
        serial_id = base64.decodestring(serial_id+'\n')

        self._lock_acquire()

        try:
            # we get all the blob oids on the filesystem related to the
            # transaction we want to undo.
            for oid in self.fshelper.getOIDsForSerial(serial_id):
                # we want to find the serial id of the previous revision
                # of this blob object.
                load_result = self.loadBefore(oid, serial_id)

                if load_result is None:

                    # There was no previous revision of this blob
                    # object.  The blob was created in the transaction
                    # represented by serial_id.  We copy the blob data
                    # to a new file that references the undo
                    # transaction in case a user wishes to undo this
                    # undo. It would be nice if we had some way to
                    # link to old blobs.
                    orig_fn = self.fshelper.getBlobFilename(oid, serial_id)
                    new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
                else:
                    # A previous revision of this blob existed before the
                    # transaction implied by "serial_id".  We copy the blob
                    # data to a new file that references the undo transaction
                    # in case a user wishes to undo this undo.
                    data, serial_before, serial_after = load_result
                    orig_fn = self.fshelper.getBlobFilename(oid, serial_before)
                    new_fn = self.fshelper.getBlobFilename(oid, undo_serial)
                orig = open(orig_fn, "r")
                new = open(new_fn, "wb")
                utils.cp(orig, new)
                orig.close()
                new.close()
                self.dirty_oids.append((oid, undo_serial))

        finally:
            self._lock_release()
        return undo_serial, keys

    def new_instance(self):
        """Implementation of IMVCCStorage.new_instance.

        This method causes all storage instances to be wrapped with
        a blob storage wrapper.
        """
        base_dir = self.fshelper.base_dir
        s = self.__storage.new_instance()
        res = BlobStorage(base_dir, s)
        return res
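
# A BlobStorage is layered over a base storage; a minimal sketch (file and
# directory names hypothetical):
#
#   >>> from ZODB.FileStorage import FileStorage
#   >>> from ZODB.DB import DB
#   >>> base = FileStorage('Data.fs')
#   >>> blob_storage = BlobStorage('blobs', base)
#   >>> db = DB(blob_storage)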

copied = logging.getLogger('ZODB.blob.copied').debug
def rename_or_copy_blob(f1, f2, chmod=True):
    """Try to rename f1 to f2, fallback to copy.

    Under certain conditions a rename might not work, e.g. because the target
    directory is on a different partition. In this case we try to copy the
    data and remove the old file afterwards.

    """
    try:
        os.rename(f1, f2)
    except OSError:
        copied("Copied blob file %r to %r.", f1, f2)
        file1 = open(f1, 'rb')
        file2 = open(f2, 'wb')
        try:
            utils.cp(file1, file2)
        finally:
            file1.close()
            file2.close()
        remove_committed(f1)
    if chmod:
        os.chmod(f2, stat.S_IREAD)
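
# Illustrative: moving freshly written data into the committed blob area;
# across partitions this silently degrades to copy-and-remove, and the
# committed copy is left read-only (paths hypothetical):
#
#   >>> rename_or_copy_blob('/tmp/upload.tmp', '/blobs/0x00/0x02.blob')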

if sys.platform == 'win32':
    # On Windows, you can't remove read-only files, so make the
    # file writable first.

    def remove_committed(filename):
        os.chmod(filename, stat.S_IWRITE)
        os.remove(filename)

    def remove_committed_dir(path):
        for (dirpath, dirnames, filenames) in os.walk(path):
            for filename in filenames:
                filename = os.path.join(dirpath, filename)
                remove_committed(filename)
        shutil.rmtree(path)

    link_or_copy = shutil.copy
else:
    remove_committed = os.remove
    remove_committed_dir = shutil.rmtree
    link_or_copy = os.link


def find_global_Blob(module, class_):
    if module == 'ZODB.blob' and class_ == 'Blob':
        return Blob

def is_blob_record(record):
    """Check whether a database record is a blob record.

    This is primarily intended to be used when copying data from one
    storage to another.

    """
    if record and ('ZODB.blob' in record):
        unpickler = cPickle.Unpickler(cStringIO.StringIO(record))
        unpickler.find_global = find_global_Blob

        try:
            return unpickler.load() is Blob
        except (MemoryError, KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            pass

    return False
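
# A quick illustration: a record whose first pickle is the Blob class is
# recognized; anything else is not:
#
#   >>> import cPickle
#   >>> is_blob_record(cPickle.dumps(Blob, 1))
#   True
#   >>> is_blob_record(cPickle.dumps('a plain string', 1))
#   False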

def copyTransactionsFromTo(source, destination):
    for trans in source.iterator():
        destination.tpc_begin(trans, trans.tid, trans.status)
        for record in trans:
            blobfilename = None
            if is_blob_record(record.data):
                try:
                    blobfilename = source.loadBlob(record.oid, record.tid)
                except POSKeyError:
                    pass
            if blobfilename is not None:
                fd, name = tempfile.mkstemp(
                    suffix='.tmp', dir=destination.fshelper.temp_dir)
                os.close(fd)
                utils.cp(open(blobfilename, 'rb'), open(name, 'wb'))
                destination.restoreBlob(record.oid, record.tid, record.data,
                                        name, record.data_txn, trans)
            else:
                destination.restore(record.oid, record.tid, record.data,
                                    '', record.data_txn, trans)

        destination.tpc_vote(trans)
        destination.tpc_finish(trans)
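
# Typical use (illustrative; storage and file names hypothetical): clone one
# blob-capable storage into another, blobs included:
#
#   >>> from ZODB.FileStorage import FileStorage
#   >>> src = BlobStorage('blobs-src', FileStorage('src.fs'))
#   >>> dst = BlobStorage('blobs-dst', FileStorage('dst.fs'))
#   >>> copyTransactionsFromTo(src, dst)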