/usr/share/pyshared/neo/io/hdf5io.py is in python-neo 0.3.3-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# -*- coding: utf-8 -*-
"""
README
================================================================================
This is the implementation of the NEO IO for HDF5 files.
http://neuralensemble.org/

IO dependencies:
 - NEO
 - numpy
 - quantities
 - pytables >= 2.2


Quick reference:
================================================================================
The class NeoHdf5IO() provides the methods get(), save() and delete(). It acts
as a connection manager for an HDF5 file, allowing you to store (save()) and
retrieve (get()) runtime NEO objects.

Start by initializing the IO:

>>> from neo.io.hdf5io import NeoHdf5IO
>>> iom = NeoHdf5IO('myfile.h5')
>>> iom
<hdf5io.NeoHdf5IO object at 0x7f291ebe6810>

Now you may save any of your neo objects into the file:

>>> b = Block()
>>> iom.write_block(b)

or just do

>>> iom.save(b)

Once an object has been stored, it receives a unique "path" inside the HDF5
file: the exact place in the HDF5 hierarchy where it was written. This
information is accessible through the "hdf5_path" property:

>>> b.hdf5_path
'/block_0'

You may save more complicated NEO structures, with relations and arrays:

>>> import numpy as np
>>> import quantities as pq
>>> s = Segment()
>>> b.segments.append(s)
>>> a1 = AnalogSignal(signal=np.random.rand(300), t_start=42*pq.ms)
>>> s.analogsignals.append(a1)

and then

>>> iom.write_block(b)

or just

>>> iom.save(b)

If you already have an HDF5 file in NEO format, or have just created one, you
can read NEO data back by providing the path of the object to read:

>>> b1 = iom.read_block("/block_0")
>>> b1
<neo.core.block.Block object at 0x34ee590>

or just use

>>> b1 = iom.get("/block_0")

Note that, by default, the reading function retrieves all available data,
including all downstream relations and arrays:

>>> b1.segments
[<neo.core.segment.Segment object at 0x34ee750>]
>>> b1.segments[0].analogsignals[0].signal
array([  3.18987819e-01,   1.08448284e-01,   1.03858980e-01,
        ...
         3.78908705e-01,   3.08669731e-02,   9.48965785e-01]) * dimensionless

When you need to save time and memory, you may load an object without its
relations

>>> b2 = iom.get("/block_0", cascade=False)
>>> b2.segments
[]

and/or even without arrays

>>> a2 = iom.get("/block_0/segments/segment_0/analogsignals/analogsignal_0",
...              lazy=True)
>>> a2.signal
[]

These functions return "pure" NEO objects. They are completely "detached" from
the HDF5 file - changes to the runtime objects will not cause any changes in the
file:

>>> a2.t_start
array(42.0) * ms
>>> a2.t_start = 32 * pq.ms
>>> a2.t_start
array(32.0) * ms
>>> iom.get("/block_0/segments/segment_0/analogsignals/analogsignal_0").t_start
array(42.0) * ms

However, if you want to work directly on the HDF5 storage and make instant
modifications, you may use the native PyTables functionality, where all objects
are accessible through "<IO_manager_inst>._data.root":

>>> iom._data.root
/ (RootGroup) 'neo.h5'
  children := ['block_0' (Group)]
>>> b3 = iom._data.root.block_0
>>> b3
/block_0 (Group) ''
  children := ['recordingchannelgroups' (Group), 'segments' (Group)]

To understand more about this "direct" way of working with data, please refer to
http://www.pytables.org/

Finally, you may get an overview of the contents of the file by running:

>>> iom.get_info()
This is a neo.HDF5 file. It contains:
{'spiketrain': 0, 'irsaanalogsignal': 0, 'analogsignalarray': 0,
'recordingchannelgroup': 0, 'eventarray': 0, 'analogsignal': 1, 'epoch': 0,
'unit': 0, 'recordingchannel': 0, 'spike': 0, 'epocharray': 0, 'segment': 1,
'event': 0, 'block': 1}


The general structure of the file:
================================================================================

\'Block_1'
\
\'Block_2'
    \
     \---'recordingchannelgroups'
     \           \
     \            \---'RecordingChannelGroup_1'
     \            \
     \            \---'RecordingChannelGroup_2'
     \                       \
     \                        \---'recordingchannels'
     \                                   \
     \                                    \---'RecordingChannel_1'
     \                                    \
     \                                    \---'RecordingChannel_2'
     \                                           \
     \                                            \---'units'
     \                                                   \
     \                                                    \---'Unit_1'
     \                                                    \
     \                                                    \---'Unit_2'
     \
     \---'segments'
            \
             \--'Segment_1'
             \
             \--'Segment_2'
                    \
                     \---'epochs'
                     \       \
                     \        \---'Epoch_1'
                     \
                     \---'events'
                   etc.

Plans for future extensions:
================================================================================
#FIXME - implement logging mechanism (probably in general for NEO)
#FIXME - implement actions history (probably in general for NEO)
#FIXME - implement callbacks in functions for GUIs
#FIXME - no performance testing yet

IMPORTANT things:
================================================================================
1. Every NEO node object in HDF5 has a "_type" attribute. Please do not modify
it.
2. The attribute names "unit__<quantity>" and "<name>__<quantity>" are reserved
for storing the units of quantity attributes.
3. Don't use "__" in attribute names, as this symbol is reserved for
quantities.
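
For illustration, here is a sketch of how such unit attributes can be
inspected (the exact path and attribute name depend on your data; here we
assume the AnalogSignal saved above, whose "t_start" was 42 ms):

>>> arr = iom._data.getNode("/block_0/segments/segment_0/analogsignals/"
...                         "analogsignal_0/t_start")
>>> arr._f_getAttr("unit__millisecond")
1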


Author: asobolev
"""

# needed for python 3 compatibility
from __future__ import absolute_import

import logging
import uuid
from functools import wraps

#version checking
from distutils import version

import numpy as np
import quantities as pq

# check tables
try:
    import tables as tb
except ImportError as err:
    HAVE_TABLES = False
    TABLES_ERR = err
else:
    if version.LooseVersion(tb.__version__) < '2.2':
        HAVE_TABLES = False
        TABLES_ERR = ImportError("your pytables version is too old to " +
                                 "support NeoHdf5IO, you need at least 2.2. " +
                                 "You have %s" % tb.__version__)
    else:
        HAVE_TABLES = True
        TABLES_ERR = None

from neo.core import Block
from neo.description import (class_by_name, name_by_class,
                             classes_inheriting_quantities,
                             classes_necessary_attributes,
                             classes_recommended_attributes,
                             many_to_many_relationship,
                             many_to_one_relationship,
                             one_to_many_relationship)
from neo.io.baseio import BaseIO
from neo.io.tools import create_many_to_one_relationship, LazyList

logger = logging.getLogger("Neo")


def _func_wrapper(func):
    """ Wraps an IO method so that IOErrors carry a more helpful message. """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except IOError:
            raise IOError("There is no connection with the file or the file "
                          "was recently corrupted. Please reload the IO "
                          "manager.")
    return wrapper


#---------------------------------------------------------------
# Basic I/O manager, implementing basic I/O functionality
#---------------------------------------------------------------
all_objects = list(class_by_name.values())
all_objects.remove(Block)
all_objects = [Block] + all_objects  # the order is important: Block comes first

# Types where an object might have to be loaded multiple times to create
# all relationships
complex_relationships = ["Unit", "Segment", "RecordingChannel"]

# Data objects which have multiple parents (Segment and one other)
multi_parent = {'AnalogSignal': 'RecordingChannel',
                'AnalogSignalArray': 'RecordingChannelGroup',
                'IrregularlySampledSignal': 'RecordingChannel',
                'Spike': 'Unit', 'SpikeTrain': 'Unit'}

# Arrays node names for lazy shapes
lazy_shape_arrays = {'SpikeTrain': 'times', 'Spike': 'waveform',
                     'AnalogSignal': 'signal',
                     'AnalogSignalArray': 'signal',
                     'EventArray': 'times', 'EpochArray': 'times'}


class NeoHdf5IO(BaseIO):
    """
    The IO Manager is the core I/O class for HDF5 / NEO. It handles the
    connection with the HDF5 file, and uses PyTables for data operations. Use
    this class to get (load), insert or delete NEO objects to HDF5 file.
    """
    supported_objects = all_objects
    readable_objects = all_objects
    writeable_objects = all_objects
    read_params = dict((obj, []) for obj in all_objects)
    write_params = dict((obj, []) for obj in all_objects)
    name = 'NeoHdf5 IO'
    extensions = ['h5']
    mode = 'file'
    is_readable = True
    is_writable = True

    def __init__(self, filename=None, **kwargs):
        if not HAVE_TABLES:
            raise TABLES_ERR
        BaseIO.__init__(self, filename=filename)
        self.connected = False
        self.objects_by_ref = {}  # Loaded objects by reference id
        self.parent_paths = {}  # Tuples of (Segment, other parent) paths
        self.name_indices = {}
        if filename:
            self.connect(filename=filename)

    def _read_entity(self, path="/", cascade=True, lazy=False):
        """
        Wrapper for base io "reader" functions.
        """
        ob = self.get(path, cascade, lazy)
        if cascade and cascade != 'lazy':
            create_many_to_one_relationship(ob)
        return ob

    def _write_entity(self, obj, where="/", cascade=True, lazy=False):
        """
        Wrapper for base io "writer" functions.
        """
        self.save(obj, where, cascade, lazy)

    #-------------------------------------------
    # IO connectivity / Session management
    #-------------------------------------------

    def connect(self, filename):
        """
        Opens or initialises a new HDF5 file.
        We rely on PyTables and keep all session management stuff there.
        """
        if not self.connected:
            try:
                if tb.isHDF5File(filename):
                    self._data = tb.openFile(filename, mode="a", title=filename)
                    self.connected = True
                else:
                    raise TypeError('"%s" is not an HDF5 file.' % filename)
            except IOError:
                # create a new file if the specified file was not found
                self._data = tb.openFile(filename, mode="w", title=filename)
                self.connected = True
            except Exception:
                raise NameError("Incorrect file path, couldn't find or create a file.")
            self.objects_by_ref = {}
            self.name_indices = {}
        else:
            logger.info("Already connected.")

    def close(self):
        """
        Closes the connection.
        """
        self.objects_by_ref = {}
        self.parent_paths = {}
        self.name_indices = {}
        self._data.close()
        self.connected = False

    #-------------------------------------------
    # some internal IO functions
    #-------------------------------------------

    def _get_class_by_node(self, node):
        """
        Returns the NEO class of the object stored in the given node, or None
        if the node does not hold a NEO object (an "alien" node).
        """
        try:
            obj_type = node._f_getAttr("_type")
            return class_by_name[obj_type]
        except (AttributeError, KeyError):
            return None

    def _update_path(self, obj, node):
        setattr(obj, "hdf5_path", node._v_pathname)

    def _get_next_name(self, obj_type, where):
        """
        Returns the next possible name within a given container (group)
        """
        if not (obj_type, where) in self.name_indices:
            self.name_indices[(obj_type, where)] = 0

        index_num = self.name_indices[(obj_type, where)]
        prefix = str(obj_type) + "_"
        if where + '/' + prefix + str(index_num) not in self._data:
            self.name_indices[(obj_type, where)] = index_num + 1
            return prefix + str(index_num)

        nodes = []
        for node in self._data.iterNodes(where):
            index = node._v_name[node._v_name.find(prefix) + len(prefix):]
            if len(index) > 0:
                try:
                    nodes.append(int(index))
                except ValueError:
                    pass # index was changed by user, but then we don't care
        nodes.sort(reverse=True)
        if len(nodes) > 0:
            self.name_indices[(obj_type, where)] = nodes[0] + 2
            return prefix + str(nodes[0] + 1)
        else:
            self.name_indices[(obj_type, where)] = 1
            return prefix + "0"

    #-------------------------------------------
    # general IO functions, for all NEO objects
    #-------------------------------------------

    @_func_wrapper
    def save(self, obj, where="/", cascade=True, lazy=False):
        """ Saves changes of a given object to the file. Saves object as new at
        location "where" if it is not in the file yet. Returns saved node.

        cascade: True/False process downstream relationships
        lazy: True/False process any quantity/ndarray attributes """

        def assign_attribute(obj_attr, attr_name, path, node):
            """ subfunction to serialize a given attribute """
            if isinstance(obj_attr, (pq.Quantity, np.ndarray)):
                if not lazy:
                    # we need to simplify custom quantities
                    if isinstance(obj_attr, pq.Quantity):
                        for un in obj_attr.dimensionality.keys():
                            if un.name not in pq.units.__dict__ or \
                                    not isinstance(pq.units.__dict__[un.name], pq.Quantity):
                                obj_attr = obj_attr.simplified
                                break

                    # we create the new array under a temporary name first, so
                    # as not to lose the data in case of any failure
                    if obj_attr.size == 0:
                        atom = tb.Float64Atom(shape=(1,))
                        new_arr = self._data.createEArray(path, attr_name + "__temp", atom, shape=(0,), expectedrows=1)
                    else:
                        new_arr = self._data.createArray(path, attr_name + "__temp", obj_attr)

                    if hasattr(obj_attr, "dimensionality"):
                        for un in obj_attr.dimensionality.items():
                            new_arr._f_setAttr("unit__" + un[0].name, un[1])
                    try:
                        self._data.removeNode(path, attr_name)
                    except tb.NoSuchNodeError:
                        pass  # there is no array yet or the object is new
                    # rename "<attr>__temp" to its final name "<attr>"
                    self._data.renameNode(path, attr_name, name=attr_name + "__temp")
            elif obj_attr is not None:
                node._f_setAttr(attr_name, obj_attr)

        #assert_neo_object_is_compliant(obj)
        obj_type = name_by_class[obj.__class__]
        if self._data.mode != 'w' and hasattr(obj, "hdf5_path"): # this is an update case
            path = str(obj.hdf5_path)
            try:
                node = self._data.getNode(obj.hdf5_path)
            except tb.NoSuchNodeError:  # create a new node?
                raise LookupError("A given object has a path %s attribute, \
                    but such an object does not exist in the file. Please \
                    correct these values or delete this attribute \
                    (.__delattr__('hdf5_path')) to create a new object in \
                    the file." % path)
        else:  # create a new object
            node = self._data.createGroup(where, self._get_next_name(obj_type, where))
            node._f_setAttr("_type", obj_type)
            path = node._v_pathname
        # process the object's attributes
        if obj_type in multi_parent:  # Initialize empty parent paths
            node._f_setAttr('segment', '')
            node._f_setAttr(multi_parent[obj_type].lower(), '')
        attrs = classes_necessary_attributes[obj_type] + classes_recommended_attributes[obj_type]
        for attr in attrs:  # obj was already checked for compliance, loop over all safely
            if hasattr(obj, attr[0]):  # save the attribute if it exists
                assign_attribute(getattr(obj, attr[0]), attr[0], path, node)
        # don't forget to save the inherited quantity array itself
        # (AnalogSignal, AnalogSignalArray or SpikeTrain - the NEO "stars")
        if obj_type in classes_inheriting_quantities:
            assign_attribute(obj, classes_inheriting_quantities[obj_type], path, node)
        if hasattr(obj, "annotations"): # annotations should be just a dict
            node._f_setAttr("annotations", getattr(obj, "annotations"))
        node._f_setAttr("object_ref", uuid.uuid4().hex)
        if obj_type in one_to_many_relationship and cascade:
            rels = list(one_to_many_relationship[obj_type])
            if obj_type == "RecordingChannelGroup":
                rels += many_to_many_relationship[obj_type]

            for child_name in rels:  # child_name like "Segment", "Event" etc.
                container = child_name.lower() + "s"  # like "units"
                try:
                    ch = self._data.getNode(node, container)
                except tb.NoSuchNodeError:
                    ch = self._data.createGroup(node, container)
                saved = []  # keeps track of saved object names for removal
                for child in getattr(obj, container):
                    new_name = None
                    child_node = None
                    if hasattr(child, "hdf5_path"):
                        if not child.hdf5_path.startswith(ch._v_pathname):
                        # create a Hard Link if object exists already somewhere
                            try:
                                target = self._data.getNode(child.hdf5_path)
                                new_name = self._get_next_name(
                                    name_by_class[child.__class__], ch._v_pathname)
                                if not hasattr(ch, new_name):  # Only link if path does not exist
                                    child_node = self._data.createHardLink(ch._v_pathname, new_name, target)
                            except tb.NoSuchNodeError:
                                pass
                    if child_node is None:
                        child_node = self.save(child, where=ch._v_pathname)

                    if child_name in multi_parent: # Save parent for multiparent objects
                        child_node._f_setAttr(obj_type.lower(), path)
                    elif child_name == 'RecordingChannel':
                        parents = []
                        if 'recordingchannelgroups' in child_node._v_attrs:
                            parents = child_node._v_attrs['recordingchannelgroups']
                        parents.append(path)
                        child_node._f_setAttr('recordingchannelgroups', parents)
                    if not new_name:
                        new_name = child.hdf5_path.split('/')[-1]
                    saved.append(new_name)
                for child in self._data.iterNodes(ch._v_pathname):
                    if child._v_name not in saved: # clean-up
                        self._data.removeNode(ch._v_pathname, child._v_name, recursive=True)

        self._update_path(obj, node)
        return node

    def _get_parent(self, path, ref, parent_type):
        """ Return the path of the parent of type "parent_type" for the object
        in "path" with id "ref". Returns an empty string if no parent extists.
        """
        parts = path.split('/')

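        # A child path looks like
        # ".../segments/segment_0/analogsignals/analogsignal_0", so parts[-2]
        # is the container folder holding the object and parts[-4] is the
        # container folder of the candidate parent.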
        if parent_type == 'Block' or parts[-4] == parent_type.lower() + 's':
            return '/'.join(parts[:-2])

        object_folder = parts[-2]
        parent_folder = parts[-4]
        if parent_folder in ('recordingchannels', 'units'):
            block_path = '/'.join(parts[:-6])
        else:
            block_path = '/'.join(parts[:-4])

        if parent_type in ('RecordingChannel', 'Unit'):
            # We need to search all recording channels
            path = block_path + '/recordingchannelgroups'
            for n in self._data.iterNodes(path):
                if '_type' not in n._v_attrs:
                    continue
                p = self._search_parent(
                    '%s/%ss' % (n._v_pathname, parent_type.lower()),
                    object_folder, ref)
                if p != '':
                    return p
            return ''

        if parent_type == 'Segment':
            path = block_path + '/segments'
        elif parent_type == 'RecordingChannelGroup':
            path = block_path + '/recordingchannelgroups'
        else:
            return ''

        return self._search_parent(path, object_folder, ref)

    def _get_rcgs(self, path, ref):
        """ Get RecordingChannelGroup parents for a RecordingChannel
        """
        parts = path.split('/')
        object_folder = parts[-2]
        block_path = '/'.join(parts[:-4])
        path = block_path + '/recordingchannelgroups'
        return self._search_parent(path, object_folder, ref, True)

    def _search_parent(self, path, object_folder, ref, multi=False):
        """ Searches a folder for an object with a given reference
        and returns the path of the parent node.

        :param str path: Path to search
        :param str object_folder: The name of the folder within the parent
            object containing the objects to search.
        :param ref: Object reference
        :param bool multi: If True, return a list of all matching parent
            paths instead of the first match.
        """
        if multi:
            ret = []
        else:
            ret = ''

        for n in self._data.iterNodes(path):
            if '_type' not in n._v_attrs:
                continue
            for c in self._data.iterNodes(n._f_getChild(object_folder)):
                try:
                    if c._f_getAttr("object_ref") == ref:
                        if not multi:
                            return n._v_pathname
                        else:
                            ret.append(n._v_pathname)
                except AttributeError:  # alien node
                    pass  # not an error

        return ret

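    # NOTE: this duplicates the module-level "multi_parent" mapping above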
    _second_parent = {  # Second parent type apart from Segment
        'AnalogSignal': 'RecordingChannel',
        'AnalogSignalArray': 'RecordingChannelGroup',
        'IrregularlySampledSignal': 'RecordingChannel',
        'Spike': 'Unit', 'SpikeTrain': 'Unit'}

    def load_lazy_cascade(self, path, lazy):
        """ Load an object with the given path in lazy cascade mode.
        """
        o = self.get(path, cascade='lazy', lazy=lazy)
        t = type(o).__name__
        node = self._data.getNode(path)
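
        # Parent paths are cached in self.parent_paths: for multi-parent types
        # as a [segment_path, other_parent_path] pair, for RecordingChannels
        # as a list of RecordingChannelGroup paths.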

        if t in multi_parent:  # Try to read parent objects from attributes
            if path not in self.parent_paths:
                ppaths = [None, None]
                if 'segment' in node._v_attrs:
                    ppaths[0] = node._f_getAttr('segment')
                if multi_parent[t] in node._v_attrs:
                    ppaths[1] = node._f_getAttr(multi_parent[t])
                self.parent_paths[path] = ppaths
        elif t == 'RecordingChannel':
            if path not in self.parent_paths:
                if 'recordingchannelgroups' in node._v_attrs:
                    self.parent_paths[path] = node._f_getAttr('recordingchannelgroups')

        # Set parent objects
        if path in self.parent_paths:
            paths = self.parent_paths[path]

            if t == 'RecordingChannel':  # Set list of parent channel groups
                for rcg in self.parent_paths[path]:
                    o.recordingchannelgroups.append(self.get(rcg, cascade='lazy', lazy=lazy))
            else:  # Set parents: Segment and another parent
                if paths[0] is None:
                    paths[0] = self._get_parent(
                        path, self._data.getNodeAttr(path, 'object_ref'),
                        'Segment')
                if paths[0]:
                    o.segment = self.get(paths[0], cascade='lazy', lazy=lazy)

                parent = self._second_parent[t]
                if paths[1] is None:
                    paths[1] = self._get_parent(
                        path, self._data.getNodeAttr(path, 'object_ref'),
                        parent)
                if paths[1]:
                    setattr(o, parent.lower(), self.get(paths[1], cascade='lazy', lazy=lazy))
        elif t != 'Block':
            ref = self._data.getNodeAttr(path, 'object_ref')

            if t == 'RecordingChannel':
                rcg_paths = self._get_rcgs(path, ref)
                for rcg in rcg_paths:
                    o.recordingchannelgroups.append(self.get(rcg, cascade='lazy', lazy=lazy))
                self.parent_paths[path] = rcg_paths
            else:
                for p in many_to_one_relationship[t]:
                    parent = self._get_parent(path, ref, p)
                    if parent:
                        setattr(o, p.lower(), self.get(parent, cascade='lazy', lazy=lazy))
        return o

    def load_lazy_object(self, obj):
        """ Return the fully loaded version of a lazily loaded object. Does not
        set links to parent objects.
        """
        return self.get(obj.hdf5_path, cascade=False, lazy=False, lazy_loaded=True)

    @_func_wrapper
    def get(self, path="/", cascade=True, lazy=False, lazy_loaded=False):
        """ Returns a requested NEO object as instance of NEO class.
        Set lazy_loaded to True to load a previously lazily loaded object
        (cache is ignored in this case)."""
        def fetch_attribute(attr_name, attr, node):
            """ fetch required attribute from the corresp. node in the file """
            try:
                if attr[1] == pq.Quantity:
                    arr = self._data.getNode(node, attr_name)
                    units = ""
                    for unit in arr._v_attrs._f_list(attrset='user'):
                        if unit.startswith("unit__"):
                            units += " * " + str(unit[6:]) + " ** " + str(arr._f_getAttr(unit))
                    units = units.replace(" * ", "", 1)
                    if not lazy or sum(arr.shape) <= 1:
                        nattr = pq.Quantity(arr.read(), units)
                    else:  # making an empty array
                        nattr = pq.Quantity(np.empty(tuple([0 for _ in range(attr[2])])), units)
                elif attr[1] == np.ndarray:
                    arr = self._data.getNode(node, attr_name)
                    if not lazy:
                        nattr = np.array(arr.read(), attr[3])
                        if nattr.shape == (0, 1):  # Fix: Empty arrays should have only one dimension
                            nattr = nattr.reshape(-1)
                    else:  # making an empty array
                        nattr = np.empty(0, attr[3])
                else:
                    nattr = node._f_getAttr(attr_name)
                    if attr[1] == str or attr[1] == int:
                        nattr = attr[1](nattr)  # compliance with NEO attr types
            except (AttributeError, tb.NoSuchNodeError):  # not assigned, continue
                nattr = None
            return nattr

        def get_lazy_shape(obj, node):
            attr = lazy_shape_arrays[type(obj).__name__]
            arr = self._data.getNode(node, attr)
            return arr.shape

        if path == "/":  # this is just for convenience. Try to return any object
            found = False
            for n in self._data.iterNodes(path):
                for obj_type in class_by_name.keys():
                    if obj_type.lower() in str(n._v_name).lower():
                        path = n._v_pathname
                        found = True
                if found:
                    break
        try:
            if path == "/":
                raise ValueError()  # root is not a NEO object
            node = self._data.getNode(path)
        except (tb.NoSuchNodeError, ValueError):  # create a new node?
            raise LookupError("There is no valid object with a given path " +
                              str(path) + ' . Please give correct path or just browse the file '
                              '(e.g. NeoHdf5IO()._data.root.<Block>._segments...) to find an '
                              'appropriate name.')
        classname = self._get_class_by_node(node)
        if not classname:
            raise LookupError("The requested object with the path " + str(path) +
                              " exists, but is not of a NEO type. Please check the '_type' attribute.")

        obj_type = name_by_class[classname]
        try:
            object_ref = self._data.getNodeAttr(node, 'object_ref')
        except AttributeError:  # Object does not have reference, e.g. because this is an old file format
            object_ref = None
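
        # Reuse a cached object when possible; for types with complex
        # relationships a full (non-lazy) cascade falls through below so that
        # all links get (re)created.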
        if object_ref in self.objects_by_ref and not lazy_loaded:
            obj = self.objects_by_ref[object_ref]
            if cascade == 'lazy' or obj_type not in complex_relationships:
                return obj
        else:
            kwargs = {}
            # load attributes (inherited *-ed attrs are also here)
            attrs = classes_necessary_attributes[obj_type] + classes_recommended_attributes[obj_type]
            for attr in attrs:
                attr_name = attr[0]
                nattr = fetch_attribute(attr_name, attr, node)
                if nattr is not None:
                    kwargs[attr_name] = nattr
            obj = class_by_name[obj_type](**kwargs)  # instantiate new object
            if lazy and obj_type in lazy_shape_arrays:
                obj.lazy_shape = get_lazy_shape(obj, node)
            self._update_path(obj, node)  # set up HDF attributes: name, path
            try:
                setattr(obj, "annotations", node._f_getAttr("annotations"))
            except AttributeError:
                pass  # not assigned, continue

        if object_ref and not lazy_loaded:
            self.objects_by_ref[object_ref] = obj
        # load relationships
        if cascade:
            if obj_type in one_to_many_relationship:
                rels = list(one_to_many_relationship[obj_type])
                if obj_type == "RecordingChannelGroup":
                    rels += many_to_many_relationship[obj_type]
                for child in rels:  # 'child' is like 'Segment', 'Event' etc.
                    if cascade == 'lazy':
                        relatives = LazyList(self, lazy)
                    else:
                        relatives = []
                    container = self._data.getNode(node, child.lower() + "s")
                    for n in self._data.iterNodes(container):
                        if cascade == 'lazy':
                            relatives.append(n._v_pathname)
                        else:
                            try:
                                if n._f_getAttr("_type") == child:
                                    relatives.append(self.get(n._v_pathname, lazy=lazy))
                            except AttributeError:  # alien node
                                pass  # not an error
                    setattr(obj, child.lower() + "s", relatives)
                    if cascade != 'lazy':
                        # RC -> AnalogSignal relationship will not be created later, do it now
                        if obj_type == "RecordingChannel" and child == "AnalogSignal":
                            for r in relatives:
                                r.recordingchannel = obj
                        # Cannot create Many-to-Many relationship with old format, create at least One-to-Many
                        if obj_type == "RecordingChannelGroup" and not object_ref:
                            for r in relatives:
                                r.recordingchannelgroups = [obj]
            # special processor for RC -> RCG
            if obj_type == "RecordingChannel":
                if hasattr(node, '_v_parent'):
                    parent = node._v_parent
                    if hasattr(parent, '_v_parent'):
                        parent = parent._v_parent
                        if 'object_ref' in parent._v_attrs:
                            obj.recordingchannelgroups.append(self.get(
                                parent._v_pathname, lazy=lazy))
        return obj

    @_func_wrapper
    def read_all_blocks(self, lazy=False, cascade=True, **kargs):
        """
        Loads all blocks in the file that are attached to the root (which
        happens when they are saved with save() or write_block()).
        """
        blocks = []
        for n in self._data.iterNodes(self._data.root):
            if self._get_class_by_node(n) == Block:
                blocks.append(self.read_block(n._v_pathname, lazy=lazy, cascade=cascade, **kargs))
        return blocks

    @_func_wrapper
    def write_all_blocks(self, blocks, **kargs):
        """
        Writes a sequence of blocks. Just calls write_block() for each element.
        """
        for b in blocks:
            self.write_block(b)

    @_func_wrapper
    def delete(self, path, cascade=False):
        """
        Deletes an object from the file. A thin wrapper around removeNode().
        """
        self._data.removeNode(path, recursive=cascade)

    @_func_wrapper
    def reset(self, obj):
        """
        Resets runtime changes made to the object. TBD.
        """
        pass

    @_func_wrapper
    def get_info(self):
        """
        Returns a quantitative information about the contents of the file.
        """
        logger.info("This is a neo.HDF5 file. it contains:")
        info = {}
        info = info.fromkeys(class_by_name.keys(), 0)
        for node in self._data.walkNodes():
            try:
                t = node._f_getAttr("_type")
                info[t] += 1
            except:
                # node is not of NEO type
                pass
        return info

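# Generate the read_<object>() / write_<object>() convenience methods
# (e.g. read_block(), write_segment()) for all supported NEO classes.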
for obj_type in NeoHdf5IO.writeable_objects:
    setattr(NeoHdf5IO, "write_" + obj_type.__name__.lower(), NeoHdf5IO._write_entity)
for obj_type in NeoHdf5IO.readable_objects:
    setattr(NeoHdf5IO, "read_" + obj_type.__name__.lower(), NeoHdf5IO._read_entity)