/usr/share/pyshared/mvpa2/mappers/som.py is in python-mvpa2 2.1.0-1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Self-organizing map (SOM) mapper."""
__docformat__ = 'restructuredtext'
import numpy as np
from mvpa2.mappers.base import Mapper, accepts_dataset_as_samples
if __debug__:
from mvpa2.base import debug
class SimpleSOMMapper(Mapper):
    """Mapper using a self-organizing map (SOM) for dimensionality reduction.

    This mapper provides a simple, but pretty fast implementation of a
    self-organizing map using an unsupervised training algorithm. It performs a
    ND -> 2D mapping, which can, for example, be used for visualization of
    high-dimensional data.

    This SOM implementation uses squared Euclidean distance to determine
    the best matching Kohonen unit and a Gaussian neighborhood influence
    kernel.
    """
    def __init__(self, kshape, niter, learning_rate=0.005,
                 iradius=None):
        """
        Parameters
        ----------
        kshape : (int, int)
          Shape of the internal Kohonen layer. Currently, only 2D Kohonen
          layers are supported, although the length of an axis might be set
          to 1.
        niter : int
          Number of iterations during network training.
        learning_rate : float
          Initial learning rate, which will be continuously decreased during
          network training.
        iradius : float or None
          Initial radius of the Gaussian neighborhood kernel, which will be
          continuously decreased during network training. If `None` (default)
          the radius is set equal to the longest edge of the Kohonen layer.
        """
        # init base class
        Mapper.__init__(self)

        self.kshape = np.array(kshape, dtype='int')

        if iradius is None:
            self.radius = self.kshape.max()
        else:
            self.radius = iradius

        # learning rate
        self.lrate = learning_rate

        # number of training iterations
        self.niter = niter

        # precompute whatever can be done
        # scalar for decay of learning rate and radius across all iterations
        self.iter_scale = self.niter / np.log(self.radius)

        # the internal kohonen layer
        self._K = None
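
        # Note on the schedule (illustrative example values): with
        # iter_scale = niter / log(radius), the neighborhood radius decays as
        # radius * exp(-it / iter_scale) and thus reaches
        # radius * exp(-log(radius)) == 1 at the final iteration. E.g. for
        # kshape == (20, 30) and niter == 400 the initial radius is 30 and
        # iter_scale ~= 400 / 3.4 ~= 117.6.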
    @accepts_dataset_as_samples
    def _train(self, samples):
        """Perform network training.

        Parameters
        ----------
        samples : array-like
          Used for unsupervised training of the SOM.
        """
        # XXX initialize with clever default, e.g. plain of first two PCA
        # components
        self._K = np.random.standard_normal(tuple(self.kshape) + (samples.shape[1],))

        # units weight vector deltas for batch training
        # (height x width x #features)
        unit_deltas = np.zeros(self._K.shape, dtype='float')

        # precompute distance kernel between elements in the Kohonen layer
        # that will remain constant throughout the training
        # (just compute one quadrant, as the distances are symmetric)
        # XXX maybe do other than squared Euclidean?
        dqd = np.fromfunction(lambda x, y: (x**2 + y**2)**0.5,
                              self.kshape, dtype='float')
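        # E.g. for kshape == (3, 3) this quadrant reads (values rounded):
        #   [[ 0.    1.    2.  ]
        #    [ 1.    1.41  2.24]
        #    [ 2.    2.24  2.83]]
        # i.e. dqd[x, y] is the Euclidean distance of unit (x, y) from (0, 0).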
        # for all iterations
        for it in xrange(1, self.niter + 1):
            # compute the neighborhood impact kernel for this iteration
            # has to be recomputed since kernel shrinks over time
            k = self._compute_influence_kernel(it, dqd)

            # for all training vectors
            for s in samples:
                # determine closest unit (as element coordinate)
                b = self._get_bmu(s)
                # train all units at once by unfolding the kernel (from the
                # single quadrant that is precomputed), cutting it to the
                # right shape and simply multiplying it by the difference of
                # target and all unit weights....
                infl = np.vstack((
                    np.hstack((
                        # upper left
                        k[b[0]:0:-1, b[1]:0:-1],
                        # upper right
                        k[b[0]:0:-1, :self.kshape[1] - b[1]])),
                    np.hstack((
                        # lower left
                        k[:self.kshape[0] - b[0], b[1]:0:-1],
                        # lower right
                        k[:self.kshape[0] - b[0], :self.kshape[1] - b[1]]))
                    ))
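                # For illustration: with kshape == (3, 3) and b == (1, 1) the
                # stacked slices satisfy
                #   infl[i, j] == k[abs(i - 1), abs(j - 1)]
                # i.e. they reassemble the full kernel centered on the BMU
                # from the single precomputed quadrant.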
                unit_deltas += infl[:, :, np.newaxis] * (s - self._K)

            # apply cumulative unit deltas
            self._K += unit_deltas

            if __debug__:
                debug("SOM", "Iteration %d/%d done: ||unit_deltas||=%g" %
                      (it, self.niter, np.sqrt(np.sum(unit_deltas ** 2))))

            # reset unit deltas
            unit_deltas.fill(0.)
    ##REF: Name was automagically refactored
    def _compute_influence_kernel(self, iter, dqd):
        """Compute the neighborhood kernel for some iteration.

        Parameters
        ----------
        iter : int
          The iteration for which to compute the kernel.
        dqd : array (nrows x ncolumns)
          This is one quadrant of Euclidean distances between Kohonen unit
          locations.
        """
        # compute radius decay for this iteration
        curr_max_radius = self.radius * np.exp(-1.0 * iter / self.iter_scale)

        # same for learning rate
        curr_lrate = self.lrate * np.exp(-1.0 * iter / self.iter_scale)

        # compute Gaussian influence kernel
        infl = np.exp((-1.0 * dqd) / (2 * curr_max_radius * iter))
        infl *= curr_lrate

        # hard-limit kernel to max radius
        # XXX is this really necessary?
        infl[dqd > curr_max_radius] = 0.

        return infl
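
    # Worked example of the decay (illustrative numbers): for radius == 10 and
    # niter == 100, iter_scale is 100 / log(10) ~= 43.4, so by iteration 43
    # curr_max_radius has dropped to about 10 * exp(-1) ~= 3.7 and all units
    # farther than that from the BMU receive zero influence.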
    ##REF: Name was automagically refactored
    def _get_bmu(self, sample):
        """Returns the ID of the best matching unit.

        'best' is determined as minimal squared Euclidean distance between
        any unit's weight vector and some given target `sample`.

        Parameters
        ----------
        sample : array
          Target sample.

        Returns
        -------
        tuple: (row, column)
        """
        # TODO expose distance function as parameter
        loc = np.argmin(((self.K - sample) ** 2).sum(axis=2))
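        # (an equivalent conversion of this flat index would be
        # np.unravel_index(loc, self.kshape))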
        # assumes 2D Kohonen layer
        return (np.divide(loc, self.kshape[1]), loc % self.kshape[1])
    def _forward_data(self, data):
        """Map data from the IN dataspace into OUT space.

        Mapping is performed by simply determining the best matching Kohonen
        unit for each data sample, yielding one (row, column) coordinate pair
        per sample.
        """
        return np.array([self._get_bmu(d) for d in data])
    def _reverse_data(self, data):
        """Reverse map data from OUT space into the IN space.
        """
        # simple transform into appropriate array slicing and
        # return the associated Kohonen unit weights
        return self.K[tuple(np.transpose(data))]
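
    # Note: a forward pass followed by a reverse pass approximates each sample
    # by the weight vector of its BMU, i.e. som.reverse(som.forward(samples))
    # (using the forward()/reverse() interface of the Mapper base class).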
    def __repr__(self):
        s = Mapper.__repr__(self).rstrip(' )')
        # beautify
        if not s[-1] == '(':
            s += ' '
        s += 'kshape=%s, niter=%i, learning_rate=%f, iradius=%f)' \
             % (str(tuple(self.kshape)), self.niter, self.lrate,
                self.radius)
        return s
    ##REF: Name was automagically refactored
    def _access_kohonen(self):
        """Provide access to the Kohonen layer.

        With some care.
        """
        if self._K is None:
            raise RuntimeError(
                'The SOM needs to be trained before access to the Kohonen '
                'layer is possible.')

        return self._K

    K = property(fget=_access_kohonen)
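
# A minimal usage sketch (illustrative only; assumes the train()/forward()/
# reverse() interface provided by the Mapper base classes and uses made-up
# random data):
if __name__ == '__main__':
    data = np.random.standard_normal((50, 10))
    som = SimpleSOMMapper(kshape=(8, 8), niter=20, learning_rate=0.05)
    som.train(data)
    # each sample maps to the (row, column) coordinate of its BMU
    print som.forward(data)[:5]
    # reverse-mapping those coordinates returns the BMU weight vectors
    print som.reverse(som.forward(data))[:2]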