This file is indexed.

/usr/lib/ocf/resource.d/heartbeat/ManageRAID is in resource-agents 1:3.9.2-5ubuntu4.

This file is owned by root:root, with mode 0o755.

The actual contents of the file can be viewed below.
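As a quick sanity check, the owning package and the installed permissions can be confirmed with dpkg and stat (assuming a Debian/Ubuntu system with GNU coreutils):

  dpkg -S /usr/lib/ocf/resource.d/heartbeat/ManageRAID
  stat -c '%U:%G %a' /usr/lib/ocf/resource.d/heartbeat/ManageRAID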

#!/bin/bash
#
# Name     ManageRAID
# Author   Matthias Dahl, m.dahl@designassembly.de
# License  GPL version 2
#
# (c) 2006 The Design Assembly GmbH.
#
#
# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
#
# This resource agent is most likely functionally complete, but not error free. Please
# consider it BETA quality for the moment, until it has proven itself stable...
#
# USE AT YOUR OWN RISK.
#
# WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
#
#
# partly based on/inspired by original Heartbeat2 OCF resource agents
#
# Description
#
# Manages starting, mounting, unmounting, stopping and monitoring of RAID devices
# which are preconfigured in /etc/conf.d/HB-ManageRAID.
#
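# For illustration only (this example is not part of the original file): judging from
# the parameter checks further down, a minimal /etc/conf.d/HB-ManageRAID entry for a
# hypothetical RAID named "storage" could look roughly like this:
#
#   storage_UUID="deadbeef:cafe0001:12345678:9abcdef0"  # as reported by mdadm --detail
#   storage_DEV="md0"                                    # becomes /dev/md/0
#   storage_MOUNTPOINT="/mnt/storage"
#   storage_MOUNTOPTIONS="defaults,noatime"
#   storage_LOCALDISKS=( /dev/sda1 /dev/sdb1 )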
# 
# Created  11. Sep 2006
# Updated  18. Sep 2006
#
# rev. 1.00.2
#
# Changelog
#
# 18/Sep/06 1.00.2 more cleanup
# 12/Sep/06 1.00.1 add more functionality
#                  add sanity check for config parameters
#                  general cleanup all over the place
# 11/Sep/06 1.00.0 it's alive... muahaha... ALIVE... :-)
#
#
# TODO
#
#   - check whether at least one disk out of PREFIX_LOCALDISKS is still active
#     in the RAID; otherwise consider the RAID broken and stop it.
#
#     The reason behind this: consider a RAID-1 which contains iSCSI devices
#     shared over Ethernet that get dynamically added to / removed from the RAID.
#     Once all local disks have failed and only those iSCSI disks remain, the RAID
#     should really stop, to prevent bad performance and possible data loss.
# 

###
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
###

# required utilities

# required files/devices
RAID_MDSTAT=/proc/mdstat

#
# check_file()
#
check_file ()
{
    if [[ ! -e $1 ]]; then
        ocf_log err "setup problem: file $1 does not exist."
        exit $OCF_ERR_GENERIC
    fi
}

#
# usage()
#
usage()
{
	cat <<-EOT
	usage: $0 {start|stop|status|monitor|validate-all|usage|meta-data}
	EOT
}

#
# meta_data()
#
meta_data()
{
	cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="ManageRAID">
  <version>1.00.2</version>

  <longdesc lang="en">
    Manages starting, stopping and monitoring of RAID devices which
    are preconfigured in /etc/conf.d/HB-ManageRAID.
  </longdesc>

  <shortdesc lang="en">Manages RAID devices</shortdesc>

  <parameters>
    <parameter name="raidname" unique="0" required="1">
      <longdesc lang="en">
        Name (case sensitive) of the RAID to manage, as preconfigured in /etc/conf.d/HB-ManageRAID.
      </longdesc>
      <shortdesc lang="en">RAID name</shortdesc>
      <content type="string" default="" />
    </parameter>
  </parameters>

  <actions>
    <action name="start" timeout="75" />
    <action name="stop" timeout="75" />
    <action name="status" depth="0" timeout="10" interval="10" />
    <action name="monitor" depth="0" timeout="10" interval="10" />
    <action name="validate-all" timeout="5" />
    <action name="meta-data" timeout="5" />
  </actions>
</resource-agent>
END
}
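
#
# For illustration only (not part of the original script): assuming a Pacemaker
# cluster managed with the crm shell, a resource using this agent could be defined
# along these lines, matching the raidname parameter and the action timeouts
# advertised in the meta-data above ("p_raid" and "storage" are placeholder names):
#
#   crm configure primitive p_raid ocf:heartbeat:ManageRAID \
#     params raidname="storage" \
#     op start timeout="75s" op stop timeout="75s" \
#     op monitor interval="10s" timeout="10s"
#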

#
# start_raid()
#
start_raid()
{
  declare -i retcode

  status_raid
  retcode=$?
  if [[ $retcode == $OCF_SUCCESS ]]; then
    return $OCF_SUCCESS
  elif [[ $retcode != $OCF_NOT_RUNNING ]]; then
    return $retcode
  fi

  for ldev in ${RAID_LOCALDISKS[@]}; do
    if [[ ! -b $ldev ]]; then
      ocf_log err "$ldev is not a (local) block device."
      return $OCF_ERR_ARGS
    fi
  done

  $MDADM -A $RAID_DEVPATH -a yes -u ${!RAID_UUID} ${RAID_LOCALDISKS[@]} &> /dev/null
  if [[ $? != 0 ]]; then
    ocf_log err "starting ${!RAID_DEV} with ${RAID_LOCALDISKS[@]} failed."
    return $OCF_ERR_GENERIC
  fi

  $MOUNT -o ${!RAID_MOUNTOPTIONS} $RAID_DEVPATH ${!RAID_MOUNTPOINT} &> /dev/null
  if [[ $? != 0 ]]; then
    $MDADM -S $RAID_DEVPATH &> /dev/null
    
    if [[ $? != 0 ]]; then
      ocf_log err "mounting ${!RAID_DEV} to ${!RAID_MOUNTPOINT} failed as well as stopping the RAID itself."
    else
      ocf_log err "mounting ${!RAID_DEV} to ${!RAID_MOUNTPOINT} failed. RAID stopped again."
    fi

    return $OCF_ERR_GENERIC
  fi

  return $OCF_SUCCESS
}

#
# stop_raid()
#
stop_raid()
{
  status_raid
  if [[ $? == $OCF_NOT_RUNNING ]]; then
    return $OCF_SUCCESS
  fi

  $UMOUNT ${!RAID_MOUNTPOINT} &> /dev/null
  if [[ $? != 0 ]]; then
    ocf_log err "unmounting ${!RAID_MOUNTPOINT} failed. not stopping ${!RAID_DEV}!"
    return $OCF_ERR_GENERIC
  fi

  $MDADM -S $RAID_DEVPATH &> /dev/null
  if [[ $? != 0 ]]; then
    ocf_log err "stopping RAID ${!RAID_DEV} failed."
    return $OCF_ERR_GENERIC
  fi

  return $OCF_SUCCESS
}

#
# status_raid()
#
status_raid()
{ 
  declare -i retcode_raidcheck
  declare -i retcode_uuidcheck

  $CAT $RAID_MDSTAT | $GREP -e "${!RAID_DEV}[\ ]*:[\ ]*active" &> /dev/null
  if [[ $? != 0 ]]; then
    return $OCF_NOT_RUNNING
  fi
 
  if [[ ! -e $RAID_DEVPATH ]]; then
    return $OCF_ERR_GENERIC
  fi

  $MDADM --detail -t $RAID_DEVPATH &> /dev/null
  retcode_raidcheck=$?
  $MDADM --detail -t $RAID_DEVPATH | $GREP -qEe "^[\ ]*UUID[\ ]*:[\ ]*${!RAID_UUID}" &> /dev/null
  retcode_uuidcheck=$?

  if [[ $retcode_raidcheck -gt 3 ]]; then
    ocf_log err "mdadm returned error code $retcode_raidcheck while checking ${!RAID_DEV}."
    return $OCF_ERR_GENERIC
  elif [[ $retcode_raidcheck -eq 3 ]]; then
    ocf_log err "${!RAID_DEV} has failed."
    return $OCF_ERR_GENERIC
  elif [[ $retcode_raidcheck -lt 3 && $retcode_uuidcheck -ne 0 ]]; then
    ocf_log err "active RAID ${!RAID_DEV} and configured UUID (${!RAID_UUID}) do not match."
    return $OCF_ERR_GENERIC
  fi

  $MOUNT | $GREP -e "$RAID_DEVPATH on ${!RAID_MOUNTPOINT}" &> /dev/null
  if [[ $? != 0 ]]; then
    ocf_log err "${!RAID_DEV} seems to be no longer mounted at ${!RAID_MOUNTPOINT}"
    return $OCF_ERR_GENERIC
  fi

  return $OCF_SUCCESS
}    

#
# validate_all_raid()
#
validate_all_raid()
{
  #
  # since all parameters are checked every time ManageRAID is
  # invoked, there is not much more to check...
  #
  # status_raid should cover the rest.
  # 
  declare -i retcode

  status_raid
  retcode=$?

  if [[ $retcode != $OCF_SUCCESS && $retcode != $OCF_NOT_RUNNING ]]; then
    return $retcode
  fi

  return $OCF_SUCCESS
}

if [ $# -ne 1 ]; then
  usage
  exit $OCF_ERR_ARGS
fi

case "$1" in
  meta-data)
	meta_data
	exit $OCF_SUCCESS
	;;
  usage) 
	usage
	exit $OCF_SUCCESS
	;;
  *)
	;;
esac

## required configuration
#
[ -f /etc/conf.d/HB-ManageRAID ] || {
	ocf_log err "/etc/conf.d/HB-ManageRAID missing"
	exit $OCF_ERR_INSTALLED
}
. /etc/conf.d/HB-ManageRAID
#
##

#
# check relevant environment variables for sanity and security
#

declare -i retcode_test
declare -i retcode_grep

$TEST -z "$OCF_RESKEY_raidname"
retcode_test=$?
echo "$OCF_RESKEY_raidname" | $GREP -qEe "^[[:alnum:]\_]+$"
retcode_grep=$?

if [[ $retcode_test != 1 || $retcode_grep != 0 ]]; then
  ocf_log err "OCF_RESKEY_raidname not set or invalid."
  exit $OCF_ERR_ARGS
fi

RAID_UUID=${OCF_RESKEY_raidname}_UUID

echo ${!RAID_UUID} | $GREP -qEe "^[[:alnum:]]{8}:[[:alnum:]]{8}:[[:alnum:]]{8}:[[:alnum:]]{8}$"
if [[ $? != 0 ]]; then
  ocf_log err "${OCF_RESKEY_raidname}_UUID is invalid."
  exit $OCF_ERR_ARGS
fi

RAID_DEV=${OCF_RESKEY_raidname}_DEV

echo ${!RAID_DEV} | $GREP -qEe "^md[0-9]+$"
if [[ $? != 0 ]]; then
  ocf_log err "${OCF_RESKEY_raidname}_DEV is invalid."
  exit $OCF_ERR_ARGS
fi

RAID_DEVPATH=/dev/${!RAID_DEV/md/md\/}
RAID_MOUNTPOINT=${OCF_RESKEY_raidname}_MOUNTPOINT

echo ${!RAID_MOUNTPOINT} | $GREP -qEe "^[[:alnum:]\/\_\"\ ]+$"
if [[ $? != 0 ]]; then
  ocf_log err "${OCF_RESKEY_raidname}_MOUNTPOINT is invalid."
  exit $OCF_ERR_ARGS
fi

RAID_MOUNTOPTIONS=${OCF_RESKEY_raidname}_MOUNTOPTIONS

echo ${!RAID_MOUNTOPTIONS} | $GREP -qEe "^[[:alpha:]\,]+$"
if [[ $? != 0 ]]; then
  ocf_log err "${OCF_RESKEY_raidname}_MOUNTOPTIONS is invalid."
  exit $OCF_ERR_ARGS
fi

RAID_LOCALDISKS=${OCF_RESKEY_raidname}_LOCALDISKS[@]
RAID_LOCALDISKS=( "${!RAID_LOCALDISKS}" )

if [[ ${#RAID_LOCALDISKS[@]} -lt 1 ]]; then
  ocf_log err "you have to specify at least one local disk."
  exit $OCF_ERR_ARGS
fi

#
# check that all relevant utilities are available
# 
check_binary $MDADM
check_binary $MOUNT
check_binary $UMOUNT
check_binary $GREP
check_binary $CAT
check_binary $TEST
check_binary echo


#
# check that all relevant devices are available
#
check_file $RAID_MDSTAT 

#
# finally... let's see what we are ordered to do :-)
#
case "$1" in
  start)
	start_raid
	;;
  stop)
	stop_raid
	;;
  status|monitor) 
	status_raid
	;;
  validate-all)
	validate_all_raid
	;;
  *)
	usage
	exit $OCF_ERR_UNIMPLEMENTED 
	;;
esac

exit $?