This file is indexed.

/usr/share/heartbeat/cts/CM_LinuxHAv2.py is in heartbeat 1:3.0.5-3ubuntu2.

This file is owned by root:root, with mode 0o644.
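
The ownership and mode can be double-checked with a short Python snippet (a minimal sketch; only the path above is taken from this page):

    import grp, os, pwd, stat

    st = os.stat("/usr/share/heartbeat/cts/CM_LinuxHAv2.py")
    print("owner: %s:%s" % (pwd.getpwuid(st.st_uid).pw_name, grp.getgrgid(st.st_gid).gr_name))  # expect root:root
    print("mode:  %s" % oct(stat.S_IMODE(st.st_mode)))  # expect 0644 on Python 2, 0o644 on Python 3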

The actual contents of the file can be viewed below.

#!/usr/bin/python

'''CTS: Cluster Testing System: LinuxHA v2 dependent modules...
'''

__copyright__='''
Author: Huang Zhen <zhenhltc@cn.ibm.com>
Copyright (C) 2004 International Business Machines

Additional Audits, Revised Start action, Default Configuration:
     Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>

'''

#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.

import os, sys, CTS, CTSaudits, CTStests, warnings
from CTS import *
from CM_hb import HeartbeatCM
from CTSaudits import ClusterAudit
from CTStests import *
from CIB import *
try:
    from xml.dom.minidom import *
except ImportError:
    sys.__stdout__.write("Python module xml.dom.minidom not found\n")
    sys.__stdout__.write("Please install python-xml or similar before continuing\n")
    sys.__stdout__.flush()
    sys.exit(1)

#######################################################################
#
#  LinuxHA v2 dependent modules
#
#######################################################################


class LinuxHAv2(HeartbeatCM):
    '''
    The linux-ha version 2 cluster manager class.
    It implements the things we need to talk to and manipulate
    linux-ha version 2 clusters
    '''
    def __init__(self, Environment, randseed=None):
        HeartbeatCM.__init__(self, Environment, randseed=randseed)

        self.clear_cache = 0
        self.cib_installed = 0
        self.config = None
        self.cluster_monitor = 0
        self.use_short_names = 1
        self.update({
            "Name"           : "linux-ha-v2",
            "DeadTime"       : 300,
            "StartTime"      : 300,        # Max time to start up
            "StableTime"     : 30,
            "StartCmd"       : "/etc/init.d/heartbeat start > /dev/null 2>&1",
            "StopCmd"        : "/etc/init.d/heartbeat stop  > /dev/null 2>&1",
            "ElectionCmd"    : "/usr/sbin/crmadmin -E %s",
            "StatusCmd"      : "/usr/sbin/crmadmin -S %s 2>/dev/null",
            "EpocheCmd"      : "/usr/sbin/ccm_tool -e",
            "QuorumCmd"      : "/usr/sbin/ccm_tool -q",
            "CibQuery"       : "/usr/sbin/cibadmin -Ql",
            "ParitionCmd"    : "/usr/sbin/ccm_tool -p",
            "IsRscRunning"   : "/usr/sbin/lrmadmin -E %s monitor 0 0 EVERYTIME 2>/dev/null|grep return",
            "ExecuteRscOp"   : "/usr/sbin/lrmadmin -n %s -E %s %s 0 %d EVERYTIME 2>/dev/null",
            "CIBfile"        : "%s:/var/lib/heartbeat/crm/cib.xml",
            "TmpDir"         : "/tmp",
            "BreakCommCmd2"  : "/usr/share/heartbeat/TestHeartbeatComm break-communication %s>/dev/null 2>&1",
            "IsIPAddrRscRunning"   : "",

            "StandbyCmd"   : "/usr/sbin/crm_standby -U %s -v %s 2>/dev/null",
            "UUIDQueryCmd"   : "/usr/sbin/crmadmin -N",
            "StandbyQueryCmd"    : "/usr/sbin/crm_standby -GQ -U %s 2>/dev/null",

            # Patterns to look for in the log files for various occasions...
            "Pat:DC_IDLE"      : "crmd.*State transition.*-> S_IDLE",
            
            # This won't work if we have multiple partitions
            # Use: "Pat:They_started" : "%s crmd:.*State transition.*-> S_NOT_DC",
            "Pat:They_started" : "Updating node state to member for %s",
            "Pat:We_started"   : "%s crmd:.* State transition.*-> S_IDLE",
            "Pat:We_stopped"   : "heartbeat.*%s.*Heartbeat shutdown complete",
            "Pat:Logd_stopped" : "%s logd:.*Exiting write process",
            "Pat:They_stopped" : "%s crmd:.*LOST:.* %s ",
            "Pat:All_stopped"  : "heartbeat.*%s.*Heartbeat shutdown complete",
            "Pat:They_dead"    : "node %s.*: is dead",
            "Pat:TransitionComplete" : "Transition status: Complete: complete",

            
            # Bad news Regexes.  Should never occur.
            "BadRegexes"   : (
                r"ERROR:",
                r"CRIT:",
                r"Shutting down\.",
                r"Forcing shutdown\.",
                r"Timer I_TERMINATE just popped",
                r"input=I_ERROR",
                r"input=I_FAIL",
                r"input=I_INTEGRATED cause=C_TIMER_POPPED",
                r"input=I_FINALIZED cause=C_TIMER_POPPED",
                r"input=I_ERROR",
                r", exiting\.",
                r"WARN.*Ignoring HA message.*vote.*not in our membership list",
                r"pengine.*Attempting recovery of resource",
                r"tengine.*is taking more than 2x its timeout",
                r"Confirm not received from",
                r"Welcome reply not received from",
                r"Attempting to schedule .* after a stop",
                r"Resource .* was active at shutdown",
                r"duplicate entries for call_id",
                r"Search terminated:",
                r"No need to invoke the TE",
                r":global_timer_callback",
                r"Faking parameter digest creation",
                r"Parameters to .* action changed:",
                r"Parameters to .* changed",
            ),
        })
        del self["Standby"]
        if self.Env["DoBSC"]:
            del self["Pat:They_stopped"]
            del self["Pat:Logd_stopped"]
            self.Env["use_logd"] = 0

        self.check_transitions = 0
        self.check_elections = 0
        self.CIBsync = {}
        self.default_cts_cib=CIB(self).cib()
        self.debug(self.default_cts_cib)
    
    def errorstoignore(self):
        # At some point implement a more elegant solution that 
        #   also produces a report at the end
        '''Return the list of errors which are known to be very noisy and should be ignored'''
        return [
            "crmadmin:",
            "ERROR: Message hist queue is filling up"
            ]

    def install_config(self, node):
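        '''When ClobberCIB is set, remove any stale CIB files from the node
        and, on the first node only, install either the generated CTS CIB or
        the file named by CIBfilename; the other nodes pick it up from there.'''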
        if not self.ns.WaitForNodeToComeUp(node):
            self.log("Node %s is not up." % node)
            return None

        if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1:
            self.CIBsync[node] = 1
            self.rsh.remote_py(node, "os", "system", "rm -f /var/lib/heartbeat/crm/cib.xml")
            self.rsh.remote_py(node, "os", "system", "rm -f /var/lib/heartbeat/crm/cib.xml.sig")
            self.rsh.remote_py(node, "os", "system", "rm -f /var/lib/heartbeat/crm/cib.xml.last")
            self.rsh.remote_py(node, "os", "system", "rm -f /var/lib/heartbeat/crm/cib.xml.sig.last")

            # Only install the CIB on the first node, all the other ones will pick it up from there
            if self.cib_installed == 1:
                return None

            self.cib_installed = 1
            if self.Env["CIBfilename"] == None:
                self.debug("Installing Generated CIB on node %s" %(node))
                warnings.filterwarnings("ignore")
                cib_file=os.tmpnam()
                warnings.resetwarnings()
                os.system("rm -f "+cib_file)
                self.debug("Creating new CIB for " + node + " in: " + cib_file)
                os.system("echo \'" + self.default_cts_cib + "\' > " + cib_file)
                if 0!=self.rsh.echo_cp(None, cib_file, node, "/var/lib/heartbeat/crm/cib.xml"):
                    raise ValueError("Can not create CIB on %s "%node)

                os.system("rm -f "+cib_file)
            else:
                self.debug("Installing CIB (%s) on node %s" %(self.Env["CIBfilename"], node))
                if 0!=self.rsh.cp(self.Env["CIBfilename"], "root@" + (self["CIBfile"]%node)):
                    raise ValueError("Can not scp file to %s "%node)
        
            self.rsh.remote_py(node, "os", "system", "chown hacluster /var/lib/heartbeat/crm/cib.xml")

    def prepare(self):
        '''Finish the Initialization process. Prepare to test...'''

        for node in self.Env["nodes"]:
            self.ShouldBeStatus[node] = ""
            self.StataCM(node)

    def test_node_CM(self, node):
        '''Report the status of the cluster manager on a given node'''

        watchpats = [ ]
        watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
        watchpats.append(self["Pat:They_started"]%node)
        idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats)
        idle_watch.setwatch()

        out=self.rsh.readaline(node, self["StatusCmd"]%node)
        ret= (string.find(out, 'ok') != -1)
        self.debug("Node %s status: %s" %(node, out))            

        if not ret:
            if self.ShouldBeStatus[node] == self["up"]:
                self.log(
                    "Node status for %s is %s but we think it should be %s"
                    %(node, self["down"], self.ShouldBeStatus[node]))
            self.ShouldBeStatus[node]=self["down"]
            return 0

        if self.ShouldBeStatus[node] == self["down"]:
            self.log(
                "Node status for %s is %s but we think it should be %s: %s"
                %(node, self["up"], self.ShouldBeStatus[node], out))

        self.ShouldBeStatus[node]=self["up"]

        # check the output first - because syslog-ng loses messages
        if string.find(out, 'S_NOT_DC') != -1:
            # Up and stable
            return 2
        if string.find(out, 'S_IDLE') != -1:
            # Up and stable
            return 2

        # fall back to syslog-ng and wait
        if not idle_watch.look():
            # just up
            self.debug("Warn: Node %s is unstable: %s" %(node, out))
            return 1

        # Up and stable
        return 2

    # Is the node up or is the node down?
    def StataCM(self, node):
        '''Return 1 if the cluster manager on the given node is up, otherwise None'''

        if self.test_node_CM(node) > 0:
            return 1
        return None

    # Being up and being stable is not the same question...
    def node_stable(self, node):
        '''Return 1 if the cluster manager on the given node is up and stable, otherwise None'''

        if self.test_node_CM(node) == 2:
            return 1
        self.log("Warn: Node %s not stable" %(node)) 
        return None

    def cluster_stable(self, timeout=None):
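        '''Return 1 if an active node reports an idle DC (or the whole cluster
        is down) within the timeout, otherwise None.'''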
        watchpats = [ ]
        watchpats.append("Current ping state: S_IDLE")
        watchpats.append(self["Pat:DC_IDLE"])

        if timeout == None:
            timeout = self["DeadTime"]

        idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats, timeout)
        idle_watch.setwatch()

        any_up = 0
        for node in self.Env["nodes"]:
            # have each node dump its current state
            if self.ShouldBeStatus[node] == self["up"]:
                self.rsh.readaline(node, (self["StatusCmd"] %node) )
                any_up = 1

        if any_up == 0:
            self.debug("Cluster is inactive") 
            return 1

        ret = idle_watch.look()
        if ret:
            self.debug(ret) 
            return 1

        self.log("Warn: Cluster Master not IDLE after %ds" % timeout) 
        return None

    def is_node_dc(self, node, status_line=None):
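        '''Return 1 if the crmadmin status line shows a DC-only state (S_IDLE,
        S_INTEGRATION, S_FINALIZE_JOIN, S_POLICY_ENGINE or S_TRANSITION_ENGINE),
        otherwise 0.'''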
        rc = 0

        if not status_line: 
            status_line = self.rsh.readaline(node, self["StatusCmd"]%node)

        if not status_line:
            rc = 0
        elif string.find(status_line, 'S_IDLE') != -1:
            rc = 1
        elif string.find(status_line, 'S_INTEGRATION') != -1: 
            rc = 1
        elif string.find(status_line, 'S_FINALIZE_JOIN') != -1: 
            rc = 1
        elif string.find(status_line, 'S_POLICY_ENGINE') != -1: 
            rc = 1
        elif string.find(status_line, 'S_TRANSITION_ENGINE') != -1: 
            rc = 1

        if rc == 1:
            self.debug("%s _is_ the DC" % node)
            
        return rc

    def active_resources(self, node):
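        '''Return the ids of the resources that crm_mon reports as Started on this node.'''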
        (rc, output) = self.rsh.remote_py(
            node, "os", "system", """/usr/sbin/crm_mon -1 | grep "Started %s" """ % node)

        resources = []
        for line in output:
            fields = line.split()
            resources.append(fields[0])
        return resources

    def ResourceOp(self, resource, op, node, interval=0, app="lrmadmin"):
        '''
        Execute an operation on a resource
        '''
        self.rsh.readaline(node, self["ExecuteRscOp"] 
                           % (app, resource, op, interval))
        return self.rsh.lastrc

    def ResourceLocation(self, rid):
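        '''Return the list of up nodes on which a monitor of the given resource succeeds.'''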
        ResourceNodes = []
        for node in self.Env["nodes"]:
            if self.ShouldBeStatus[node] == self["up"]:
                if self.ResourceOp(rid, "monitor", node) == 0:
                    ResourceNodes.append(node)
        return ResourceNodes

    def isolate_node(self, node, allowlist):
        '''isolate the communication between the nodes'''
        rc = self.rsh(node, self["BreakCommCmd2"]%allowlist)
        if rc == 0:
            return 1
        else:
            self.log("Could not break the communication from node: %s",node)
        return None
        
    def Configuration(self):
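        '''Return the <configuration> element of the CIB as a DOM node,
        fetching and parsing the CIB (and caching the result) on first use.'''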
        if self.config:
            return self.config.getElementsByTagName('configuration')[0]

        warnings.filterwarnings("ignore")
        cib_file=os.tmpnam()
        warnings.resetwarnings()
        
        os.system("rm -f "+cib_file)

        if self.Env["ClobberCIB"] == 1:
            if self.Env["CIBfilename"] == None:
                self.debug("Creating new CIB in: " + cib_file)
                os.system("echo \'"+ self.default_cts_cib +"\' > "+ cib_file)
            else:
                os.system("cp "+self.Env["CIBfilename"]+" "+cib_file)
        else:            
            if 0 != self.rsh.echo_cp(
                self.Env["nodes"][0], "/var/lib/heartbeat/crm/cib.xml", None, cib_file):
                raise ValueError("Can not copy file to %s, maybe permission denied"%cib_file)

        self.config = parse(cib_file)
        os.remove(cib_file)

        return self.config.getElementsByTagName('configuration')[0]
    
    def Resources(self):
        ResourceList = []
        # read resources from the CIB
        configuration = self.Configuration()
        resources = configuration.getElementsByTagName('resources')[0]
        rscs = configuration.getElementsByTagName('primitive')
        incs = configuration.getElementsByTagName('clone')
        groups = configuration.getElementsByTagName('group')
        
        for rsc in rscs:
            if rsc in resources.childNodes:
                ResourceList.append(HAResource(self,rsc))
                
        for grp in groups:
            for rsc in rscs:
                if rsc in grp.childNodes:
                    if self.use_short_names:
                        resource = HAResource(self,rsc)
                    else:
                        resource = HAResource(self,rsc,grp.getAttribute('id'))
                    ResourceList.append(resource)
        
        for inc in incs:
            clone_max = 0
            inc_name = inc.getAttribute("id")
            instance_attributes = inc.getElementsByTagName('instance_attributes')[0]
            attributes = instance_attributes.getElementsByTagName('attributes')[0]
            nvpairs = attributes.getElementsByTagName('nvpair')
            for nvpair in nvpairs:
                if nvpair.getAttribute("name") == "clone_max":
                    clone_max = int(nvpair.getAttribute("value"))
            inc_rsc = inc.getElementsByTagName('primitive')[0]
            for i in range(0, clone_max):
                rsc = HAResource(self,inc_rsc)
                rsc.inc_no = i
                rsc.inc_name = inc_name
                rsc.inc_max = clone_max
                if self.use_short_names:
                    rsc.rid = rsc.rid + ":%d"%i
                else:
                    rsc.rid = inc_name+":"+rsc.rid + ":%d"%i
                rsc.Instance = rsc.rid
                ResourceList.append(rsc)
        return ResourceList
    
    def ResourceGroups(self):
        GroupList = []
        # read resources from the CIB
        configuration = self.Configuration()
        groups = configuration.getElementsByTagName('group')
        rscs = configuration.getElementsByTagName('primitive')
        for grp in groups:
            group = []
            GroupList.append(group)
            for rsc in rscs:
                if rsc in grp.childNodes:
                    if self.use_short_names:
                        resource = HAResource(self,rsc)
                    else:
                        resource = HAResource(self,rsc,grp.getAttribute('id'))
                    group.append(resource)
        return GroupList
        
    def Dependencies(self):
        DependencyList = []
        # read dependencies from the CIB
        configuration=self.Configuration()
        constraints=configuration.getElementsByTagName('constraints')[0]
        rsc_to_rscs=configuration.getElementsByTagName('rsc_to_rsc')
        for node in rsc_to_rscs:
            dependency = {}
            dependency["id"]=node.getAttribute('id')
            dependency["from"]=node.getAttribute('from')
            dependency["to"]=node.getAttribute('to')
            dependency["type"]=node.getAttribute('type')
            dependency["strength"]=node.getAttribute('strength')
            DependencyList.append(dependency)
        return DependencyList

    def find_partitions(self):
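        '''Return the distinct partition descriptions reported by the nodes that are up.'''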
        ccm_partitions = []

        for node in self.Env["nodes"]:
            self.debug("Retrieving partition details for %s" %node)
            if self.ShouldBeStatus[node] == self["up"]:
                partition = self.rsh.readaline(node, self["ParitionCmd"])

                if not partition:
                    self.log("no partition details for %s" %node)
                elif len(partition) > 2:
                    partition = partition[:-1]
                    found=0
                    for a_partition in ccm_partitions:
                        if partition == a_partition:
                            found = 1
                    if found == 0:
                        self.debug("Adding partition from %s: %s" %(node, partition))
                        ccm_partitions.append(partition)
                else:
                    self.log("bad partition details for %s" %node)

        return ccm_partitions

    def HasQuorum(self, node_list):
        # If we are auditing a partition, then one side will
        #   have quorum and the other not.
        # So the caller needs to tell us which we are checking
        # If no value for node_list is specified... assume all nodes  
        if not node_list:
            node_list = self.Env["nodes"]

        for node in node_list:
            if self.ShouldBeStatus[node] == self["up"]:
                quorum = self.rsh.readaline(node, self["QuorumCmd"])
                if string.find(quorum, "1") != -1:
                    return 1
                elif string.find(quorum, "0") != -1:
                    return 0
                else:
                    self.log("WARN: Unexpected quorum test result from "+ node +":"+ quorum)

        return 0

    def Components(self):
        complist = []
        common_ignore = [
                    "Pending action:",
                    "ERROR: crm_log_message_adv:",
                    "ERROR: MSG: No message to dump",
                    "pending LRM operations at shutdown",
                    "Lost connection to the CIB service",
                    "Connection to the CIB terminated...",
                    "Sending message to CIB service FAILED",
                    "crmd: .*Action A_RECOVER .* not supported",
                    "ERROR: stonithd_op_result_ready: not signed on",
                    "send_ipc_message: IPC Channel to .* is not connected",
                    "unconfirmed_actions: Waiting on .* unconfirmed actions",
                    "cib_native_msgready: Message pending on command channel",
                    "crmd:.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
                    "verify_stopped: Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
            ]

        stonith_ignore = [
            "ERROR: stonithd_signon: ",
            "update_failcount: Updating failcount for child_DoFencing",
            "ERROR: te_connect_stonith: Sign-in failed: triggered a retry",
            ]

        stonith_ignore.extend(common_ignore)

        complist.append(Process("ccm", 0, [
                    "State transition S_IDLE",
                    "CCM connection appears to have failed",
                    "crmd: .*Action A_RECOVER .* not supported",
                    "crmd: .*Input I_TERMINATE from do_recover",
                    "Exiting to recover from CCM connection failure",
                    "crmd:.*do_exit: Could not recover from internal error",
                    "crmd: .*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)",
#                    "WARN: determine_online_status: Node .* is unclean",
#                    "Scheduling Node .* for STONITH",
#                    "Executing .* fencing operation",
#                    "tengine_stonith_callback: .*result=0",
                    "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
                    "State transition S_STARTING -> S_PENDING",
                    ], [], common_ignore, 1, self))

        complist.append(Process("cib", 0, [
                    "State transition S_IDLE",
                    "Lost connection to the CIB service",
                    "Connection to the CIB terminated...",
                    "crmd: .*Input I_TERMINATE from do_recover",
                    "crmd: .*I_ERROR.*crmd_cib_connection_destroy",
                    "crmd:.*do_exit: Could not recover from internal error",
                    ], [], common_ignore, 1, self))

        complist.append(Process("lrmd", 0, [
                    "State transition S_IDLE",
                    "LRM Connection failed",
                    "crmd: .*I_ERROR.*lrm_dispatch",
                    "State transition S_STARTING -> S_PENDING",
                    ".*crmd .*exited with return code 2.",
                    "crmd: .*Input I_TERMINATE from do_recover",
                    "crmd:.*do_exit: Could not recover from internal error",
                    ], [], common_ignore, 1, self))

        complist.append(Process("crmd", 0, [
#                    "WARN: determine_online_status: Node .* is unclean",
#                    "Scheduling Node .* for STONITH",
#                    "Executing .* fencing operation",
#                    "tengine_stonith_callback: .*result=0",
                    "State transition S_IDLE",
                    "State transition S_STARTING -> S_PENDING",
                    ], [
                    "tengine: .*ERROR: subsystem_msg_dispatch: The server .* has left us: Shutting down...NOW",
                    "pengine: .*ERROR: subsystem_msg_dispatch: The server .* has left us: Shutting down...NOW",
                    ], common_ignore, 1, self))

        complist.append(Process("pengine", 1, [
                    "State transition S_IDLE",
                    ".*crmd .*exited with return code 2.",
                    "crmd: .*Input I_TERMINATE from do_recover",
                    "crmd:.*do_exit: Could not recover from internal error",
                    ], [], common_ignore, 1, self))

        complist.append(Process("tengine", 1, [
                    "State transition S_IDLE",
                    ".*crmd .*exited with return code 2.",
                    "crmd: .*Input I_TERMINATE from do_recover",
                    "crmd:.*do_exit: Could not recover from internal error",
                    ], [], common_ignore, 1, self))

        if self.Env["DoFencing"] == 1 :
            complist.append(Process("stonithd", 0, [], [
                        "tengine_stonith_connection_destroy: Fencing daemon has left us",
                        "Attempting connection to fencing daemon",
                        "te_connect_stonith: Connected",
                        ], stonith_ignore, 0, self))
#            complist.append(Process("heartbeat", 0, [], [], [], None, self))
        return complist

    def NodeUUID(self, node):
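        '''Return the UUID that crmadmin reports for the given node, or "" if none is found.'''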
        lines = self.rsh.readlines(node, self["UUIDQueryCmd"])
        for line in lines:
            self.debug("UUIDLine:"+ line)
            m = re.search(r'%s.+\((.+)\)' % node, line)
            if m:
                return m.group(1)
        return ""

    def StandbyStatus(self, node):
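        '''Return the node's standby status ("on" or "off"); "off" is assumed when the query returns nothing.'''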
        out=self.rsh.readaline(node, self["StandbyQueryCmd"]%node)
        if not out:
            return "off"
        out = out[:-1]
        self.debug("Standby result: "+out)
        return out

    # status == "on" : Enter Standby mode
    # status == "off": Enter Active mode
    def SetStandbyMode(self, node, status):
        current_status = self.StandbyStatus(node)
        cmd = self["StandbyCmd"] % (node, status)
        ret = self.rsh(node, cmd)
        return True

class HAResource(Resource):
    def __init__(self, cm, node, group=None):
        '''
        Get information from xml node
        '''
        if group == None :
            self.rid     = str(node.getAttribute('id'))
        else :
            self.rid     = group + ":" + str(node.getAttribute('id'))
        self.rclass  = str(node.getAttribute('class'))
        self.rtype   = str(node.getAttribute('type'))
        self.inc_name = None
        self.inc_no = -1
        self.inc_max = -1
        self.rparameters = {}
        nvpairs = [] 
 
        attr_list = node.getElementsByTagName('instance_attributes')
        if len(attr_list) > 0:
            attributes = attr_list[0]
            attr_list = attributes.getElementsByTagName('attributes')
        if len(attr_list) > 0:
            parameters = attr_list[0]
            nvpairs = parameters.getElementsByTagName('nvpair')

        for nvpair in nvpairs:
            name=nvpair.getAttribute('name')
            value=nvpair.getAttribute('value')
            self.rparameters[name]=value

        # This should normally be called first... FIXME!
        Resource.__init__(self, cm, self.rtype, self.rid)
        
        # resources that don't need quorum will have:
        #        <op name="start" prereq="nothing"/>
        ops = node.getElementsByTagName('op')
        for op in ops:
            if op.getAttribute('name') == "start" and op.getAttribute('prereq') == "nothing":
                self.needs_quorum = 0

    def IsRunningOn(self, nodename):
        '''
        This member function returns true if our resource is running
        on the given node in the cluster.
        We call the monitor operation of the resource agent.
        '''
        rc = self.CM.ResourceOp(self.rid, "monitor", nodename)
        return (rc == 0)
        
    def RunningNodes(self):
        return self.CM.ResourceLocation(self.rid)

    def Start(self, nodename):
        '''
        This member function starts or activates the resource.
        '''
        return self.CM.ResourceOp(self.rid, "start", nodename)

    def Stop(self, nodename):
        '''
        This member function stops or deactivates the resource.
        '''
        return self.CM.ResourceOp(self.rid, "stop", nodename)

    def IsWorkingCorrectly(self, nodename):
        return self.IsRunningOn(nodename)


#######################################################################
#
#   A little test code...
#
#   Which you are advised to completely ignore...
#
#######################################################################
if __name__ == '__main__': 
    pass
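
A side note on how the command strings above are used: LinuxHAv2 keeps shell command templates (StatusCmd, StandbyCmd and friends) in its dictionary and fills them in with Python % formatting before handing them to rsh. A minimal standalone sketch of that substitution, using two templates copied from the file (the hostname "node1" is purely illustrative):

    # Standalone sketch of the %-template substitution used throughout LinuxHAv2.
    templates = {
        "StatusCmd"  : "/usr/sbin/crmadmin -S %s 2>/dev/null",
        "StandbyCmd" : "/usr/sbin/crm_standby -U %s -v %s 2>/dev/null",
    }

    node = "node1"
    print(templates["StatusCmd"] % node)           # /usr/sbin/crmadmin -S node1 2>/dev/null
    print(templates["StandbyCmd"] % (node, "on"))  # /usr/sbin/crm_standby -U node1 -v on 2>/dev/null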