/usr/share/pyshared/insanity/scenario.py is in python-insanity 0.0+git20110920.4750a8e8-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# GStreamer QA system
#
# scenario.py
#
# Copyright (c) 2007, Edward Hervey <bilboed@bilboed.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from copy import copy
import gobject
from insanity.test import Test
from insanity.log import debug, exception
class Scenario(Test):
"""
Test that runs other tests with optional programmatic decisions
and result processing.
"""
__test_name__ = "scenario"
__test_description__ = """Base class for scenarios"""
    __test_timeout__ = 600 # 10 minutes; the subtests handle their own timeouts
__test_extra_infos__ = {"subtest-names":
"The instance-name argument for all subtests started."}
# TODO :
# auto-aggregation of arguments, checklists and extra-info
# Scenario might want to add some arguments, checks, extra-info ?
# arg/checklist/extra-info names might need to be prefixed ?
# Ex : <test-name>-<nb>-<name>
# Override timeout !
# implement methods to:
# * decide which test should be run first
# * what should be done when a test is done
# Test methods overrides
def setUp(self):
if not Test.setUp(self):
return False
self._tests = [] # list of (test, args, monitors)
self.tests = [] # executed tests
self._subtest_names = []
# FIXME : asynchronous starts ???
return True
def _setUpMonitors(self):
        # we don't need monitors ourselves, our subclasses do
return True
def tearDown(self):
# FIXME : implement this for the case where we are aborted !
self.extraInfo("subtest-names", repr(self._subtest_names))
def test(self):
# get the first test to run
self._startNextSubTest()
def getSuccessPercentage(self):
if not self.tests:
return 0.0
        return sum(t.getSuccessPercentage() for t in self.tests) / len(self.tests)
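    # (Editor's note) getSuccessPercentage() is a plain average over the
    # executed subtests: two subtests scoring 100.0 and 50.0 make the
    # scenario report (100.0 + 50.0) / 2 == 75.0.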
# private methods
def _startNextSubTest(self):
        # initialised so the except clause below cannot hit an unbound name
        testclass = None
        try:
testclass, args, monitors, instance_name = self._tests.pop(0)
            if "bus" not in args:
                args["bus"] = self.arguments.get("bus")
            if "bus_address" not in args:
                args["bus_address"] = self.arguments.get("bus_address")
debug("About to create subtest %r (instance_name=%r) "
"with arguments %r", testclass, instance_name, args)
args["instance-name"] = instance_name
instance = testclass(testrun=self._testrun,
**args)
if monitors:
for monitor in monitors:
instance.addMonitor(*monitor)
except Exception, e:
exception("Failed to create instance of class %r : %r", testclass, e)
self.stop()
return
# connect to signals
self.tests.append(instance)
instance.connect("done", self._subTestDoneCb)
for monitor in self._monitors:
instance.addMonitor(*monitor)
instance.run()
# returning False so that idle_add() doesn't call us again
return False
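    # (Editor's sketch, not in the original file) gobject.idle_add() keeps
    # re-invoking its callback for as long as that callback returns True,
    # which is why _startNextSubTest() returns False above.  For example:
    #
    #     def once():
    #         print "runs a single time"
    #         return False            # removes the idle source
    #     gobject.idle_add(once)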
# sub-test callbacks
def _subTestDoneCb(self, subtest):
debug("Done with subtest %r", subtest)
carryon = self.subTestDone(subtest)
debug("carryon:%r , len(self._tests):%d",
carryon, len(self._tests))
if carryon and len(self._tests) > 0:
# startup the next test !
debug("Carrying on with next test")
gobject.idle_add(self._startNextSubTest)
else:
debug("No more subtests to run, stopping")
self.stop()
# overridable methods
def addSubTest(self, testclass, arguments, monitors=None, position=-1,
instance_name=None):
"""
testclass : a testclass to run next, can be a Scenario
        arguments : dictionary of arguments
monitors : list of (Monitor, monitorargs) to run the test with
position : the position to insert the test in (-1 for last)
instance_name : a human-readable name for the test.
This method can be called several times in a row at any moment.
"""
if instance_name is None:
instance_name = "%u.%s" % (len(self._subtest_names),
testclass.__test_name__)
        # filter out arguments that non-scenario testclasses don't accept
if not issubclass(testclass, Scenario):
args = {}
for validkey in testclass.getFullArgumentList():
                if validkey in arguments:
args[validkey] = arguments[validkey]
else:
args = copy(arguments)
debug("Appending subtest %r args:%r", testclass, args)
if position == -1:
self._tests.append((testclass, args, monitors, instance_name))
else:
self._tests.insert(position,
(testclass, args, monitors, instance_name))
self._subtest_names.append(instance_name)
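    # (Editor's sketch, not in the original file) a typical addSubTest()
    # call; SomeTest and SomeMonitor are hypothetical placeholders:
    #
    #     self.addSubTest(SomeTest,
    #                     {"uri": "file:///tmp/sample.ogg"},
    #                     monitors=[(SomeMonitor, None)],
    #                     instance_name="0.some-test")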
def subTestDone(self, subtest):
"""
subclass should implement this method to know when a subtest is
done. This is the right place to call setNextSubTest().
Return True (default) if we should carry on with the next subtest (if any).
Return False if we should not carry on with further tests.
"""
return True
# implement Test methods
def _getRecursiveArgumentList(self):
"""
        Like Test.getFullArgumentList(), but takes subtests into account,
which would not be possible with a classmethod.
"""
validkeys = self.getFullArgumentList()
for sub in self.tests:
if isinstance(sub, Scenario):
validkeys.update(sub._getRecursiveArgumentList())
else:
validkeys.update(sub.getFullArgumentList())
return validkeys
def getArguments(self):
"""
        Returns the valid arguments (and their values) for this scenario.
"""
validkeys = self._getRecursiveArgumentList()
# Hide expected-failures from the storage backend.
validkeys.pop("expected-failures", [])
res = {}
for key in self.arguments.iterkeys():
if key in validkeys:
res[key] = self.arguments[key]
return res
def getCheckList(self):
checklist = dict(super(Scenario, self).getCheckList())
for sub in self.tests:
n_u_failures = \
dict(sub.getCheckList()).get("no-unexpected-failures")
if n_u_failures == 0:
checklist["no-unexpected-failures"] = 0
return checklist.items()
def addMonitor(self, monitor, monitorargs=None):
# the subtests will do the check for validity
self._monitors.append((monitor, monitorargs))
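# (Editor's sketch, not part of the original module) A minimal custom
# Scenario: queue subtests in setUp() and decide in subTestDone() whether
# to carry on.  SomeTest stands in for any Test subclass.
#
# class TolerantScenario(Scenario):
#     __test_name__ = "tolerant-scenario"
#     __test_description__ = """Carries on unless a subtest fully fails"""
#
#     def setUp(self):
#         if not Scenario.setUp(self):
#             return False
#         self.addSubTest(SomeTest, self.arguments)
#         self.addSubTest(SomeTest, self.arguments)
#         return True
#
#     def subTestDone(self, subtest):
#         # stop only when the finished subtest validated nothing at all
#         return subtest.getSuccessPercentage() > 0.0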
class ListScenario(Scenario):
"""
Scenario that will run each test one after the other on the same
arguments.
"""
__test_name__ = """list-scenario"""
__test_arguments__ = {
"subtest-list" : ( "List of Testclass to run sequentially",
[], None ),
"fatal-subtest-failure" : ( "Do not carry on with next subtest if previous failed",
True, None )
}
__test_description__ = """
This scenario will execute the given tests one after the other.
"""
__test_full_description__ = """
This scenario will execute the given tests one after the other.
    If fatal-subtest-failure is set to True, then it will stop whenever
one test hasn't succeeded fully (all steps validated).
"""
def setUp(self):
if not Scenario.setUp(self):
return False
# add the tests
for subtest in self.arguments["subtest-list"]:
self.addSubTest(subtest,
self.arguments,
[])
return True
def subTestDone(self, test):
        # if fatal-subtest-failure is not set, always carry on
        if not self.arguments["fatal-subtest-failure"]:
            return True
        # else only carry on if the previous test was 100% successful
if test.getSuccessPercentage() == 100.0:
return True
return False
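# (Editor's sketch, not part of the original file) driving a ListScenario;
# `testrun`, TestA and TestB are hypothetical placeholders.  The arguments
# dict is forwarded to every subtest, and the hyphenated keys force the
# **-expansion form:
#
#     scenario = ListScenario(testrun=testrun,
#                             **{"subtest-list": [TestA, TestB],
#                                "fatal-subtest-failure": True})
#     scenario.run()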