This file is indexed.

/usr/share/pyshared/insanity/tests/scenarios/gstmediatest.py is in python-insanity 0.0+git20110920.4750a8e8-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# GStreamer QA system
#
#       tests/scenario/gstmediatest.py
#
# Copyright (c) 2007, Edward Hervey <bilboed@bilboed.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.

"""
Scenario simulating the behaviour of the historical gst-media-test scenarios
"""

from insanity.scenario import Scenario
from insanity.monitor import GstDebugLogMonitor
from insanity.test import Test
from insanity.log import warning

class GstMediaTestScenario(Scenario):
    """
    This is a scenario that will attempt to run the given test.
    If it doesn't reach 100% success, it will be re-run with more aggressive
    monitoring.

    It automatically adds the correct monitors to the underlying tests, or
    sets the right parameters for tests that have a default monitor.

    This reproduces the re-try behaviour of gst-media-test
    """

    __test_name__ = "GstMediaTestScenario"
    __test_description__ = """
    Re-runs failed subtests with a higher debug level
    """
    __test_arguments__ = {
        "subtest-class": ( "TestClass to run", None, None ),
        "debug-level-1": ( "GST_DEBUG specification to use on first run",
                           "*:2", None ),
        "debug-level-2": ( "GST_DEBUG specification to use on second run",
                           "*:5", None )
        }
    __test_checklist__ = {
        "similar-results":"were the results similar over the two runs"
        }

    def setUp(self):
        if not Scenario.setUp(self):
            return False
        # add the initial test
        subtest = self.arguments.get("subtest-class")
        debuglevel = self.arguments.get("debug-level-1", "*:2")
        # Bail out if no subtest class was provided.
        if not subtest:
            return False
        self.addSubTest(subtest, self.arguments,
                        [(GstDebugLogMonitor, {"debug-level": debuglevel})],
                        instance_name=subtest.__test_name__)
        return True

    def subTestDone(self, test):
        # Second run finished: compare its results against the first run's.
        if len(self.tests) == 2:
            if test.getSuccessPercentage() == self.tests[0].getSuccessPercentage():
                self.validateStep("similar-results")
            return True

        # First run finished: re-run with a higher debug level if it was not
        # fully successful and had unexpected failures.
        if test.getSuccessPercentage() != 100.0 and \
                dict(test.getCheckList()).get("no-unexpected-failures") != 1:
            warning("Checklist was %s for %s with args %s. Rerunning.",
                test.getCheckList(), test.__test_name__, test.getArguments())
            subtest = self.arguments.get("subtest-class")
            debuglevel = self.arguments.get("debug-level-2", "*:5")
            self.addSubTest(subtest, self.arguments,
                            [(GstDebugLogMonitor, {"debug-level": debuglevel})],
                            instance_name="rerun." + subtest.__test_name__)
        else:
            # No re-run was needed; treat the results as similar.
            self.validateStep("similar-results")
        return True
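
For orientation, here is a minimal, self-contained sketch of the re-try pattern this scenario implements: run once at a low GST_DEBUG level, re-run at a higher level only when the first run was not fully successful, then compare the two outcomes. None of these names (run_job, run_with_retry) come from the insanity package; they are hypothetical stand-ins for its Test and monitor machinery.

# Hypothetical stand-alone sketch of the gst-media-test re-try pattern;
# the names below are illustrative and not part of the insanity API.
import random

def run_job(debug_level):
    """Pretend to run a test under the given GST_DEBUG level and
    return its success percentage."""
    print("running with GST_DEBUG=%s" % debug_level)
    return random.choice([100.0, 50.0])

def run_with_retry(level1="*:2", level2="*:5"):
    first = run_job(level1)
    if first == 100.0:
        # Fully successful on the first pass: nothing to compare.
        return True
    # Re-run with more aggressive logging, then compare the outcomes.
    second = run_job(level2)
    return first == second  # corresponds to the "similar-results" step

if __name__ == "__main__":
    print("similar-results:", run_with_retry())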