This file is indexed.

/usr/lib/python3/dist-packages/pytest_benchmark/hookspec.py is in python3-pytest-benchmark 3.0.0-1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

def pytest_benchmark_generate_machine_info(config):
    """
    To completely replace the generated machine_info, do something like this:

    .. sourcecode:: python

        import getpass

        def pytest_benchmark_generate_machine_info(config):
            return {'user': getpass.getuser()}
    """
    pass


def pytest_benchmark_update_machine_info(config, info):
    """
    If benchmarks are compared and their machine_info differs, warnings will be shown.

    To add the current user to the machine info, override the hook in your conftest.py like this:

    .. sourcecode:: python

        import getpass

        def pytest_benchmark_update_machine_info(config, info):
            info['user'] = getpass.getuser()
    """
    pass


def pytest_benchmark_generate_commit_info(config):
    """
    To completely replace the generated commit_info, do something like this:

    .. sourcecode:: python

        import subprocess

        def pytest_benchmark_generate_commit_info(config):
            # check_output returns bytes on Python 3, so decode before storing
            return {'id': subprocess.check_output(['svnversion']).decode().strip()}
    """
    pass


def pytest_benchmark_update_commit_info(config, info):
    """
    To add something to the commit_info, such as the commit message, do something like this:

    .. sourcecode:: python

        import subprocess

        def pytest_benchmark_update_commit_info(config, info):
            # check_output returns bytes on Python 3, so decode before storing
            info['message'] = subprocess.check_output(['git', 'log', '-1', '--pretty=%B']).decode().strip()
    """
    pass


def pytest_benchmark_group_stats(config, benchmarks, group_by):
    """
    You may customize the grouping here if the builtin grouping doesn't suit you.

    Example:

    .. sourcecode:: python

        from collections import defaultdict

        import pytest

        @pytest.mark.hookwrapper
        def pytest_benchmark_group_stats(config, benchmarks, group_by):
            outcome = yield
            if group_by == "special":  # when you use --benchmark-group-by=special
                result = defaultdict(list)
                for bench in benchmarks:
                    # `bench.special` doesn't exist - replace it with whatever attribute you need
                    result[bench.special].append(bench)
                outcome.force_result(result.items())
    """
    pass


def pytest_benchmark_generate_json(config, benchmarks, include_data):
    """
    You should read pytest-benchmark's code if you really need to fully customize the JSON output.

    .. warning::

        Improperly customizing this may cause breakage if ``--benchmark-compare`` or ``--benchmark-histogram`` are used.

    By default, ``pytest_benchmark_generate_json`` strips benchmarks that have errors from the output. To prevent this,
    implement the hook like this:

    .. sourcecode:: python

        import pytest

        @pytest.mark.hookwrapper
        def pytest_benchmark_generate_json(config, benchmarks, include_data):
            for bench in benchmarks:
                bench.has_error = False
            yield
    """
    pass


def pytest_benchmark_update_json(config, benchmarks, output_json):
    """
    Use this to add custom fields to the output JSON.

    Example:

    .. sourcecode:: python

        def pytest_benchmark_update_json(config, benchmarks, output_json):
            output_json['foo'] = 'bar'
    """
    pass


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
    """
    You may want to use this hook to implement custom checks or abort execution.
    ``pytest-benchmark``'s builtin hook does this:

    .. sourcecode:: python

        from pytest_benchmark.utils import format_dict

        def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
            if compared_benchmark["machine_info"] != machine_info:
                benchmarksession.logger.warn(
                    "Benchmark machine_info is different. Current: %s VS saved: %s." % (
                        format_dict(machine_info),
                        format_dict(compared_benchmark["machine_info"]),
                    )
                )
    """
    pass


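# These are "firstresult" hooks: pluggy stops calling implementations as soon
# as one returns a non-None value, so an implementation in your conftest.py
# completely replaces the plugin's default result.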
pytest_benchmark_generate_commit_info.firstresult = True
pytest_benchmark_generate_json.firstresult = True
pytest_benchmark_generate_machine_info.firstresult = True
pytest_benchmark_group_stats.firstresult = True
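
A usage sketch (not part of the packaged file above): since ``pytest_benchmark_update_commit_info`` is an update hook, an implementation in your project's conftest.py only has to mutate the ``info`` dict. The ``'branch'`` key and the ``git`` command below are illustrative choices, not part of the hook spec.

.. sourcecode:: python

    # conftest.py - a minimal sketch; assumes git is on PATH and the tests
    # run inside a git checkout.
    import subprocess

    def pytest_benchmark_update_commit_info(config, info):
        # Record the current branch alongside the commit details that
        # pytest-benchmark already collects.
        branch = subprocess.check_output(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
        ).decode().strip()
        info['branch'] = branch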