This file is indexed.

/usr/lib/python3/dist-packages/gnocchi/common/s3.py is in python3-gnocchi 4.2.0-0ubuntu5.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below; a short usage sketch follows the listing.

# -*- encoding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri

import tenacity
try:
    import boto3
    import botocore.config as boto_config
    import botocore.exceptions
except ImportError:
    boto3 = None
    botocore = None

from gnocchi import utils

LOG = daiquiri.getLogger(__name__)


def retry_if_operationaborted(exception):
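    """Tenacity predicate: retry only on S3 "OperationAborted" errors."""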
    return (isinstance(exception, botocore.exceptions.ClientError)
            and exception.response['Error'].get('Code') == "OperationAborted")


def get_connection(conf):
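    """Build a boto3 S3 client from the Gnocchi storage configuration."""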
    if boto3 is None:
        raise RuntimeError("boto3 unavailable")
    conn = boto3.client(
        's3',
        endpoint_url=conf.s3_endpoint_url,
        region_name=conf.s3_region_name,
        aws_access_key_id=conf.s3_access_key_id,
        aws_secret_access_key=conf.s3_secret_access_key,
        config=boto_config.Config(
            max_pool_connections=conf.s3_max_pool_connections))
    return conn, conf.s3_region_name, conf.s3_bucket_prefix


# NOTE(jd) OperationAborted might be raised if two concurrent processes
# try to create the same bucket for the first time
@tenacity.retry(
    stop=tenacity.stop_after_attempt(10),
    wait=tenacity.wait_fixed(0.5),
    retry=tenacity.retry_if_exception(retry_if_operationaborted)
)
def create_bucket(conn, name, region_name):
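    """Create a bucket, constrained to region_name when one is given."""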
    if region_name:
        kwargs = dict(CreateBucketConfiguration={
            "LocationConstraint": region_name,
        })
    else:
        kwargs = {}
    return conn.create_bucket(Bucket=name, **kwargs)


def bulk_delete(conn, bucket, objects):
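    """Delete objects from bucket in batches of at most 1000 keys."""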
    # NOTE(jd) The maximum number of objects that can be deleted in one
    # request is 1000
    # TODO(jd) Parallelize?
    deleted = 0
    for obj_slice in utils.grouper(objects, 1000):
        d = {
            'Objects': [{'Key': o} for o in obj_slice],
            # FIXME(jd) Use Quiet mode, but s3rver does not seem to
            # support it
            # 'Quiet': True,
        }
        response = conn.delete_objects(
            Bucket=bucket,
            Delete=d)
        # 'Deleted' may be absent when nothing in this batch was deleted
        deleted += len(response.get('Deleted', []))
    LOG.debug('%s objects deleted, %s objects skipped',
              deleted, len(objects) - deleted)
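
A minimal sketch of how these helpers fit together is shown below. It is illustrative only: the conf object is a stand-in built with types.SimpleNamespace (in Gnocchi itself these values come from oslo.config), and the endpoint URL, credentials, bucket name and keys are all invented for the example.

# Hypothetical usage sketch; none of these values come from the file above.
import types

from gnocchi.common import s3

conf = types.SimpleNamespace(
    s3_endpoint_url="http://localhost:4568",  # e.g. a local s3rver instance
    s3_region_name=None,                      # no LocationConstraint
    s3_access_key_id="testing",
    s3_secret_access_key="testing",
    s3_max_pool_connections=50,
    s3_bucket_prefix="gnocchi",
)

conn, region_name, bucket_prefix = s3.get_connection(conf)

bucket = bucket_prefix + "-aggregates"
s3.create_bucket(conn, bucket, region_name)  # retried on OperationAborted

# bulk_delete splits the key list into DeleteObjects calls of 1000 keys each,
# so 2500 keys result in three requests.
keys = ["measure/%d" % i for i in range(2500)]
s3.bulk_delete(conn, bucket, keys)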