/usr/lib/python2.7/dist-packages/boto/s3/key.py is in python-boto 2.34.0-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.  All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes

from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name


class Key(object):
    """
    Represents a key (object) in an S3 bucket.

    :ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
    :ivar name: The name of this Key object.
    :ivar metadata: A dictionary containing user metadata that you
        wish to store with the object or that has been retrieved from
        an existing object.
    :ivar cache_control: The value of the `Cache-Control` HTTP header.
    :ivar content_type: The value of the `Content-Type` HTTP header.
    :ivar content_encoding: The value of the `Content-Encoding` HTTP header.
    :ivar content_disposition: The value of the `Content-Disposition` HTTP
        header.
    :ivar content_language: The value of the `Content-Language` HTTP header.
    :ivar etag: The `etag` associated with this object.
    :ivar last_modified: The string timestamp representing the last
        time this object was modified in S3.
    :ivar owner: The ID of the owner of this object.
    :ivar storage_class: The storage class of the object.  Currently, one of:
        STANDARD | REDUCED_REDUNDANCY | GLACIER
    :ivar md5: The MD5 hash of the contents of the object.
    :ivar size: The size, in bytes, of the object.
    :ivar version_id: The version ID of this object, if it is a versioned
        object.
    :ivar encrypted: Whether the object is encrypted while at rest on
        the server.
    """

    DefaultContentType = 'application/octet-stream'

    RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
      <RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
        <Days>%s</Days>
      </RestoreRequest>"""


    BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)

    # The object metadata fields a user can set, other than custom metadata
    # fields (i.e., those beginning with a provider-specific prefix like
    # x-amz-meta).
    base_user_settable_fields = set(["cache-control", "content-disposition",
                                     "content-encoding", "content-language",
                                     "content-md5", "content-type",
                                     "x-robots-tag", "expires"])
    _underscore_base_user_settable_fields = set()
    for f in base_user_settable_fields:
        _underscore_base_user_settable_fields.add(f.replace('-', '_'))
    # Metadata fields, whether user-settable or not, other than custom
    # metadata fields (i.e., those beginning with a provider specific prefix
    # like x-amz-meta).
    base_fields = (base_user_settable_fields |
                   set(["last-modified", "content-length", "date", "etag"]))



    def __init__(self, bucket=None, name=None):
        self.bucket = bucket
        self.name = name
        self.metadata = {}
        self.cache_control = None
        self.content_type = self.DefaultContentType
        self.content_encoding = None
        self.content_disposition = None
        self.content_language = None
        self.filename = None
        self.etag = None
        self.is_latest = False
        self.last_modified = None
        self.owner = None
        self._storage_class = None
        self.path = None
        self.resp = None
        self.mode = None
        self.size = None
        self.version_id = None
        self.source_version_id = None
        self.delete_marker = False
        self.encrypted = None
        # If a restore of this object is in progress, this attribute will
        # be set to True. If the restore has completed, it will be set to
        # False. Otherwise this value will be None. Once the restore is
        # complete (ongoing_restore = False), expiry_date will be populated
        # with the expiry date of the restored object.
        self.ongoing_restore = None
        self.expiry_date = None
        self.local_hashes = {}

    def __repr__(self):
        if self.bucket:
            name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
        else:
            name = u'<Key: None,%s>' % self.name

        # Encode to bytes for Python 2 to prevent display decoding issues
        if not isinstance(name, str):
            name = name.encode('utf-8')

        return name

    def __iter__(self):
        return self

    @property
    def provider(self):
        provider = None
        if self.bucket and self.bucket.connection:
            provider = self.bucket.connection.provider
        return provider

    def _get_key(self):
        return self.name

    def _set_key(self, value):
        self.name = value

    key = property(_get_key, _set_key)

    def _get_md5(self):
        if 'md5' in self.local_hashes and self.local_hashes['md5']:
            return binascii.b2a_hex(self.local_hashes['md5'])

    def _set_md5(self, value):
        if value:
            self.local_hashes['md5'] = binascii.a2b_hex(value)
        elif 'md5' in self.local_hashes:
            self.local_hashes.pop('md5', None)

    md5 = property(_get_md5, _set_md5)

    def _get_base64md5(self):
        if 'md5' in self.local_hashes and self.local_hashes['md5']:
            md5 = self.local_hashes['md5']
            if not isinstance(md5, bytes):
                md5 = md5.encode('utf-8')
            return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')

    def _set_base64md5(self, value):
        if value:
            if not isinstance(value, six.string_types):
                value = value.decode('utf-8')
            self.local_hashes['md5'] = binascii.a2b_base64(value)
        elif 'md5' in self.local_hashes:
            del self.local_hashes['md5']

    base64md5 = property(_get_base64md5, _set_base64md5)

    def _get_storage_class(self):
        if self._storage_class is None and self.bucket:
            # Attempt to fetch storage class
            list_items = list(self.bucket.list(self.name.encode('utf-8')))
            if len(list_items) and getattr(list_items[0], '_storage_class',
                                           None):
                self._storage_class = list_items[0]._storage_class
            else:
                # Key is not yet saved? Just use default...
                self._storage_class = 'STANDARD'

        return self._storage_class

    def _set_storage_class(self, value):
        self._storage_class = value

    storage_class = property(_get_storage_class, _set_storage_class)

    def get_md5_from_hexdigest(self, md5_hexdigest):
        """
        A utility function to create the 2-tuple (md5hexdigest, base64md5)
        from just having a precalculated md5_hexdigest.
        """
        digest = binascii.unhexlify(md5_hexdigest)
        base64md5 = encodebytes(digest)
        if base64md5[-1] == '\n':
            base64md5 = base64md5[0:-1]
        return (md5_hexdigest, base64md5)
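
    # Illustrative sketch: converting a precomputed hex digest into the
    # (hexdigest, base64) pair accepted by the upload methods' ``md5``
    # parameter. The digest value and filename below are placeholders.
    #
    #   k = bucket.new_key('mykey')
    #   md5_tuple = k.get_md5_from_hexdigest('5eb63bbbe01eeed093cb22bb8f5acdc3')
    #   k.set_contents_from_filename('/tmp/data', md5=md5_tuple)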

    def handle_encryption_headers(self, resp):
        provider = self.bucket.connection.provider
        if provider.server_side_encryption_header:
            self.encrypted = resp.getheader(
                provider.server_side_encryption_header, None)
        else:
            self.encrypted = None

    def handle_version_headers(self, resp, force=False):
        provider = self.bucket.connection.provider
        # If the Key object already has a version_id attribute value, it
        # means that it represents an explicit version and the user is
        # doing a get_contents_*(version_id=<foo>) to retrieve another
        # version of the Key.  In that case, we don't really want to
        # overwrite the version_id in this Key object.  Comprende?
        if self.version_id is None or force:
            self.version_id = resp.getheader(provider.version_id, None)
        self.source_version_id = resp.getheader(provider.copy_source_version_id,
                                                None)
        if resp.getheader(provider.delete_marker, 'false') == 'true':
            self.delete_marker = True
        else:
            self.delete_marker = False

    def handle_restore_headers(self, response):
        provider = self.bucket.connection.provider
        header = response.getheader(provider.restore_header)
        if header is None:
            return
        parts = header.split(',', 1)
        for part in parts:
            key, val = [i.strip() for i in part.split('=')]
            val = val.replace('"', '')
            if key == 'ongoing-request':
                self.ongoing_restore = (val.lower() == 'true')
            elif key == 'expiry-date':
                self.expiry_date = val

    def handle_addl_headers(self, headers):
        """
        Used by Key subclasses to do additional, provider-specific
        processing of response headers. No-op for this base class.
        """
        pass

    def open_read(self, headers=None, query_args='',
                  override_num_retries=None, response_headers=None):
        """
        Open this key for reading

        :type headers: dict
        :param headers: Headers to pass in the web request

        :type query_args: string
        :param query_args: Arguments to pass in the query string
            (ie, 'torrent')

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.
        """
        if self.resp is None:
            self.mode = 'r'

            provider = self.bucket.connection.provider
            self.resp = self.bucket.connection.make_request(
                'GET', self.bucket.name, self.name, headers,
                query_args=query_args,
                override_num_retries=override_num_retries)
            if self.resp.status < 199 or self.resp.status > 299:
                body = self.resp.read()
                raise provider.storage_response_error(self.resp.status,
                                                      self.resp.reason, body)
            response_headers = self.resp.msg
            self.metadata = boto.utils.get_aws_metadata(response_headers,
                                                        provider)
            for name, value in response_headers.items():
                # To get correct size for Range GETs, use Content-Range
                # header if one was returned. If not, use Content-Length
                # header.
                if (name.lower() == 'content-length' and
                    'Content-Range' not in response_headers):
                    self.size = int(value)
                elif name.lower() == 'content-range':
                    end_range = re.sub('.*/(.*)', '\\1', value)
                    self.size = int(end_range)
                elif name.lower() in Key.base_fields:
                    self.__dict__[name.lower().replace('-', '_')] = value
            self.handle_version_headers(self.resp)
            self.handle_encryption_headers(self.resp)
            self.handle_restore_headers(self.resp)
            self.handle_addl_headers(self.resp.getheaders())

    def open_write(self, headers=None, override_num_retries=None):
        """
        Open this key for writing.
        Not yet implemented

        :type headers: dict
        :param headers: Headers to pass in the write request

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying PUT.
        """
        raise BotoClientError('Not Implemented')

    def open(self, mode='r', headers=None, query_args=None,
             override_num_retries=None):
        if mode == 'r':
            self.mode = 'r'
            self.open_read(headers=headers, query_args=query_args,
                           override_num_retries=override_num_retries)
        elif mode == 'w':
            self.mode = 'w'
            self.open_write(headers=headers,
                            override_num_retries=override_num_retries)
        else:
            raise BotoClientError('Invalid mode: %s' % mode)

    closed = False

    def close(self, fast=False):
        """
        Close this key.

        :type fast: bool
        :param fast: True if you want the connection to be closed without
            first reading the content. This should only be used in cases
            where subsequent calls don't need to return the content from
            the open HTTP connection. Note: As explained at
            http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
            callers must read the whole response before sending a new
            request to the server. Calling Key.close(fast=True) and making
            a subsequent request to the server will work because boto will
            get an httplib exception and close/reopen the connection.
        """
        if self.resp and not fast:
            self.resp.read()
        self.resp = None
        self.mode = None
        self.closed = True

    def next(self):
        """
        By providing a next method, the key object supports use as an iterator.
        For example, you can now say:

        for bytes in key:
            write bytes to a file or whatever

        All of the HTTP connection stuff is handled for you.
        """
        self.open_read()
        data = self.resp.read(self.BufferSize)
        if not data:
            self.close()
            raise StopIteration
        return data

    # Python 3 iterator support
    __next__ = next

    def read(self, size=0):
        self.open_read()
        if size == 0:
            data = self.resp.read()
        else:
            data = self.resp.read(size)
        if not data:
            self.close()
        return data
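
    # Illustrative sketch: since Key implements the iterator protocol via
    # next()/__next__ above, an object can be streamed in BufferSize
    # chunks without holding it all in memory. Names are placeholders.
    #
    #   key = conn.get_bucket('mybucket').get_key('mykey')
    #   with open('/tmp/out', 'wb') as fp:
    #       for chunk in key:
    #           fp.write(chunk)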

    def change_storage_class(self, new_storage_class, dst_bucket=None,
                             validate_dst_bucket=True):
        """
        Change the storage class of an existing key.
        Depending on whether or not a different destination bucket is
        supplied, this will either copy the item onto itself within the
        bucket, preserving all metadata and ACL info but changing the
        storage class, or it will copy the item to the provided
        destination bucket, also preserving metadata and ACL info.

        :type new_storage_class: string
        :param new_storage_class: The new storage class for the Key.
            Possible values are:
            * STANDARD
            * REDUCED_REDUNDANCY

        :type dst_bucket: string
        :param dst_bucket: The name of a destination bucket.  If not
            provided the current bucket of the key will be used.

        :type validate_dst_bucket: bool
        :param validate_dst_bucket: If True, will validate the dst_bucket
            by using an extra list request.
        """
        if new_storage_class == 'STANDARD':
            return self.copy(self.bucket.name, self.name,
                             reduced_redundancy=False, preserve_acl=True,
                             validate_dst_bucket=validate_dst_bucket)
        elif new_storage_class == 'REDUCED_REDUNDANCY':
            return self.copy(self.bucket.name, self.name,
                             reduced_redundancy=True, preserve_acl=True,
                             validate_dst_bucket=validate_dst_bucket)
        else:
            raise BotoClientError('Invalid storage class: %s' %
                                  new_storage_class)
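
    # Illustrative sketch: moving an existing object to Reduced Redundancy
    # Storage by copying it onto itself, as described in the docstring
    # above. The key name is a placeholder.
    #
    #   key = bucket.get_key('mykey')
    #   key.change_storage_class('REDUCED_REDUNDANCY')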

    def copy(self, dst_bucket, dst_key, metadata=None,
             reduced_redundancy=False, preserve_acl=False,
             encrypt_key=False, validate_dst_bucket=True):
        """
        Copy this Key to another bucket.

        :type dst_bucket: string
        :param dst_bucket: The name of the destination bucket

        :type dst_key: string
        :param dst_key: The name of the destination key

        :type metadata: dict
        :param metadata: Metadata to be associated with new key.  If
            metadata is supplied, it will replace the metadata of the
            source key being copied.  If no metadata is supplied, the
            source key's metadata will be copied to the new key.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will force the
            storage class of the new Key to be REDUCED_REDUNDANCY
            regardless of the storage class of the key being copied.
            The Reduced Redundancy Storage (RRS) feature of S3
            provides lower redundancy at lower storage cost.

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key will
            be copied to the destination key.  If False, the
            destination key will have the default ACL.  Note that
            preserving the ACL in the new key object will require two
            additional API calls to S3, one to retrieve the current
            ACL and one to set that ACL on the new object.  If you
            don't care about the ACL, a value of False will be
            significantly more efficient.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type validate_dst_bucket: bool
        :param validate_dst_bucket: If True, will validate the dst_bucket
            by using an extra list request.

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        dst_bucket = self.bucket.connection.lookup(dst_bucket,
                                                   validate_dst_bucket)
        if reduced_redundancy:
            storage_class = 'REDUCED_REDUNDANCY'
        else:
            storage_class = self.storage_class
        return dst_bucket.copy_key(dst_key, self.bucket.name,
                                   self.name, metadata,
                                   storage_class=storage_class,
                                   preserve_acl=preserve_acl,
                                   encrypt_key=encrypt_key)
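
    # Illustrative sketch: copying a key into another bucket while keeping
    # its ACL and encrypting the copy at rest. Bucket/key names are
    # placeholders.
    #
    #   new_key = key.copy('backup-bucket', 'archive/mykey',
    #                      preserve_acl=True, encrypt_key=True)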

    def startElement(self, name, attrs, connection):
        if name == 'Owner':
            self.owner = User(self)
            return self.owner
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'Key':
            self.name = value
        elif name == 'ETag':
            self.etag = value
        elif name == 'IsLatest':
            if value == 'true':
                self.is_latest = True
            else:
                self.is_latest = False
        elif name == 'LastModified':
            self.last_modified = value
        elif name == 'Size':
            self.size = int(value)
        elif name == 'StorageClass':
            self.storage_class = value
        elif name == 'Owner':
            pass
        elif name == 'VersionId':
            self.version_id = value
        else:
            setattr(self, name, value)

    def exists(self, headers=None):
        """
        Returns True if the key exists

        :rtype: bool
        :return: Whether the key exists on S3
        """
        return bool(self.bucket.lookup(self.name, headers=headers))

    def delete(self, headers=None):
        """
        Delete this key from S3
        """
        return self.bucket.delete_key(self.name, version_id=self.version_id,
                                      headers=headers)
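
    # Illustrative sketch: probing for a key and removing it. exists()
    # costs an extra lookup request, so callers that would fetch the key
    # anyway can just call bucket.get_key() and test for None.
    #
    #   k = Key(bucket, 'maybe-there')   # placeholder name
    #   if k.exists():
    #       k.delete()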

    def get_metadata(self, name):
        return self.metadata.get(name)

    def set_metadata(self, name, value):
        # Ensure that metadata that is vital to signing is in the correct
        # case. Applies to ``Content-Type`` & ``Content-MD5``.
        if name.lower() == 'content-type':
            self.metadata['Content-Type'] = value
        elif name.lower() == 'content-md5':
            self.metadata['Content-MD5'] = value
        else:
            self.metadata[name] = value
        if name.lower() in Key.base_user_settable_fields:
            self.__dict__[name.lower().replace('-', '_')] = value

    def update_metadata(self, d):
        self.metadata.update(d)
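
    # Illustrative sketch: user metadata must be set before the contents
    # are uploaded, since it travels as x-amz-meta-* headers on the PUT.
    # Names and values are placeholders.
    #
    #   k = bucket.new_key('mykey')
    #   k.set_metadata('owner', 'data-team')
    #   k.set_contents_from_string('hello')
    #   k = bucket.get_key('mykey')      # re-fetch to read metadata back
    #   k.get_metadata('owner')          # -> 'data-team'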

    # convenience methods for setting/getting ACL
    def set_acl(self, acl_str, headers=None):
        if self.bucket is not None:
            self.bucket.set_acl(acl_str, self.name, headers=headers)

    def get_acl(self, headers=None):
        if self.bucket is not None:
            return self.bucket.get_acl(self.name, headers=headers)

    def get_xml_acl(self, headers=None):
        if self.bucket is not None:
            return self.bucket.get_xml_acl(self.name, headers=headers)

    def set_xml_acl(self, acl_str, headers=None):
        if self.bucket is not None:
            return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)

    def set_canned_acl(self, acl_str, headers=None):
        return self.bucket.set_canned_acl(acl_str, self.name, headers)

    def get_redirect(self):
        """Return the redirect location configured for this key.

        If no redirect is configured (via set_redirect), then None
        will be returned.

        """
        response = self.bucket.connection.make_request(
            'HEAD', self.bucket.name, self.name)
        if response.status == 200:
            return response.getheader('x-amz-website-redirect-location')
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, response.read())

    def set_redirect(self, redirect_location, headers=None):
        """Configure this key to redirect to another location.

        When the bucket associated with this key is accessed from the website
        endpoint, a 301 redirect will be issued to the specified
        `redirect_location`.

        :type redirect_location: string
        :param redirect_location: The location to redirect.

        """
        if headers is None:
            headers = {}
        else:
            headers = headers.copy()

        headers['x-amz-website-redirect-location'] = redirect_location
        response = self.bucket.connection.make_request('PUT', self.bucket.name,
                                                       self.name, headers)
        if response.status == 200:
            return True
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, response.read())
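
    # Illustrative sketch: configuring a website redirect for a key served
    # from the bucket's website endpoint. The target URL is a placeholder.
    #
    #   key.set_redirect('http://example.com/new-location')
    #   key.get_redirect()   # -> 'http://example.com/new-location'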

    def make_public(self, headers=None):
        return self.bucket.set_canned_acl('public-read', self.name, headers)
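
    # Illustrative note: make_public() is shorthand for the canned-ACL
    # call above, so these two lines are equivalent.
    #
    #   key.set_canned_acl('public-read')
    #   key.make_public()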

    def generate_url(self, expires_in, method='GET', headers=None,
                     query_auth=True, force_http=False, response_headers=None,
                     expires_in_absolute=False, version_id=None,
                     policy=None, reduced_redundancy=False, encrypt_key=False):
        """
        Generate a URL to access this key.

        :type expires_in: int
        :param expires_in: How long the url is valid for, in seconds

        :type method: string
        :param method: The method to use for retrieving the file
            (default is GET)

        :type headers: dict
        :param headers: Any headers to pass along in the request

        :type query_auth: bool
        :param query_auth: If True, the generated URL will include query
            string authentication parameters, so it can be used without
            separate credentials; if False, an unsigned URL is returned.

        :type force_http: bool
        :param force_http: If True, http will be used instead of https.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :type expires_in_absolute: bool
        :param expires_in_absolute: If True, ``expires_in`` is interpreted
            as an absolute expiration time (epoch seconds) rather than a
            number of seconds from now.

        :type version_id: string
        :param version_id: The version_id of the object to GET. If specified
            this overrides any value in the key.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :rtype: string
        :return: The URL to access the key
        """
        provider = self.bucket.connection.provider
        version_id = version_id or self.version_id
        if headers is None:
            headers = {}
        else:
            headers = headers.copy()

        # add headers accordingly (usually PUT case)
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        headers = boto.utils.merge_meta(headers, self.metadata, provider)

        return self.bucket.connection.generate_url(expires_in, method,
                                                   self.bucket.name, self.name,
                                                   headers, query_auth,
                                                   force_http,
                                                   response_headers,
                                                   expires_in_absolute,
                                                   version_id)
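
    # Illustrative sketch: producing a presigned GET URL valid for one
    # hour, and an unsigned plain-HTTP URL (only useful for objects that
    # are already publicly readable).
    #
    #   signed_url = key.generate_url(3600)
    #   public_url = key.generate_url(0, query_auth=False, force_http=True)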

    def send_file(self, fp, headers=None, cb=None, num_cb=10,
                  query_args=None, chunked_transfer=False, size=None):
        """
        Upload a file to a key into a bucket on S3.

        :type fp: file
        :param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload, i.e. if
            uploading the full file, it should point at the start of the
            file. Normally when a file is opened for reading, the fp will
            point at the first byte. See the size parameter below for
            more info.

        :type headers: dict
        :param headers: The headers to pass along with the PUT request

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file
            transfer. Providing a negative integer will cause your
            callback to be called with each buffer read.

        :type query_args: string
        :param query_args: (optional) Arguments to pass in the query string.

        :type chunked_transfer: boolean
        :param chunked_transfer: (optional) If true, we use chunked
            Transfer-Encoding.

        :type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
        """
        self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                                 query_args=query_args,
                                 chunked_transfer=chunked_transfer, size=size)

    def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
                            query_args=None, chunked_transfer=False, size=None,
                            hash_algs=None):
        provider = self.bucket.connection.provider
        try:
            spos = fp.tell()
        except IOError:
            spos = None
            self.read_from_stream = False

        # If hash_algs is unset and the MD5 hasn't already been computed,
        # default to an MD5 hash_alg to hash the data on-the-fly.
        if hash_algs is None and not self.md5:
            hash_algs = {'md5': md5}
        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})

        def sender(http_conn, method, path, data, headers):
            # This function is called repeatedly for temporary retries
            # so we must be sure the file pointer is pointing at the
            # start of the data.
            if spos is not None and spos != fp.tell():
                fp.seek(spos)
            elif spos is None and self.read_from_stream:
                # if seek is not supported, and we've read from this
                # stream already, then we need to abort retries to
                # avoid setting bad data.
                raise provider.storage_data_error(
                    'Cannot retry failed request. fp does not support seeking.')

            # If the caller explicitly specified host header, tell putrequest
            # not to add a second host header. Similarly for accept-encoding.
            skips = {}
            if boto.utils.find_matching_headers('host', headers):
              skips['skip_host'] = 1
            if boto.utils.find_matching_headers('accept-encoding', headers):
              skips['skip_accept_encoding'] = 1
            http_conn.putrequest(method, path, **skips)
            for key in headers:
                http_conn.putheader(key, headers[key])
            http_conn.endheaders()

            save_debug = self.bucket.connection.debug
            self.bucket.connection.debug = 0
            # If the debuglevel < 4 we don't want to show connection
            # payload, so turn off HTTP connection-level debug output (to
            # be restored below).
            # Use the getattr approach to allow this to work in AppEngine.
            if getattr(http_conn, 'debuglevel', 0) < 4:
                http_conn.set_debuglevel(0)

            data_len = 0
            if cb:
                if size:
                    cb_size = size
                elif self.size:
                    cb_size = self.size
                else:
                    cb_size = 0
                if chunked_transfer and cb_size == 0:
                    # For chunked Transfer, we call the cb for every 1MB
                    # of data transferred, except when we know size.
                    cb_count = (1024 * 1024) / self.BufferSize
                elif num_cb > 1:
                    cb_count = int(
                        math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
                elif num_cb < 0:
                    cb_count = -1
                else:
                    cb_count = 0
                i = 0
                cb(data_len, cb_size)

            bytes_togo = size
            if bytes_togo and bytes_togo < self.BufferSize:
                chunk = fp.read(bytes_togo)
            else:
                chunk = fp.read(self.BufferSize)

            if not isinstance(chunk, bytes):
                chunk = chunk.encode('utf-8')

            if spos is None:
                # read at least something from a non-seekable fp.
                self.read_from_stream = True
            while chunk:
                chunk_len = len(chunk)
                data_len += chunk_len
                if chunked_transfer:
                    http_conn.send('%x;\r\n' % chunk_len)
                    http_conn.send(chunk)
                    http_conn.send('\r\n')
                else:
                    http_conn.send(chunk)
                for alg in digesters:
                    digesters[alg].update(chunk)
                if bytes_togo:
                    bytes_togo -= chunk_len
                    if bytes_togo <= 0:
                        break
                if cb:
                    i += 1
                    if i == cb_count or cb_count == -1:
                        cb(data_len, cb_size)
                        i = 0
                if bytes_togo and bytes_togo < self.BufferSize:
                    chunk = fp.read(bytes_togo)
                else:
                    chunk = fp.read(self.BufferSize)

                if not isinstance(chunk, bytes):
                    chunk = chunk.encode('utf-8')

            self.size = data_len

            for alg in digesters:
                self.local_hashes[alg] = digesters[alg].digest()

            if chunked_transfer:
                http_conn.send('0\r\n')
                # http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
                http_conn.send('\r\n')

            if cb and (cb_count <= 1 or i > 0) and data_len > 0:
                cb(data_len, cb_size)

            http_conn.set_debuglevel(save_debug)
            self.bucket.connection.debug = save_debug
            response = http_conn.getresponse()
            body = response.read()

            if not self.should_retry(response, chunked_transfer):
                raise provider.storage_response_error(
                    response.status, response.reason, body)

            return response

        if not headers:
            headers = {}
        else:
            headers = headers.copy()
        # Overwrite user-supplied user-agent.
        for header in find_matching_headers('User-Agent', headers):
            del headers[header]
        headers['User-Agent'] = UserAgent
        # If storage_class is None, then a user has not explicitly requested
        # a storage class, so we can assume STANDARD here
        if self._storage_class not in [None, 'STANDARD']:
            headers[provider.storage_class_header] = self.storage_class
        if find_matching_headers('Content-Encoding', headers):
            self.content_encoding = merge_headers_by_name(
                'Content-Encoding', headers)
        if find_matching_headers('Content-Language', headers):
            self.content_language = merge_headers_by_name(
                'Content-Language', headers)
        content_type_headers = find_matching_headers('Content-Type', headers)
        if content_type_headers:
            # Some use cases need to suppress sending of the Content-Type
            # header and depend on the receiving server to set the content
            # type. This can be achieved by setting headers['Content-Type']
            # to None when calling this method.
            if (len(content_type_headers) == 1 and
                headers[content_type_headers[0]] is None):
                # Delete null Content-Type value to skip sending that header.
                del headers[content_type_headers[0]]
            else:
                self.content_type = merge_headers_by_name(
                    'Content-Type', headers)
        elif self.path:
            self.content_type = mimetypes.guess_type(self.path)[0]
            if self.content_type is None:
                self.content_type = self.DefaultContentType
            headers['Content-Type'] = self.content_type
        else:
            headers['Content-Type'] = self.content_type
        if self.base64md5:
            headers['Content-MD5'] = self.base64md5
        if chunked_transfer:
            headers['Transfer-Encoding'] = 'chunked'
            #if not self.base64md5:
            #    headers['Trailer'] = "Content-MD5"
        else:
            headers['Content-Length'] = str(self.size)
        # This is terrible. We need a SHA256 of the body for SigV4, but to do
        # the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because of the closure). Detect if it's SigV4 &
        # embellish while we can before the auth calculations occur.
        if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
            kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
            if size is not None:
                kwargs['size'] = size
            headers['_sha256'] = compute_hash(**kwargs)[0]
        headers['Expect'] = '100-Continue'
        headers = boto.utils.merge_meta(headers, self.metadata, provider)
        resp = self.bucket.connection.make_request(
            'PUT',
            self.bucket.name,
            self.name,
            headers,
            sender=sender,
            query_args=query_args
        )
        self.handle_version_headers(resp, force=True)
        self.handle_addl_headers(resp.getheaders())

    def should_retry(self, response, chunked_transfer=False):
        provider = self.bucket.connection.provider

        if not chunked_transfer:
            if response.status in [500, 503]:
                # 500 & 503 can be plain retries.
                return True

            if response.getheader('location'):
                # If there's a redirect, plain retry.
                return True

        if 200 <= response.status <= 299:
            self.etag = response.getheader('etag')
            md5 = self.md5
            if isinstance(md5, bytes):
                md5 = md5.decode('utf-8')

            # If you use customer-provided encryption keys, the ETag value that
            # Amazon S3 returns in the response will not be the MD5 of the
            # object.
            server_side_encryption_customer_algorithm = response.getheader(
                'x-amz-server-side-encryption-customer-algorithm', None)
            if server_side_encryption_customer_algorithm is None:
                if self.etag != '"%s"' % md5:
                    raise provider.storage_data_error(
                        'ETag from S3 did not match computed MD5. '
                        '%s vs. %s' % (self.etag, self.md5))

            return True

        if response.status == 400:
            # The 400 must be trapped so the retry handler can check to
            # see if it was a timeout.
            # If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
            # out.
            body = response.read()
            err = provider.storage_response_error(
                response.status,
                response.reason,
                body
            )

            if err.error_code in ['RequestTimeout']:
                raise PleaseRetryException(
                    "Saw %s, retrying" % err.error_code,
                    response=response
                )

        return False

    def compute_md5(self, fp, size=None):
        """
        :type fp: file
        :param fp: File pointer to the file to MD5 hash.  The file
            pointer will be reset to the same position before the
            method returns.

        :type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
        """
        hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
        # Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
        # The internal implementation of compute_md5() needs to return the
        # data size but we don't want to return that value to the external
        # caller because it changes the class interface (i.e. it might
        # break some code) so we consume the third tuple value here and
        # return the remainder of the tuple to the caller, thereby preserving
        # the existing interface.
        self.size = data_size
        return (hex_digest, b64_digest)
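
    # Illustrative sketch: precomputing the MD5 pair once so it can be
    # reused (e.g. logged or compared) instead of being recomputed inside
    # set_contents_from_file(). The path is a placeholder; compute_md5()
    # restores the file position before returning.
    #
    #   with open('/tmp/data', 'rb') as fp:
    #       md5_tuple = key.compute_md5(fp)
    #       key.set_contents_from_file(fp, md5=md5_tuple)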

    def set_contents_from_stream(self, fp, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None,
                                 reduced_redundancy=False, query_args=None,
                                 size=None):
        """
        Store an object using the name of the Key object as the key in
        the cloud and the contents of the data stream pointed to by 'fp' as
        the contents.

        The stream object is not seekable and the total size is not known.
        This has the implication that we can't specify the
        Content-Length and Content-MD5 in the header. So for huge
        uploads, the delay in calculating MD5 is avoided, but with the
        penalty of being unable to verify the integrity of the uploaded
        data.

        :type fp: file
        :param fp: the file whose contents are to be uploaded

        :type headers: dict
        :param headers: additional HTTP headers to be sent with the
            PUT request.

        :type replace: bool
        :param replace: If this parameter is False, the method will first check
            to see if an object exists in the bucket with the same key. If it
            does, it won't overwrite it. The default value is True which will
            overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing the
            total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.

        :type size: int
        :param size: (optional) The maximum number of bytes to read from
            the file pointer (fp). This is useful when uploading a
            file in multiple parts where you are splitting the file up
            into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
        """

        provider = self.bucket.connection.provider
        if not provider.supports_chunked_transfer():
            raise BotoClientError('%s does not support chunked transfer'
                % provider.get_provider_name())

        # Name of the Object should be specified explicitly for Streams.
        if not self.name:
            raise BotoClientError('Cannot determine the destination '
                                  'object name for the given stream')

        if headers is None:
            headers = {}
        if policy:
            headers[provider.acl_header] = policy

        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class

        if self.bucket is not None:
            if not replace:
                if self.bucket.lookup(self.name):
                    return
            self.send_file(fp, headers, cb, num_cb, query_args,
                           chunked_transfer=True, size=size)
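
    # Illustrative sketch: uploading from a non-seekable stream. Note this
    # path requires a provider that supports chunked transfer (boto's S3
    # provider does not; its Google Cloud Storage support does). The
    # subprocess pipe below is a placeholder data source.
    #
    #   import subprocess
    #   proc = subprocess.Popen(['gzip', '-c', 'big.log'],
    #                           stdout=subprocess.PIPE)
    #   key.set_contents_from_stream(proc.stdout)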

    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False, query_args=None,
                               encrypt_key=False, size=None, rewind=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file pointed to by 'fp' as the
        contents. The data is read from 'fp' from its current position until
        'size' bytes have been read or EOF.

        :type fp: file
        :param fp: the file whose contents to upload

        :type headers: dict
        :param headers: Additional HTTP headers that will be sent with
            the PUT request.

        :type replace: bool
        :param replace: If this parameter is False, the method will
            first check to see if an object exists in the bucket with
            the same key.  If it does, it won't overwrite it.  The
            default value is True which will overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element.  This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file.  Otherwise, the checksum will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type size: int
        :param size: (optional) The maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.

        :type rewind: bool
        :param rewind: (optional) If True, the file pointer (fp) will
            be rewound to the start before any bytes are read from
            it. The default behaviour is False which reads from the
            current position of the file pointer (fp).

        :rtype: int
        :return: The number of bytes written to the key.
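
        Example (a minimal sketch; bucket, key and file names are
        placeholders)::

            import boto
            bucket = boto.connect_s3().get_bucket('my-bucket')
            key = bucket.new_key('uploads/data.bin')
            with open('/tmp/data.bin', 'rb') as fp:
                bytes_written = key.set_contents_from_file(fp)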
        """
        provider = self.bucket.connection.provider
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'

        if rewind:
            # caller requests reading from beginning of fp.
            fp.seek(0, os.SEEK_SET)
        else:
            # The following seek/tell/seek logic is intended
            # to detect applications using the older interface to
            # set_contents_from_file(), which automatically rewound the
            # file each time the Key was reused. This changed with commit
            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
            # split into multiple parts and uploaded in parallel, and at
            # the time of that commit this check was added because otherwise
            # older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
            # by KeyFile (used, for example, by gsutil when copying between
            # providers). So, we skip the check for the KeyFile case.
            # TODO: At some point consider removing this seek/tell/seek
            # logic, after enough time has passed that it's unlikely any
            # programs remain that assume the older auto-rewind interface.
            if not isinstance(fp, KeyFile):
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                if fp.tell() == spos:
                    fp.seek(0, os.SEEK_SET)
                    if fp.tell() != spos:
                        # Raise an exception as this is likely a programming
                        # error whereby there is data before the fp but nothing
                        # after it.
                        fp.seek(spos)
                        raise AttributeError('fp is at EOF. Use rewind option '
                                             'or seek() to data start.')
                # seek back to the correct position.
                fp.seek(spos)

        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
                # What if different providers provide different classes?
        if hasattr(fp, 'name'):
            self.path = fp.name
        if self.bucket is not None:
            if not md5 and provider.supports_chunked_transfer():
                # Defer the MD5 calculation so it is computed on the fly
                # during the chunked transfer; the size is not yet known.
                chunked_transfer = True
                self.size = None
            else:
                chunked_transfer = False
                if isinstance(fp, KeyFile):
                    # Avoid EOF seek for KeyFile case as it's very inefficient.
                    key = fp.getkey()
                    size = key.size - fp.tell()
                    self.size = size
                    # At present both GCS and S3 use MD5 for the etag for
                    # non-multipart-uploaded objects. If the etag is 32 hex
                    # chars use it as an MD5, to avoid having to read the file
                    # twice while transferring.
                    if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                        etag = key.etag.strip('"')
                        md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
                if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the MD5.
                    md5 = self.compute_md5(fp, size)
                    # adjust size if required
                    size = self.size
                elif size:
                    self.size = size
                else:
                    # If md5 is provided, we still need the size, so
                    # calculate it from the bytes remaining to end of content.
                    spos = fp.tell()
                    fp.seek(0, os.SEEK_END)
                    self.size = fp.tell() - spos
                    fp.seek(spos)
                    size = self.size
                self.md5 = md5[0]
                self.base64md5 = md5[1]

            if self.name is None:
                self.name = self.md5
            if not replace:
                if self.bucket.lookup(self.name):
                    return

            self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
                           query_args=query_args,
                           chunked_transfer=chunked_transfer, size=size)
            # return number of bytes written.
            return self.size

    def set_contents_from_filename(self, filename, headers=None, replace=True,
                                   cb=None, num_cb=10, policy=None, md5=None,
                                   reduced_redundancy=False,
                                   encrypt_key=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file named by 'filename'.
        See set_contents_from_file method for details about the
        parameters.

        :type filename: string
        :param filename: The name of the file that you want to put onto S3

        :type headers: dict
        :param headers: Additional headers to pass along with the
            request to AWS.

        :type replace: bool
        :param replace: If True, replaces the contents of the file
            if it already exists.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element.  This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file.  Otherwise, the checksum will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object
            will be encrypted on the server-side by S3 and will be
            stored in an encrypted form while at rest in S3.

        :rtype: int
        :return: The number of bytes written to the key.
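
        Example (a minimal sketch; bucket, key and file names are
        placeholders)::

            import boto
            bucket = boto.connect_s3().get_bucket('my-bucket')
            key = bucket.new_key('reports/2014.csv')
            key.set_contents_from_filename('/tmp/2014.csv')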
        """
        with open(filename, 'rb') as fp:
            return self.set_contents_from_file(fp, headers, replace, cb,
                                               num_cb, policy, md5,
                                               reduced_redundancy,
                                               encrypt_key=encrypt_key)

    def set_contents_from_string(self, string_data, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None, md5=None,
                                 reduced_redundancy=False,
                                 encrypt_key=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the string 'string_data' as the contents.
        See set_contents_from_file method for details about the
        parameters.

        :type headers: dict
        :param headers: Additional headers to pass along with the
            request to AWS.

        :type replace: bool
        :param replace: If True, replaces the contents of the file if
            it already exists.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element.  This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file.  Otherwise, the checksum will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.
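
        Example (a minimal sketch; assumes ``bucket`` is an existing
        :class:`boto.s3.bucket.Bucket` and the key name is a
        placeholder)::

            key = bucket.new_key('hello.txt')
            key.set_contents_from_string('hello world',
                headers={'Content-Type': 'text/plain'})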
        """
        if not isinstance(string_data, bytes):
            string_data = string_data.encode("utf-8")
        fp = BytesIO(string_data)
        r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                        policy, md5, reduced_redundancy,
                                        encrypt_key=encrypt_key)
        fp.close()
        return r

    def get_file(self, fp, headers=None, cb=None, num_cb=10,
                 torrent=False, version_id=None, override_num_retries=None,
                 response_headers=None):
        """
        Retrieves a file from an S3 Key

        :type fp: file
        :param fp: File pointer to put the data into

        :type headers: dict
        :param headers: headers to send when retrieving the file

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object
            to be downloaded.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :type version_id: str
        :param version_id: The ID of a particular version of the object.
            If this parameter is not supplied but the Key object has
            a ``version_id`` attribute, that value will be used when
            retrieving the object.  You can set the Key object's
            ``version_id`` attribute to None to always grab the latest
            version from a version-enabled bucket.
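
        Example (a minimal sketch with a progress callback; assumes
        ``key`` is an existing :class:`boto.s3.key.Key` and the
        destination path is a placeholder)::

            def progress(received, total):
                print '%d of %d bytes' % (received, total)

            with open('/tmp/obj.bin', 'wb') as fp:
                key.get_file(fp, cb=progress, num_cb=20)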
        """
        self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                                torrent=torrent, version_id=version_id,
                                override_num_retries=override_num_retries,
                                response_headers=response_headers,
                                hash_algs=None,
                                query_args=None)

    def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
                 torrent=False, version_id=None, override_num_retries=None,
                 response_headers=None, hash_algs=None, query_args=None):
        if headers is None:
            headers = {}
        save_debug = self.bucket.connection.debug
        if self.bucket.connection.debug == 1:
            self.bucket.connection.debug = 0

        query_args = query_args or []
        if torrent:
            query_args.append('torrent')

        if hash_algs is None and not torrent:
            hash_algs = {'md5': md5}
        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})

        # If a version_id is passed in, use that.  If not, check to see
        # if the Key object has an explicit version_id and, if so, use that.
        # Otherwise, don't pass a version_id query param.
        if version_id is None:
            version_id = self.version_id
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            for key in response_headers:
                query_args.append('%s=%s' % (
                    key, urllib.parse.quote(response_headers[key])))
        query_args = '&'.join(query_args)
        self.open('r', headers, query_args=query_args,
                  override_num_retries=override_num_retries)

        data_len = 0
        if cb:
            if self.size is None:
                cb_size = 0
            else:
                cb_size = self.size
            if self.size is None and num_cb != -1:
                # If size is not available due to chunked transfer for example,
                # we'll call the cb for every 1MB of data transferred.
                cb_count = (1024 * 1024) / self.BufferSize
            elif num_cb > 1:
                cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            cb(data_len, cb_size)
        try:
            for chunk in self:
                fp.write(chunk)
                data_len += len(chunk)
                for alg in digesters:
                    digesters[alg].update(chunk)
                if cb:
                    if cb_size > 0 and data_len >= cb_size:
                        break
                    i += 1
                    if i == cb_count or cb_count == -1:
                        cb(data_len, cb_size)
                        i = 0
        except IOError as e:
            if e.errno == errno.ENOSPC:
                raise StorageDataError('Out of space for destination file '
                                       '%s' % fp.name)
            raise
        if cb and (cb_count <= 1 or i > 0) and data_len > 0:
            cb(data_len, cb_size)
        for alg in digesters:
            self.local_hashes[alg] = digesters[alg].digest()
        if self.size is None and not torrent and "Range" not in headers:
            self.size = data_len
        self.close()
        self.bucket.connection.debug = save_debug

    def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
        """
        Get a torrent file (see also get_file)

        :type fp: file
        :param fp: The file pointer of where to put the torrent

        :type headers: dict
        :param headers: Headers to be passed

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object
            to be downloaded.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
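
        Example (a minimal sketch; assumes ``key`` is an existing
        :class:`boto.s3.key.Key` and the destination path is a
        placeholder)::

            with open('/tmp/obj.torrent', 'wb') as fp:
                key.get_torrent_file(fp)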

        """
        return self.get_file(fp, headers, cb, num_cb, torrent=True)

    def get_contents_to_file(self, fp, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Write the contents of the object to the file pointed
        to by 'fp'.

        :type fp: file-like object
        :param fp: the file pointer to which the object's contents
            will be written

        :type headers: dict
        :param headers: additional HTTP headers that will be sent with
            the GET request.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object
            to be downloaded.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type torrent: bool
        :param torrent: If True, retrieves the torrent file for the
            object instead of the object's contents.

        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
            perform the download.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :type version_id: str
        :param version_id: The ID of a particular version of the object.
            If this parameter is not supplied but the Key object has
            a ``version_id`` attribute, that value will be used when
            retrieving the object.  You can set the Key object's
            ``version_id`` attribute to None to always grab the latest
            version from a version-enabled bucket.
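
        Example (a minimal sketch; bucket, key and destination names
        are placeholders)::

            import boto
            key = boto.connect_s3().get_bucket('my-bucket').get_key('obj')
            with open('/tmp/obj.bin', 'wb') as fp:
                key.get_contents_to_file(fp)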
        """
        if self.bucket is not None:
            if res_download_handler:
                res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                              torrent=torrent,
                                              version_id=version_id)
            else:
                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              response_headers=response_headers)

    def get_contents_to_filename(self, filename, headers=None,
                                 cb=None, num_cb=10,
                                 torrent=False,
                                 version_id=None,
                                 res_download_handler=None,
                                 response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Store contents of the object to a file named by 'filename'.
        See get_contents_to_file method for details about the
        parameters.

        :type filename: string
        :param filename: The filename of where to put the file contents

        :type headers: dict
        :param headers: Any additional headers to send in the request

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object
            to be downloaded.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type torrent: bool
        :param torrent: If True, retrieves the torrent file for the
            object instead of the object's contents.

        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
            perform the download.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :type version_id: str
        :param version_id: The ID of a particular version of the object.
            If this parameter is not supplied but the Key object has
            a ``version_id`` attribute, that value will be used when
            retrieving the object.  You can set the Key object's
            ``version_id`` attribute to None to always grab the latest
            version from a version-enabled bucket.
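
        Example (a minimal sketch; assumes ``key`` is an existing
        :class:`boto.s3.key.Key` and the destination path is a
        placeholder)::

            key.get_contents_to_filename('/tmp/obj.bin')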
        """
        try:
            with open(filename, 'wb') as fp:
                self.get_contents_to_file(fp, headers, cb, num_cb,
                                          torrent=torrent,
                                          version_id=version_id,
                                          res_download_handler=res_download_handler,
                                          response_headers=response_headers)
        except Exception:
            # Remove any partially written file before re-raising; guard
            # the removal in case open() failed before creating the file.
            if os.path.exists(filename):
                os.remove(filename)
            raise
        # if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:
            try:
                modified_tuple = email.utils.parsedate_tz(self.last_modified)
                modified_stamp = int(email.utils.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception:
                pass

    def get_contents_as_string(self, headers=None,
                               cb=None, num_cb=10,
                               torrent=False,
                               version_id=None,
                               response_headers=None, encoding=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Return the contents of the object as a string.
        See get_contents_to_file method for details about the
        parameters.

        :type headers: dict
        :param headers: Any additional headers to send in the request

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the download.  The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object
            to be downloaded.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.

        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent file
            as a string.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :type version_id: str
        :param version_id: The ID of a particular version of the object.
            If this parameter is not supplied but the Key object has
            a ``version_id`` attribute, that value will be used when
            retrieving the object.  You can set the Key object's
            ``version_id`` attribute to None to always grab the latest
            version from a version-enabled bucket.

        :type encoding: str
        :param encoding: The text encoding to use, such as ``utf-8``
            or ``iso-8859-1``. If set, then a string will be returned.
            Defaults to ``None`` and returns bytes.

        :rtype: bytes or str
        :returns: The contents of the file as bytes or a string
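
        Example (a minimal sketch; assumes ``key`` is an existing
        :class:`boto.s3.key.Key`)::

            data = key.get_contents_as_string()                   # bytes
            text = key.get_contents_as_string(encoding='utf-8')  # text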
        """
        fp = BytesIO()
        self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                                  version_id=version_id,
                                  response_headers=response_headers)
        value = fp.getvalue()

        if encoding is not None:
            value = value.decode(encoding)

        return value

    def add_email_grant(self, permission, email_address, headers=None):
        """
        Convenience method that provides a quick way to add an email grant
        to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL
        and then PUT's the new ACL back to S3.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

        :type email_address: string
        :param email_address: The email address associated with the AWS
            account you are granting the permission to.

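        Example (a minimal sketch; the email address is a placeholder
        for one associated with an AWS account)::

            key.add_email_grant('READ', 'user@example.com')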
        """
        policy = self.get_acl(headers=headers)
        policy.acl.add_email_grant(permission, email_address)
        self.set_acl(policy, headers=headers)

    def add_user_grant(self, permission, user_id, headers=None,
                       display_name=None):
        """
        Convenience method that provides a quick way to add a canonical
        user grant to a key.  This method retrieves the current ACL,
        creates a new grant based on the parameters passed in, adds that
        grant to the ACL and then PUT's the new ACL back to S3.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

        :type user_id: string
        :param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.

        :type display_name: string
        :param display_name: An optional string containing the user's
            Display Name.  Only required on Walrus.
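
        Example (a minimal sketch; the canonical user id is a
        placeholder)::

            key.add_user_grant('FULL_CONTROL', '<canonical-user-id>')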
        """
        policy = self.get_acl(headers=headers)
        policy.acl.add_user_grant(permission, user_id,
                                  display_name=display_name)
        self.set_acl(policy, headers=headers)

    def _normalize_metadata(self, metadata):
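        """
        Lower-case metadata names. Accepts either a set of header names
        or a dict mapping header names to values, and returns the same
        kind of container with lower-cased keys.
        """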
        if isinstance(metadata, set):
            norm_metadata = set()
            for k in metadata:
                norm_metadata.add(k.lower())
        else:
            norm_metadata = {}
            for k in metadata:
                norm_metadata[k.lower()] = metadata[k]
        return norm_metadata

    def _get_remote_metadata(self, headers=None):
        """
        Extracts metadata from existing URI into a dict, so we can
        overwrite/delete from it to form the new set of metadata to apply to a
        key.
        """
        metadata = {}
        for underscore_name in self._underscore_base_user_settable_fields:
            if hasattr(self, underscore_name):
                value = getattr(self, underscore_name)
                if value:
                    # Generate HTTP field name corresponding to "_" named field.
                    field_name = underscore_name.replace('_', '-')
                    metadata[field_name.lower()] = value
        # self.metadata contains custom metadata, which are all user-settable.
        prefix = self.provider.metadata_prefix
        for underscore_name in self.metadata:
            field_name = underscore_name.replace('_', '-')
            metadata['%s%s' % (prefix, field_name.lower())] = (
                self.metadata[underscore_name])
        return metadata

    def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
                            headers=None):
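        """
        Replace this key's remote metadata by copying the key onto
        itself. The new metadata is the current remote metadata updated
        with ``metadata_plus`` and with any names in ``metadata_minus``
        removed; all names are lower-cased before comparison.
        """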
        metadata_plus = self._normalize_metadata(metadata_plus)
        metadata_minus = self._normalize_metadata(metadata_minus)
        metadata = self._get_remote_metadata()
        metadata.update(metadata_plus)
        for h in metadata_minus:
            if h in metadata:
                del metadata[h]
        src_bucket = self.bucket
        # Boto prepends the meta prefix when adding headers, so strip prefix in
        # metadata before sending back in to copy_key() call.
        rewritten_metadata = {}
        for h in metadata:
            if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
                rewritten_h = (h.replace('x-goog-meta-', '')
                               .replace('x-amz-meta-', ''))
            else:
                rewritten_h = h
            rewritten_metadata[rewritten_h] = metadata[h]
        metadata = rewritten_metadata
        src_bucket.copy_key(self.name, self.bucket.name, self.name,
                            metadata=metadata, preserve_acl=preserve_acl,
                            headers=headers)

    def restore(self, days, headers=None):
        """Restore an object from an archive.

        :type days: int
        :param days: The lifetime of the restored object (must
            be at least 1 day).  If the object is already restored
            then this parameter can be used to readjust the lifetime
            of the restored object.  In this case, the days
            param is with respect to the initial time of the request.
            If the object has not been restored, this param is with
            respect to the completion time of the request.
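
        Example (a minimal sketch; assumes the object's storage class
        is GLACIER, e.g. via a lifecycle rule)::

            key.restore(days=5)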

        """
        response = self.bucket.connection.make_request(
            'POST', self.bucket.name, self.name,
            data=self.RestoreBody % days,
            headers=headers, query_args='restore')
        if response.status not in (200, 202):
            provider = self.bucket.connection.provider
            raise provider.storage_response_error(response.status,
                                                  response.reason,
                                                  response.read())