| OLD | NEW |
| (Empty) | |
| 1 # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ |
| 2 # Copyright (c) 2010, Eucalyptus Systems, Inc. |
| 3 # All rights reserved. |
| 4 # |
| 5 # Permission is hereby granted, free of charge, to any person obtaining a |
| 6 # copy of this software and associated documentation files (the |
| 7 # "Software"), to deal in the Software without restriction, including |
| 8 # without limitation the rights to use, copy, modify, merge, publish, dis- |
| 9 # tribute, sublicense, and/or sell copies of the Software, and to permit |
| 10 # persons to whom the Software is furnished to do so, subject to the fol- |
| 11 # lowing conditions: |
| 12 # |
| 13 # The above copyright notice and this permission notice shall be included |
| 14 # in all copies or substantial portions of the Software. |
| 15 # |
| 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
| 17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- |
| 18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT |
| 19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, |
| 20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| 22 # IN THE SOFTWARE. |
| 23 |
| 24 import boto |
| 25 from boto import handler |
| 26 from boto.resultset import ResultSet |
| 27 from boto.exception import BotoClientError |
| 28 from boto.s3.acl import Policy, CannedACLStrings, Grant |
| 29 from boto.s3.key import Key |
| 30 from boto.s3.prefix import Prefix |
| 31 from boto.s3.deletemarker import DeleteMarker |
| 32 from boto.s3.multipart import MultiPartUpload |
| 33 from boto.s3.multipart import CompleteMultiPartUpload |
| 34 from boto.s3.multidelete import MultiDeleteResult |
| 35 from boto.s3.multidelete import Error |
| 36 from boto.s3.bucketlistresultset import BucketListResultSet |
| 37 from boto.s3.bucketlistresultset import VersionedBucketListResultSet |
| 38 from boto.s3.bucketlistresultset import MultiPartUploadListResultSet |
| 39 from boto.s3.lifecycle import Lifecycle |
| 40 from boto.s3.tagging import Tags |
| 41 from boto.s3.cors import CORSConfiguration |
| 42 from boto.s3.bucketlogging import BucketLogging |
| 43 import boto.jsonresponse |
| 44 import boto.utils |
| 45 import xml.sax |
| 46 import xml.sax.saxutils |
| 47 import StringIO |
| 48 import urllib |
| 49 import re |
| 50 import base64 |
| 51 from collections import defaultdict |
| 52 |
| 53 # as per http://goo.gl/BDuud (02/19/2011) |
| 54 |
| 55 |
class S3WebsiteEndpointTranslate:
    """
    Map an S3 region name to the hostname prefix of its static-website
    hosting endpoint.

    Unknown regions fall back to the classic (us-east-1) endpoint; see
    http://goo.gl/BDuud (02/19/2011).
    """

    # defaultdict so that lookups for unlisted regions return the
    # US Standard website endpoint instead of raising KeyError.
    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'

    @classmethod
    def translate_region(cls, reg):
        """
        Return the website endpoint prefix for region name ``reg``.

        Fixed: the classmethod's first parameter was misleadingly named
        ``self``; by convention it receives the class and is named ``cls``.
        """
        return cls.trans_region[reg]
| 69 |
# Canonical set of permission names S3 accepts in ACL grants.
S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
| 71 |
| 72 |
class Bucket(object):
    """Represents an S3 bucket and the operations that act on it."""

    # Group URI that must be granted access for S3 server access logging.
    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    # XML payload template for the ?requestPayment subresource.
    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Payer>%s</Payer>
</RequestPaymentConfiguration>"""

    # XML payload template for the ?versioning subresource.
    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>%s</Status>
<MfaDelete>%s</MfaDelete>
</VersioningConfiguration>"""

    # XML payload template for the ?website subresource; the second %s is
    # filled with WebsiteErrorFragment (or '') below.
    WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<IndexDocument><Suffix>%s</Suffix></IndexDocument>
%s
</WebsiteConfiguration>"""

    WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""

    # Regexes used to pick status fields out of versioning-config XML.
    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
    def __init__(self, connection=None, name=None, key_class=Key):
        """
        :type connection: :class:`boto.s3.connection.S3Connection`
        :param connection: The connection used to issue requests
            against this bucket.

        :type name: string
        :param name: The name of the bucket.

        :type key_class: class
        :param key_class: The class used for keys returned by listing
            and lookup methods (see :meth:`set_key_class`).
        """
        self.name = name
        self.connection = connection
        self.key_class = key_class
| 103 |
| 104 def __repr__(self): |
| 105 return '<Bucket: %s>' % self.name |
| 106 |
| 107 def __iter__(self): |
| 108 return iter(BucketListResultSet(self)) |
| 109 |
| 110 def __contains__(self, key_name): |
| 111 return not (self.get_key(key_name) is None) |
| 112 |
    def startElement(self, name, attrs, connection):
        # SAX parsing hook; a Bucket has no nested elements that need
        # a dedicated sub-handler, so always defer to endElement.
        return None
| 115 |
| 116 def endElement(self, name, value, connection): |
| 117 if name == 'Name': |
| 118 self.name = value |
| 119 elif name == 'CreationDate': |
| 120 self.creation_date = value |
| 121 else: |
| 122 setattr(self, name, value) |
| 123 |
    def set_key_class(self, key_class):
        """
        Set the Key class associated with this bucket. By default, this
        would be the boto.s3.key.Key class but if you want to subclass that
        for some reason this allows you to associate your new class with a
        bucket so that when you call bucket.new_key() or when you get a listing
        of keys in the bucket you will get instances of your key class
        rather than the default.

        :type key_class: class
        :param key_class: A subclass of Key that can be more specific
        """
        self.key_class = key_class
| 137 |
    def lookup(self, key_name, headers=None):
        """
        Deprecated: Please use the get_key method instead.

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type headers: dict
        :param headers: Additional HTTP headers to send with the request.

        :rtype: :class:`boto.s3.key.Key`
        :returns: A Key object from this bucket.
        """
        return self.get_key(key_name, headers=headers)
| 149 |
| 150 def get_key(self, key_name, headers=None, version_id=None, |
| 151 response_headers=None): |
| 152 """ |
| 153 Check to see if a particular key exists within the bucket. This |
| 154 method uses a HEAD request to check for the existance of the key. |
| 155 Returns: An instance of a Key object or None |
| 156 |
| 157 :type key_name: string |
| 158 :param key_name: The name of the key to retrieve |
| 159 |
| 160 :type response_headers: dict |
| 161 :param response_headers: A dictionary containing HTTP |
| 162 headers/values that will override any headers associated |
| 163 with the stored object in the response. See |
| 164 http://goo.gl/EWOPb for details. |
| 165 |
| 166 :rtype: :class:`boto.s3.key.Key` |
| 167 :returns: A Key object from this bucket. |
| 168 """ |
| 169 query_args_l = [] |
| 170 if version_id: |
| 171 query_args_l.append('versionId=%s' % version_id) |
| 172 if response_headers: |
| 173 for rk, rv in response_headers.iteritems(): |
| 174 query_args_l.append('%s=%s' % (rk, urllib.quote(rv))) |
| 175 |
| 176 key, resp = self._get_key_internal(key_name, headers, query_args_l) |
| 177 return key |
| 178 |
    def _get_key_internal(self, key_name, headers, query_args_l):
        """
        HEAD the given key and build a Key object from the response
        headers.

        :type query_args_l: list
        :param query_args_l: Pre-built ``name=value`` query-string
            fragments to append to the request.

        :returns: A (key, response) tuple; key is None when S3
            answers 404.
        :raises: the provider's storage_response_error for any status
            other than 2xx or 404.
        """
        query_args = '&'.join(query_args_l) or None
        response = self.connection.make_request('HEAD', self.name, key_name,
                                                headers=headers,
                                                query_args=query_args)
        # Drain the body so the connection can be reused.
        response.read()
        # Allow any success status (2xx) - for example this lets us
        # support Range gets, which return status 206:
        if response.status / 100 == 2:
            k = self.key_class(self)
            provider = self.connection.provider
            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
            k.etag = response.getheader('etag')
            k.content_type = response.getheader('content-type')
            k.content_encoding = response.getheader('content-encoding')
            k.content_disposition = response.getheader('content-disposition')
            k.content_language = response.getheader('content-language')
            k.last_modified = response.getheader('last-modified')
            # the following machinations are a workaround to the fact that
            # apache/fastcgi omits the content-length header on HEAD
            # requests when the content-length is zero.
            # See http://goo.gl/0Tdax for more details.
            clen = response.getheader('content-length')
            if clen:
                k.size = int(response.getheader('content-length'))
            else:
                k.size = 0
            k.cache_control = response.getheader('cache-control')
            k.name = key_name
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            return k, response
        else:
            if response.status == 404:
                return None, response
            else:
                raise self.connection.provider.storage_response_error(
                    response.status, response.reason, '')
| 217 |
    def list(self, prefix='', delimiter='', marker='', headers=None):
        """
        List key objects within a bucket. This returns an instance of an
        BucketListResultSet that automatically handles all of the result
        paging, etc. from S3. You just need to keep iterating until
        there are no more results.

        Called with no arguments, this will return an iterator object across
        all keys within the bucket.

        The Key objects returned by the iterator are obtained by parsing
        the results of a GET on the bucket, also known as the List Objects
        request. The XML returned by this request contains only a subset
        of the information about each key. Certain metadata fields such
        as Content-Type and user metadata are not available in the XML.
        Therefore, if you want these additional metadata fields you will
        have to do a HEAD request on the Key in the bucket.

        :type prefix: string
        :param prefix: allows you to limit the listing to a particular
            prefix. For example, if you call the method with
            prefix='/foo/' then the iterator will only cycle through
            the keys that begin with the string '/foo/'.

        :type delimiter: string
        :param delimiter: can be used in conjunction with the prefix
            to allow you to organize and browse your keys
            hierarchically. See http://goo.gl/Xx63h for more details.

        :type marker: string
        :param marker: The "marker" of where you are in the result set

        :type headers: dict
        :param headers: Additional HTTP headers to send with each
            listing request.

        :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
        :return: an instance of a BucketListResultSet that handles paging, etc
        """
        return BucketListResultSet(self, prefix, delimiter, marker, headers)
| 254 |
    def list_versions(self, prefix='', delimiter='', key_marker='',
                      version_id_marker='', headers=None):
        """
        List version objects within a bucket. This returns an
        instance of an VersionedBucketListResultSet that automatically
        handles all of the result paging, etc. from S3. You just need
        to keep iterating until there are no more results. Called
        with no arguments, this will return an iterator object across
        all keys within the bucket.

        :type prefix: string
        :param prefix: allows you to limit the listing to a particular
            prefix. For example, if you call the method with
            prefix='/foo/' then the iterator will only cycle through
            the keys that begin with the string '/foo/'.

        :type delimiter: string
        :param delimiter: can be used in conjunction with the prefix
            to allow you to organize and browse your keys
            hierarchically. See:
            http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ for
            more details.

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result
            set with respect to keys.

        :type version_id_marker: string
        :param version_id_marker: The version-id marker of where you
            are in the result set.

        :rtype: :class:`boto.s3.bucketlistresultset.VersionedBucketListResultSet`
        :return: an instance of a VersionedBucketListResultSet that
            handles paging, etc
        """
        return VersionedBucketListResultSet(self, prefix, delimiter,
                                            key_marker, version_id_marker,
                                            headers)
| 287 |
    def list_multipart_uploads(self, key_marker='',
                               upload_id_marker='',
                               headers=None):
        """
        List multipart upload objects within a bucket. This returns an
        instance of an MultiPartUploadListResultSet that automatically
        handles all of the result paging, etc. from S3. You just need
        to keep iterating until there are no more results.

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result
            set with respect to keys.

        :type upload_id_marker: string
        :param upload_id_marker: The upload-id marker of where you are
            in the result set.

        :rtype: :class:`boto.s3.bucketlistresultset.MultiPartUploadListResultSet`
        :return: an instance of a MultiPartUploadListResultSet that
            handles paging, etc
        """
        return MultiPartUploadListResultSet(self, key_marker,
                                            upload_id_marker,
                                            headers)
| 306 |
| 307 def _get_all(self, element_map, initial_query_string='', |
| 308 headers=None, **params): |
| 309 l = [] |
| 310 for k, v in params.items(): |
| 311 k = k.replace('_', '-') |
| 312 if k == 'maxkeys': |
| 313 k = 'max-keys' |
| 314 if isinstance(v, unicode): |
| 315 v = v.encode('utf-8') |
| 316 if v is not None and v != '': |
| 317 l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v)))) |
| 318 if len(l): |
| 319 s = initial_query_string + '&' + '&'.join(l) |
| 320 else: |
| 321 s = initial_query_string |
| 322 response = self.connection.make_request('GET', self.name, |
| 323 headers=headers, |
| 324 query_args=s) |
| 325 body = response.read() |
| 326 boto.log.debug(body) |
| 327 if response.status == 200: |
| 328 rs = ResultSet(element_map) |
| 329 h = handler.XmlHandler(rs, self) |
| 330 xml.sax.parseString(body, h) |
| 331 return rs |
| 332 else: |
| 333 raise self.connection.provider.storage_response_error( |
| 334 response.status, response.reason, body) |
| 335 |
    def get_all_keys(self, headers=None, **params):
        """
        A lower-level method for listing contents of a bucket. This
        closely models the actual S3 API and requires you to manually
        handle the paging of results. For a higher-level method that
        handles the details of paging for you, you can use the list
        method.

        :type max_keys: int
        :param max_keys: The maximum number of keys to retrieve

        :type prefix: string
        :param prefix: The prefix of the keys you want to retrieve

        :type marker: string
        :param marker: The "marker" of where you are in the result set

        :type delimiter: string
        :param delimiter: If this optional, Unicode string parameter
            is included with your request, then keys that contain the
            same string between the prefix and the first occurrence of
            the delimiter will be rolled up into a single result
            element in the CommonPrefixes collection. These rolled-up
            keys are not returned elsewhere in the response.

        :rtype: ResultSet
        :return: The result from S3 listing the keys requested

        """
        return self._get_all([('Contents', self.key_class),
                              ('CommonPrefixes', Prefix)],
                             '', headers, **params)
| 368 |
    def get_all_versions(self, headers=None, **params):
        """
        A lower-level, version-aware method for listing contents of a
        bucket. This closely models the actual S3 API and requires
        you to manually handle the paging of results. For a
        higher-level method that handles the details of paging for
        you, you can use the list method.

        :type max_keys: int
        :param max_keys: The maximum number of keys to retrieve

        :type prefix: string
        :param prefix: The prefix of the keys you want to retrieve

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result set
            with respect to keys.

        :type version_id_marker: string
        :param version_id_marker: The "marker" of where you are in the result
            set with respect to version-id's.

        :type delimiter: string
        :param delimiter: If this optional, Unicode string parameter
            is included with your request, then keys that contain the
            same string between the prefix and the first occurrence of
            the delimiter will be rolled up into a single result
            element in the CommonPrefixes collection. These rolled-up
            keys are not returned elsewhere in the response.

        :rtype: ResultSet
        :return: The result from S3 listing the keys requested
        """
        return self._get_all([('Version', self.key_class),
                              ('CommonPrefixes', Prefix),
                              ('DeleteMarker', DeleteMarker)],
                             'versions', headers, **params)
| 406 |
    def get_all_multipart_uploads(self, headers=None, **params):
        """
        A lower-level, version-aware method for listing active
        MultiPart uploads for a bucket. This closely models the
        actual S3 API and requires you to manually handle the paging
        of results. For a higher-level method that handles the
        details of paging for you, you can use the list method.

        :type max_uploads: int
        :param max_uploads: The maximum number of uploads to retrieve.
            Default value is 1000.

        :type key_marker: string
        :param key_marker: Together with upload_id_marker, this
            parameter specifies the multipart upload after which
            listing should begin. If upload_id_marker is not
            specified, only the keys lexicographically greater than
            the specified key_marker will be included in the list.

            If upload_id_marker is specified, any multipart uploads
            for a key equal to the key_marker might also be included,
            provided those multipart uploads have upload IDs
            lexicographically greater than the specified
            upload_id_marker.

        :type upload_id_marker: string
        :param upload_id_marker: Together with key-marker, specifies
            the multipart upload after which listing should begin. If
            key_marker is not specified, the upload_id_marker
            parameter is ignored. Otherwise, any multipart uploads
            for a key equal to the key_marker might be included in the
            list only if they have an upload ID lexicographically
            greater than the specified upload_id_marker.

        :rtype: ResultSet
        :return: The result from S3 listing the uploads requested

        """
        return self._get_all([('Upload', MultiPartUpload),
                              ('CommonPrefixes', Prefix)],
                             'uploads', headers, **params)
| 448 |
| 449 def new_key(self, key_name=None): |
| 450 """ |
| 451 Creates a new key |
| 452 |
| 453 :type key_name: string |
| 454 :param key_name: The name of the key to create |
| 455 |
| 456 :rtype: :class:`boto.s3.key.Key` or subclass |
| 457 :returns: An instance of the newly created key object |
| 458 """ |
| 459 if not key_name: |
| 460 raise ValueError('Empty key names are not allowed') |
| 461 return self.key_class(self, key_name) |
| 462 |
    def generate_url(self, expires_in, method='GET', headers=None,
                     force_http=False, response_headers=None,
                     expires_in_absolute=False):
        """
        Generate a signed URL for this bucket (no key) by delegating
        to the connection.  See
        :meth:`boto.s3.connection.S3Connection.generate_url` for the
        meaning of the parameters.
        """
        return self.connection.generate_url(expires_in, method, self.name,
                                            headers=headers,
                                            force_http=force_http,
                                            response_headers=response_headers,
                                            expires_in_absolute=expires_in_absolute)
| 471 |
    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
        """
        Deletes a set of keys using S3's Multi-object delete API. If a
        VersionID is specified for that key then that version is removed.
        Returns a MultiDeleteResult Object, which contains Deleted
        and Error elements for each key you ask to delete.

        :type keys: list
        :param keys: A list of either key_names or (key_name, versionid) pairs
            or a list of Key instances.

        :type quiet: boolean
        :param quiet: In quiet mode the response includes only keys
            where the delete operation encountered an error. For a
            successful deletion, the operation does not return any
            information about the delete in the response body.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device. This value is
            required anytime you are deleting versioned objects from a
            bucket that has the MFADelete option on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to send with each
            request.

        :returns: An instance of MultiDeleteResult
        """
        ikeys = iter(keys)
        result = MultiDeleteResult(self)
        provider = self.connection.provider
        query_args = 'delete'

        def delete_keys2(hdrs):
            # Build and POST one batch of up to 1000 keys (the S3
            # Multi-object delete per-request limit).  Returns True
            # when a full batch was sent, i.e. more keys may remain.
            hdrs = hdrs or {}
            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
            data += u"<Delete>"
            if quiet:
                data += u"<Quiet>true</Quiet>"
            count = 0
            while count < 1000:
                try:
                    key = ikeys.next()
                except StopIteration:
                    break
                # Accept plain names, (name, version) pairs, or
                # Key/DeleteMarker instances; anything else is
                # recorded as an error instead of being deleted.
                if isinstance(key, basestring):
                    key_name = key
                    version_id = None
                elif isinstance(key, tuple) and len(key) == 2:
                    key_name, version_id = key
                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                    key_name = key.name
                    version_id = key.version_id
                else:
                    if isinstance(key, Prefix):
                        key_name = key.name
                        code = 'PrefixSkipped'  # Don't delete Prefix
                    else:
                        key_name = repr(key)  # try get a string
                        code = 'InvalidArgument'  # other unknown type
                    message = 'Invalid. No delete action taken for this object.'
                    error = Error(key_name, code=code, message=message)
                    result.errors.append(error)
                    continue
                count += 1
                # Escape XML-special characters in the key name.
                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
                if version_id:
                    data += u"<VersionId>%s</VersionId>" % version_id
                data += u"</Object>"
            data += u"</Delete>"
            if count <= 0:
                return False  # no more
            data = data.encode('utf-8')
            # The Multi-object delete API requires a Content-MD5 header.
            fp = StringIO.StringIO(data)
            md5 = boto.utils.compute_md5(fp)
            hdrs['Content-MD5'] = md5[1]
            hdrs['Content-Type'] = 'text/xml'
            if mfa_token:
                hdrs[provider.mfa_header] = ' '.join(mfa_token)
            response = self.connection.make_request('POST', self.name,
                                                    headers=hdrs,
                                                    query_args=query_args,
                                                    data=data)
            body = response.read()
            if response.status == 200:
                h = handler.XmlHandler(result, self)
                xml.sax.parseString(body, h)
                return count >= 1000  # more?
            else:
                raise provider.storage_response_error(response.status,
                                                      response.reason,
                                                      body)
        # Keep sending full batches until the iterator is exhausted.
        while delete_keys2(headers):
            pass
        return result
| 565 |
| 566 def delete_key(self, key_name, headers=None, version_id=None, |
| 567 mfa_token=None): |
| 568 """ |
| 569 Deletes a key from the bucket. If a version_id is provided, |
| 570 only that version of the key will be deleted. |
| 571 |
| 572 :type key_name: string |
| 573 :param key_name: The key name to delete |
| 574 |
| 575 :type version_id: string |
| 576 :param version_id: The version ID (optional) |
| 577 |
| 578 :type mfa_token: tuple or list of strings |
| 579 :param mfa_token: A tuple or list consisting of the serial |
| 580 number from the MFA device and the current value of the |
| 581 six-digit token associated with the device. This value is |
| 582 required anytime you are deleting versioned objects from a |
| 583 bucket that has the MFADelete option on the bucket. |
| 584 |
| 585 :rtype: :class:`boto.s3.key.Key` or subclass |
| 586 :returns: A key object holding information on what was |
| 587 deleted. The Caller can see if a delete_marker was |
| 588 created or removed and what version_id the delete created |
| 589 or removed. |
| 590 """ |
| 591 self._delete_key_internal(key_name, headers=headers, |
| 592 version_id=version_id, mfa_token=mfa_token, |
| 593 query_args_l=None) |
| 594 |
    def _delete_key_internal(self, key_name, headers=None, version_id=None,
                             mfa_token=None, query_args_l=None):
        """
        Issue the DELETE request for a key.

        :type query_args_l: list or None
        :param query_args_l: Extra ``name=value`` query-string
            fragments to include in the request.

        :returns: A key object describing what was deleted (name and
            version/delete-marker headers from the response).
        :raises: the provider's storage_response_error for any status
            other than 204.
        """
        query_args_l = query_args_l or []
        provider = self.connection.provider
        if version_id:
            query_args_l.append('versionId=%s' % version_id)
        query_args = '&'.join(query_args_l) or None
        if mfa_token:
            if not headers:
                headers = {}
            headers[provider.mfa_header] = ' '.join(mfa_token)
        response = self.connection.make_request('DELETE', self.name, key_name,
                                                headers=headers,
                                                query_args=query_args)
        body = response.read()
        # S3 answers a successful delete with 204 No Content.
        if response.status != 204:
            raise provider.storage_response_error(response.status,
                                                  response.reason, body)
        else:
            # return a key object with information on what was deleted.
            k = self.key_class(self)
            k.name = key_name
            k.handle_version_headers(response)
            return k
| 619 |
    def copy_key(self, new_key_name, src_bucket_name,
                 src_key_name, metadata=None, src_version_id=None,
                 storage_class='STANDARD', preserve_acl=False,
                 encrypt_key=False, headers=None, query_args=None):
        """
        Create a new key in the bucket by copying another existing key.

        :type new_key_name: string
        :param new_key_name: The name of the new key

        :type src_bucket_name: string
        :param src_bucket_name: The name of the source bucket

        :type src_key_name: string
        :param src_key_name: The name of the source key

        :type src_version_id: string
        :param src_version_id: The version id for the key. This param
            is optional. If not specified, the newest version of the
            key will be copied.

        :type metadata: dict
        :param metadata: Metadata to be associated with new key. If
            metadata is supplied, it will replace the metadata of the
            source key being copied. If no metadata is supplied, the
            source key's metadata will be copied to the new key.

        :type storage_class: string
        :param storage_class: The storage class of the new key. By
            default, the new key will use the standard storage class.
            Possible values are: STANDARD | REDUCED_REDUNDANCY

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key will
            be copied to the destination key. If False, the
            destination key will have the default ACL. Note that
            preserving the ACL in the new key object will require two
            additional API calls to S3, one to retrieve the current
            ACL and one to set that ACL on the new object. If you
            don't care about the ACL, a value of False will be
            significantly more efficient.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type headers: dict
        :param headers: A dictionary of header name/value pairs.

        :type query_args: string
        :param query_args: A string of additional querystring arguments
            to append to the request

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        headers = headers or {}
        provider = self.connection.provider
        src_key_name = boto.utils.get_utf8_value(src_key_name)
        if preserve_acl:
            if self.name == src_bucket_name:
                src_bucket = self
            else:
                src_bucket = self.connection.get_bucket(src_bucket_name)
            # Fetch the source ACL now; it is re-applied after the copy.
            acl = src_bucket.get_xml_acl(src_key_name)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        # The copy source is conveyed as a header, not as request data.
        src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
        if src_version_id:
            src += '?versionId=%s' % src_version_id
        headers[provider.copy_source_header] = str(src)
        # make sure storage_class_header key exists before accessing it
        if provider.storage_class_header and storage_class:
            headers[provider.storage_class_header] = storage_class
        if metadata is not None:
            headers[provider.metadata_directive_header] = 'REPLACE'
            headers = boto.utils.merge_meta(headers, metadata, provider)
        elif not query_args:  # Can't use this header with multi-part copy.
            headers[provider.metadata_directive_header] = 'COPY'
        response = self.connection.make_request('PUT', self.name, new_key_name,
                                                headers=headers,
                                                query_args=query_args)
        body = response.read()
        if response.status == 200:
            key = self.new_key(new_key_name)
            h = handler.XmlHandler(key, self)
            xml.sax.parseString(body, h)
            # A copy can fail after the 200 status line; the failure
            # is reported as an <Error> element in the body.
            if hasattr(key, 'Error'):
                raise provider.storage_copy_error(key.Code, key.Message, body)
            key.handle_version_headers(response)
            if preserve_acl:
                self.set_xml_acl(acl, new_key_name)
            return key
        else:
            raise provider.storage_response_error(response.status,
                                                  response.reason, body)
| 717 |
| 718 def set_canned_acl(self, acl_str, key_name='', headers=None, |
| 719 version_id=None): |
| 720 assert acl_str in CannedACLStrings |
| 721 |
| 722 if headers: |
| 723 headers[self.connection.provider.acl_header] = acl_str |
| 724 else: |
| 725 headers = {self.connection.provider.acl_header: acl_str} |
| 726 |
| 727 query_args = 'acl' |
| 728 if version_id: |
| 729 query_args += '&versionId=%s' % version_id |
| 730 response = self.connection.make_request('PUT', self.name, key_name, |
| 731 headers=headers, query_args=query_args) |
| 732 body = response.read() |
| 733 if response.status != 200: |
| 734 raise self.connection.provider.storage_response_error( |
| 735 response.status, response.reason, body) |
| 736 |
| 737 def get_xml_acl(self, key_name='', headers=None, version_id=None): |
| 738 query_args = 'acl' |
| 739 if version_id: |
| 740 query_args += '&versionId=%s' % version_id |
| 741 response = self.connection.make_request('GET', self.name, key_name, |
| 742 query_args=query_args, |
| 743 headers=headers) |
| 744 body = response.read() |
| 745 if response.status != 200: |
| 746 raise self.connection.provider.storage_response_error( |
| 747 response.status, response.reason, body) |
| 748 return body |
| 749 |
| 750 def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, |
| 751 query_args='acl'): |
| 752 if version_id: |
| 753 query_args += '&versionId=%s' % version_id |
| 754 response = self.connection.make_request('PUT', self.name, key_name, |
| 755 data=acl_str.encode('UTF-8'), |
| 756 query_args=query_args, |
| 757 headers=headers) |
| 758 body = response.read() |
| 759 if response.status != 200: |
| 760 raise self.connection.provider.storage_response_error( |
| 761 response.status, response.reason, body) |
| 762 |
| 763 def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None): |
| 764 if isinstance(acl_or_str, Policy): |
| 765 self.set_xml_acl(acl_or_str.to_xml(), key_name, |
| 766 headers, version_id) |
| 767 else: |
| 768 self.set_canned_acl(acl_or_str, key_name, |
| 769 headers, version_id) |
| 770 |
    def get_acl(self, key_name='', headers=None, version_id=None):
        """
        Return the access control policy for the bucket or a key.

        :type key_name: string
        :param key_name: The key whose ACL should be fetched, or the
            empty string for the bucket's own ACL.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional. Fetch the ACL of this specific
            version of the key rather than the newest one.

        :rtype: :class:`boto.s3.acl.Policy`
        :returns: The parsed ACL policy.
        """
        query_args = 'acl'
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('GET', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            # Parse the XML response into a Policy object.
            policy = Policy(self)
            h = handler.XmlHandler(policy, self)
            xml.sax.parseString(body, h)
            return policy
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 787 |
    def set_subresource(self, subresource, value, key_name='', headers=None,
                        version_id=None):
        """
        Set a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to set.

        :type value: string
        :param value: The value of the subresource.

        :type key_name: string
        :param key_name: The key to operate on, or None to operate on the
            bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional. The version id of the key to
            operate on. If not specified, operate on the newest
            version.
        """
        if not subresource:
            raise TypeError('set_subresource called with subresource=None')
        # The query string is just the subresource name, e.g. 'acl'.
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('PUT', self.name, key_name,
                                                data=value.encode('UTF-8'),
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 824 |
    def get_subresource(self, subresource, key_name='', headers=None,
                        version_id=None):
        """
        Get a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to get.

        :type key_name: string
        :param key_name: The key to operate on, or None to operate on the
            bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional. The version id of the key to
            operate on. If not specified, operate on the newest
            version.

        :rtype: string
        :returns: The value of the subresource.
        """
        if not subresource:
            raise TypeError('get_subresource called with subresource=None')
        # The query string is just the subresource name, e.g. 'acl'.
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('GET', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return body
| 861 |
| 862 def make_public(self, recursive=False, headers=None): |
| 863 self.set_canned_acl('public-read', headers=headers) |
| 864 if recursive: |
| 865 for key in self: |
| 866 self.set_canned_acl('public-read', key.name, headers=headers) |
| 867 |
| 868 def add_email_grant(self, permission, email_address, |
| 869 recursive=False, headers=None): |
| 870 """ |
| 871 Convenience method that provides a quick way to add an email grant |
| 872 to a bucket. This method retrieves the current ACL, creates a new |
| 873 grant based on the parameters passed in, adds that grant to the ACL |
| 874 and then PUT's the new ACL back to S3. |
| 875 |
| 876 :type permission: string |
| 877 :param permission: The permission being granted. Should be one of: |
| 878 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). |
| 879 |
| 880 :type email_address: string |
| 881 :param email_address: The email address associated with the AWS |
| 882 account your are granting the permission to. |
| 883 |
| 884 :type recursive: boolean |
| 885 :param recursive: A boolean value to controls whether the |
| 886 command will apply the grant to all keys within the bucket |
| 887 or not. The default value is False. By passing a True |
| 888 value, the call will iterate through all keys in the |
| 889 bucket and apply the same grant to each key. CAUTION: If |
| 890 you have a lot of keys, this could take a long time! |
| 891 """ |
| 892 if permission not in S3Permissions: |
| 893 raise self.connection.provider.storage_permissions_error( |
| 894 'Unknown Permission: %s' % permission) |
| 895 policy = self.get_acl(headers=headers) |
| 896 policy.acl.add_email_grant(permission, email_address) |
| 897 self.set_acl(policy, headers=headers) |
| 898 if recursive: |
| 899 for key in self: |
| 900 key.add_email_grant(permission, email_address, headers=headers) |
| 901 |
| 902 def add_user_grant(self, permission, user_id, recursive=False, |
| 903 headers=None, display_name=None): |
| 904 """ |
| 905 Convenience method that provides a quick way to add a canonical |
| 906 user grant to a bucket. This method retrieves the current ACL, |
| 907 creates a new grant based on the parameters passed in, adds that |
| 908 grant to the ACL and then PUT's the new ACL back to S3. |
| 909 |
| 910 :type permission: string |
| 911 :param permission: The permission being granted. Should be one of: |
| 912 (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). |
| 913 |
| 914 :type user_id: string |
| 915 :param user_id: The canonical user id associated with the AWS |
| 916 account your are granting the permission to. |
| 917 |
| 918 :type recursive: boolean |
| 919 :param recursive: A boolean value to controls whether the |
| 920 command will apply the grant to all keys within the bucket |
| 921 or not. The default value is False. By passing a True |
| 922 value, the call will iterate through all keys in the |
| 923 bucket and apply the same grant to each key. CAUTION: If |
| 924 you have a lot of keys, this could take a long time! |
| 925 |
| 926 :type display_name: string |
| 927 :param display_name: An option string containing the user's |
| 928 Display Name. Only required on Walrus. |
| 929 """ |
| 930 if permission not in S3Permissions: |
| 931 raise self.connection.provider.storage_permissions_error( |
| 932 'Unknown Permission: %s' % permission) |
| 933 policy = self.get_acl(headers=headers) |
| 934 policy.acl.add_user_grant(permission, user_id, |
| 935 display_name=display_name) |
| 936 self.set_acl(policy, headers=headers) |
| 937 if recursive: |
| 938 for key in self: |
| 939 key.add_user_grant(permission, user_id, headers=headers, |
| 940 display_name=display_name) |
| 941 |
| 942 def list_grants(self, headers=None): |
| 943 policy = self.get_acl(headers=headers) |
| 944 return policy.acl.grants |
| 945 |
    def get_location(self):
        """
        Returns the LocationConstraint for the bucket.

        :rtype: str
        :return: The LocationConstraint for the bucket or the empty
            string if no constraint was specified when bucket was created.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='location')
        body = response.read()
        if response.status == 200:
            # Parse the XML; the LocationConstraint element is exposed
            # as an attribute on the ResultSet.
            rs = ResultSet(self)
            h = handler.XmlHandler(rs, self)
            xml.sax.parseString(body, h)
            return rs.LocationConstraint
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 965 |
    def set_xml_logging(self, logging_str, headers=None):
        """
        Set logging on a bucket directly to the given xml string.

        :type logging_str: unicode string
        :param logging_str: The XML for the bucketloggingstatus which
            will be set. The string will be converted to utf-8 before
            it is sent. Usually, you will obtain this XML from the
            BucketLogging object.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        body = logging_str.encode('utf-8')
        response = self.connection.make_request('PUT', self.name, data=body,
                                                query_args='logging', headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 988 |
    def enable_logging(self, target_bucket, target_prefix='',
                       grants=None, headers=None):
        """
        Enable logging on a bucket.

        :type target_bucket: bucket or string
        :param target_bucket: The bucket to log to.

        :type target_prefix: string
        :param target_prefix: The prefix which should be prepended to the
            generated log files written to the target_bucket.

        :type grants: list of Grant objects
        :param grants: A list of extra permissions which will be granted on
            the log files which are created.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        # Accept either a Bucket object or a plain bucket name.
        if isinstance(target_bucket, Bucket):
            target_bucket = target_bucket.name
        blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
                                 grants=grants)
        return self.set_xml_logging(blogging.to_xml(), headers=headers)
| 1013 |
    def disable_logging(self, headers=None):
        """
        Disable logging on a bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        # An empty BucketLogging document turns logging off.
        blogging = BucketLogging()
        return self.set_xml_logging(blogging.to_xml(), headers=headers)
| 1023 |
    def get_logging_status(self, headers=None):
        """
        Get the logging status for this bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
        :return: A BucketLogging object for this bucket.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='logging', headers=headers)
        body = response.read()
        if response.status == 200:
            # Parse the BucketLoggingStatus XML into a BucketLogging object.
            blogging = BucketLogging()
            h = handler.XmlHandler(blogging, self)
            xml.sax.parseString(body, h)
            return blogging
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1042 |
    def set_as_logging_target(self, headers=None):
        """
        Setup the current bucket as a logging target by granting the necessary
        permissions to the LogDelivery group to write log files to this bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the requests.
        """
        # self.LoggingGroup is the log-delivery group URI (a class
        # attribute defined elsewhere in this class).
        policy = self.get_acl(headers=headers)
        g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
        g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
        policy.acl.add_grant(g1)
        policy.acl.add_grant(g2)
        self.set_acl(policy, headers=headers)
| 1054 |
| 1055 def get_request_payment(self, headers=None): |
| 1056 response = self.connection.make_request('GET', self.name, |
| 1057 query_args='requestPayment', headers=headers) |
| 1058 body = response.read() |
| 1059 if response.status == 200: |
| 1060 return body |
| 1061 else: |
| 1062 raise self.connection.provider.storage_response_error( |
| 1063 response.status, response.reason, body) |
| 1064 |
| 1065 def set_request_payment(self, payer='BucketOwner', headers=None): |
| 1066 body = self.BucketPaymentBody % payer |
| 1067 response = self.connection.make_request('PUT', self.name, data=body, |
| 1068 query_args='requestPayment', headers=headers) |
| 1069 body = response.read() |
| 1070 if response.status == 200: |
| 1071 return True |
| 1072 else: |
| 1073 raise self.connection.provider.storage_response_error( |
| 1074 response.status, response.reason, body) |
| 1075 |
    def configure_versioning(self, versioning, mfa_delete=False,
                             mfa_token=None, headers=None):
        """
        Configure versioning for this bucket.

        .. note:: This feature is currently in beta.

        :type versioning: bool
        :param versioning: A boolean indicating whether version is
            enabled (True) or disabled (False).

        :type mfa_delete: bool
        :param mfa_delete: A boolean indicating whether the
            Multi-Factor Authentication Delete feature is enabled
            (True) or disabled (False). If mfa_delete is enabled then
            all Delete operations will require the token from your MFA
            device to be passed in the request.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device. This value is
            required when you are changing the status of the MfaDelete
            property of the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        if versioning:
            ver = 'Enabled'
        else:
            ver = 'Suspended'
        if mfa_delete:
            mfa = 'Enabled'
        else:
            mfa = 'Disabled'
        # VersioningBody is a class-level XML template (defined
        # elsewhere in this class).
        body = self.VersioningBody % (ver, mfa)
        if mfa_token:
            if not headers:
                headers = {}
            provider = self.connection.provider
            # The MFA token is sent as '<serial> <token>' in the
            # provider-specific MFA header.
            headers[provider.mfa_header] = ' '.join(mfa_token)
        response = self.connection.make_request('PUT', self.name, data=body,
                                                query_args='versioning', headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1123 |
    def get_versioning_status(self, headers=None):
        """
        Returns the current status of versioning on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: dict
        :returns: A dictionary containing a key named 'Versioning'
            that can have a value of either Enabled, Disabled, or
            Suspended. Also, if MFADelete has ever been enabled on the
            bucket, the dictionary will contain a key named
            'MfaDelete' (note the capitalization) whose value is the
            status string from the response (e.g. Enabled or Disabled).
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='versioning', headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # VersionRE and MFADeleteRE are class-level regexes
            # (defined elsewhere in this class); each captures the
            # relevant status string from the XML body.
            d = {}
            ver = re.search(self.VersionRE, body)
            if ver:
                d['Versioning'] = ver.group(1)
            mfa = re.search(self.MFADeleteRE, body)
            if mfa:
                d['MfaDelete'] = mfa.group(1)
            return d
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1152 |
    def configure_lifecycle(self, lifecycle_config, headers=None):
        """
        Configure lifecycle for this bucket.

        :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
        :param lifecycle_config: The lifecycle configuration you want
            to configure for this bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        # The lifecycle API requires a Content-MD5 header of the body;
        # compute_md5 returns a tuple and element [1] is the b64 digest.
        fp = StringIO.StringIO(lifecycle_config.to_xml())
        md5 = boto.utils.compute_md5(fp)
        if headers is None:
            headers = {}
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('PUT', self.name,
                                                data=fp.getvalue(),
                                                query_args='lifecycle',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1177 |
    def get_lifecycle_config(self, headers=None):
        """
        Returns the current lifecycle configuration on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: :class:`boto.s3.lifecycle.Lifecycle`
        :returns: A LifecycleConfig object that describes all current
            lifecycle rules in effect for the bucket.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='lifecycle', headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # Parse the lifecycle XML into a Lifecycle object.
            lifecycle = Lifecycle()
            h = handler.XmlHandler(lifecycle, self)
            xml.sax.parseString(body, h)
            return lifecycle
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1198 |
    def delete_lifecycle_configuration(self, headers=None):
        """
        Removes all lifecycle configuration from the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='lifecycle',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        # Success is indicated by 204 No Content.
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1213 |
    def configure_website(self, suffix, error_key='', headers=None):
        """
        Configure this bucket to act as a website

        :type suffix: str
        :param suffix: Suffix that is appended to a request that is for a
            "directory" on the website endpoint (e.g. if the suffix is
            index.html and you make a request to samplebucket/images/
            the data that is returned will be for the object with the
            key name images/index.html). The suffix must not be empty
            and must not include a slash character.

        :type error_key: str
        :param error_key: The object key name to use when a 4XX class
            error occurs. This is optional.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        # WebsiteBody and WebsiteErrorFragment are class-level XML
        # templates (defined elsewhere in this class).
        if error_key:
            error_frag = self.WebsiteErrorFragment % error_key
        else:
            error_frag = ''
        body = self.WebsiteBody % (suffix, error_frag)
        response = self.connection.make_request('PUT', self.name, data=body,
                                                query_args='website',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1245 |
| 1246 def get_website_configuration(self, headers=None): |
| 1247 """ |
| 1248 Returns the current status of website configuration on the bucket. |
| 1249 |
| 1250 :rtype: dict |
| 1251 :returns: A dictionary containing a Python representation |
| 1252 of the XML response from S3. The overall structure is: |
| 1253 |
| 1254 * WebsiteConfiguration |
| 1255 |
| 1256 * IndexDocument |
| 1257 |
| 1258 * Suffix : suffix that is appended to request that |
| 1259 is for a "directory" on the website endpoint |
| 1260 * ErrorDocument |
| 1261 |
| 1262 * Key : name of object to serve when an error occurs |
| 1263 """ |
| 1264 return self.get_website_configuration_with_xml(headers)[0] |
| 1265 |
    def get_website_configuration_with_xml(self, headers=None):
        """
        Returns the current status of website configuration on the bucket as
        unparsed XML.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: 2-Tuple
        :returns: 2-tuple containing:
            1) A dictionary containing a Python representation
               of the XML response from GCS. The overall structure is:
               * WebsiteConfiguration
                 * IndexDocument
                   * Suffix : suffix that is appended to request that
                     is for a "directory" on the website endpoint
                 * ErrorDocument
                   * Key : name of object to serve when an error occurs
            2) unparsed XML describing the bucket's website configuration.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='website', headers=headers)
        body = response.read()
        boto.log.debug(body)

        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

        # Parse the XML into a generic jsonresponse Element tree.
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e, body
| 1296 |
    def delete_website_configuration(self, headers=None):
        """
        Removes all website configuration from the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='website', headers=headers)
        body = response.read()
        boto.log.debug(body)
        # Success is indicated by 204 No Content.
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1310 |
| 1311 def get_website_endpoint(self): |
| 1312 """ |
| 1313 Returns the fully qualified hostname to use is you want to access this |
| 1314 bucket as a website. This doesn't validate whether the bucket has |
| 1315 been correctly configured as a website or not. |
| 1316 """ |
| 1317 l = [self.name] |
| 1318 l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()
)) |
| 1319 l.append('.'.join(self.connection.host.split('.')[-2:])) |
| 1320 return '.'.join(l) |
| 1321 |
| 1322 def get_policy(self, headers=None): |
| 1323 """ |
| 1324 Returns the JSON policy associated with the bucket. The policy |
| 1325 is returned as an uninterpreted JSON string. |
| 1326 """ |
| 1327 response = self.connection.make_request('GET', self.name, |
| 1328 query_args='policy', headers=headers) |
| 1329 body = response.read() |
| 1330 if response.status == 200: |
| 1331 return body |
| 1332 else: |
| 1333 raise self.connection.provider.storage_response_error( |
| 1334 response.status, response.reason, body) |
| 1335 |
| 1336 def set_policy(self, policy, headers=None): |
| 1337 """ |
| 1338 Add or replace the JSON policy associated with the bucket. |
| 1339 |
| 1340 :type policy: str |
| 1341 :param policy: The JSON policy as a string. |
| 1342 """ |
| 1343 response = self.connection.make_request('PUT', self.name, |
| 1344 data=policy, |
| 1345 query_args='policy', |
| 1346 headers=headers) |
| 1347 body = response.read() |
| 1348 if response.status >= 200 and response.status <= 204: |
| 1349 return True |
| 1350 else: |
| 1351 raise self.connection.provider.storage_response_error( |
| 1352 response.status, response.reason, body) |
| 1353 |
| 1354 def delete_policy(self, headers=None): |
| 1355 response = self.connection.make_request('DELETE', self.name, |
| 1356 data='/?policy', |
| 1357 query_args='policy', |
| 1358 headers=headers) |
| 1359 body = response.read() |
| 1360 if response.status >= 200 and response.status <= 204: |
| 1361 return True |
| 1362 else: |
| 1363 raise self.connection.provider.storage_response_error( |
| 1364 response.status, response.reason, body) |
| 1365 |
    def set_cors_xml(self, cors_xml, headers=None):
        """
        Set the CORS (Cross-Origin Resource Sharing) for a bucket.

        :type cors_xml: str
        :param cors_xml: The XML document describing your desired
            CORS configuration. See the S3 documentation for details
            of the exact syntax required.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        # The CORS API requires a Content-MD5 header of the body;
        # compute_md5 returns a tuple and element [1] is the b64 digest.
        fp = StringIO.StringIO(cors_xml)
        md5 = boto.utils.compute_md5(fp)
        if headers is None:
            headers = {}
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('PUT', self.name,
                                                data=fp.getvalue(),
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1391 |
| 1392 def set_cors(self, cors_config, headers=None): |
| 1393 """ |
| 1394 Set the CORS for this bucket given a boto CORSConfiguration |
| 1395 object. |
| 1396 |
| 1397 :type cors_config: :class:`boto.s3.cors.CORSConfiguration` |
| 1398 :param cors_config: The CORS configuration you want |
| 1399 to configure for this bucket. |
| 1400 """ |
| 1401 return self.set_cors_xml(cors_config.to_xml()) |
| 1402 |
    def get_cors_xml(self, headers=None):
        """
        Returns the current CORS configuration on the bucket as an
        XML document.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: str
        :returns: The unparsed CORS configuration XML.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='cors', headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            return body
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1417 |
    def get_cors(self, headers=None):
        """
        Returns the current CORS configuration on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: :class:`boto.s3.cors.CORSConfiguration`
        :returns: A CORSConfiguration object that describes all current
            CORS rules in effect for the bucket.
        """
        # Fetch the raw XML then parse it into a CORSConfiguration.
        body = self.get_cors_xml(headers)
        cors = CORSConfiguration()
        h = handler.XmlHandler(cors, self)
        xml.sax.parseString(body, h)
        return cors
| 1431 |
    def delete_cors(self, headers=None):
        """
        Removes all CORS configuration from the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        # Success is indicated by 204 No Content.
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1446 |
    def initiate_multipart_upload(self, key_name, headers=None,
                                  reduced_redundancy=False,
                                  metadata=None, encrypt_key=False,
                                  policy=None):
        """
        Start a multipart upload operation.

        :type key_name: string
        :param key_name: The name of the key that will ultimately
            result from this multipart upload operation. This will be
            exactly as the key appears in the bucket after the upload
            process has been completed.

        :type headers: dict
        :param headers: Additional HTTP headers to send and store with the
            resulting key in S3.

        :type reduced_redundancy: boolean
        :param reduced_redundancy: In multipart uploads, the storage
            class is specified when initiating the upload, not when
            uploading individual parts. So if you want the resulting
            key to use the reduced redundancy storage class set this
            flag when you initiate the upload.

        :type metadata: dict
        :param metadata: Any metadata that you would like to set on the key
            that results from the multipart upload.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key (once completed) in S3.

        :rtype: :class:`boto.s3.multipart.MultiPartUpload`
        :returns: The initiated upload, parsed from the service
            response (carries the upload id for subsequent part
            uploads).
        """
        query_args = 'uploads'
        provider = self.connection.provider
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            storage_class_header = provider.storage_class_header
            if storage_class_header:
                headers[storage_class_header] = 'REDUCED_REDUNDANCY'
            # TODO: what if the provider doesn't support reduced redundancy?
            # (see boto.s3.key.Key.set_contents_from_file)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if metadata is None:
            metadata = {}

        # Fold the user metadata into provider-prefixed headers.
        headers = boto.utils.merge_meta(headers, metadata,
                                        self.connection.provider)
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # Parse the InitiateMultipartUploadResult XML.
            resp = MultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            xml.sax.parseString(body, h)
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1515 |
| 1516 def complete_multipart_upload(self, key_name, upload_id, |
| 1517 xml_body, headers=None): |
| 1518 """ |
| 1519 Complete a multipart upload operation. |
| 1520 """ |
| 1521 query_args = 'uploadId=%s' % upload_id |
| 1522 if headers is None: |
| 1523 headers = {} |
| 1524 headers['Content-Type'] = 'text/xml' |
| 1525 response = self.connection.make_request('POST', self.name, key_name, |
| 1526 query_args=query_args, |
| 1527 headers=headers, data=xml_body) |
| 1528 contains_error = False |
| 1529 body = response.read() |
| 1530 # Some errors will be reported in the body of the response |
| 1531 # even though the HTTP response code is 200. This check |
| 1532 # does a quick and dirty peek in the body for an error element. |
| 1533 if body.find('<Error>') > 0: |
| 1534 contains_error = True |
| 1535 boto.log.debug(body) |
| 1536 if response.status == 200 and not contains_error: |
| 1537 resp = CompleteMultiPartUpload(self) |
| 1538 h = handler.XmlHandler(resp, self) |
| 1539 xml.sax.parseString(body, h) |
| 1540 # Use a dummy key to parse various response headers |
| 1541 # for versioning, encryption info and then explicitly |
| 1542 # set the completed MPU object values from key. |
| 1543 k = self.key_class(self) |
| 1544 k.handle_version_headers(response) |
| 1545 k.handle_encryption_headers(response) |
| 1546 resp.version_id = k.version_id |
| 1547 resp.encrypted = k.encrypted |
| 1548 return resp |
| 1549 else: |
| 1550 raise self.connection.provider.storage_response_error( |
| 1551 response.status, response.reason, body) |
| 1552 |
    def cancel_multipart_upload(self, key_name, upload_id, headers=None):
        """
        Abort an in-progress multipart upload.

        :type key_name: string
        :param key_name: The key the multipart upload was created for.

        :type upload_id: string
        :param upload_id: The upload id of the multipart upload operation.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.
        """
        query_args = 'uploadId=%s' % upload_id
        response = self.connection.make_request('DELETE', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        # A successful abort returns 204 No Content.
        if response.status != 204:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| 1563 |
| 1564 def delete(self, headers=None): |
| 1565 return self.connection.delete_bucket(self.name, headers=headers) |
| 1566 |
    def get_tags(self):
        """
        Return the bucket's tagging configuration parsed into a Tags
        object.

        :rtype: Tags
        :returns: The parsed tag set for the bucket.
        """
        # Fetch the raw tagging XML then parse it.
        response = self.get_xml_tags()
        tags = Tags()
        h = handler.XmlHandler(tags, self)
        xml.sax.parseString(response, h)
        return tags
| 1573 |
| 1574 def get_xml_tags(self): |
| 1575 response = self.connection.make_request('GET', self.name, |
| 1576 query_args='tagging', |
| 1577 headers=None) |
| 1578 body = response.read() |
| 1579 if response.status == 200: |
| 1580 return body |
| 1581 else: |
| 1582 raise self.connection.provider.storage_response_error( |
| 1583 response.status, response.reason, body) |
| 1584 |
    def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
        """
        Set the bucket's tagging configuration from a raw XML string.

        :type tag_str: string
        :param tag_str: The tagging XML document; it is UTF-8 encoded
            before being sent.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type query_args: string
        :param query_args: The subresource to PUT to; defaults to
            'tagging'.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        if headers is None:
            headers = {}
        # The tagging API requires a Content-MD5 header of the body;
        # compute_md5 returns a tuple and element [1] is the b64 digest.
        md5 = boto.utils.compute_md5(StringIO.StringIO(tag_str))
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('PUT', self.name,
                                                data=tag_str.encode('utf-8'),
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        # Success is indicated by 204 No Content.
        if response.status != 204:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return True
| 1600 |
| 1601 def set_tags(self, tags, headers=None): |
| 1602 return self.set_xml_tags(tags.to_xml(), headers=headers) |
| 1603 |
    def delete_tags(self, headers=None):
        """
        Remove the bucket's tagging configuration.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: bool
        :return: True if ok or raises an exception.
        """
        response = self.connection.make_request('DELETE', self.name,
                                                query_args='tagging',
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        # Success is indicated by 204 No Content.
        if response.status == 204:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
| OLD | NEW |