Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(194)

Side by Side Diff: third_party/boto/s3/bucket.py

Issue 12633019: Added boto/ to depot_tools/third_party (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/tools/depot_tools
Patch Set: Moved boto down by one Created 7 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « third_party/boto/s3/acl.py ('k') | third_party/boto/s3/bucketlistresultset.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2010, Eucalyptus Systems, Inc.
3 # All rights reserved.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a
6 # copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish, dis-
9 # tribute, sublicense, and/or sell copies of the Software, and to permit
10 # persons to whom the Software is furnished to do so, subject to the fol-
11 # lowing conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 # IN THE SOFTWARE.
23
24 import boto
25 from boto import handler
26 from boto.resultset import ResultSet
27 from boto.exception import BotoClientError
28 from boto.s3.acl import Policy, CannedACLStrings, Grant
29 from boto.s3.key import Key
30 from boto.s3.prefix import Prefix
31 from boto.s3.deletemarker import DeleteMarker
32 from boto.s3.multipart import MultiPartUpload
33 from boto.s3.multipart import CompleteMultiPartUpload
34 from boto.s3.multidelete import MultiDeleteResult
35 from boto.s3.multidelete import Error
36 from boto.s3.bucketlistresultset import BucketListResultSet
37 from boto.s3.bucketlistresultset import VersionedBucketListResultSet
38 from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
39 from boto.s3.lifecycle import Lifecycle
40 from boto.s3.tagging import Tags
41 from boto.s3.cors import CORSConfiguration
42 from boto.s3.bucketlogging import BucketLogging
43 from boto.s3 import website
44 import boto.jsonresponse
45 import boto.utils
46 import xml.sax
47 import xml.sax.saxutils
48 import StringIO
49 import urllib
50 import re
51 import base64
52 from collections import defaultdict
53
54 # as per http://goo.gl/BDuud (02/19/2011)
55
56
class S3WebsiteEndpointTranslate:
    """Maps an S3 region name to its website-hosting endpoint prefix.

    Lookups for regions that are not explicitly listed fall back to the
    classic us-east-1 website endpoint (see http://goo.gl/BDuud).
    """

    # defaultdict so unknown regions return the us-east-1 website
    # endpoint instead of raising KeyError.
    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['eu-west-1'] = 's3-website-eu-west-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['us-west-2'] = 's3-website-us-west-2'
    trans_region['sa-east-1'] = 's3-website-sa-east-1'
    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'

    @classmethod
    def translate_region(cls, reg):
        """Return the website endpoint prefix for region name *reg*."""
        # Fixed: classmethod's first parameter is conventionally 'cls',
        # not 'self' (it receives the class, not an instance).
        return cls.trans_region[reg]
70
# The permission names S3 accepts in ACL grants.
S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
72
73
class Bucket(object):
    """Represents a single S3 bucket and the operations available on it."""

    # Group URI used when granting S3's log-delivery service access.
    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    # XML body template for the ?requestPayment subresource (%s = payer).
    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Payer>%s</Payer>
       </RequestPaymentConfiguration>"""

    # XML body template for the ?versioning subresource
    # (first %s = Status, second %s = MfaDelete).
    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Status>%s</Status>
         <MfaDelete>%s</MfaDelete>
       </VersioningConfiguration>"""

    # XML body template for the ?website subresource
    # (first %s = index document suffix, second %s = optional error fragment).
    WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
      <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <IndexDocument><Suffix>%s</Suffix></IndexDocument>
        %s
      </WebsiteConfiguration>"""

    # Optional fragment substituted into WebsiteBody (%s = error doc key).
    WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""

    # Regexes used to extract values from versioning-configuration XML.
    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
99
    def __init__(self, connection=None, name=None, key_class=Key):
        """Create a Bucket handle.

        :param connection: the connection object used to make requests
            against S3 (typically a boto S3Connection).
        :param name: the bucket's name.
        :param key_class: class instantiated for keys in this bucket;
            defaults to boto.s3.key.Key (see set_key_class).
        """
        self.name = name
        self.connection = connection
        self.key_class = key_class
104
105 def __repr__(self):
106 return '<Bucket: %s>' % self.name
107
    def __iter__(self):
        # Iterating a bucket pages lazily through every key via a
        # BucketListResultSet.
        return iter(BucketListResultSet(self))
110
111 def __contains__(self, key_name):
112 return not (self.get_key(key_name) is None)
113
    def startElement(self, name, attrs, connection):
        # SAX parsing hook: a Bucket element has no nested structures
        # that need a dedicated sub-handler.
        return None
116
117 def endElement(self, name, value, connection):
118 if name == 'Name':
119 self.name = value
120 elif name == 'CreationDate':
121 self.creation_date = value
122 else:
123 setattr(self, name, value)
124
    def set_key_class(self, key_class):
        """
        Set the Key class associated with this bucket. By default, this
        would be the boto.s3.key.Key class but if you want to subclass that
        for some reason this allows you to associate your new class with a
        bucket so that when you call bucket.new_key() or when you get a
        listing of keys in the bucket you will get an instance of your key
        class rather than the default.

        :type key_class: class
        :param key_class: A subclass of Key that can be more specific
        """
        self.key_class = key_class
138
    def lookup(self, key_name, headers=None):
        """
        Deprecated: Please use get_key method.

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :rtype: :class:`boto.s3.key.Key`
        :returns: A Key object from this bucket.
        """
        return self.get_key(key_name, headers=headers)
150
    def get_key(self, key_name, headers=None, version_id=None,
                response_headers=None):
        """
        Check to see if a particular key exists within the bucket.  This
        method uses a HEAD request to check for the existence of the key.
        Returns: An instance of a Key object or None

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional version id of the key to retrieve.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response.  See
            http://goo.gl/EWOPb for details.

        :rtype: :class:`boto.s3.key.Key`
        :returns: A Key object from this bucket.
        """
        query_args_l = []
        if version_id:
            query_args_l.append('versionId=%s' % version_id)
        if response_headers:
            for rk, rv in response_headers.iteritems():
                query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))

        key, resp = self._get_key_internal(key_name, headers, query_args_l)
        return key
179
    def _get_key_internal(self, key_name, headers, query_args_l):
        """Perform the HEAD request behind get_key.

        :param query_args_l: list of pre-encoded 'name=value' strings to
            join into the request query string.
        :returns: (key, response) tuple; key is None when S3 answers 404.
        :raises: the provider's storage_response_error for any other
            non-success status.
        """
        query_args = '&'.join(query_args_l) or None
        response = self.connection.make_request('HEAD', self.name, key_name,
                                                headers=headers,
                                                query_args=query_args)
        response.read()
        # Allow any success status (2xx) - for example this lets us
        # support Range gets, which return status 206:
        if response.status / 100 == 2:
            k = self.key_class(self)
            provider = self.connection.provider
            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
            k.etag = response.getheader('etag')
            k.content_type = response.getheader('content-type')
            k.content_encoding = response.getheader('content-encoding')
            k.content_disposition = response.getheader('content-disposition')
            k.content_language = response.getheader('content-language')
            k.last_modified = response.getheader('last-modified')
            # the following machinations are a workaround to the fact that
            # apache/fastcgi omits the content-length header on HEAD
            # requests when the content-length is zero.
            # See http://goo.gl/0Tdax for more details.
            clen = response.getheader('content-length')
            if clen:
                k.size = int(response.getheader('content-length'))
            else:
                k.size = 0
            k.cache_control = response.getheader('cache-control')
            k.name = key_name
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            k.handle_restore_headers(response)
            return k, response
        else:
            if response.status == 404:
                return None, response
            else:
                raise self.connection.provider.storage_response_error(
                    response.status, response.reason, '')
219
    def list(self, prefix='', delimiter='', marker='', headers=None):
        """
        List key objects within a bucket.  This returns an instance of an
        BucketListResultSet that automatically handles all of the result
        paging, etc. from S3.  You just need to keep iterating until
        there are no more results.

        Called with no arguments, this will return an iterator object across
        all keys within the bucket.

        The Key objects returned by the iterator are obtained by parsing
        the results of a GET on the bucket, also known as the List Objects
        request.  The XML returned by this request contains only a subset
        of the information about each key.  Certain metadata fields such
        as Content-Type and user metadata are not available in the XML.
        Therefore, if you want these additional metadata fields you will
        have to do a HEAD request on the Key in the bucket.

        :type prefix: string
        :param prefix: allows you to limit the listing to a particular
            prefix.  For example, if you call the method with
            prefix='/foo/' then the iterator will only cycle through
            the keys that begin with the string '/foo/'.

        :type delimiter: string
        :param delimiter: can be used in conjunction with the prefix
            to allow you to organize and browse your keys
            hierarchically. See http://goo.gl/Xx63h for more details.

        :type marker: string
        :param marker: The "marker" of where you are in the result set

        :type headers: dict
        :param headers: Additional HTTP headers to include in each
            listing request.

        :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
        :return: an instance of a BucketListResultSet that handles paging, etc
        """
        return BucketListResultSet(self, prefix, delimiter, marker, headers)
256
    def list_versions(self, prefix='', delimiter='', key_marker='',
                      version_id_marker='', headers=None):
        """
        List version objects within a bucket.  This returns an
        instance of an VersionedBucketListResultSet that automatically
        handles all of the result paging, etc. from S3.  You just need
        to keep iterating until there are no more results.  Called
        with no arguments, this will return an iterator object across
        all keys within the bucket.

        :type prefix: string
        :param prefix: allows you to limit the listing to a particular
            prefix.  For example, if you call the method with
            prefix='/foo/' then the iterator will only cycle through
            the keys that begin with the string '/foo/'.

        :type delimiter: string
        :param delimiter: can be used in conjunction with the prefix
            to allow you to organize and browse your keys
            hierarchically. See:

            http://aws.amazon.com/releasenotes/Amazon-S3/213

            for more details.

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result
            set with respect to keys.

        :type version_id_marker: string
        :param version_id_marker: The "marker" of where you are in the
            result set with respect to version ids.

        :type headers: dict
        :param headers: Additional HTTP headers to include in each
            listing request.

        :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
        :return: an instance of a BucketListResultSet that handles paging, etc
        """
        return VersionedBucketListResultSet(self, prefix, delimiter,
                                            key_marker, version_id_marker,
                                            headers)
291
    def list_multipart_uploads(self, key_marker='',
                               upload_id_marker='',
                               headers=None):
        """
        List multipart upload objects within a bucket.  This returns an
        instance of an MultiPartUploadListResultSet that automatically
        handles all of the result paging, etc. from S3.  You just need
        to keep iterating until there are no more results.

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result
            set with respect to keys.

        :type upload_id_marker: string
        :param upload_id_marker: The "marker" of where you are in the
            result set with respect to upload ids.

        :type headers: dict
        :param headers: Additional HTTP headers to include in each
            listing request.

        :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
        :return: an instance of a BucketListResultSet that handles paging, etc
        """
        return MultiPartUploadListResultSet(self, key_marker,
                                            upload_id_marker,
                                            headers)
310
311 def _get_all(self, element_map, initial_query_string='',
312 headers=None, **params):
313 l = []
314 for k, v in params.items():
315 k = k.replace('_', '-')
316 if k == 'maxkeys':
317 k = 'max-keys'
318 if isinstance(v, unicode):
319 v = v.encode('utf-8')
320 if v is not None and v != '':
321 l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
322 if len(l):
323 s = initial_query_string + '&' + '&'.join(l)
324 else:
325 s = initial_query_string
326 response = self.connection.make_request('GET', self.name,
327 headers=headers,
328 query_args=s)
329 body = response.read()
330 boto.log.debug(body)
331 if response.status == 200:
332 rs = ResultSet(element_map)
333 h = handler.XmlHandler(rs, self)
334 xml.sax.parseString(body, h)
335 return rs
336 else:
337 raise self.connection.provider.storage_response_error(
338 response.status, response.reason, body)
339
    def get_all_keys(self, headers=None, **params):
        """
        A lower-level method for listing contents of a bucket.  This
        closely models the actual S3 API and requires you to manually
        handle the paging of results.  For a higher-level method that
        handles the details of paging for you, you can use the list
        method.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type max_keys: int
        :param max_keys: The maximum number of keys to retrieve

        :type prefix: string
        :param prefix: The prefix of the keys you want to retrieve

        :type marker: string
        :param marker: The "marker" of where you are in the result set

        :type delimiter: string
        :param delimiter: If this optional, Unicode string parameter
            is included with your request, then keys that contain the
            same string between the prefix and the first occurrence of
            the delimiter will be rolled up into a single result
            element in the CommonPrefixes collection. These rolled-up
            keys are not returned elsewhere in the response.

        :rtype: ResultSet
        :return: The result from S3 listing the keys requested

        """
        return self._get_all([('Contents', self.key_class),
                              ('CommonPrefixes', Prefix)],
                             '', headers, **params)
372
    def get_all_versions(self, headers=None, **params):
        """
        A lower-level, version-aware method for listing contents of a
        bucket.  This closely models the actual S3 API and requires
        you to manually handle the paging of results.  For a
        higher-level method that handles the details of paging for
        you, you can use the list method.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type max_keys: int
        :param max_keys: The maximum number of keys to retrieve

        :type prefix: string
        :param prefix: The prefix of the keys you want to retrieve

        :type key_marker: string
        :param key_marker: The "marker" of where you are in the result set
            with respect to keys.

        :type version_id_marker: string
        :param version_id_marker: The "marker" of where you are in the result
            set with respect to version-id's.

        :type delimiter: string
        :param delimiter: If this optional, Unicode string parameter
            is included with your request, then keys that contain the
            same string between the prefix and the first occurrence of
            the delimiter will be rolled up into a single result
            element in the CommonPrefixes collection. These rolled-up
            keys are not returned elsewhere in the response.

        :rtype: ResultSet
        :return: The result from S3 listing the keys requested
        """
        return self._get_all([('Version', self.key_class),
                              ('CommonPrefixes', Prefix),
                              ('DeleteMarker', DeleteMarker)],
                             'versions', headers, **params)
410
    def get_all_multipart_uploads(self, headers=None, **params):
        """
        A lower-level, version-aware method for listing active
        MultiPart uploads for a bucket.  This closely models the
        actual S3 API and requires you to manually handle the paging
        of results.  For a higher-level method that handles the
        details of paging for you, you can use the list method.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type max_uploads: int
        :param max_uploads: The maximum number of uploads to retrieve.
            Default value is 1000.

        :type key_marker: string
        :param key_marker: Together with upload_id_marker, this
            parameter specifies the multipart upload after which
            listing should begin.  If upload_id_marker is not
            specified, only the keys lexicographically greater than
            the specified key_marker will be included in the list.

            If upload_id_marker is specified, any multipart uploads
            for a key equal to the key_marker might also be included,
            provided those multipart uploads have upload IDs
            lexicographically greater than the specified
            upload_id_marker.

        :type upload_id_marker: string
        :param upload_id_marker: Together with key-marker, specifies
            the multipart upload after which listing should begin. If
            key_marker is not specified, the upload_id_marker
            parameter is ignored.  Otherwise, any multipart uploads
            for a key equal to the key_marker might be included in the
            list only if they have an upload ID lexicographically
            greater than the specified upload_id_marker.

        :rtype: ResultSet
        :return: The result from S3 listing the uploads requested

        """
        return self._get_all([('Upload', MultiPartUpload),
                              ('CommonPrefixes', Prefix)],
                             'uploads', headers, **params)
452
453 def new_key(self, key_name=None):
454 """
455 Creates a new key
456
457 :type key_name: string
458 :param key_name: The name of the key to create
459
460 :rtype: :class:`boto.s3.key.Key` or subclass
461 :returns: An instance of the newly created key object
462 """
463 if not key_name:
464 raise ValueError('Empty key names are not allowed')
465 return self.key_class(self, key_name)
466
    def generate_url(self, expires_in, method='GET', headers=None,
                     force_http=False, response_headers=None,
                     expires_in_absolute=False):
        """Generate a signed URL for this bucket.

        Thin delegate to the connection's generate_url; see that method
        for full parameter semantics.
        """
        return self.connection.generate_url(expires_in, method, self.name,
                                            headers=headers,
                                            force_http=force_http,
                                            response_headers=response_headers,
                                            expires_in_absolute=expires_in_absolute)
475
    def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None):
        """
        Deletes a set of keys using S3's Multi-object delete API. If a
        VersionID is specified for that key then that version is removed.
        Returns a MultiDeleteResult Object, which contains Deleted
        and Error elements for each key you ask to delete.

        :type keys: list
        :param keys: A list of either key_names or (key_name, versionid) pairs
            or a list of Key instances.

        :type quiet: boolean
        :param quiet: In quiet mode the response includes only keys
            where the delete operation encountered an error. For a
            successful deletion, the operation does not return any
            information about the delete in the response body.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device.  This value is
            required anytime you are deleting versioned objects from a
            bucket that has the MFADelete option on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.
            NOTE(review): this dict is written into (Content-MD5,
            Content-Type, MFA header) rather than copied — callers
            sharing a headers dict should be aware.

        :returns: An instance of MultiDeleteResult
        """
        ikeys = iter(keys)
        result = MultiDeleteResult(self)
        provider = self.connection.provider
        query_args = 'delete'

        def delete_keys2(hdrs):
            # Build and POST one Multi-object delete request of at most
            # 1000 keys (the per-request limit enforced by the loop
            # below); returns True when more keys remain in the iterator.
            hdrs = hdrs or {}
            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
            data += u"<Delete>"
            if quiet:
                data += u"<Quiet>true</Quiet>"
            count = 0
            while count < 1000:
                try:
                    key = ikeys.next()
                except StopIteration:
                    break
                if isinstance(key, basestring):
                    key_name = key
                    version_id = None
                elif isinstance(key, tuple) and len(key) == 2:
                    key_name, version_id = key
                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
                    key_name = key.name
                    version_id = key.version_id
                else:
                    # Anything else is recorded as an error in the result
                    # rather than sent to S3.
                    if isinstance(key, Prefix):
                        key_name = key.name
                        code = 'PrefixSkipped'   # Don't delete Prefix
                    else:
                        key_name = repr(key)   # try get a string
                        code = 'InvalidArgument'   # other unknown type
                    message = 'Invalid. No delete action taken for this object.'
                    error = Error(key_name, code=code, message=message)
                    result.errors.append(error)
                    continue
                count += 1
                # Escape the key name so it is valid XML content.
                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
                if version_id:
                    data += u"<VersionId>%s</VersionId>" % version_id
                data += u"</Object>"
            data += u"</Delete>"
            if count <= 0:
                return False  # no more
            data = data.encode('utf-8')
            # The Multi-object delete API requires a Content-MD5 header.
            fp = StringIO.StringIO(data)
            md5 = boto.utils.compute_md5(fp)
            hdrs['Content-MD5'] = md5[1]
            hdrs['Content-Type'] = 'text/xml'
            if mfa_token:
                hdrs[provider.mfa_header] = ' '.join(mfa_token)
            response = self.connection.make_request('POST', self.name,
                                                    headers=hdrs,
                                                    query_args=query_args,
                                                    data=data)
            body = response.read()
            if response.status == 200:
                h = handler.XmlHandler(result, self)
                xml.sax.parseString(body, h)
                return count >= 1000  # more?
            else:
                raise provider.storage_response_error(response.status,
                                                      response.reason,
                                                      body)
        while delete_keys2(headers):
            pass
        return result
569
    def delete_key(self, key_name, headers=None, version_id=None,
                   mfa_token=None):
        """
        Deletes a key from the bucket.  If a version_id is provided,
        only that version of the key will be deleted.

        :type key_name: string
        :param key_name: The key name to delete

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: The version ID (optional)

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device.  This value is
            required anytime you are deleting versioned objects from a
            bucket that has the MFADelete option on the bucket.

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: A key object holding information on what was
            deleted.  The Caller can see if a delete_marker was
            created or removed and what version_id the delete created
            or removed.
        """
        return self._delete_key_internal(key_name, headers=headers,
                                         version_id=version_id,
                                         mfa_token=mfa_token,
                                         query_args_l=None)
599
600 def _delete_key_internal(self, key_name, headers=None, version_id=None,
601 mfa_token=None, query_args_l=None):
602 query_args_l = query_args_l or []
603 provider = self.connection.provider
604 if version_id:
605 query_args_l.append('versionId=%s' % version_id)
606 query_args = '&'.join(query_args_l) or None
607 if mfa_token:
608 if not headers:
609 headers = {}
610 headers[provider.mfa_header] = ' '.join(mfa_token)
611 response = self.connection.make_request('DELETE', self.name, key_name,
612 headers=headers,
613 query_args=query_args)
614 body = response.read()
615 if response.status != 204:
616 raise provider.storage_response_error(response.status,
617 response.reason, body)
618 else:
619 # return a key object with information on what was deleted.
620 k = self.key_class(self)
621 k.name = key_name
622 k.handle_version_headers(response)
623 return k
624
625 def copy_key(self, new_key_name, src_bucket_name,
626 src_key_name, metadata=None, src_version_id=None,
627 storage_class='STANDARD', preserve_acl=False,
628 encrypt_key=False, headers=None, query_args=None):
629 """
630 Create a new key in the bucket by copying another existing key.
631
632 :type new_key_name: string
633 :param new_key_name: The name of the new key
634
635 :type src_bucket_name: string
636 :param src_bucket_name: The name of the source bucket
637
638 :type src_key_name: string
639 :param src_key_name: The name of the source key
640
641 :type src_version_id: string
642 :param src_version_id: The version id for the key. This param
643 is optional. If not specified, the newest version of the
644 key will be copied.
645
646 :type metadata: dict
647 :param metadata: Metadata to be associated with new key. If
648 metadata is supplied, it will replace the metadata of the
649 source key being copied. If no metadata is supplied, the
650 source key's metadata will be copied to the new key.
651
652 :type storage_class: string
653 :param storage_class: The storage class of the new key. By
654 default, the new key will use the standard storage class.
655 Possible values are: STANDARD | REDUCED_REDUNDANCY
656
657 :type preserve_acl: bool
658 :param preserve_acl: If True, the ACL from the source key will
659 be copied to the destination key. If False, the
660 destination key will have the default ACL. Note that
661 preserving the ACL in the new key object will require two
662 additional API calls to S3, one to retrieve the current
663 ACL and one to set that ACL on the new object. If you
664 don't care about the ACL, a value of False will be
665 significantly more efficient.
666
667 :type encrypt_key: bool
668 :param encrypt_key: If True, the new copy of the object will
669 be encrypted on the server-side by S3 and will be stored
670 in an encrypted form while at rest in S3.
671
672 :type headers: dict
673 :param headers: A dictionary of header name/value pairs.
674
675 :type query_args: string
676 :param query_args: A string of additional querystring arguments
677 to append to the request
678
679 :rtype: :class:`boto.s3.key.Key` or subclass
680 :returns: An instance of the newly created key object
681 """
682 headers = headers or {}
683 provider = self.connection.provider
684 src_key_name = boto.utils.get_utf8_value(src_key_name)
685 if preserve_acl:
686 if self.name == src_bucket_name:
687 src_bucket = self
688 else:
689 src_bucket = self.connection.get_bucket(src_bucket_name)
690 acl = src_bucket.get_xml_acl(src_key_name)
691 if encrypt_key:
692 headers[provider.server_side_encryption_header] = 'AES256'
693 src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
694 if src_version_id:
695 src += '?versionId=%s' % src_version_id
696 headers[provider.copy_source_header] = str(src)
697 # make sure storage_class_header key exists before accessing it
698 if provider.storage_class_header and storage_class:
699 headers[provider.storage_class_header] = storage_class
700 if metadata is not None:
701 headers[provider.metadata_directive_header] = 'REPLACE'
702 headers = boto.utils.merge_meta(headers, metadata, provider)
703 elif not query_args: # Can't use this header with multi-part copy.
704 headers[provider.metadata_directive_header] = 'COPY'
705 response = self.connection.make_request('PUT', self.name, new_key_name,
706 headers=headers,
707 query_args=query_args)
708 body = response.read()
709 if response.status == 200:
710 key = self.new_key(new_key_name)
711 h = handler.XmlHandler(key, self)
712 xml.sax.parseString(body, h)
713 if hasattr(key, 'Error'):
714 raise provider.storage_copy_error(key.Code, key.Message, body)
715 key.handle_version_headers(response)
716 if preserve_acl:
717 self.set_xml_acl(acl, new_key_name)
718 return key
719 else:
720 raise provider.storage_response_error(response.status,
721 response.reason, body)
722
723 def set_canned_acl(self, acl_str, key_name='', headers=None,
724 version_id=None):
725 assert acl_str in CannedACLStrings
726
727 if headers:
728 headers[self.connection.provider.acl_header] = acl_str
729 else:
730 headers = {self.connection.provider.acl_header: acl_str}
731
732 query_args = 'acl'
733 if version_id:
734 query_args += '&versionId=%s' % version_id
735 response = self.connection.make_request('PUT', self.name, key_name,
736 headers=headers, query_args=query_args)
737 body = response.read()
738 if response.status != 200:
739 raise self.connection.provider.storage_response_error(
740 response.status, response.reason, body)
741
742 def get_xml_acl(self, key_name='', headers=None, version_id=None):
743 query_args = 'acl'
744 if version_id:
745 query_args += '&versionId=%s' % version_id
746 response = self.connection.make_request('GET', self.name, key_name,
747 query_args=query_args,
748 headers=headers)
749 body = response.read()
750 if response.status != 200:
751 raise self.connection.provider.storage_response_error(
752 response.status, response.reason, body)
753 return body
754
755 def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
756 query_args='acl'):
757 if version_id:
758 query_args += '&versionId=%s' % version_id
759 response = self.connection.make_request('PUT', self.name, key_name,
760 data=acl_str.encode('UTF-8'),
761 query_args=query_args,
762 headers=headers)
763 body = response.read()
764 if response.status != 200:
765 raise self.connection.provider.storage_response_error(
766 response.status, response.reason, body)
767
768 def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
769 if isinstance(acl_or_str, Policy):
770 self.set_xml_acl(acl_or_str.to_xml(), key_name,
771 headers, version_id)
772 else:
773 self.set_canned_acl(acl_or_str, key_name,
774 headers, version_id)
775
776 def get_acl(self, key_name='', headers=None, version_id=None):
777 query_args = 'acl'
778 if version_id:
779 query_args += '&versionId=%s' % version_id
780 response = self.connection.make_request('GET', self.name, key_name,
781 query_args=query_args,
782 headers=headers)
783 body = response.read()
784 if response.status == 200:
785 policy = Policy(self)
786 h = handler.XmlHandler(policy, self)
787 xml.sax.parseString(body, h)
788 return policy
789 else:
790 raise self.connection.provider.storage_response_error(
791 response.status, response.reason, body)
792
    def set_subresource(self, subresource, value, key_name='', headers=None,
                        version_id=None):
        """
        Set a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to set.

        :type value: string
        :param value: The value of the subresource.

        :type key_name: string
        :param key_name: The key to operate on, or the empty string to
            operate on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional.  The version id of the key to
            operate on.  If not specified, operate on the newest
            version.
        """
        if not subresource:
            raise TypeError('set_subresource called with subresource=None')
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('PUT', self.name, key_name,
                                                data=value.encode('UTF-8'),
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
829
    def get_subresource(self, subresource, key_name='', headers=None,
                        version_id=None):
        """
        Get a subresource for a bucket or key.

        :type subresource: string
        :param subresource: The subresource to get.

        :type key_name: string
        :param key_name: The key to operate on, or the empty string to
            operate on the bucket.

        :type headers: dict
        :param headers: Additional HTTP headers to include in the request.

        :type version_id: string
        :param version_id: Optional.  The version id of the key to
            operate on.  If not specified, operate on the newest
            version.

        :rtype: string
        :returns: The value of the subresource.
        """
        if not subresource:
            raise TypeError('get_subresource called with subresource=None')
        query_args = subresource
        if version_id:
            query_args += '&versionId=%s' % version_id
        response = self.connection.make_request('GET', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
        return body
866
867 def make_public(self, recursive=False, headers=None):
868 self.set_canned_acl('public-read', headers=headers)
869 if recursive:
870 for key in self:
871 self.set_canned_acl('public-read', key.name, headers=headers)
872
    def add_email_grant(self, permission, email_address,
                        recursive=False, headers=None):
        """
        Convenience method that provides a quick way to add an email grant
        to a bucket. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL
        and then PUT's the new ACL back to S3.

        NOTE(review): this is an unguarded read-modify-write of the ACL;
        a concurrent ACL update elsewhere can be overwritten.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

        :type email_address: string
        :param email_address: The email address associated with the AWS
            account your are granting the permission to.

        :type recursive: boolean
        :param recursive: A boolean value to controls whether the
            command will apply the grant to all keys within the bucket
            or not.  The default value is False.  By passing a True
            value, the call will iterate through all keys in the
            bucket and apply the same grant to each key. CAUTION: If
            you have a lot of keys, this could take a long time!
        """
        if permission not in S3Permissions:
            raise self.connection.provider.storage_permissions_error(
                'Unknown Permission: %s' % permission)
        policy = self.get_acl(headers=headers)
        policy.acl.add_email_grant(permission, email_address)
        self.set_acl(policy, headers=headers)
        if recursive:
            for key in self:
                key.add_email_grant(permission, email_address, headers=headers)
906
    def add_user_grant(self, permission, user_id, recursive=False,
                       headers=None, display_name=None):
        """
        Convenience method that provides a quick way to add a canonical
        user grant to a bucket.  This method retrieves the current ACL,
        creates a new grant based on the parameters passed in, adds that
        grant to the ACL and then PUT's the new ACL back to S3.

        NOTE(review): this is an unguarded read-modify-write of the ACL;
        a concurrent ACL update elsewhere can be overwritten.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

        :type user_id: string
        :param user_id: The canonical user id associated with the AWS
            account your are granting the permission to.

        :type recursive: boolean
        :param recursive: A boolean value to controls whether the
            command will apply the grant to all keys within the bucket
            or not.  The default value is False.  By passing a True
            value, the call will iterate through all keys in the
            bucket and apply the same grant to each key. CAUTION: If
            you have a lot of keys, this could take a long time!

        :type display_name: string
        :param display_name: An optional string containing the user's
            Display Name.  Only required on Walrus.
        """
        if permission not in S3Permissions:
            raise self.connection.provider.storage_permissions_error(
                'Unknown Permission: %s' % permission)
        policy = self.get_acl(headers=headers)
        policy.acl.add_user_grant(permission, user_id,
                                  display_name=display_name)
        self.set_acl(policy, headers=headers)
        if recursive:
            for key in self:
                key.add_user_grant(permission, user_id, headers=headers,
                                   display_name=display_name)
946
947 def list_grants(self, headers=None):
948 policy = self.get_acl(headers=headers)
949 return policy.acl.grants
950
    def get_location(self):
        """
        Returns the LocationConstraint for the bucket.

        :rtype: str
        :return: The LocationConstraint for the bucket or the empty
            string if no constraint was specified when bucket was created.
        """
        response = self.connection.make_request('GET', self.name,
                                                query_args='location')
        body = response.read()
        if response.status == 200:
            # Parse the XML into a ResultSet and pull out the
            # LocationConstraint element it exposes as an attribute.
            rs = ResultSet(self)
            h = handler.XmlHandler(rs, self)
            xml.sax.parseString(body, h)
            return rs.LocationConstraint
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
970
971 def set_xml_logging(self, logging_str, headers=None):
972 """
973 Set logging on a bucket directly to the given xml string.
974
975 :type logging_str: unicode string
976 :param logging_str: The XML for the bucketloggingstatus which
977 will be set. The string will be converted to utf-8 before
978 it is sent. Usually, you will obtain this XML from the
979 BucketLogging object.
980
981 :rtype: bool
982 :return: True if ok or raises an exception.
983 """
984 body = logging_str.encode('utf-8')
985 response = self.connection.make_request('PUT', self.name, data=body,
986 query_args='logging', headers=headers)
987 body = response.read()
988 if response.status == 200:
989 return True
990 else:
991 raise self.connection.provider.storage_response_error(
992 response.status, response.reason, body)
993
994 def enable_logging(self, target_bucket, target_prefix='',
995 grants=None, headers=None):
996 """
997 Enable logging on a bucket.
998
999 :type target_bucket: bucket or string
1000 :param target_bucket: The bucket to log to.
1001
1002 :type target_prefix: string
1003 :param target_prefix: The prefix which should be prepended to the
1004 generated log files written to the target_bucket.
1005
1006 :type grants: list of Grant objects
1007 :param grants: A list of extra permissions which will be granted on
1008 the log files which are created.
1009
1010 :rtype: bool
1011 :return: True if ok or raises an exception.
1012 """
1013 if isinstance(target_bucket, Bucket):
1014 target_bucket = target_bucket.name
1015 blogging = BucketLogging(target=target_bucket, prefix=target_prefix,
1016 grants=grants)
1017 return self.set_xml_logging(blogging.to_xml(), headers=headers)
1018
1019 def disable_logging(self, headers=None):
1020 """
1021 Disable logging on a bucket.
1022
1023 :rtype: bool
1024 :return: True if ok or raises an exception.
1025 """
1026 blogging = BucketLogging()
1027 return self.set_xml_logging(blogging.to_xml(), headers=headers)
1028
    def get_logging_status(self, headers=None):
        """
        Get the logging status for this bucket.

        :rtype: :class:`boto.s3.bucketlogging.BucketLogging`
        :return: A BucketLogging object for this bucket.
        """
        response = self.connection.make_request('GET', self.name,
                query_args='logging', headers=headers)
        body = response.read()
        if response.status == 200:
            # Populate a BucketLogging object from the XML response.
            blogging = BucketLogging()
            h = handler.XmlHandler(blogging, self)
            xml.sax.parseString(body, h)
            return blogging
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1047
1048 def set_as_logging_target(self, headers=None):
1049 """
1050 Setup the current bucket as a logging target by granting the necessary
1051 permissions to the LogDelivery group to write log files to this bucket.
1052 """
1053 policy = self.get_acl(headers=headers)
1054 g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
1055 g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
1056 policy.acl.add_grant(g1)
1057 policy.acl.add_grant(g2)
1058 self.set_acl(policy, headers=headers)
1059
1060 def get_request_payment(self, headers=None):
1061 response = self.connection.make_request('GET', self.name,
1062 query_args='requestPayment', headers=headers)
1063 body = response.read()
1064 if response.status == 200:
1065 return body
1066 else:
1067 raise self.connection.provider.storage_response_error(
1068 response.status, response.reason, body)
1069
1070 def set_request_payment(self, payer='BucketOwner', headers=None):
1071 body = self.BucketPaymentBody % payer
1072 response = self.connection.make_request('PUT', self.name, data=body,
1073 query_args='requestPayment', headers=headers)
1074 body = response.read()
1075 if response.status == 200:
1076 return True
1077 else:
1078 raise self.connection.provider.storage_response_error(
1079 response.status, response.reason, body)
1080
    def configure_versioning(self, versioning, mfa_delete=False,
                             mfa_token=None, headers=None):
        """
        Configure versioning for this bucket.

        ..note:: This feature is currently in beta.

        :type versioning: bool
        :param versioning: A boolean indicating whether version is
            enabled (True) or disabled (False).

        :type mfa_delete: bool
        :param mfa_delete: A boolean indicating whether the
            Multi-Factor Authentication Delete feature is enabled
            (True) or disabled (False).  If mfa_delete is enabled then
            all Delete operations will require the token from your MFA
            device to be passed in the request.

        :type mfa_token: tuple or list of strings
        :param mfa_token: A tuple or list consisting of the serial
            number from the MFA device and the current value of the
            six-digit token associated with the device.  This value is
            required when you are changing the status of the MfaDelete
            property of the bucket.
        """
        if versioning:
            ver = 'Enabled'
        else:
            ver = 'Suspended'
        # Note the asymmetry: versioning toggles Enabled/Suspended while
        # MfaDelete toggles Enabled/Disabled.
        if mfa_delete:
            mfa = 'Enabled'
        else:
            mfa = 'Disabled'
        body = self.VersioningBody % (ver, mfa)
        if mfa_token:
            if not headers:
                headers = {}
            provider = self.connection.provider
            # Serial number and token are sent space-separated in the
            # provider-specific MFA header.
            headers[provider.mfa_header] = ' '.join(mfa_token)
        response = self.connection.make_request('PUT', self.name, data=body,
                query_args='versioning', headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1128
    def get_versioning_status(self, headers=None):
        """
        Returns the current status of versioning on the bucket.

        :rtype: dict
        :returns: A dictionary containing a key named 'Versioning'
            that can have a value of either Enabled, Disabled, or
            Suspended. Also, if MFADelete has ever been enabled on the
            bucket, the dictionary will contain a key named
            'MfaDelete' which will have a value of either Enabled or
            Suspended.
        """
        response = self.connection.make_request('GET', self.name,
                query_args='versioning', headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            d = {}
            # Extract the status values with the class-level regexes
            # rather than a full XML parse; each pattern captures the
            # status text in group(1).
            ver = re.search(self.VersionRE, body)
            if ver:
                d['Versioning'] = ver.group(1)
            mfa = re.search(self.MFADeleteRE, body)
            if mfa:
                d['MfaDelete'] = mfa.group(1)
            return d
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1157
1158 def configure_lifecycle(self, lifecycle_config, headers=None):
1159 """
1160 Configure lifecycle for this bucket.
1161
1162 :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle`
1163 :param lifecycle_config: The lifecycle configuration you want
1164 to configure for this bucket.
1165 """
1166 xml = lifecycle_config.to_xml()
1167 xml = xml.encode('utf-8')
1168 fp = StringIO.StringIO(xml)
1169 md5 = boto.utils.compute_md5(fp)
1170 if headers is None:
1171 headers = {}
1172 headers['Content-MD5'] = md5[1]
1173 headers['Content-Type'] = 'text/xml'
1174 response = self.connection.make_request('PUT', self.name,
1175 data=fp.getvalue(),
1176 query_args='lifecycle',
1177 headers=headers)
1178 body = response.read()
1179 if response.status == 200:
1180 return True
1181 else:
1182 raise self.connection.provider.storage_response_error(
1183 response.status, response.reason, body)
1184
    def get_lifecycle_config(self, headers=None):
        """
        Returns the current lifecycle configuration on the bucket.

        :rtype: :class:`boto.s3.lifecycle.Lifecycle`
        :returns: A LifecycleConfig object that describes all current
            lifecycle rules in effect for the bucket.
        """
        response = self.connection.make_request('GET', self.name,
                query_args='lifecycle', headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # Populate a Lifecycle object from the XML response.
            lifecycle = Lifecycle()
            h = handler.XmlHandler(lifecycle, self)
            xml.sax.parseString(body, h)
            return lifecycle
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1205
1206 def delete_lifecycle_configuration(self, headers=None):
1207 """
1208 Removes all lifecycle configuration from the bucket.
1209 """
1210 response = self.connection.make_request('DELETE', self.name,
1211 query_args='lifecycle',
1212 headers=headers)
1213 body = response.read()
1214 boto.log.debug(body)
1215 if response.status == 204:
1216 return True
1217 else:
1218 raise self.connection.provider.storage_response_error(
1219 response.status, response.reason, body)
1220
    def configure_website(self, suffix=None, error_key=None,
                          redirect_all_requests_to=None,
                          routing_rules=None,
                          headers=None):
        """
        Configure this bucket to act as a website

        :type suffix: str
        :param suffix: Suffix that is appended to a request that is for a
            "directory" on the website endpoint (e.g. if the suffix is
            index.html and you make a request to samplebucket/images/
            the data that is returned will be for the object with the
            key name images/index.html).  The suffix must not be empty
            and must not include a slash character.

        :type error_key: str
        :param error_key: The object key name to use when a 4XX class
            error occurs.  This is optional.

        :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation`
        :param redirect_all_requests_to: Describes the redirect behavior for
            every request to this bucket's website endpoint. If this value is
            non None, no other values are considered when configuring the
            website configuration for the bucket. This is an instance of
            ``RedirectLocation``.

        :type routing_rules: :class:`boto.s3.website.RoutingRules`
        :param routing_rules: Object which specifies conditions
            and redirects that apply when the conditions are met.

        """
        # Build the WebsiteConfiguration document and PUT it to the
        # bucket's ``website`` subresource.
        config = website.WebsiteConfiguration(
                suffix, error_key, redirect_all_requests_to,
                routing_rules)
        body = config.to_xml()
        response = self.connection.make_request('PUT', self.name, data=body,
                                                query_args='website',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1265
1266 def get_website_configuration(self, headers=None):
1267 """
1268 Returns the current status of website configuration on the bucket.
1269
1270 :rtype: dict
1271 :returns: A dictionary containing a Python representation
1272 of the XML response from S3. The overall structure is:
1273
1274 * WebsiteConfiguration
1275
1276 * IndexDocument
1277
1278 * Suffix : suffix that is appended to request that
1279 is for a "directory" on the website endpoint
1280 * ErrorDocument
1281
1282 * Key : name of object to serve when an error occurs
1283 """
1284 return self.get_website_configuration_with_xml(headers)[0]
1285
    def get_website_configuration_with_xml(self, headers=None):
        """
        Returns the current status of website configuration on the bucket as
        unparsed XML.

        :rtype: 2-Tuple
        :returns: 2-tuple containing:
            1) A dictionary containing a Python representation
               of the XML response. The overall structure is:
               * WebsiteConfiguration
                 * IndexDocument
                   * Suffix : suffix that is appended to request that
                     is for a "directory" on the website endpoint
                 * ErrorDocument
                   * Key : name of object to serve when an error occurs
            2) unparsed XML describing the bucket's website configuration.
        """
        response = self.connection.make_request('GET', self.name,
                query_args='website', headers=headers)
        body = response.read()
        boto.log.debug(body)

        if response.status != 200:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

        # Build the dict-like Element representation while also handing
        # back the raw XML document.
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e, body
1316
1317 def delete_website_configuration(self, headers=None):
1318 """
1319 Removes all website configuration from the bucket.
1320 """
1321 response = self.connection.make_request('DELETE', self.name,
1322 query_args='website', headers=headers)
1323 body = response.read()
1324 boto.log.debug(body)
1325 if response.status == 204:
1326 return True
1327 else:
1328 raise self.connection.provider.storage_response_error(
1329 response.status, response.reason, body)
1330
1331 def get_website_endpoint(self):
1332 """
1333 Returns the fully qualified hostname to use is you want to access this
1334 bucket as a website. This doesn't validate whether the bucket has
1335 been correctly configured as a website or not.
1336 """
1337 l = [self.name]
1338 l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location() ))
1339 l.append('.'.join(self.connection.host.split('.')[-2:]))
1340 return '.'.join(l)
1341
1342 def get_policy(self, headers=None):
1343 """
1344 Returns the JSON policy associated with the bucket. The policy
1345 is returned as an uninterpreted JSON string.
1346 """
1347 response = self.connection.make_request('GET', self.name,
1348 query_args='policy', headers=headers)
1349 body = response.read()
1350 if response.status == 200:
1351 return body
1352 else:
1353 raise self.connection.provider.storage_response_error(
1354 response.status, response.reason, body)
1355
1356 def set_policy(self, policy, headers=None):
1357 """
1358 Add or replace the JSON policy associated with the bucket.
1359
1360 :type policy: str
1361 :param policy: The JSON policy as a string.
1362 """
1363 response = self.connection.make_request('PUT', self.name,
1364 data=policy,
1365 query_args='policy',
1366 headers=headers)
1367 body = response.read()
1368 if response.status >= 200 and response.status <= 204:
1369 return True
1370 else:
1371 raise self.connection.provider.storage_response_error(
1372 response.status, response.reason, body)
1373
1374 def delete_policy(self, headers=None):
1375 response = self.connection.make_request('DELETE', self.name,
1376 data='/?policy',
1377 query_args='policy',
1378 headers=headers)
1379 body = response.read()
1380 if response.status >= 200 and response.status <= 204:
1381 return True
1382 else:
1383 raise self.connection.provider.storage_response_error(
1384 response.status, response.reason, body)
1385
    def set_cors_xml(self, cors_xml, headers=None):
        """
        Set the CORS (Cross-Origin Resource Sharing) for a bucket.

        :type cors_xml: str
        :param cors_xml: The XML document describing your desired
            CORS configuration.  See the S3 documentation for details
            of the exact syntax required.
        """
        fp = StringIO.StringIO(cors_xml)
        # md5[1] is presumably the base64-encoded digest expected in the
        # Content-MD5 header -- verify against boto.utils.compute_md5.
        md5 = boto.utils.compute_md5(fp)
        if headers is None:
            headers = {}
        headers['Content-MD5'] = md5[1]
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('PUT', self.name,
                                                data=fp.getvalue(),
                                                query_args='cors',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            return True
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1411
1412 def set_cors(self, cors_config, headers=None):
1413 """
1414 Set the CORS for this bucket given a boto CORSConfiguration
1415 object.
1416
1417 :type cors_config: :class:`boto.s3.cors.CORSConfiguration`
1418 :param cors_config: The CORS configuration you want
1419 to configure for this bucket.
1420 """
1421 return self.set_cors_xml(cors_config.to_xml())
1422
1423 def get_cors_xml(self, headers=None):
1424 """
1425 Returns the current CORS configuration on the bucket as an
1426 XML document.
1427 """
1428 response = self.connection.make_request('GET', self.name,
1429 query_args='cors', headers=headers)
1430 body = response.read()
1431 boto.log.debug(body)
1432 if response.status == 200:
1433 return body
1434 else:
1435 raise self.connection.provider.storage_response_error(
1436 response.status, response.reason, body)
1437
1438 def get_cors(self, headers=None):
1439 """
1440 Returns the current CORS configuration on the bucket.
1441
1442 :rtype: :class:`boto.s3.cors.CORSConfiguration`
1443 :returns: A CORSConfiguration object that describes all current
1444 CORS rules in effect for the bucket.
1445 """
1446 body = self.get_cors_xml(headers)
1447 cors = CORSConfiguration()
1448 h = handler.XmlHandler(cors, self)
1449 xml.sax.parseString(body, h)
1450 return cors
1451
1452 def delete_cors(self, headers=None):
1453 """
1454 Removes all CORS configuration from the bucket.
1455 """
1456 response = self.connection.make_request('DELETE', self.name,
1457 query_args='cors',
1458 headers=headers)
1459 body = response.read()
1460 boto.log.debug(body)
1461 if response.status == 204:
1462 return True
1463 else:
1464 raise self.connection.provider.storage_response_error(
1465 response.status, response.reason, body)
1466
    def initiate_multipart_upload(self, key_name, headers=None,
                                  reduced_redundancy=False,
                                  metadata=None, encrypt_key=False,
                                  policy=None):
        """
        Start a multipart upload operation.

        :type key_name: string
        :param key_name: The name of the key that will ultimately
            result from this multipart upload operation.  This will be
            exactly as the key appears in the bucket after the upload
            process has been completed.

        :type headers: dict
        :param headers: Additional HTTP headers to send and store with the
            resulting key in S3.

        :type reduced_redundancy: boolean
        :param reduced_redundancy: In multipart uploads, the storage
            class is specified when initiating the upload, not when
            uploading individual parts.  So if you want the resulting
            key to use the reduced redundancy storage class set this
            flag when you initiate the upload.

        :type metadata: dict
        :param metadata: Any metadata that you would like to set on the key
            that results from the multipart upload.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key (once completed) in S3.

        :rtype: :class:`boto.s3.multipart.MultiPartUpload`
        :returns: The initiated upload, parsed from the service response.
        """
        query_args = 'uploads'
        provider = self.connection.provider
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            storage_class_header = provider.storage_class_header
            if storage_class_header:
                headers[storage_class_header] = 'REDUCED_REDUNDANCY'
            # TODO: what if the provider doesn't support reduced redundancy?
            # (see boto.s3.key.Key.set_contents_from_file)
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if metadata is None:
            metadata = {}

        # Fold user metadata into the request headers using the
        # provider-specific metadata prefixes.
        headers = boto.utils.merge_meta(headers, metadata,
                                        self.connection.provider)
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            resp = MultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            xml.sax.parseString(body, h)
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1535
    def complete_multipart_upload(self, key_name, upload_id,
                                  xml_body, headers=None):
        """
        Complete a multipart upload operation.

        :type key_name: string
        :param key_name: The key the upload was initiated for.

        :type upload_id: string
        :param upload_id: The id of the in-progress multipart upload.

        :type xml_body: string
        :param xml_body: The XML document listing the uploaded parts.
        """
        query_args = 'uploadId=%s' % upload_id
        if headers is None:
            headers = {}
        headers['Content-Type'] = 'text/xml'
        response = self.connection.make_request('POST', self.name, key_name,
                                                query_args=query_args,
                                                headers=headers, data=xml_body)
        contains_error = False
        body = response.read()
        # Some errors will be reported in the body of the response
        # even though the HTTP response code is 200. This check
        # does a quick and dirty peek in the body for an error element.
        # NOTE(review): find(...) > 0 would miss an <Error> element at
        # index 0; presumably a declaration/whitespace always precedes
        # it -- confirm before tightening.
        if body.find('<Error>') > 0:
            contains_error = True
        boto.log.debug(body)
        if response.status == 200 and not contains_error:
            resp = CompleteMultiPartUpload(self)
            h = handler.XmlHandler(resp, self)
            xml.sax.parseString(body, h)
            # Use a dummy key to parse various response headers
            # for versioning, encryption info and then explicitly
            # set the completed MPU object values from key.
            k = self.key_class(self)
            k.handle_version_headers(response)
            k.handle_encryption_headers(response)
            resp.version_id = k.version_id
            resp.encrypted = k.encrypted
            return resp
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
1572
1573 def cancel_multipart_upload(self, key_name, upload_id, headers=None):
1574 query_args = 'uploadId=%s' % upload_id
1575 response = self.connection.make_request('DELETE', self.name, key_name,
1576 query_args=query_args,
1577 headers=headers)
1578 body = response.read()
1579 boto.log.debug(body)
1580 if response.status != 204:
1581 raise self.connection.provider.storage_response_error(
1582 response.status, response.reason, body)
1583
    def delete(self, headers=None):
        # Delegate deletion of this bucket to the connection object.
        return self.connection.delete_bucket(self.name, headers=headers)
1586
1587 def get_tags(self):
1588 response = self.get_xml_tags()
1589 tags = Tags()
1590 h = handler.XmlHandler(tags, self)
1591 xml.sax.parseString(response, h)
1592 return tags
1593
1594 def get_xml_tags(self):
1595 response = self.connection.make_request('GET', self.name,
1596 query_args='tagging',
1597 headers=None)
1598 body = response.read()
1599 if response.status == 200:
1600 return body
1601 else:
1602 raise self.connection.provider.storage_response_error(
1603 response.status, response.reason, body)
1604
1605 def set_xml_tags(self, tag_str, headers=None, query_args='tagging'):
1606 if headers is None:
1607 headers = {}
1608 md5 = boto.utils.compute_md5(StringIO.StringIO(tag_str))
1609 headers['Content-MD5'] = md5[1]
1610 headers['Content-Type'] = 'text/xml'
1611 response = self.connection.make_request('PUT', self.name,
1612 data=tag_str.encode('utf-8'),
1613 query_args=query_args,
1614 headers=headers)
1615 body = response.read()
1616 if response.status != 204:
1617 raise self.connection.provider.storage_response_error(
1618 response.status, response.reason, body)
1619 return True
1620
    def set_tags(self, tags, headers=None):
        # Serialize the Tags object and delegate to set_xml_tags.
        return self.set_xml_tags(tags.to_xml(), headers=headers)
1623
1624 def delete_tags(self, headers=None):
1625 response = self.connection.make_request('DELETE', self.name,
1626 query_args='tagging',
1627 headers=headers)
1628 body = response.read()
1629 boto.log.debug(body)
1630 if response.status == 204:
1631 return True
1632 else:
1633 raise self.connection.provider.storage_response_error(
1634 response.status, response.reason, body)
OLDNEW
« no previous file with comments | « third_party/boto/s3/acl.py ('k') | third_party/boto/s3/bucketlistresultset.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698