Initialize connection to S3 (this class includes Fog::InternetArchive::CredentialFetcher::ConnectionMethods)
options parameter must include values for :ia_access_key_id and :ia_secret_access_key in order to create a connection
s3 = Fog::Storage.new( :provider => "InternetArchive", :ia_access_key_id => your_ia_access_key_id, :ia_secret_access_key => your_ia_secret_access_key )
options<~Hash> - config arguments for connection. Defaults to {}.
S3 object with connection to aws.
# File lib/fog/internet_archive/storage.rb, line 249
#
# Initialize a connection to the Internet Archive's S3-compatible API.
#
# ==== Parameters
# * options<~Hash> - config arguments for connection. Defaults to {}.
#   Must carry :ia_access_key_id and :ia_secret_access_key (consumed by
#   setup_credentials). Honors either :endpoint, or the individual
#   :host/:path/:port/:scheme settings, plus :persistent and
#   :connection_options.
def initialize(options={})
  # Lazy-load heavyweight dependencies only when a connection is created.
  require 'fog/core/parser'
  require 'mime/types'

  setup_credentials(options)
  @connection_options = options[:connection_options] || {}
  # Fix: honor :persistent in both branches (previously it was only read
  # when no :endpoint was supplied, leaving @persistent nil otherwise).
  @persistent = options.fetch(:persistent, false)

  if @endpoint = options[:endpoint]
    endpoint = URI.parse(@endpoint)
    @host   = endpoint.host
    @path   = endpoint.path.empty? ? '/' : endpoint.path
    @port   = endpoint.port
    @scheme = endpoint.scheme
  else
    options[:region] ||= 'us-east-1'
    @region = options[:region]
    @host   = options[:host] || Fog::InternetArchive::API_DOMAIN_NAME
    @path   = options[:path] || '/'
    @port   = options[:port] || 80
    @scheme = options[:scheme] || 'http'
  end

  @connection = Fog::Connection.new("#{@scheme}://#{@host}:#{@port}#{@path}", @persistent, @connection_options)
end
Abort a multipart upload
@param [String] bucket_name Name of bucket to abort multipart upload on @param [String] object_name Name of object to abort multipart upload on @param [String] upload_id Id of upload to add part to
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadAbort.html
# File lib/fog/internet_archive/requests/storage/abort_multipart_upload.rb, line 14
#
# Abort a multipart upload.
#
# @param [String] bucket_name bucket the upload lives in
# @param [String] object_name object the upload targets
# @param [String] upload_id id of the upload to abort
def abort_multipart_upload(bucket_name, object_name, upload_id)
  params = {
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE',
    :path    => CGI.escape(object_name),
    :query   => {'uploadId' => upload_id}
  }
  request(params)
end
Complete a multipart upload
@param [String] bucket_name Name of bucket to complete multipart upload for @param [String] object_name Name of object to complete multipart upload for @param [String] upload_id Id of upload to add part to @param [Array<String>] parts Array of etags as Strings for parts
@return [Excon::Response]
* headers [Hash]: * Bucket [String] - bucket of new object * ETag [String] - etag of new object (will be needed to complete upload) * Key [String] - key of new object * Location [String] - location of new object
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html
# File lib/fog/internet_archive/requests/storage/complete_multipart_upload.rb, line 24
#
# Complete a multipart upload.
#
# @param [String] bucket_name Name of bucket to complete multipart upload for
# @param [String] object_name Name of object to complete multipart upload for
# @param [String] upload_id Id of the upload being completed
# @param [Array<String>] parts Array of part etags, in part order
# @return [Excon::Response]
def complete_multipart_upload(bucket_name, object_name, upload_id, parts)
  # Build the CompleteMultipartUpload XML payload; part numbers are 1-based.
  data = "<CompleteMultipartUpload>"
  parts.each_with_index do |part, index|
    data << "<Part>"
    data << "<PartNumber>#{index + 1}</PartNumber>"
    data << "<ETag>#{part}</ETag>"
    data << "</Part>"
  end
  data << "</CompleteMultipartUpload>"
  request({
    :body    => data,
    :expects => 200,
    # Fix: Content-Length must be a byte count, not a character count.
    :headers => { 'Content-Length' => data.bytesize },
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'POST',
    :parser  => Fog::Parsers::Storage::InternetArchive::CompleteMultipartUpload.new,
    :path    => CGI.escape(object_name),
    :query   => {'uploadId' => upload_id}
  })
end
Copy an object from one S3 bucket to another
@param source_bucket_name [String] Name of source bucket @param source_object_name [String] Name of source object @param target_bucket_name [String] Name of bucket to create copy in @param target_object_name [String] Name for new copy of object
@param options [Hash]: @option options [String] x-amz-metadata-directive Specifies whether to copy metadata from source or replace with data in request. Must be in ['COPY', 'REPLACE'] @option options [String] x-amz-copy-source-if-match Copies object if its etag matches this value @option options [Time] x-amz-copy-source-if-modified-since Copies object if it has been modified since this time @option options [String] x-amz-copy-source-if-none-match Copies object if its etag does not match this value @option options [Time] x-amz-copy-source-if-unmodified-since Copies object if it has not been modified since this time @option options [String] x-amz-storage-class Default is 'STANDARD', set to 'REDUCED_REDUNDANCY' for non-critical, reproducible data
@return [Excon::Response]
* body [Hash]: * ETag [String] - etag of new object * LastModified [Time] - date object was last modified
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectCOPY.html
# File lib/fog/internet_archive/requests/storage/copy_object.rb, line 31
#
# Copy an object from one S3 bucket to another.
#
# @param source_bucket_name [String] Name of source bucket
# @param source_object_name [String] Name of source object
# @param target_bucket_name [String] Name of bucket to create copy in
# @param target_object_name [String] Name for new copy of object
# @param options [Hash] extra headers (metadata directive, conditionals, storage class)
# @return [Excon::Response]
def copy_object(source_bucket_name, source_object_name, target_bucket_name, target_object_name, options = {})
  # The copy source header identifies the object to duplicate; any caller
  # supplied options ride along as additional request headers.
  source = "/#{source_bucket_name}/#{CGI.escape(source_object_name)}"
  headers = { 'x-amz-copy-source' => source }.merge!(options)
  request({
    :expects => 200,
    :headers => headers,
    :host    => "#{target_bucket_name}.#{@host}",
    :method  => 'PUT',
    :parser  => Fog::Parsers::Storage::InternetArchive::CopyObject.new,
    :path    => CGI.escape(target_object_name)
  })
end
Delete an S3 bucket
@param bucket_name [String] name of bucket to delete
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETE.html
# File lib/fog/internet_archive/requests/storage/delete_bucket.rb, line 15
#
# Delete an S3 bucket.
#
# @param bucket_name [String] name of bucket to delete
# @return [Excon::Response] response with status 204
def delete_bucket(bucket_name)
  params = {
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE'
  }
  request(params)
end
Deletes the cors configuration information set for the bucket.
@param bucket_name [String] name of bucket to delete cors rules from
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEcors.html
# File lib/fog/internet_archive/requests/storage/delete_bucket_cors.rb, line 15
#
# Delete the CORS configuration set on a bucket.
#
# @param bucket_name [String] name of bucket to delete cors rules from
# @return [Excon::Response] response with status 204
def delete_bucket_cors(bucket_name)
  request({
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE',
    # 'cors' subresource selects the CORS document for deletion.
    :query   => {'cors' => nil}
  })
end
Delete lifecycle configuration for a bucket
@param bucket_name [String] name of bucket to delete lifecycle configuration from
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html
# File lib/fog/internet_archive/requests/storage/delete_bucket_lifecycle.rb, line 15
#
# Delete the lifecycle configuration of a bucket.
#
# @param bucket_name [String] name of bucket to delete lifecycle configuration from
# @return [Excon::Response] response with status 204
def delete_bucket_lifecycle(bucket_name)
  params = {
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE',
    # 'lifecycle' subresource targets the lifecycle document.
    :query   => {'lifecycle' => nil}
  }
  request(params)
end
Delete policy for a bucket
@param bucket_name [String] name of bucket to delete policy from
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html
# File lib/fog/internet_archive/requests/storage/delete_bucket_policy.rb, line 15
#
# Delete the policy attached to a bucket.
#
# @param bucket_name [String] name of bucket to delete policy from
# @return [Excon::Response] response with status 204
def delete_bucket_policy(bucket_name)
  request({
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE',
    # 'policy' subresource targets the bucket policy document.
    :query   => {'policy' => nil}
  })
end
Delete website configuration for a bucket
@param bucket_name [String] name of bucket to delete website configuration from
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html
# File lib/fog/internet_archive/requests/storage/delete_bucket_website.rb, line 15
#
# Delete the website configuration of a bucket.
#
# @param bucket_name [String] name of bucket to delete website configuration from
# @return [Excon::Response] response with status 204
def delete_bucket_website(bucket_name)
  params = {
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'DELETE',
    # 'website' subresource targets the static-website document.
    :query   => {'website' => nil}
  }
  request(params)
end
Delete multiple objects from S3 @note For versioned deletes, options should include a version_ids hash, which
maps from filename to an array of versions. The semantics are that for each (object_name, version) tuple, the caller must insert the object_name and an associated version (if desired), so for n versions, the object must be inserted n times.
@param bucket_name [String] Name of bucket containing object to delete @param object_names [Array] Array of object names to delete
@return [Excon::Response] response:
* body [Hash]: * DeleteResult [Array]: * Deleted [Hash]: * Key [String] - Name of the object that was deleted * VersionId [String] - ID for the versioned onject in case of a versioned delete * DeleteMarker [Boolean] - Indicates if the request accessed a delete marker * DeleteMarkerVersionId [String] - Version ID of the delete marker accessed * Error [Hash]: * Key [String] - Name of the object that failed to be deleted * VersionId [String] - ID of the versioned object that was attempted to be deleted * Code [String] - Status code for the result of the failed delete * Message [String] - Error description
@see docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html
# File lib/fog/internet_archive/requests/storage/delete_multiple_objects.rb, line 34
#
# Delete multiple objects from S3 in a single request.
#
# @param bucket_name [String] Name of bucket containing objects to delete
# @param object_names [Array] Array of object names to delete
# @param options [Hash] :quiet suppresses per-key results; remaining entries
#   are sent as request headers
# @return [Excon::Response] parsed DeleteResult
def delete_multiple_objects(bucket_name, object_names, options = {})
  # Fix: operate on a copy so the caller's options hash is not mutated by
  # the :quiet delete or the header writes below.
  headers = options.dup
  data = "<Delete>"
  data << "<Quiet>true</Quiet>" if headers.delete(:quiet)
  object_names.each do |object_name|
    data << "<Object>"
    # Keys are embedded in XML, so escape XML-significant characters.
    data << "<Key>#{CGI.escapeHTML(object_name)}</Key>"
    data << "</Object>"
  end
  data << "</Delete>"
  # Fix: Content-Length must be a byte count, not a character count.
  headers['Content-Length'] = data.bytesize
  # Multi-object delete requires a Content-MD5 integrity header.
  headers['Content-MD5'] = Base64.encode64(Digest::MD5.digest(data)).gsub("\n", '')
  request({
    :body    => data,
    :expects => 200,
    :headers => headers,
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'POST',
    :parser  => Fog::Parsers::Storage::InternetArchive::DeleteMultipleObjects.new,
    :query   => {'delete' => nil}
  })
end
Delete an object from S3
@param bucket_name [String] Name of bucket containing object to delete @param object_name [String] Name of object to delete
@return [Excon::Response] response:
* status [Integer] - 204
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectDELETE.html
# File lib/fog/internet_archive/requests/storage/delete_object.rb, line 16
#
# Delete an object from S3.
#
# @param bucket_name [String] Name of bucket containing object to delete
# @param object_name [String] Name of object to delete
# @param options [Hash] extra request headers
# @return [Excon::Response] response with status 204
def delete_object(bucket_name, object_name, options = {})
  request({
    :expects    => 204,
    :headers    => options,
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'DELETE',
    :path       => CGI.escape(object_name)
  })
end
List information about objects in an S3 bucket
@param bucket_name [String] name of bucket to list object keys from @param options [Hash] config arguments for list. Defaults to {}. @option options delimiter [String] causes keys with the same string between the prefix
value and the first occurrence of delimiter to be rolled up
@option options marker [String] limits object keys to only those that appear
lexicographically after its value.
@option options max-keys [Integer] limits number of object keys returned @option options prefix [String] limits object keys to those beginning with its value.
@return [Excon::Response] response:
* body [Hash]: * Delimeter [String] - Delimiter specified for query * IsTruncated [Boolean] - Whether or not the listing is truncated * Marker [String]- Marker specified for query * MaxKeys [Integer] - Maximum number of keys specified for query * Name [String] - Name of the bucket * Prefix [String] - Prefix specified for query * CommonPrefixes [Array] - Array of strings for common prefixes * Contents [Array]: * ETag [String] - Etag of object * Key [String] - Name of object * LastModified [String] - Timestamp of last modification of object * Owner [Hash]: * DisplayName [String] - Display name of object owner * ID [String] - Id of object owner * Size [Integer] - Size of object * StorageClass [String] - Storage class of object
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
# File lib/fog/internet_archive/requests/storage/get_bucket.rb, line 40
#
# List information about objects in an S3 bucket.
#
# @param bucket_name [String] name of bucket to list object keys from
# @param options [Hash] listing options (delimiter, marker, max-keys, prefix),
#   passed straight through as query parameters
# @return [Excon::Response] parsed bucket listing
def get_bucket(bucket_name, options = {})
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetBucket.new,
    :query      => options
  })
end
Get access control list for an S3 bucket
@param bucket_name [String] name of bucket to get access control list for
@return [Excon::Response] response:
* body [Hash]: * AccessControlPolicy [Hash]: * Owner [Hash]: * DisplayName [String] - Display name of object owner * ID [String] - Id of object owner * AccessControlList [Array]: * Grant [Hash]: * Grantee [Hash]: * DisplayName [String] - Display name of grantee * ID [String] - Id of grantee or * URI [String] - URI of group to grant access for * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html
# File lib/fog/internet_archive/requests/storage/get_bucket_acl.rb, line 29
#
# Get the access control list for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get access control list for
# @return [Excon::Response] parsed AccessControlPolicy
def get_bucket_acl(bucket_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  params = {
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::AccessControlList.new,
    # 'acl' subresource selects the ACL document.
    :query      => {'acl' => nil}
  }
  request(params)
end
Gets the CORS configuration for an S3 bucket
@param bucket_name [String] name of bucket to get access control list for
@return [Excon::Response] response:
* body [Hash]: * CORSConfiguration [Array]: * CORSRule [Hash]: * AllowedHeader [String] - Which headers are allowed in a pre-flight OPTIONS request through the Access-Control-Request-Headers header. * AllowedMethod [String] - Identifies an HTTP method that the domain/origin specified in the rule is allowed to execute. * AllowedOrigin [String] - One or more response headers that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object). * ExposeHeader [String] - One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript XMLHttpRequest object). * ID [String] - An optional unique identifier for the rule. The ID value can be up to 255 characters long. The IDs help you find a rule in the configuration. * MaxAgeSeconds [Integer] - The time in seconds that your browser is to cache the preflight response for the specified resource.
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETcors.html
# File lib/fog/internet_archive/requests/storage/get_bucket_cors.rb, line 25
#
# Get the CORS configuration for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get the CORS configuration for
# @return [Excon::Response] parsed CORSConfiguration
def get_bucket_cors(bucket_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::CorsConfiguration.new,
    # 'cors' subresource selects the CORS document.
    :query      => {'cors' => nil}
  })
end
Get bucket lifecycle configuration
@param bucket_name [String] name of bucket to get lifecycle configuration for
@return [Excon::Response] response:
* body [Hash]: * Rules - object expire rules [Array]: * ID [String] - Unique identifier for the rule * Prefix [String] - Prefix identifying one or more objects to which the rule applies * Enabled [Boolean] - if rule is currently being applied * Days [Integer] - lifetime, in days, of the objects that are subject to the rule
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html
# File lib/fog/internet_archive/requests/storage/get_bucket_lifecycle.rb, line 22
#
# Get the lifecycle configuration of a bucket.
#
# @param bucket_name [String] name of bucket to get lifecycle configuration for
# @return [Excon::Response] parsed lifecycle rules
def get_bucket_lifecycle(bucket_name)
  params = {
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetBucketLifecycle.new,
    # 'lifecycle' subresource selects the lifecycle document.
    :query      => {'lifecycle' => nil}
  }
  request(params)
end
Get location constraint for an S3 bucket
@param bucket_name [String] name of bucket to get location constraint for
@return [Excon::Response] response:
* body [Hash]: * LocationConstraint [String] - Location constraint of the bucket
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlocation.html
# File lib/fog/internet_archive/requests/storage/get_bucket_location.rb, line 18
#
# Get the location constraint for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get location constraint for
# @return [Excon::Response] parsed LocationConstraint
def get_bucket_location(bucket_name)
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetBucketLocation.new,
    # 'location' subresource selects the location document.
    :query      => {'location' => nil}
  })
end
Get logging status for an S3 bucket
@param bucket_name [String] name of bucket to get logging status for
@return [Excon::Response] response:
* body [Hash]: * BucketLoggingStatus (will be empty if logging is disabled) [Hash]: * LoggingEnabled [Hash]: * TargetBucket [String] - bucket where logs are stored * TargetPrefix [String] - prefix logs are stored with * TargetGrants [Array]: * Grant [Hash]: * Grantee [Hash]: * DisplayName [String] - Display name of grantee * ID [String] - Id of grantee or * URI [String] - URI of group to grant access for * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETlogging.html
# File lib/fog/internet_archive/requests/storage/get_bucket_logging.rb, line 29
#
# Get the logging status for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get logging status for
# @return [Excon::Response] parsed BucketLoggingStatus (empty when disabled)
def get_bucket_logging(bucket_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  params = {
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetBucketLogging.new,
    # 'logging' subresource selects the logging document.
    :query      => {'logging' => nil}
  }
  request(params)
end
Get bucket policy for an S3 bucket
@param bucket_name [String] name of bucket to get policy for
@return [Excon::Response] response:
* body [Hash] - policy document
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETpolicy.html
# File lib/fog/internet_archive/requests/storage/get_bucket_policy.rb, line 15
#
# Get the bucket policy for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get policy for
# @return [Excon::Response] response whose body is the decoded policy document
def get_bucket_policy(bucket_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  response = request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :query      => {'policy' => nil}
  })
  # Decode the JSON policy in place.
  response.body = Fog::JSON.decode(response.body) unless response.body.nil?
  # Fix: return the Excon::Response as documented (previously the method
  # returned the decoded body, or nil when the body was nil).
  response
end
Get website configuration for an S3 bucket
@param bucket_name [String] name of bucket to get website configuration for
@return [Excon::Response] response:
* body [Hash]: * IndexDocument [Hash]: * Suffix [String] - Suffix appended when directory is requested * ErrorDocument [Hash]: * Key [String] - Object key to return for 4XX class errors
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETwebsite.html
# File lib/fog/internet_archive/requests/storage/get_bucket_website.rb, line 22
#
# Get the website configuration of an S3 bucket.
#
# @param bucket_name [String] name of bucket to get website configuration for
# @return [Excon::Response] parsed IndexDocument/ErrorDocument settings
def get_bucket_website(bucket_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetBucketWebsite.new,
    # 'website' subresource selects the website document.
    :query      => {'website' => nil}
  })
end
Get an object from S3
@param bucket_name [String] Name of bucket to read from @param object_name [String] Name of object to read @param options [Hash] @option options If-Match [String] Returns object only if its etag matches this value, otherwise returns 412 (Precondition Failed). @option options If-Modified-Since [Time] Returns object only if it has been modified since this time, otherwise returns 304 (Not Modified). @option options If-None-Match [String] Returns object only if its etag differs from this value, otherwise returns 304 (Not Modified) @option options If-Unmodified-Since [Time] Returns object only if it has not been modified since this time, otherwise returns 412 (Precondition Failed). @option options Range [String] Range of object to download
@return [Excon::Response] response:
* body [String]- Contents of object * headers [Hash]: * Content-Length [String] - Size of object contents * Content-Type [String] - MIME type of object * ETag [String] - Etag of object * Last-Modified [String] - Last modified timestamp for object
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
# File lib/fog/internet_archive/requests/storage/get_object.rb, line 27
#
# Get an object from S3, optionally streaming it through a block.
#
# @param bucket_name [String] Name of bucket to read from
# @param object_name [String] Name of object to read
# @param options [Hash] conditional headers (If-Match, If-Modified-Since,
#   If-None-Match, If-Unmodified-Since, Range)
# @yield [chunk] optional streaming block forwarded to the connection
# @return [Excon::Response]
def get_object(bucket_name, object_name, options = {}, &block)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  raise ArgumentError.new('object_name is required') unless object_name

  params = { :headers => {} }
  params[:headers].merge!(options)
  # Convert Time-like conditional values into proper HTTP date headers,
  # overriding the raw values merged above.
  if options['If-Modified-Since']
    params[:headers]['If-Modified-Since'] = Fog::Time.at(options['If-Modified-Since'].to_i).to_date_header
  end
  if options['If-Unmodified-Since']
    params[:headers]['If-Unmodified-Since'] = Fog::Time.at(options['If-Unmodified-Since'].to_i).to_date_header
  end
  # Fix: Proc.new without an explicit block is deprecated (and an error on
  # modern Rubies); the block is already captured as &block, so use it.
  params[:response_block] = block if block

  request(params.merge!({
    :expects    => [200, 206],
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :path       => CGI.escape(object_name),
  }))
end
Get access control list for an S3 object
@param bucket_name [String] name of bucket containing object @param object_name [String] name of object to get access control list for @param options [Hash]
@return [Excon::Response] response:
* body [Hash]: * [AccessControlPolicy [Hash]: * Owner [Hash]: * DisplayName [String] - Display name of object owner * ID [String] - Id of object owner * AccessControlList [Array]: * Grant [Hash]: * Grantee [Hash]: * DisplayName [String] - Display name of grantee * ID [String] - Id of grantee or * URI [String] - URI of group to grant access for * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html
# File lib/fog/internet_archive/requests/storage/get_object_acl.rb, line 31
#
# Get the access control list for an S3 object.
#
# @param bucket_name [String] name of bucket containing object
# @param object_name [String] name of object to get access control list for
# @param options [Hash] currently unused by this request
# @return [Excon::Response] parsed AccessControlPolicy
def get_object_acl(bucket_name, object_name, options = {})
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  raise ArgumentError.new('object_name is required') unless object_name

  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::AccessControlList.new,
    :path       => CGI.escape(object_name),
    # 'acl' subresource selects the ACL document.
    :query      => {'acl' => nil}
  })
end
Get torrent for an S3 object
@param bucket_name [String] name of bucket containing object @param object_name [String] name of object to get torrent for
@return [Excon::Response] response:
* body [Hash]: * AccessControlPolicy [Hash: * Owner [Hash]: * DisplayName [String] - Display name of object owner * ID [String] - Id of object owner * AccessControlList [Array]: * Grant [Hash]: * Grantee [Hash]: * DisplayName [String] - Display name of grantee * ID [String] - Id of grantee * Permission [String] - Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETtorrent.html
# File lib/fog/internet_archive/requests/storage/get_object_torrent.rb, line 26
#
# Get a torrent for an S3 object.
#
# @param bucket_name [String] name of bucket containing object
# @param object_name [String] name of object to get torrent for
# @return [Excon::Response]
def get_object_torrent(bucket_name, object_name)
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  raise ArgumentError.new('object_name is required') unless object_name

  params = {
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :path       => CGI.escape(object_name),
    # 'torrent' subresource requests the torrent representation.
    :query      => {'torrent' => nil}
  }
  request(params)
end
Get configured payer for an S3 bucket
@param bucket_name [String] name of bucket to get payer for
@return [Excon::Response] response:
* body [Hash]: * Payer [String] - Specifies who pays for download and requests
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentGET.html
# File lib/fog/internet_archive/requests/storage/get_request_payment.rb, line 18
#
# Get the configured payer for an S3 bucket.
#
# @param bucket_name [String] name of bucket to get payer for
# @return [Excon::Response] parsed Payer setting
def get_request_payment(bucket_name)
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetRequestPayment.new,
    # 'requestPayment' subresource selects the payer document.
    :query      => {'requestPayment' => nil}
  })
end
List information about S3 buckets for authorized user
@return [Excon::Response] response:
* body [Hash]: * Buckets [Hash]: * Name [String] - Name of bucket * CreationTime [Time] - Timestamp of bucket creation * Owner [Hash]: * DisplayName [String] - Display name of bucket owner * ID [String] - Id of bucket owner
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTServiceGET.html
# File lib/fog/internet_archive/requests/storage/get_service.rb, line 21
#
# List the S3 buckets owned by the authorized user.
#
# @return [Excon::Response] parsed bucket list with owner information
def get_service
  params = {
    :expects    => 200,
    :headers    => {},
    # Service-level listing goes to the bare endpoint, no bucket subdomain.
    :host       => @host,
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::GetService.new
  }
  request(params)
end
Get headers for an object from S3
@param bucket_name [String] Name of bucket to read from @param object_name [String] Name of object to read @param options [Hash]: @option options [String] If-Match Returns object only if its etag matches this value, otherwise returns 412 (Precondition Failed). @option options [Time] If-Modified-Since Returns object only if it has been modified since this time, otherwise returns 304 (Not Modified). @option options [String] If-None-Match Returns object only if its etag differs from this value, otherwise returns 304 (Not Modified) @option options [Time] If-Unmodified-Since Returns object only if it has not been modified since this time, otherwise returns 412 (Precondition Failed). @option options [String] Range Range of object to download
@return [Excon::Response] response:
* body [String] Contents of object * headers [Hash]: * Content-Length [String] - Size of object contents * Content-Type [String] - MIME type of object * ETag [String] - Etag of object * Last-Modified - [String] Last modified timestamp for object
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectHEAD.html
# File lib/fog/internet_archive/requests/storage/head_object.rb, line 27
#
# Get headers for an object from S3 without fetching its body.
#
# @param bucket_name [String] Name of bucket to read from
# @param object_name [String] Name of object to read
# @param options [Hash] conditional headers (If-Match, If-Modified-Since,
#   If-None-Match, If-Unmodified-Since, Range)
# @return [Excon::Response]
def head_object(bucket_name, object_name, options={})
  raise ArgumentError.new('bucket_name is required') unless bucket_name
  raise ArgumentError.new('object_name is required') unless object_name

  headers = {}
  headers.merge!(options)
  # Convert Time-like conditional values into proper HTTP date headers.
  # Fixes two bugs: the If-Unmodified-Since conversion was guarded on
  # options['If-Modified-Since'] (copy-paste error), and the raw options
  # were merged last, clobbering the converted values (get_object merges
  # options first, then applies the conversions).
  if options['If-Modified-Since']
    headers['If-Modified-Since'] = Fog::Time.at(options['If-Modified-Since'].to_i).to_date_header
  end
  if options['If-Unmodified-Since']
    headers['If-Unmodified-Since'] = Fog::Time.at(options['If-Unmodified-Since'].to_i).to_date_header
  end

  request({
    :expects    => 200,
    :headers    => headers,
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'HEAD',
    :path       => CGI.escape(object_name)
  })
end
Initiate a multipart upload to an S3 bucket
@param bucket_name [String] Name of bucket to create object in @param object_name [String] Name of object to create @param options [Hash]: @option options [String] Cache-Control Caching behaviour @option options [String] Content-Disposition Presentational information for the object @option options [String] Content-Encoding Encoding of object data @option options [String] Content-MD5 Base64 encoded 128-bit MD5 digest of message (defaults to Base64 encoded MD5 of object.read) @option options [String] Content-Type Standard MIME type describing contents (defaults to MIME::Types.of.first) @option options [String] x-amz-acl Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] @option options [String] x-amz-meta-#{name} Headers to be returned with object, note total size of request without body must be less than 8 KB.
@return [Excon::Response] response:
* body [Hash]: * Bucket [String] - Bucket where upload was initiated * Key [String] - Object key where the upload was initiated * UploadId [String] - Id for initiated multipart upload
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html
# File lib/fog/internet_archive/requests/storage/initiate_multipart_upload.rb, line 29
#
# Initiate a multipart upload to an S3 bucket.
#
# @param bucket_name [String] Name of bucket to create object in
# @param object_name [String] Name of object to create
# @param options [Hash] headers for the new object (Content-Type, x-amz-acl, ...)
# @return [Excon::Response] parsed body with Bucket, Key and UploadId
def initiate_multipart_upload(bucket_name, object_name, options = {})
  params = {
    :expects => 200,
    :headers => options,
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'POST',
    :parser  => Fog::Parsers::Storage::InternetArchive::InitiateMultipartUpload.new,
    :path    => CGI.escape(object_name),
    # 'uploads' subresource starts a new multipart upload.
    :query   => {'uploads' => nil}
  }
  request(params)
end
List multipart uploads for a bucket
@param [String] bucket_name Name of bucket to list multipart uploads for @param [Hash] options config arguments for list. Defaults to {}. @option options [String] key-marker limits parts to only those that appear lexicographically after this key. @option options [Integer] max-uploads limits number of uploads returned @option options [String] upload-id-marker limits uploads to only those that appear lexicographically after this upload id.
@return [Excon::Response] response:
* body [Hash]: * Bucket [string] Bucket where the multipart upload was initiated * IsTruncated [Boolean] Whether or not the listing is truncated * KeyMarker [String] first key in list, only upload ids after this lexographically will appear * MaxUploads [Integer] Maximum results to return * NextKeyMarker [String] last key in list, for further pagination * NextUploadIdMarker [String] last key in list, for further pagination * Upload [Hash]: * Initiated [Time] Time when upload was initiated * Initiator [Hash]: * DisplayName [String] Display name of upload initiator * ID [String] Id of upload initiator * Key [String] Key where multipart upload was initiated * Owner [Hash]: * DisplayName [String] Display name of upload owner * ID [String] Id of upload owner * StorageClass [String] Storage class of object * UploadId [String] upload id of upload containing part * UploadIdMarker [String] first key in list, only upload ids after this lexographically will appear
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListMPUpload.html
# File lib/fog/internet_archive/requests/storage/list_multipart_uploads.rb, line 39
#
# List in-progress multipart uploads for a bucket.
#
# @param [String] bucket_name Name of bucket to list multipart uploads for
# @param [Hash] options pagination options (key-marker, max-uploads,
#   upload-id-marker), sent as query parameters
# @return [Excon::Response] parsed upload listing
def list_multipart_uploads(bucket_name, options = {})
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::ListMultipartUploads.new,
    # Fix: use non-destructive merge so the caller's options hash is not
    # mutated (merge! injected 'uploads' into the caller's hash).
    :query      => options.merge({'uploads' => nil})
  })
end
List parts for a multipart upload
@param bucket_name [String] Name of bucket to list parts for @param object_name [String] Name of object to list parts for @param upload_id [String] upload id to list objects for @param options [Hash] config arguments for list. Defaults to {}. @option options max-parts [Integer] limits number of parts returned @option options part-number-marker [String] limits parts to only those that appear lexicographically after this part number.
@return [Excon::Response] response:
* body [Hash]: * Bucket [string] Bucket where the multipart upload was initiated * Initiator [Hash]: * DisplayName [String] Display name of upload initiator * ID [String] Id of upload initiator * IsTruncated [Boolean] Whether or not the listing is truncated * Key [String] Key where multipart upload was initiated * MaxParts [String] maximum number of replies allowed in response * NextPartNumberMarker [String] last item in list, for further pagination * Part [Array]: * ETag [String] ETag of part * LastModified [Timestamp] Last modified for part * PartNumber [String] Part number for part * Size [Integer] Size of part * PartNumberMarker [String] Part number after which listing begins * StorageClass [String] Storage class of object * UploadId [String] upload id of upload containing part
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html
# File lib/fog/internet_archive/requests/storage/list_parts.rb, line 38
#
# List the uploaded parts of a multipart upload.
#
# @param bucket_name [String] Name of bucket to list parts for
# @param object_name [String] Name of object to list parts for
# @param upload_id [String] upload id to list parts for
# @param options [Hash] pagination options (max-parts, part-number-marker),
#   sent as query parameters
# @return [Excon::Response] parsed part listing
def list_parts(bucket_name, object_name, upload_id, options = {})
  # Fix: drop the redundant options['uploadId'] = upload_id assignment and
  # use a non-destructive merge so the caller's options hash is untouched.
  request({
    :expects    => 200,
    :headers    => {},
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'GET',
    :parser     => Fog::Parsers::Storage::InternetArchive::ListParts.new,
    :path       => CGI.escape(object_name),
    :query      => options.merge({'uploadId' => upload_id})
  })
end
Create an S3 bucket
@param bucket_name [String] name of bucket to create @option options [Hash] config arguments for bucket. Defaults to {}. @option options LocationConstraint [Symbol] sets the location for the bucket @option options x-amz-acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read']
@return [Excon::Response] response:
* status [Integer] 200
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
# File lib/fog/internet_archive/requests/storage/put_bucket.rb, line 18
#
# Create an S3 bucket.
#
# @param bucket_name [String] name of bucket to create
# @param options [Hash] bucket configuration; a 'LocationConstraint' entry is
#   extracted into the request body, every remaining entry is sent as a header
#   (e.g. 'x-amz-acl').
#
# @return [Excon::Response] response with status 200 on success
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
def put_bucket(bucket_name, options = {})
  location_constraint = options.delete('LocationConstraint')
  # Only build a body when a location constraint was requested.
  data =
    if location_constraint
      <<-DATA
<CreateBucketConfiguration>
  <LocationConstraint>#{location_constraint}</LocationConstraint>
</CreateBucketConfiguration>
DATA
    end
  request({
    :expects    => 200,
    :body       => data,
    :headers    => options,
    :idempotent => true,
    :host       => "#{bucket_name}.#{@host}",
    :method     => 'PUT'
  })
end
Change access control list for an S3 bucket
@param bucket_name [String] name of bucket to modify @param acl [Hash]
* Owner [Hash]: * ID [String]: id of owner * DisplayName [String]: display name of owner * AccessControlList [Array]: * Grantee [Hash]: * DisplayName [String] Display name of grantee * ID [String] Id of grantee or * EmailAddress [String] Email address of grantee or * URI [String] URI of group to grant access for * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read']
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html
# File lib/fog/internet_archive/requests/storage/put_bucket_acl.rb, line 28
#
# Change the access control list for an S3 bucket.
#
# @param bucket_name [String] name of bucket to modify
# @param acl [Hash, String] either a full ACL document (Owner /
#   AccessControlList hash) sent as the request body, or one of the canned
#   ACL strings sent via the 'x-amz-acl' header.
#
# @raise [Excon::Errors::BadRequest] when a String acl is not a valid canned ACL
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html
def put_bucket_acl(bucket_name, acl)
  data    = ''
  headers = {}

  if acl.is_a?(Hash)
    data = Fog::Storage::InternetArchive.hash_to_acl(acl)
  else
    canned = %w[private public-read public-read-write authenticated-read]
    raise Excon::Errors::BadRequest.new('invalid x-amz-acl') unless canned.include?(acl)
    headers['x-amz-acl'] = acl
  end

  headers['Content-MD5']  = Base64.encode64(Digest::MD5.digest(data)).strip
  headers['Content-Type'] = 'application/json'
  headers['Date']         = Fog::Time.now.to_date_header

  request({
    :body    => data,
    :expects => 200,
    :headers => headers,
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'acl' => nil}
  })
end
Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
@param bucket_name [String] name of bucket to modify @param cors [Hash]
* CORSConfiguration [Array]: * ID [String]: A unique identifier for the rule. * AllowedMethod [String]: An HTTP method that you want to allow the origin to execute. * AllowedOrigin [String]: An origin that you want to allow cross-domain requests from. * AllowedHeader [String]: Specifies which headers are allowed in a pre-flight OPTIONS request via the Access-Control-Request-Headers header. * MaxAgeSeconds [String]: The time in seconds that your browser is to cache the preflight response for the specified resource. * ExposeHeader [String]: One or more headers in the response that you want customers to be able to access from their applications.
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html
# File lib/fog/internet_archive/requests/storage/put_bucket_cors.rb, line 22
#
# Set the CORS configuration for a bucket, replacing any existing one.
#
# @param bucket_name [String] name of bucket to modify
# @param cors [Hash] CORS configuration (CORSConfiguration rules) converted to
#   XML by Fog::Storage::InternetArchive.hash_to_cors
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html
def put_bucket_cors(bucket_name, cors)
  data = Fog::Storage::InternetArchive.hash_to_cors(cors)

  headers = {
    'Content-MD5'  => Base64.encode64(Digest::MD5.digest(data)).strip,
    'Content-Type' => 'application/json',
    'Date'         => Fog::Time.now.to_date_header
  }

  request({
    :body    => data,
    :expects => 200,
    :headers => headers,
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'cors' => nil}
  })
end
Change lifecycle configuration for an S3 bucket
@param bucket_name [String] name of bucket to set lifecycle configuration for
lifecycle [Hash]:
Rules [Array] object expire rules
ID [String] Unique identifier for the rule
Prefix [String] Prefix identifying one or more objects to which the rule applies
Enabled [Boolean] if rule is currently being applied
Expiration [Hash] Container for the object expiration rule.
Days [Integer] lifetime, in days, of the objects that are subject to the rule
Date [Date] Indicates when the specific rule take effect. The date value must conform to the ISO 8601 format. The time is always midnight UTC.
Transition [Hash] Container for the transition rule that describes when objects transition to the Glacier storage class
Days [Integer] lifetime, in days, of the objects that are subject to the rule
Date [Date] Indicates when the specific rule take effect. The date value must conform to the ISO 8601 format. The time is always midnight UTC.
StorageClass [String] Indicates the Amazon S3 storage class to which you want the object to transition to.
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html
# File lib/fog/internet_archive/requests/storage/put_bucket_lifecycle.rb, line 29
#
# Change the lifecycle configuration for an S3 bucket.
#
# @param bucket_name [String] name of bucket to set lifecycle configuration for
# @param lifecycle [Hash] configuration with a 'Rules' array; each rule has
#   'ID', 'Prefix', 'Enabled', and optional 'Expiration' / 'Transition' hashes
#   carrying 'Days' or 'Date' (Time or parseable String), and for Transition an
#   optional 'StorageClass' (defaults to 'GLACIER').
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html
def put_bucket_lifecycle(bucket_name, lifecycle)
  builder = Nokogiri::XML::Builder.new do
    LifecycleConfiguration {
      lifecycle['Rules'].each do |rule|
        Rule {
          ID rule['ID']
          Prefix rule['Prefix']
          Status rule['Enabled'] ? 'Enabled' : 'Disabled'
          unless (rule['Expiration'] or rule['Transition'])
            # Legacy flat form: 'Days' directly on the rule.
            Expiration { Days rule['Days'] }
          else
            if rule['Expiration']
              if rule['Expiration']['Days']
                Expiration { Days rule['Expiration']['Days'] }
              elsif rule['Expiration']['Date']
                Expiration {
                  Date rule['Expiration']['Date'].is_a?(Time) ? rule['Expiration']['Date'].utc.iso8601 : Time.parse(rule['Expiration']['Date']).utc.iso8601
                }
              end
            end
            if rule['Transition']
              Transition {
                if rule['Transition']['Days']
                  Days rule['Transition']['Days']
                elsif rule['Transition']['Date']
                  # BUG FIX: the original referenced an undefined local `time`
                  # here, raising NameError whenever a Transition Date was given.
                  date = rule['Transition']['Date']
                  Date date.is_a?(Time) ? date.utc.iso8601 : Time.parse(date).utc.iso8601
                end
                StorageClass rule['Transition']['StorageClass'].nil? ? 'GLACIER' : rule['Transition']['StorageClass']
              }
            end
          end
        }
      end
    }
  end

  body = builder.to_xml
  # Expand self-closing tags (<Foo/>) into explicit open/close pairs.
  body.gsub! /<([^<>]+)\/>/, '<\1></\1>'

  request({
    :body    => body,
    :expects => 200,
    :headers => {'Content-MD5' => Base64.encode64(Digest::MD5.digest(body)).chomp!,
                 'Content-Type' => 'application/xml'},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'lifecycle' => nil}
  })
end
Change logging status for an S3 bucket
@param bucket_name [String] name of bucket to modify @param logging_status [Hash]:
* Owner [Hash]: * ID [String]: id of owner * DisplayName [String]: display name of owner * AccessControlList [Array]: * Grantee [Hash]: * DisplayName [String] Display name of grantee * ID [String] Id of grantee or * EmailAddress [String] Email address of grantee or * URI [String] URI of group to grant access for * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
# File lib/fog/internet_archive/requests/storage/put_bucket_logging.rb, line 25
#
# Change the logging status for an S3 bucket.
#
# @param bucket_name [String] name of bucket to modify
# @param logging_status [Hash] with a 'LoggingEnabled' hash ({} disables
#   logging) carrying 'TargetBucket', 'TargetPrefix' and a 'TargetGrants'
#   array of {'Grantee' => {...}, 'Permission' => ...} entries.
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
def put_bucket_logging(bucket_name, logging_status)
  if logging_status['LoggingEnabled'].empty?
    data = <<-DATA
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
DATA
  else
    # BUG FIX: <TargetPrefix> was filled from 'TargetBucket' in the original.
    data = <<-DATA
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
  <LoggingEnabled>
    <TargetBucket>#{logging_status['LoggingEnabled']['TargetBucket']}</TargetBucket>
    <TargetPrefix>#{logging_status['LoggingEnabled']['TargetPrefix']}</TargetPrefix>
    <TargetGrants>
DATA

    # BUG FIX: the original iterated `acl['AccessControlList']`, but no `acl`
    # variable exists in this method (NameError on any non-empty status); the
    # grants come from the logging status itself.
    (logging_status['LoggingEnabled']['TargetGrants'] || []).each do |grant|
      data << "      <Grant>"
      type = case grant['Grantee'].keys.sort
      when ['DisplayName', 'ID']
        'CanonicalUser'
      when ['EmailAddress']
        'AmazonCustomerByEmail'
      when ['URI']
        'Group'
      end
      data << "        <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"#{type}\">"
      for key, value in grant['Grantee']
        data << "          <#{key}>#{value}</#{key}>"
      end
      data << "        </Grantee>"
      data << "        <Permission>#{grant['Permission']}</Permission>"
      data << "      </Grant>"
    end

    data << <<-DATA
    </TargetGrants>
  </LoggingEnabled>
</BucketLoggingStatus>
DATA
  end

  request({
    :body    => data,
    :expects => 200,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'logging' => nil}
  })
end
Change bucket policy for an S3 bucket
@param bucket_name [String] name of bucket to modify @param policy [Hash] policy document
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html
# File lib/fog/internet_archive/requests/storage/put_bucket_policy.rb, line 13
#
# Change the bucket policy for an S3 bucket.
#
# @param bucket_name [String] name of bucket to modify
# @param policy [Hash] policy document, JSON-encoded into the request body
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html
def put_bucket_policy(bucket_name, policy)
  request(
    :body    => Fog::JSON.encode(policy),
    :expects => 204,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'policy' => nil}
  )
end
Change website configuration for an S3 bucket
@param bucket_name [String] name of bucket to modify @param suffix [String] suffix to append to requests for the bucket @param options [Hash] @option options key [String] key to use for 4XX class errors
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html
# File lib/fog/internet_archive/requests/storage/put_bucket_website.rb, line 15
#
# Change the website configuration for an S3 bucket.
#
# @param bucket_name [String] name of bucket to modify
# @param suffix [String] index document suffix appended to directory requests
# @param options [Hash]
# @option options :key [String] error document key used for 4XX class errors
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html
def put_bucket_website(bucket_name, suffix, options = {})
  data = <<-DATA
<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <IndexDocument>
    <Suffix>#{suffix}</Suffix>
  </IndexDocument>
DATA

  if options[:key]
    data << <<-DATA
  <ErrorDocument>
    <Key>#{options[:key]}</Key>
  </ErrorDocument>
DATA
  end

  data << '</WebsiteConfiguration>'

  request({
    :body    => data,
    :expects => 200,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'website' => nil}
  })
end
Create an object in an S3 bucket
@param bucket_name [String] Name of bucket to create object in @param object_name [String] Name of object to create @param data [File||String] File or String to create object from @param options [Hash] @option options Cache-Control [String] Caching behaviour @option options Content-Disposition [String] Presentational information for the object @option options Content-Encoding [String] Encoding of object data @option options Content-Length [String] Size of object in bytes (defaults to object.read.length) @option options Content-MD5 [String] Base64 encoded 128-bit MD5 digest of message @option options Content-Type [String] Standard MIME type describing contents (defaults to MIME::Types.of.first) @option options Expires [String] Cache expiry @option options x-amz-acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] @option options x-amz-storage-class [String] Default is 'STANDARD', set to 'REDUCED_REDUNDANCY' for non-critical, reproducible data @option options x-amz-meta-#{name} Headers to be returned with object, note total size of request without body must be less than 8 KB.
@return [Excon::Response] response:
* headers [Hash]: * ETag [String] etag of new object
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html
# File lib/fog/internet_archive/requests/storage/put_object.rb, line 29
#
# Create an object in an S3 bucket.
#
# @param bucket_name [String] Name of bucket to create object in
# @param object_name [String] Name of object to create
# @param data [File, String] File or String to create object from
# @param options [Hash] request headers (Content-Type, Content-MD5,
#   x-amz-acl, x-amz-meta-*, ...); merged over the headers parse_data derives
#
# @return [Excon::Response] response whose headers include the new object's ETag
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html
def put_object(bucket_name, object_name, data, options = {})
  payload = Fog::Storage.parse_data(data)
  # Caller-supplied options override the derived headers.
  request_headers = payload[:headers].merge!(options)

  request({
    :body       => payload[:body],
    :expects    => 200,
    :headers    => request_headers,
    :host       => "#{bucket_name}.#{@host}",
    :idempotent => true,
    :method     => 'PUT',
    :path       => CGI.escape(object_name)
  })
end
Change access control list for an S3 object
@param bucket_name [String] name of bucket to modify @param object_name [String] name of object to get access control list for @param acl [Hash]:
* Owner [Hash] * ID [String] id of owner * DisplayName [String] display name of owner * AccessControlList [Array] * Grantee [Hash] * DisplayName [String] Display name of grantee * ID [String] Id of grantee or * EmailAddress [String] Email address of grantee or * URI [String] URI of group to grant access for * Permission [String] Permission, in [FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP]
@param acl [String] Permissions, must be in ['private', 'public-read', 'public-read-write', 'authenticated-read'] @param options [Hash]
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html
# File lib/fog/internet_archive/requests/storage/put_object_acl.rb, line 30
#
# Change the access control list for an S3 object.
#
# @param bucket_name [String] name of bucket containing the object
# @param object_name [String] name of object to set the ACL on
# @param acl [Hash, String] either a full ACL document (Owner /
#   AccessControlList hash) sent as the request body, or one of the canned
#   ACL strings sent via the 'x-amz-acl' header
# @param options [Hash] currently unused beyond the signature
#
# @raise [Excon::Errors::BadRequest] when a String acl is not a valid canned ACL
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html
def put_object_acl(bucket_name, object_name, acl, options = {})
  query   = {'acl' => nil}
  data    = ''
  headers = {}

  if acl.is_a?(Hash)
    data = Fog::Storage::InternetArchive.hash_to_acl(acl)
  else
    canned = %w[private public-read public-read-write authenticated-read]
    raise Excon::Errors::BadRequest.new('invalid x-amz-acl') unless canned.include?(acl)
    headers['x-amz-acl'] = acl
  end

  headers['Content-MD5']  = Base64.encode64(Digest::MD5.digest(data)).strip
  headers['Content-Type'] = 'application/json'
  headers['Date']         = Fog::Time.now.to_date_header

  request({
    :body    => data,
    :expects => 200,
    :headers => headers,
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :path    => CGI.escape(object_name),
    :query   => query
  })
end
Change who pays for requests to an S3 bucket
@param bucket_name [String] name of bucket to modify @param payer [String] valid values are BucketOwner or Requester
@see docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html
# File lib/fog/internet_archive/requests/storage/put_request_payment.rb, line 13
#
# Change who pays for requests to an S3 bucket.
#
# @param bucket_name [String] name of bucket to modify
# @param payer [String] valid values are BucketOwner or Requester
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html
def put_request_payment(bucket_name, payer)
  data = <<-DATA
<RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Payer>#{payer}</Payer>
</RequestPaymentConfiguration>
DATA

  request({
    :body    => data,
    :expects => 200,
    :headers => {},
    :host    => "#{bucket_name}.#{@host}",
    :method  => 'PUT',
    :query   => {'requestPayment' => nil}
  })
end
# File lib/fog/internet_archive/storage.rb, line 278 def reload @connection.reset end
# File lib/fog/internet_archive/storage.rb, line 282 def signature(params) string_to_sign = #{params[:method].to_s.upcase}#{params[:headers]['Content-MD5']}#{params[:headers]['Content-Type']}#{params[:headers]['Date']} amz_headers, canonical_amz_headers = {}, '' for key, value in params[:headers] if key[0..5] == 'x-amz-' amz_headers[key] = value end end amz_headers = amz_headers.sort {|x, y| x[0] <=> y[0]} for key, value in amz_headers canonical_amz_headers << "#{key}:#{value}\n" end string_to_sign << canonical_amz_headers subdomain = params[:host].split(".#{@host}").first unless subdomain =~ /^(?:[a-z]|\d(?!\d{0,2}(?:\.\d{1,3}){3}$))(?:[a-z0-9]|\.(?![\.\-])|\-(?![\.])){1,61}[a-z0-9]$/ Fog::Logger.warning("fog: the specified s3 bucket name(#{subdomain}) is not a valid dns name, which will negatively impact performance. For details see: http://docs.amazonwebservices.com/AmazonS3/latest/dev/BucketRestrictions.html") params[:host] = params[:host].split("#{subdomain}.")[-1] if params[:path] params[:path] = "#{subdomain}/#{params[:path]}" else params[:path] = subdomain end subdomain = nil end canonical_resource = @path.dup unless subdomain.nil? || subdomain == @host canonical_resource << "#{Fog::InternetArchive.escape(subdomain).downcase}/" end canonical_resource << params[:path].to_s canonical_resource << '?' for key in (params[:query] || {}).keys.sort if %{ acl cors delete lifecycle location logging notification partNumber policy requestPayment response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires torrent uploadId uploads versionId versioning versions website }.include?(key) canonical_resource << "#{key}#{"=#{params[:query][key]}" unless params[:query][key].nil?}&" end end canonical_resource.chop! string_to_sign << canonical_resource signed_string = @hmac.sign(string_to_sign) Base64.encode64(signed_string).chomp! end
Sync clock against S3 to avoid skew errors
# File lib/fog/internet_archive/requests/storage/sync_clock.rb, line 8
#
# Sync the local fog clock against the service's Date header to avoid
# clock-skew signature errors. Even an HTTP error response carries a usable
# Date header, so error responses are used rather than re-raised.
def sync_clock
  response =
    begin
      get_service
    rescue Excon::Errors::HTTPStatusError => error
      error.response
    end
  Fog::Time.now = Time.parse(response.headers['Date'])
end
Upload a part for a multipart upload
@param bucket_name [String] Name of bucket to add part to @param object_name [String] Name of object to add part to @param upload_id [String] Id of upload to add part to @param part_number [String] Index of part in upload @param data [File||String] Content for part @param options [Hash] @option options Content-MD5 [String] Base64 encoded 128-bit MD5 digest of message
@return [Excon::Response] response
* headers [Hash]: * ETag [String] etag of new object (will be needed to complete upload)
@see docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html
# File lib/fog/internet_archive/requests/storage/upload_part.rb, line 22
#
# Upload a part for a multipart upload.
#
# @param bucket_name [String] Name of bucket to add part to
# @param object_name [String] Name of object to add part to
# @param upload_id [String] Id of upload to add part to
# @param part_number [String] Index of part in upload
# @param data [File, String] Content for part
# @param options [Hash]
# @option options Content-MD5 [String] Base64 encoded 128-bit MD5 digest of message
#
# @return [Excon::Response] response whose headers include the part's ETag
#   (needed to complete the upload)
#
# @see http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html
def upload_part(bucket_name, object_name, upload_id, part_number, data, options = {})
  data = Fog::Storage.parse_data(data)
  # BUG FIX: the original aliased the caller's options hash, so adding
  # Content-Length mutated the caller's hash; dup keeps the change local.
  headers = options.dup
  headers['Content-Length'] = data[:headers]['Content-Length']

  request({
    :body       => data[:body],
    :expects    => 200,
    :idempotent => true,
    :headers    => headers,
    :host       => "#{bucket_name}.#{@host}",
    :method     => 'PUT',
    :path       => CGI.escape(object_name),
    :query      => {'uploadId' => upload_id, 'partNumber' => part_number}
  })
end
Generated with the Darkfish Rdoc Generator 2.