@@ -6,9 +6,11 @@ module S3
66 # capabilities with automatic multipart handling, progress tracking, and
77 # handling of large files. The following features are supported:
88 #
9- # * upload a S3 object with multipart upload
9+ # * upload a file with multipart upload
10+ # * upload a stream with multipart upload
1011 # * download a S3 object with multipart download
1112 # * track transfer progress by using progress listener
13+ #
1214 class TransferManager
1315 # @param [Hash] options
1416 # @option options [S3::Client] :client (S3::Client.new)
@@ -25,12 +27,12 @@ def initialize(options = {})
2527 #
2628 # # small files (< 5MB) are downloaded in a single API call
2729 # tm = TransferManager.new
28- # tm.download_file('/path/to/file', bucket: 'bucket-name ', key: 'key-name ')
30+ # tm.download_file('/path/to/file', bucket: 'bucket', key: 'key')
2931 #
3032 # Files larger than 5MB are downloaded using multipart method:
3133 #
3234 # # large files are split into parts and the parts are downloaded in parallel
33- # tm.download_file('/path/to/large_file', bucket: 'bucket-name ', key: 'key-name ')
35+ # tm.download_file('/path/to/large_file', bucket: 'bucket', key: 'key')
3436 #
3537 # You can provide a callback to monitor progress of the download:
3638 #
@@ -41,7 +43,7 @@ def initialize(options = {})
4143 #     puts "Part #{i + 1}: #{b} / #{part_sizes[i]} " + "Total: #{100.0 * bytes.sum / file_size}%"
4244 # end
4345 # end
44- # tm.download_file('/path/to/file', bucket: 'bucket-name ', key: 'key-name ', progress_callback: progress)
46+ # tm.download_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
4547 #
4648 # @param [String] destination
4749 # Where to download the file to.
@@ -102,17 +104,17 @@ def download_file(destination, bucket:, key:, **options)
102104 #
103105 #   # small files are uploaded with the PutObject API
104106 # tm = TransferManager.new
105- # tm.upload_file('/path/to/small_file', bucket: 'bucket-name ', key: 'key-name ')
107+ # tm.upload_file('/path/to/small_file', bucket: 'bucket', key: 'key')
106108 #
107109 # Files larger than or equal to `:multipart_threshold` are uploaded using multipart upload APIs.
108110 #
109111 # # large files are automatically split into parts and the parts are uploaded in parallel
110- # tm.upload_file('/path/to/large_file', bucket: 'bucket-name ', key: 'key-name ')
112+ # tm.upload_file('/path/to/large_file', bucket: 'bucket', key: 'key')
111113 #
112114 # The response of the S3 upload API is yielded if a block is given.
113115 #
114116 # # API response will have etag value of the file
115- # tm.upload_file('/path/to/file', bucket: 'bucket-name ', key: 'key-name ') do |response|
117+ # tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key') do |response|
116118 # etag = response.etag
117119 # end
118120 #
@@ -124,7 +126,7 @@ def download_file(destination, bucket:, key:, **options)
124126 # puts "Part #{i + 1}: #{b} / #{totals[i]} " + "Total: #{100.0 * bytes.sum / totals.sum}%"
125127 # end
126128 # end
127- # tm.upload_file('/path/to/file', bucket: 'bucket-name ', key: 'key-name ', progress_callback: progress)
129+ # tm.upload_file('/path/to/file', bucket: 'bucket', key: 'key', progress_callback: progress)
128130 #
129131 # @param [String, Pathname, File, Tempfile] source
130132 # A file on the local file system that will be uploaded. This can either be a `String` or `Pathname` to the
@@ -186,15 +188,15 @@ def upload_file(source, bucket:, key:, **options)
186188 #
187189 # @example Streaming chunks of data
188190 # tm = TransferManager.new
189- # tm.upload_stream(bucket: 'example- bucket', key: 'example- key') do |write_stream|
191+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
190192 # 10.times { write_stream << 'foo' }
191193 # end
192194 # @example Streaming chunks of data
193- # tm.upload_stream(bucket: 'example- bucket', key: 'example- key') do |write_stream|
195+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
194196 # IO.copy_stream(IO.popen('ls'), write_stream)
195197 # end
196198 # @example Streaming chunks of data
197- # tm.upload_stream(bucket: 'example- bucket', key: 'example- key') do |write_stream|
199+ # tm.upload_stream(bucket: 'bucket', key: 'key') do |write_stream|
198200 # IO.copy_stream(STDIN, write_stream)
199201 # end
200202 #
0 commit comments