Procházet zdrojové kódy

@uppy/aws-s3-multipart: add support for presigned URL batching (#3056)

* Add first basic test for aws-s3-multipart

* Add another test for GH actions run

* Add whatwg-fetch to aws-s3-multipart package.json

This is to satisfy linter errors, and the other s3 plugin
does this as well

* Adding more tests

* More test progress

* Tests working with nock

* Add another test, for part uploaded event

* Remove spec and refactor to beforeEach

* S3 multipart batch presign working...in theory

* Working batching and tests

* Min needed for presign batch functionality

* Fix lint issues

* Fix syntax error in companion

* Add companion batch presign endpoint

* Improve tests

* Fix each -> forEach

* Remove .prettierrc

* Adding docs and allow for headers with batch prepare

* Review fixes

* Rename prepareUploadPart to prepareUploadParts and use breaking changes
  to batch presign URLs there.
* Get rid of unnecessary batchPartPresign and minNeededForPresignBatch
  options. Use Math.ceil(limit / 2) for min needed instead.
* Fix up tests.
* Fix up type tests.
* Update documentation.

* Review fixes

* Change _ private methods to use # syntax
* Minor code and docs improvements

* Change Promise.resolve() microtasks to async/await
Martin Brennan před 3 roky
rodič
revize
d613b849a6

+ 4 - 0
packages/@uppy/aws-s3-multipart/package.json

@@ -26,6 +26,10 @@
     "@uppy/companion-client": "file:../companion-client",
     "@uppy/companion-client": "file:../companion-client",
     "@uppy/utils": "file:../utils"
     "@uppy/utils": "file:../utils"
   },
   },
+  "devDependencies": {
+    "whatwg-fetch": "3.6.2",
+    "nock": "^13.1.0"
+  },
   "peerDependencies": {
   "peerDependencies": {
     "@uppy/core": "^1.0.0"
     "@uppy/core": "^1.0.0"
   }
   }

+ 105 - 75
packages/@uppy/aws-s3-multipart/src/MultipartUploader.js

@@ -50,7 +50,7 @@ class MultipartUploader {
     // upload was created already. That also ensures that the sequencing is right
     // upload was created already. That also ensures that the sequencing is right
     // (so the `OP` definitely happens if the upload is created).
     // (so the `OP` definitely happens if the upload is created).
     //
     //
-    // This mostly exists to make `_abortUpload` work well: only sending the abort request if
+    // This mostly exists to make `#abortUpload` work well: only sending the abort request if
     // the upload was already created, and if the createMultipartUpload request is still in flight,
     // the upload was already created, and if the createMultipartUpload request is still in flight,
     // aborting it immediately after it finishes.
     // aborting it immediately after it finishes.
     this.createdPromise = Promise.reject() // eslint-disable-line prefer-promise-reject-errors
     this.createdPromise = Promise.reject() // eslint-disable-line prefer-promise-reject-errors
@@ -58,8 +58,9 @@ class MultipartUploader {
     this.partsInProgress = 0
     this.partsInProgress = 0
     this.chunks = null
     this.chunks = null
     this.chunkState = null
     this.chunkState = null
+    this.lockedCandidatesForBatch = []
 
 
-    this._initChunks()
+    this.#initChunks()
 
 
     this.createdPromise.catch(() => {}) // silence uncaught rejection warning
     this.createdPromise.catch(() => {}) // silence uncaught rejection warning
   }
   }
@@ -71,11 +72,11 @@ class MultipartUploader {
    *
    *
    * @returns {boolean}
    * @returns {boolean}
    */
    */
-  _aborted () {
+  #aborted () {
     return this.abortController.signal.aborted
     return this.abortController.signal.aborted
   }
   }
 
 
-  _initChunks () {
+  #initChunks () {
     const chunks = []
     const chunks = []
     const desiredChunkSize = this.options.getChunkSize(this.file)
     const desiredChunkSize = this.options.getChunkSize(this.file)
     // at least 5MB per request, at most 10k requests
     // at least 5MB per request, at most 10k requests
@@ -100,10 +101,10 @@ class MultipartUploader {
     }))
     }))
   }
   }
 
 
-  _createUpload () {
+  #createUpload () {
     this.createdPromise = Promise.resolve().then(() => this.options.createMultipartUpload())
     this.createdPromise = Promise.resolve().then(() => this.options.createMultipartUpload())
     return this.createdPromise.then((result) => {
     return this.createdPromise.then((result) => {
-      if (this._aborted()) throw createAbortError()
+      if (this.#aborted()) throw createAbortError()
 
 
       const valid = typeof result === 'object' && result
       const valid = typeof result === 'object' && result
         && typeof result.uploadId === 'string'
         && typeof result.uploadId === 'string'
@@ -116,18 +117,19 @@ class MultipartUploader {
       this.uploadId = result.uploadId
       this.uploadId = result.uploadId
 
 
       this.options.onStart(result)
       this.options.onStart(result)
-      this._uploadParts()
+      this.#uploadParts()
     }).catch((err) => {
     }).catch((err) => {
-      this._onError(err)
+      this.#onError(err)
     })
     })
   }
   }
 
 
-  _resumeUpload () {
-    return Promise.resolve().then(() => this.options.listParts({
-      uploadId: this.uploadId,
-      key: this.key,
-    })).then((parts) => {
-      if (this._aborted()) throw createAbortError()
+  async #resumeUpload () {
+    try {
+      const parts = await this.options.listParts({
+        uploadId: this.uploadId,
+        key: this.key,
+      })
+      if (this.#aborted()) throw createAbortError()
 
 
       parts.forEach((part) => {
       parts.forEach((part) => {
         const i = part.PartNumber - 1
         const i = part.PartNumber - 1
@@ -146,26 +148,40 @@ class MultipartUploader {
           })
           })
         }
         }
       })
       })
-      this._uploadParts()
-    }).catch((err) => {
-      this._onError(err)
-    })
+      this.#uploadParts()
+    } catch (err) {
+      this.#onError(err)
+    }
   }
   }
 
 
-  _uploadParts () {
+  #uploadParts () {
     if (this.isPaused) return
     if (this.isPaused) return
 
 
-    const need = this.options.limit - this.partsInProgress
-    if (need === 0) return
-
     // All parts are uploaded.
     // All parts are uploaded.
     if (this.chunkState.every((state) => state.done)) {
     if (this.chunkState.every((state) => state.done)) {
-      this._completeUpload()
+      this.#completeUpload()
       return
       return
     }
     }
 
 
+    // For a 100MB file, with the default min chunk size of 5MB and a limit of 10:
+    //
+    // Total 20 parts
+    // ---------
+    // Need 1 is 10
+    // Need 2 is 5
+    // Need 3 is 5
+    const need = this.options.limit - this.partsInProgress
+    const completeChunks = this.chunkState.filter((state) => state.done).length
+    const remainingChunks = this.chunks.length - completeChunks
+    let minNeeded = Math.ceil(this.options.limit / 2)
+    if (minNeeded > remainingChunks) {
+      minNeeded = remainingChunks
+    }
+    if (need < minNeeded) return
+
     const candidates = []
     const candidates = []
     for (let i = 0; i < this.chunkState.length; i++) {
     for (let i = 0; i < this.chunkState.length; i++) {
+      if (this.lockedCandidatesForBatch.includes(i)) continue
       const state = this.chunkState[i]
       const state = this.chunkState[i]
       if (state.done || state.busy) continue
       if (state.done || state.busy) continue
 
 
@@ -174,18 +190,22 @@ class MultipartUploader {
         break
         break
       }
       }
     }
     }
-
-    candidates.forEach((index) => {
-      this._uploadPartRetryable(index).then(() => {
-        // Continue uploading parts
-        this._uploadParts()
-      }, (err) => {
-        this._onError(err)
+    if (candidates.length === 0) return
+
+    this.#prepareUploadParts(candidates).then((result) => {
+      candidates.forEach((index) => {
+        const partNumber = index + 1
+        const prePreparedPart = { url: result.presignedUrls[partNumber], headers: result.headers }
+        this.#uploadPartRetryable(index, prePreparedPart).then(() => {
+          this.#uploadParts()
+        }, (err) => {
+          this.#onError(err)
+        })
       })
       })
     })
     })
   }
   }
 
 
-  _retryable ({ before, attempt, after }) {
+  #retryable ({ before, attempt, after }) {
     const { retryDelays } = this.options
     const { retryDelays } = this.options
     const { signal } = this.abortController
     const { signal } = this.abortController
 
 
@@ -201,7 +221,7 @@ class MultipartUploader {
     }
     }
 
 
     const doAttempt = (retryAttempt) => attempt().catch((err) => {
     const doAttempt = (retryAttempt) => attempt().catch((err) => {
-      if (this._aborted()) throw createAbortError()
+      if (this.#aborted()) throw createAbortError()
 
 
       if (shouldRetry(err) && retryAttempt < retryDelays.length) {
       if (shouldRetry(err) && retryAttempt < retryDelays.length) {
         return delay(retryDelays[retryAttempt], { signal })
         return delay(retryDelays[retryAttempt], { signal })
@@ -219,53 +239,62 @@ class MultipartUploader {
     })
     })
   }
   }
 
 
-  _uploadPartRetryable (index) {
-    return this._retryable({
+  async #prepareUploadParts (candidates) {
+    this.lockedCandidatesForBatch.push(...candidates)
+
+    const result = await this.options.prepareUploadParts({
+      key: this.key,
+      uploadId: this.uploadId,
+      partNumbers: candidates.map((index) => index + 1),
+    })
+
+    const valid = typeof result?.presignedUrls === 'object'
+    if (!valid) {
+      throw new TypeError(
+        'AwsS3/Multipart: Got incorrect result from `prepareUploadParts()`, expected an object `{ presignedUrls }`.'
+      )
+    }
+    return result
+  }
+
+  #uploadPartRetryable (index, prePreparedPart) {
+    return this.#retryable({
       before: () => {
       before: () => {
         this.partsInProgress += 1
         this.partsInProgress += 1
       },
       },
-      attempt: () => this._uploadPart(index),
+      attempt: () => this.#uploadPart(index, prePreparedPart),
       after: () => {
       after: () => {
         this.partsInProgress -= 1
         this.partsInProgress -= 1
       },
       },
     })
     })
   }
   }
 
 
-  _uploadPart (index) {
+  #uploadPart (index, prePreparedPart) {
     const body = this.chunks[index]
     const body = this.chunks[index]
     this.chunkState[index].busy = true
     this.chunkState[index].busy = true
 
 
-    return Promise.resolve().then(() => this.options.prepareUploadPart({
-      key: this.key,
-      uploadId: this.uploadId,
-      body,
-      number: index + 1,
-    })).then((result) => {
-      const valid = typeof result === 'object' && result
-        && typeof result.url === 'string'
-      if (!valid) {
-        throw new TypeError('AwsS3/Multipart: Got incorrect result from `prepareUploadPart()`, expected an object `{ url }`.')
-      }
+    const valid = typeof prePreparedPart?.url === 'string'
+    if (!valid) {
+      throw new TypeError('AwsS3/Multipart: Got incorrect result for `prePreparedPart`, expected an object `{ url }`.')
+    }
 
 
-      return result
-    }).then(({ url, headers }) => {
-      if (this._aborted()) {
-        this.chunkState[index].busy = false
-        throw createAbortError()
-      }
+    const { url, headers } = prePreparedPart
+    if (this.#aborted()) {
+      this.chunkState[index].busy = false
+      throw createAbortError()
+    }
 
 
-      return this._uploadPartBytes(index, url, headers)
-    })
+    return this.#uploadPartBytes(index, url, headers)
   }
   }
 
 
-  _onPartProgress (index, sent, total) {
+  #onPartProgress (index, sent, total) {
     this.chunkState[index].uploaded = ensureInt(sent)
     this.chunkState[index].uploaded = ensureInt(sent)
 
 
     const totalUploaded = this.chunkState.reduce((n, c) => n + c.uploaded, 0)
     const totalUploaded = this.chunkState.reduce((n, c) => n + c.uploaded, 0)
     this.options.onProgress(totalUploaded, this.file.size)
     this.options.onProgress(totalUploaded, this.file.size)
   }
   }
 
 
-  _onPartComplete (index, etag) {
+  #onPartComplete (index, etag) {
     this.chunkState[index].etag = etag
     this.chunkState[index].etag = etag
     this.chunkState[index].done = true
     this.chunkState[index].done = true
 
 
@@ -278,7 +307,7 @@ class MultipartUploader {
     this.options.onPartComplete(part)
     this.options.onPartComplete(part)
   }
   }
 
 
-  _uploadPartBytes (index, url, headers) {
+  #uploadPartBytes (index, url, headers) {
     const body = this.chunks[index]
     const body = this.chunks[index]
     const { signal } = this.abortController
     const { signal } = this.abortController
 
 
@@ -307,7 +336,7 @@ class MultipartUploader {
     xhr.upload.addEventListener('progress', (ev) => {
     xhr.upload.addEventListener('progress', (ev) => {
       if (!ev.lengthComputable) return
       if (!ev.lengthComputable) return
 
 
-      this._onPartProgress(index, ev.loaded, ev.total)
+      this.#onPartProgress(index, ev.loaded, ev.total)
     })
     })
 
 
     xhr.addEventListener('abort', (ev) => {
     xhr.addEventListener('abort', (ev) => {
@@ -328,7 +357,7 @@ class MultipartUploader {
         return
         return
       }
       }
 
 
-      this._onPartProgress(index, body.size, body.size)
+      this.#onPartProgress(index, body.size, body.size)
 
 
       // NOTE This must be allowed by CORS.
       // NOTE This must be allowed by CORS.
       const etag = ev.target.getResponseHeader('ETag')
       const etag = ev.target.getResponseHeader('ETag')
@@ -337,7 +366,7 @@ class MultipartUploader {
         return
         return
       }
       }
 
 
-      this._onPartComplete(index, etag)
+      this.#onPartComplete(index, etag)
       defer.resolve()
       defer.resolve()
     })
     })
 
 
@@ -355,22 +384,23 @@ class MultipartUploader {
     return promise
     return promise
   }
   }
 
 
-  _completeUpload () {
+  async #completeUpload () {
     // Parts may not have completed uploading in sorted order, if limit > 1.
     // Parts may not have completed uploading in sorted order, if limit > 1.
     this.parts.sort((a, b) => a.PartNumber - b.PartNumber)
     this.parts.sort((a, b) => a.PartNumber - b.PartNumber)
 
 
-    return Promise.resolve().then(() => this.options.completeMultipartUpload({
-      key: this.key,
-      uploadId: this.uploadId,
-      parts: this.parts,
-    })).then((result) => {
+    try {
+      const result = await this.options.completeMultipartUpload({
+        key: this.key,
+        uploadId: this.uploadId,
+        parts: this.parts,
+      })
       this.options.onSuccess(result)
       this.options.onSuccess(result)
-    }, (err) => {
-      this._onError(err)
-    })
+    } catch (err) {
+      this.#onError(err)
+    }
   }
   }
 
 
-  _abortUpload () {
+  #abortUpload () {
     this.abortController.abort()
     this.abortController.abort()
 
 
     this.createdPromise.then(() => {
     this.createdPromise.then(() => {
@@ -383,7 +413,7 @@ class MultipartUploader {
     })
     })
   }
   }
 
 
-  _onError (err) {
+  #onError (err) {
     if (err && err.name === 'AbortError') {
     if (err && err.name === 'AbortError') {
       return
       return
     }
     }
@@ -394,9 +424,9 @@ class MultipartUploader {
   start () {
   start () {
     this.isPaused = false
     this.isPaused = false
     if (this.uploadId) {
     if (this.uploadId) {
-      this._resumeUpload()
+      this.#resumeUpload()
     } else {
     } else {
-      this._createUpload()
+      this.#createUpload()
     }
     }
   }
   }
 
 
@@ -413,7 +443,7 @@ class MultipartUploader {
 
 
     if (!really) return this.pause()
     if (!really) return this.pause()
 
 
-    this._abortUpload()
+    this.#abortUpload()
   }
   }
 }
 }
 
 

+ 5 - 5
packages/@uppy/aws-s3-multipart/src/index.js

@@ -31,7 +31,7 @@ module.exports = class AwsS3Multipart extends BasePlugin {
       retryDelays: [0, 1000, 3000, 5000],
       retryDelays: [0, 1000, 3000, 5000],
       createMultipartUpload: this.createMultipartUpload.bind(this),
       createMultipartUpload: this.createMultipartUpload.bind(this),
       listParts: this.listParts.bind(this),
       listParts: this.listParts.bind(this),
-      prepareUploadPart: this.prepareUploadPart.bind(this),
+      prepareUploadParts: this.prepareUploadParts.bind(this),
       abortMultipartUpload: this.abortMultipartUpload.bind(this),
       abortMultipartUpload: this.abortMultipartUpload.bind(this),
       completeMultipartUpload: this.completeMultipartUpload.bind(this),
       completeMultipartUpload: this.completeMultipartUpload.bind(this),
     }
     }
@@ -101,11 +101,11 @@ module.exports = class AwsS3Multipart extends BasePlugin {
       .then(assertServerError)
       .then(assertServerError)
   }
   }
 
 
-  prepareUploadPart (file, { key, uploadId, number }) {
-    this.assertHost('prepareUploadPart')
+  prepareUploadParts (file, { key, uploadId, partNumbers }) {
+    this.assertHost('prepareUploadParts')
 
 
     const filename = encodeURIComponent(key)
     const filename = encodeURIComponent(key)
-    return this.client.get(`s3/multipart/${uploadId}/${number}?key=${filename}`)
+    return this.client.get(`s3/multipart/${uploadId}/batch?key=${filename}&partNumbers=${partNumbers.join(',')}`)
       .then(assertServerError)
       .then(assertServerError)
   }
   }
 
 
@@ -191,7 +191,7 @@ module.exports = class AwsS3Multipart extends BasePlugin {
         // .bind to pass the file object to each handler.
         // .bind to pass the file object to each handler.
         createMultipartUpload: this.opts.createMultipartUpload.bind(this, file),
         createMultipartUpload: this.opts.createMultipartUpload.bind(this, file),
         listParts: this.opts.listParts.bind(this, file),
         listParts: this.opts.listParts.bind(this, file),
-        prepareUploadPart: this.opts.prepareUploadPart.bind(this, file),
+        prepareUploadParts: this.opts.prepareUploadParts.bind(this, file),
         completeMultipartUpload: this.opts.completeMultipartUpload.bind(this, file),
         completeMultipartUpload: this.opts.completeMultipartUpload.bind(this, file),
         abortMultipartUpload: this.opts.abortMultipartUpload.bind(this, file),
         abortMultipartUpload: this.opts.abortMultipartUpload.bind(this, file),
         getChunkSize: this.opts.getChunkSize ? this.opts.getChunkSize.bind(this) : null,
         getChunkSize: this.opts.getChunkSize ? this.opts.getChunkSize.bind(this) : null,

+ 164 - 0
packages/@uppy/aws-s3-multipart/src/index.test.js

@@ -0,0 +1,164 @@
+require('whatwg-fetch')
+const nock = require('nock')
+const Core = require('@uppy/core')
+const AwsS3Multipart = require('.')
+
+const KB = 1024
+const MB = KB * KB
+
+describe('AwsS3Multipart', () => {
+  beforeEach(() => nock.disableNetConnect())
+
+  it('Registers AwsS3Multipart upload plugin', () => {
+    const core = new Core()
+    core.use(AwsS3Multipart)
+
+    const pluginNames = core[Symbol.for('uppy test: getPlugins')]('uploader').map((plugin) => plugin.constructor.name)
+    expect(pluginNames).toContain('AwsS3Multipart')
+  })
+
+  describe('companionUrl assertion', () => {
+    it('Throws an error for main functions if configured without companionUrl', () => {
+      const core = new Core()
+      core.use(AwsS3Multipart)
+      const awsS3Multipart = core.getPlugin('AwsS3Multipart')
+
+      const err = 'Expected a `companionUrl` option'
+      const file = {}
+      const opts = {}
+
+      expect(() => awsS3Multipart.opts.createMultipartUpload(file)).toThrow(
+        err
+      )
+      expect(() => awsS3Multipart.opts.listParts(file, opts)).toThrow(err)
+      expect(() => awsS3Multipart.opts.completeMultipartUpload(file, opts)).toThrow(err)
+      expect(() => awsS3Multipart.opts.abortMultipartUpload(file, opts)).toThrow(err)
+      expect(() => awsS3Multipart.opts.prepareUploadParts(file, opts)).toThrow(err)
+    })
+  })
+
+  describe('without companionUrl (custom main functions)', () => {
+    let core
+    let awsS3Multipart
+
+    beforeEach(() => {
+      core = new Core()
+      core.use(AwsS3Multipart, {
+        createMultipartUpload: jest.fn(() => {
+          return {
+            uploadId: '6aeb1980f3fc7ce0b5454d25b71992',
+            key: 'test/upload/multitest.dat',
+          }
+        }),
+        completeMultipartUpload: jest.fn(() => Promise.resolve({ location: 'test' })),
+        abortMultipartUpload: jest.fn(),
+        prepareUploadParts: jest.fn(() => {
+          const presignedUrls = {}
+          const possiblePartNumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+          possiblePartNumbers.forEach((partNumber) => {
+            presignedUrls[
+              partNumber
+            ] = `https://bucket.s3.us-east-2.amazonaws.com/test/upload/multitest.dat?partNumber=${partNumber}&uploadId=6aeb1980f3fc7ce0b5454d25b71992&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIATEST%2F20210729%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20210729T014044Z&X-Amz-Expires=600&X-Amz-SignedHeaders=host&X-Amz-Signature=test`
+          })
+          return { presignedUrls }
+        }),
+      })
+      awsS3Multipart = core.getPlugin('AwsS3Multipart')
+    })
+
+    it('Calls the prepareUploadParts function totalChunks / limit times', (done) => {
+      const scope = nock(
+        'https://bucket.s3.us-east-2.amazonaws.com'
+      ).defaultReplyHeaders({
+        'access-control-allow-method': 'PUT',
+        'access-control-allow-origin': '*',
+        'access-control-expose-headers': 'ETag',
+      })
+      scope
+        .options((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '')
+      scope
+        .options((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '')
+      scope
+        .put((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '', { ETag: 'test1' })
+      scope
+        .put((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '', { ETag: 'test2' })
+
+      // 6MB file will give us 2 chunks, so there will be 2 PUT and 2 OPTIONS
+      // calls to the presigned URL from 1 prepareUploadParts calls
+      const fileSize = 5 * MB + 1 * MB
+      core.addFile({
+        source: 'jest',
+        name: 'multitest.dat',
+        type: 'application/octet-stream',
+        data: new File([Buffer.alloc(fileSize)], {
+          type: 'application/octet-stream',
+        }),
+      })
+      core.upload().then(() => {
+        expect(
+          awsS3Multipart.opts.prepareUploadParts.mock.calls.length
+        ).toEqual(1)
+        scope.done()
+        done()
+      })
+    })
+
+    it('Calls prepareUploadParts with a Math.ceil(limit / 2) minimum, instead of one at a time for the remaining chunks after the first limit batch', (done) => {
+      const scope = nock(
+        'https://bucket.s3.us-east-2.amazonaws.com'
+      ).defaultReplyHeaders({
+        'access-control-allow-method': 'PUT',
+        'access-control-allow-origin': '*',
+        'access-control-expose-headers': 'ETag',
+      })
+      scope
+        .options((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '')
+      scope
+        .put((uri) => uri.includes('test/upload/multitest.dat'))
+        .reply(200, '', { ETag: 'test' })
+      scope.persist()
+
+      // 50MB file will give us 10 chunks, so there will be 10 PUT and 10 OPTIONS
+      // calls to the presigned URL from 3 prepareUploadParts calls
+      //
+      // The first prepareUploadParts call will be for 5 parts, the second
+      // will be for 3 parts, the third will be for 2 parts.
+      const fileSize = 50 * MB
+      core.addFile({
+        source: 'jest',
+        name: 'multitest.dat',
+        type: 'application/octet-stream',
+        data: new File([Buffer.alloc(fileSize)], {
+          type: 'application/octet-stream',
+        }),
+      })
+      core.upload().then(() => {
+        expect(
+          awsS3Multipart.opts.prepareUploadParts.mock.calls.length
+        ).toEqual(3)
+        expect(awsS3Multipart.opts.prepareUploadParts.mock.calls[0][1].partNumbers).toEqual([1, 2, 3, 4, 5])
+        expect(awsS3Multipart.opts.prepareUploadParts.mock.calls[1][1].partNumbers).toEqual([6, 7, 8])
+        expect(awsS3Multipart.opts.prepareUploadParts.mock.calls[2][1].partNumbers).toEqual([9, 10])
+        const completeCall = awsS3Multipart.opts.completeMultipartUpload.mock.calls[0][1]
+        expect(completeCall.parts).toEqual([
+          { ETag: 'test', PartNumber: 1 },
+          { ETag: 'test', PartNumber: 2 },
+          { ETag: 'test', PartNumber: 3 },
+          { ETag: 'test', PartNumber: 4 },
+          { ETag: 'test', PartNumber: 5 },
+          { ETag: 'test', PartNumber: 6 },
+          { ETag: 'test', PartNumber: 7 },
+          { ETag: 'test', PartNumber: 8 },
+          { ETag: 'test', PartNumber: 9 },
+          { ETag: 'test', PartNumber: 10 },
+        ])
+        done()
+      })
+    })
+  })
+})

+ 3 - 3
packages/@uppy/aws-s3-multipart/types/index.d.ts

@@ -19,10 +19,10 @@ interface AwsS3MultipartOptions extends PluginOptions {
       file: UppyFile,
       file: UppyFile,
       opts: { uploadId: string; key: string }
       opts: { uploadId: string; key: string }
     ) => MaybePromise<AwsS3Part[]>
     ) => MaybePromise<AwsS3Part[]>
-    prepareUploadPart?: (
+    prepareUploadParts?: (
       file: UppyFile,
       file: UppyFile,
-      partData: { uploadId: string; key: string; body: Blob; number: number }
-    ) => MaybePromise<{ url: string, headers?: { [k: string]: string } }>
+      partData: { uploadId: string; key: string; partNumbers: Array<number> }
+    ) => MaybePromise<{ presignedUrls: { [k: number]: string }, headers?: { [k: string]: string } }>
     abortMultipartUpload?: (
     abortMultipartUpload?: (
       file: UppyFile,
       file: UppyFile,
       opts: { uploadId: string; key: string }
       opts: { uploadId: string; key: string }

+ 5 - 6
packages/@uppy/aws-s3-multipart/types/index.test-d.ts

@@ -17,13 +17,12 @@ import type { AwsS3Part } from '..'
       expectType<string>(opts.key)
       expectType<string>(opts.key)
       return []
       return []
     },
     },
-    prepareUploadPart (file, part) {
+    prepareUploadParts (file, partData) {
       expectType<UppyFile>(file)
       expectType<UppyFile>(file)
-      expectType<string>(part.uploadId)
-      expectType<string>(part.key)
-      expectType<Blob>(part.body)
-      expectType<number>(part.number)
-      return { url: '' }
+      expectType<string>(partData.uploadId)
+      expectType<string>(partData.key)
+      expectType<Array<number>>(partData.partNumbers)
+      return { presignedUrls: {} }
     },
     },
     abortMultipartUpload (file, opts) {
     abortMultipartUpload (file, opts) {
       expectType<UppyFile>(file)
       expectType<UppyFile>(file)

+ 1 - 1
packages/@uppy/aws-s3/src/index.test.js

@@ -20,7 +20,7 @@ describe('AwsS3', () => {
       expect(awsS3.opts.getUploadParameters).toThrow()
       expect(awsS3.opts.getUploadParameters).toThrow()
     })
     })
 
 
-    it('Does not throw an error with campanionUrl configured', () => {
+    it('Does not throw an error with companionUrl configured', () => {
       const core = new Core()
       const core = new Core()
       core.use(AwsS3, { companionUrl: 'https://uppy-companion.myapp.com/' })
       core.use(AwsS3, { companionUrl: 'https://uppy-companion.myapp.com/' })
       const awsS3 = core.getPlugin('AwsS3')
       const awsS3 = core.getPlugin('AwsS3')

+ 56 - 0
packages/@uppy/companion/src/server/controllers/s3.js

@@ -204,6 +204,61 @@ module.exports = function s3 (config) {
     })
     })
   }
   }
 
 
+  /**
+   * Get parameters for uploading a batch of parts.
+   *
+   * Expected URL parameters:
+   *  - uploadId - The uploadId returned from `createMultipartUpload`.
+   * Expected query parameters:
+   *  - key - The object key in the S3 bucket.
+   *  - partNumbers - A comma separated list of part numbers representing
+   *                  indices in the file (1-10000).
+   * Response JSON:
+   *  - presignedUrls - The URLs to upload to, including signed query parameters,
+   *                    in an object mapped to part numbers.
+   */
+  function batchSignPartsUpload (req, res, next) {
+    // @ts-ignore The `companion` property is added by middleware before reaching here.
+    const client = req.companion.s3Client
+    const { uploadId } = req.params
+    const { key, partNumbers } = req.query
+
+    if (typeof key !== 'string') {
+      return res.status(400).json({ error: 's3: the object key must be passed as a query parameter. For example: "?key=abc.jpg"' })
+    }
+
+    if (typeof partNumbers !== 'string') {
+      return res.status(400).json({ error: 's3: the part numbers must be passed as a comma separated query parameter. For example: "?partNumbers=4,6,7,21"' })
+    }
+
+    const partNumbersArray = partNumbers.split(',')
+    partNumbersArray.forEach((partNumber) => {
+      if (!parseInt(partNumber, 10)) {
+        return res.status(400).json({ error: 's3: the part numbers must be a number between 1 and 10000.' })
+      }
+    })
+
+    Promise.all(
+      partNumbersArray.map((partNumber) => {
+        return client.getSignedUrlPromise('uploadPart', {
+          Bucket: config.bucket,
+          Key: key,
+          UploadId: uploadId,
+          PartNumber: partNumber,
+          Body: '',
+          Expires: config.expires,
+        })
+      })
+    ).then((urls) => {
+      const presignedUrls = Object.fromEntries(
+        partNumbersArray.map((partNumber, index) => [partNumber, urls[index]])
+      )
+      res.json({ presignedUrls })
+    }).catch((err) => {
+      next(err)
+    })
+  }
+
   /**
   /**
    * Abort a multipart upload, deleting already uploaded parts.
    * Abort a multipart upload, deleting already uploaded parts.
    *
    *
@@ -287,6 +342,7 @@ module.exports = function s3 (config) {
     .get('/multipart/:uploadId', getUploadedParts)
     .get('/multipart/:uploadId', getUploadedParts)
     .get('/multipart/:uploadId/:partNumber', signPartUpload)
     .get('/multipart/:uploadId/:partNumber', signPartUpload)
     .post('/multipart/:uploadId/complete', completeMultipartUpload)
     .post('/multipart/:uploadId/complete', completeMultipartUpload)
+    .get(`/multipart/:uploadId/batch`, batchSignPartsUpload)
     .delete('/multipart/:uploadId', abortMultipartUpload)
     .delete('/multipart/:uploadId', abortMultipartUpload)
 }
 }
 
 

+ 16 - 18
website/src/docs/aws-s3-multipart.md

@@ -39,13 +39,15 @@ const { AwsS3Multipart } = Uppy
 
 
 The `@uppy/aws-s3-multipart` plugin has the following configurable options:
 The `@uppy/aws-s3-multipart` plugin has the following configurable options:
 
 
-### `limit: 0`
+### `limit: 5`
 
 
-The maximum amount of chunks to upload simultaneously. Set to `0` to disable limiting.
The maximum amount of chunks to upload simultaneously. This affects [`prepareUploadParts()`](#prepareUploadParts-file-partData) as well; after the initial batch of `limit` parts is presigned, a minimum of `limit / 2` rounded up will be presigned at a time. You should set the limit carefully. Setting it too high could cause issues where the presigned URLs begin to expire before the chunks they are for start uploading. Setting it too low will result in many more round trips to your server (or Companion) than necessary to presign URLs. If the default chunk size of 5MB is used, a `limit` between 5 and 15 is recommended.
+
+For example, with a 50MB file and a `limit` of 5 we end up with 10 chunks. 5 of these are presigned in one batch, then 3, then 2, for a total of 3 round trips to the server via [`prepareUploadParts()`](#prepareUploadParts-file-partData) and 10 requests sent to AWS via the presigned URLs generated.
 
 
 ### `retryDelays: [0, 1000, 3000, 5000]`
 ### `retryDelays: [0, 1000, 3000, 5000]`
 
 
-When uploading a chunk fails, automatically try again after the millisecond intervals specified in this array. By default, we first retry instantly; if that fails, we retry after 1 second; if that fails, we retry after 3 seconds, etc.
+When uploading a chunk to S3 using a presigned URL fails, automatically try again after the millisecond intervals specified in this array. By default, we first retry instantly; if that fails, we retry after 1 second; if that fails, we retry after 3 seconds, etc.
 
 
 Set to `null` to disable automatic retries, and fail instantly if any chunk fails to upload.
 Set to `null` to disable automatic retries, and fail instantly if any chunk fails to upload.
 
 
@@ -69,7 +71,7 @@ This option correlates to the [RequestCredentials value](https://developer.mozil
 
 
 A function that returns the minimum chunk size to use when uploading the given file.
 A function that returns the minimum chunk size to use when uploading the given file.
 
 
-The S3 Multipart plugin uploads files in chunks. Each chunk requires a signing request ([`prepareUploadPart()`](#prepareUploadPart-file-partData)). To reduce the amount of requests for large files, you can choose a larger chunk size, at the cost of having to re-upload more data if one chunk fails to upload.
+The S3 Multipart plugin uploads files in chunks. Chunks are sent in batches to have presigned URLs generated via [`prepareUploadParts()`](#prepareUploadParts-file-partData). To reduce the amount of requests for large files, you can choose a larger chunk size, at the cost of having to re-upload more data if one chunk fails to upload.
 
 
 S3 requires a minimum chunk size of 5MB, and supports at most 10,000 chunks per multipart upload. If `getChunkSize()` returns a size that's too small, Uppy will increase it to S3's minimum requirements.
 S3 requires a minimum chunk size of 5MB, and supports at most 10,000 chunks per multipart upload. If `getChunkSize()` returns a size that's too small, Uppy will increase it to S3's minimum requirements.
 
 
@@ -99,29 +101,25 @@ Return a Promise for an array of S3 Part objects, as returned by the S3 Multipar
 
 
 The default implementation calls out to Companion's S3 signing endpoints.
 The default implementation calls out to Companion's S3 signing endpoints.
 
 
-### `prepareUploadPart(file, partData)`
+### `prepareUploadParts(file, partData)`
 
 
-A function that generates a signed URL to upload a single part. Receives the `file` object from Uppy's state. The `partData` argument is an object with keys:
+A function that generates a batch of signed URLs for the specified part numbers. Receives the `file` object from Uppy's state. The `partData` argument is an object with keys:
 
 
  - `uploadId` - The UploadID of this Multipart upload.
  - `uploadId` - The UploadID of this Multipart upload.
  - `key` - The object key in the S3 bucket.
  - `key` - The object key in the S3 bucket.
- - `body` - A [`Blob`](https://developer.mozilla.org/en-US/docs/Web/API/Blob) of this part's contents.
- - `number` - The index of this part in the file (`PartNumber` in S3 terminology).
+ - `partNumbers` - An array of indices of the parts in the file (`PartNumber` in S3 terminology). Note that part numbers are _not_ zero-based.
 
 
 Return a Promise for an object with keys:
 Return a Promise for an object with keys:
 
 
- - `url` - The presigned URL to upload a part. This can be generated on the server using the S3 SDK like so:
+ - `presignedUrls` - A JavaScript object with the part numbers as keys and the presigned URL for each part as the value. An example of what the return value should look like:
 
 
-   <!-- eslint-disable node/handle-callback-err -->
    ```js
    ```js
-   sdkInstance.getSignedUrl('uploadPart', {
-     Bucket: 'target',
-     Key: partData.key,
-     UploadId: partData.uploadId,
-     PartNumber: partData.number,
-     Body: '', // Empty, because it is uploaded later
-     Expires: 5 * 60,
-   }, (err, url) => { /* there's the url! */ })
+   // for partNumbers [1, 2, 3]
+   return {
+     1: 'https://bucket.region.amazonaws.com/path/to/file.jpg?partNumber=1&...',
+     2: 'https://bucket.region.amazonaws.com/path/to/file.jpg?partNumber=2&...',
+     3: 'https://bucket.region.amazonaws.com/path/to/file.jpg?partNumber=3&...',
+   }
    ```
    ```
  - `headers` - **(Optional)** Custom headers that should be sent to the S3 presigned URL.
  - `headers` - **(Optional)** Custom headers that should be sent to the S3 presigned URL.