feat(medusa): Add streaming to Minio fileservice (#1619)

Philip Korsholm
2022-07-10 12:34:28 +02:00
committed by GitHub
parent 9fa4238ee4
commit 812ba65d8f
5 changed files with 138 additions and 40 deletions


@@ -12,5 +12,27 @@ Learn more about how you can use this plugin in the [documentation](https://docs
bucket: "test",
access_key_id: "YOUR-ACCESS-KEY",
secret_access_key: "YOUR-SECRET-KEY",
// private bucket configuration
private_bucket: 'private-bucket',
private_access_key_id: "YOUR-ACCESS-KEY",
private_secret_access_key: "YOUR-SECRET-KEY",
}
```
Optionally, a `download_url_duration` option can be specified to change how long presigned download links remain valid. The duration is given in seconds (default: 60 seconds).
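For example, to keep presigned links valid for five minutes (a sketch; the plugin entry shown for `medusa-config.js` and the package name are assumptions):
```
{
  resolve: `medusa-file-minio`,
  options: {
    // ...endpoint, bucket, and credentials as above
    download_url_duration: 300, // links expire after 300 seconds
  },
}
```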
## Configuring a private bucket in Minio
Certain operations in Medusa, such as data import and export, require a separate, protected bucket. The plugin raises an error if import or export operations are invoked without the correct setup.
Configuring Minio for these operations requires one additional option: `private_bucket`, the name given to the protected bucket in Minio.
Optionally, separate credentials can be used to access the private bucket by configuring the following options:
```
private_access_key_id: "YOUR-ACCESS-KEY",
private_secret_access_key: "YOUR-SECRET-KEY",
```
If no separate access key is given, the same credentials are used for both the `bucket` and the `private_bucket`.
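Putting it together, a minimal private-bucket setup could look like this (a sketch; the package name and endpoint value are assumptions, and the shared credentials are reused for the private bucket):
```
{
  resolve: `medusa-file-minio`,
  options: {
    endpoint: "http://localhost:9000",
    bucket: "test",
    private_bucket: "private-bucket",
    access_key_id: "YOUR-ACCESS-KEY",
    secret_access_key: "YOUR-SECRET-KEY",
  },
}
```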


@@ -1,36 +1,39 @@
import stream from "stream"
import aws from "aws-sdk"
import fs from "fs"
import { parse } from "path"
import { AbstractFileService } from "@medusajs/medusa"
import { MedusaError } from "medusa-core-utils"
class MinioService extends AbstractFileService {
  constructor({}, options) {
    super({}, options)
    this.bucket_ = options.bucket
    this.accessKeyId_ = options.access_key_id
    this.secretAccessKey_ = options.secret_access_key
    this.private_bucket_ = options.private_bucket
    this.private_access_key_id_ =
      options.private_access_key_id ?? this.accessKeyId_
    this.private_secret_access_key_ =
      options.private_secret_access_key ?? this.secretAccessKey_
    this.endpoint_ = options.endpoint
    this.s3ForcePathStyle_ = true
    this.signatureVersion_ = "v4"
    this.downloadUrlDuration = options.download_url_duration ?? 60 // 60 seconds
  }
  upload(file) {
    this.updateAwsConfig_()

    // Suffix the filename with a timestamp to avoid key collisions
    const parsedFilename = parse(file.originalname)
    const fileKey = `${parsedFilename.name}-${Date.now()}${parsedFilename.ext}`

    const s3 = new aws.S3()
    const params = {
      ACL: "public-read",
      Bucket: this.bucket_,
      Body: fs.createReadStream(file.path),
      Key: fileKey,
    }

    return new Promise((resolve, reject) => {
@@ -46,43 +49,113 @@ class MinioService extends AbstractFileService {
    })
  }
  async delete(file) {
    this.updateAwsConfig_()

    const s3 = new aws.S3()
    const params = {
      Key: `${file}`,
    }

    // Remove the object from the public bucket, and from the private
    // bucket as well when one is configured
    const deletions = [
      s3.deleteObject({ ...params, Bucket: this.bucket_ }).promise(),
    ]
    if (this.private_bucket_) {
      deletions.push(
        s3.deleteObject({ ...params, Bucket: this.private_bucket_ }).promise()
      )
    }

    return await Promise.all(deletions)
  }
  async getUploadStreamDescriptor({ usePrivateBucket = true, ...fileData }) {
    this.validatePrivateBucketConfiguration_(usePrivateBucket)
    this.updateAwsConfig_(usePrivateBucket)

    const pass = new stream.PassThrough()
    const fileKey = `${fileData.name}.${fileData.ext}`
    const params = {
      Bucket: usePrivateBucket ? this.private_bucket_ : this.bucket_,
      Body: pass,
      Key: fileKey,
    }

    const s3 = new aws.S3()
    return {
      writeStream: pass,
      promise: s3.upload(params).promise(),
      // Path-style URL, since s3ForcePathStyle is enabled for Minio
      url: `${this.endpoint_}/${params.Bucket}/${fileKey}`,
      fileKey,
    }
  }
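  // Usage sketch (illustrative, not part of the commit; `minioService` and
  // `csvReadable` are hypothetical): pipe data into `writeStream` and await
  // `promise` to know when the object has been stored.
  //
  //   const { writeStream, promise, fileKey } =
  //     await minioService.getUploadStreamDescriptor({ name: "product-export", ext: "csv" })
  //   csvReadable.pipe(writeStream)
  //   await promise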
  async getDownloadStream({ usePrivateBucket = true, ...fileData }) {
    this.validatePrivateBucketConfiguration_(usePrivateBucket)
    this.updateAwsConfig_(usePrivateBucket)

    const s3 = new aws.S3()
    const params = {
      Bucket: usePrivateBucket ? this.private_bucket_ : this.bucket_,
      Key: `${fileData.fileKey}`,
    }

    return s3.getObject(params).createReadStream()
  }
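  // Usage sketch (illustrative; `minioService` and the Express-style `res`
  // are hypothetical): the returned readable can be piped straight through.
  //
  //   const readStream = await minioService.getDownloadStream({ fileKey: "product-export.csv" })
  //   readStream.pipe(res)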
  async getPresignedDownloadUrl({ usePrivateBucket = true, ...fileData }) {
    this.validatePrivateBucketConfiguration_(usePrivateBucket)
    this.updateAwsConfig_(usePrivateBucket, {
      signatureVersion: "v4",
    })

    const s3 = new aws.S3()
    const params = {
      Bucket: usePrivateBucket ? this.private_bucket_ : this.bucket_,
      Key: `${fileData.fileKey}`,
      Expires: this.downloadUrlDuration,
    }

    return await s3.getSignedUrlPromise("getObject", params)
  }
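  // Usage sketch (illustrative; `minioService` is hypothetical): the returned
  // URL can be shared and stops working after `download_url_duration` seconds.
  //
  //   const url = await minioService.getPresignedDownloadUrl({ fileKey: "product-export.csv" })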
  validatePrivateBucketConfiguration_(usePrivateBucket) {
    if (usePrivateBucket && !this.private_bucket_) {
      throw new MedusaError(
        MedusaError.Types.INVALID_CONFIGURATION,
        "Private bucket is not configured"
      )
    }
  }
  updateAwsConfig_(usePrivateBucket = false, additionalConfiguration = {}) {
    aws.config.setPromisesDependency(null)
    aws.config.update(
      {
        accessKeyId: usePrivateBucket
          ? this.private_access_key_id_
          : this.accessKeyId_,
        secretAccessKey: usePrivateBucket
          ? this.private_secret_access_key_
          : this.secretAccessKey_,
        endpoint: this.endpoint_,
        s3ForcePathStyle: this.s3ForcePathStyle_,
        signatureVersion: this.signatureVersion_,
        ...additionalConfiguration,
      },
      true
    )
  }
}