diff --git a/client.go b/client.go
index cb59f03..5f90540 100644
--- a/client.go
+++ b/client.go
@@ -84,6 +84,39 @@ func New(endpoint, accessKey, secretKey, bucket, region string) (*S3, error) {
 	}, nil
 }
 
+// CreateBucket uses the client to create an S3 bucket
+func (s *S3) CreateBucket() error {
+	ctx := context.Background()
+	_, err := s.Client.CreateBucket(ctx, &s3.CreateBucketInput{
+		Bucket: aws.String(s.Bucket),
+	})
+	return err
+}
+
+// DeleteBucket removes all objects from a bucket and then deletes the bucket itself
+func (s *S3) DeleteBucket() error {
+	// List all objects in the bucket
+	objects, err := s.List("")
+	if err != nil {
+		return err
+	}
+
+	// Delete all objects
+	for _, key := range objects {
+		if err := s.Delete(key); err != nil {
+			return err
+		}
+	}
+
+	// Delete the bucket
+	input := &s3.DeleteBucketInput{
+		Bucket: aws.String(s.Bucket),
+	}
+
+	_, err = s.Client.DeleteBucket(context.Background(), input)
+	return err
+}
+
 // Fetch Downloads a file from an S3 bucket and returns its contents as a byte array.
 func (s *S3) Fetch(fileName string) ([]byte, error) {
 	params := &s3.GetObjectInput{
@@ -117,7 +150,7 @@ func (s *S3) Put(key string, body *os.File) error {
 	var partMiBs int64 = 100
 	maxPartSize := partMiBs * 1024 * 1024
 
-	// If the file is greater than 100MB then we'll do a multipart upload
+	// If the file is greater than 100MB, then we'll do a multipart upload
 	if fi.size > maxPartSize {
 		uploader := manager.NewUploader(s.Client, func(u *manager.Uploader) {
 			u.PartSize = maxPartSize
@@ -177,3 +210,14 @@ func (s *S3) List(prefix string) ([]string, error) {
 
 	return contents, nil
 }
+
+// Delete removes a single object from an S3 bucket
+func (s *S3) Delete(key string) error {
+	input := &s3.DeleteObjectInput{
+		Bucket: aws.String(s.Bucket),
+		Key:    aws.String(key),
+	}
+
+	_, err := s.Client.DeleteObject(context.Background(), input)
+	return err
+}
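
The new CreateBucket, DeleteBucket, and Delete methods complete the bucket lifecycle alongside the existing Put, Fetch, and List helpers. Below is a minimal usage sketch under stated assumptions: the import path (example.com/project/s3client), the endpoint, credentials, and key names are placeholders, since the diff does not show the package declaration or module path.

package main

import (
	"log"
	"os"

	// Hypothetical import path; the real module path and package name for
	// client.go are not shown in this diff.
	s3client "example.com/project/s3client"
)

func main() {
	// New's signature is taken from the hunk header above; the argument
	// values here are placeholders.
	store, err := s3client.New("https://s3.amazonaws.com", "ACCESS_KEY", "SECRET_KEY", "my-bucket", "us-east-1")
	if err != nil {
		log.Fatal(err)
	}

	// Create the bucket configured on the client.
	if err := store.CreateBucket(); err != nil {
		log.Fatal(err)
	}

	// Upload a local file as an object.
	f, err := os.Open("report.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := store.Put("reports/report.csv", f); err != nil {
		log.Fatal(err)
	}

	// List everything under the "reports/" prefix and read each object back.
	keys, err := store.List("reports/")
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range keys {
		data, err := store.Fetch(key)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("%s: %d bytes", key, len(data))
	}

	// DeleteBucket deletes every object first, so no separate Delete calls
	// are needed before tearing the bucket down.
	if err := store.DeleteBucket(); err != nil {
		log.Fatal(err)
	}
}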