From a46b26b3c621cb1da62f4703dc5c1a369ab5065f Mon Sep 17 00:00:00 2001
From: mohemohe
Date: Fri, 26 Apr 2019 17:16:42 +0900
Subject: [PATCH] initial

---
 .gitignore   |  19 +++
 Gopkg.lock   |  67 ++++++++++
 Gopkg.toml   |  33 +++++
 LICENSE      |  21 +++
 s3fs.go      | 369 +++++++++++++++++++++++++++++++++++++++++++++++++++
 s3fs_test.go |   1 +
 6 files changed, 510 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 Gopkg.lock
 create mode 100644 Gopkg.toml
 create mode 100644 LICENSE
 create mode 100644 s3fs.go
 create mode 100644 s3fs_test.go

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5fc4365
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,19 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# Jetbrains IDE
+.idea
+
+vendor
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 0000000..9c4f37d
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,67 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  digest = "1:f2fa1fb519c95bb0a00f0104cd3c5899e29c21b3a0e0baa783e333f35d08becf"
+  name = "github.com/aws/aws-sdk-go"
+  packages = [
+    "aws",
+    "aws/awserr",
+    "aws/awsutil",
+    "aws/client",
+    "aws/client/metadata",
+    "aws/corehandlers",
+    "aws/credentials",
+    "aws/credentials/ec2rolecreds",
+    "aws/credentials/endpointcreds",
+    "aws/credentials/stscreds",
+    "aws/csm",
+    "aws/defaults",
+    "aws/ec2metadata",
+    "aws/endpoints",
+    "aws/request",
+    "aws/session",
+    "aws/signer/v4",
+    "internal/ini",
+    "internal/s3err",
+    "internal/sdkio",
+    "internal/sdkrand",
+    "internal/sdkuri",
+    "internal/shareddefaults",
+    "private/protocol",
+    "private/protocol/eventstream",
+    "private/protocol/eventstream/eventstreamapi",
+    "private/protocol/query",
+    "private/protocol/query/queryutil",
+    "private/protocol/rest",
+    "private/protocol/restxml",
+    "private/protocol/xml/xmlutil",
+    "service/s3",
+    "service/s3/s3iface",
+    "service/s3/s3manager",
+    "service/sts",
+  ]
+  pruneopts = "UT"
+  revision = "6b08b085a17ae5a6040fdb67971812696569dced"
+  version = "v1.15.89"
+
+[[projects]]
+  digest = "1:e22af8c7518e1eab6f2eab2b7d7558927f816262586cd6ed9f349c97a6c285c4"
+  name = "github.com/jmespath/go-jmespath"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "0b12d6b5"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  input-imports = [
+    "github.com/aws/aws-sdk-go/aws",
+    "github.com/aws/aws-sdk-go/aws/credentials",
+    "github.com/aws/aws-sdk-go/aws/endpoints",
+    "github.com/aws/aws-sdk-go/aws/session",
+    "github.com/aws/aws-sdk-go/service/s3",
+    "github.com/aws/aws-sdk-go/service/s3/s3manager",
+  ]
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 0000000..4b0a058
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,33 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#   name = "github.com/x/y"
+#   version = "2.4.0"
+#
+# [prune]
+#   non-go = false
+#   go-tests = true
+#   unused-packages = true
+
+[prune]
+  go-tests = true
+  unused-packages = true
+
+[[constraint]]
+  name = "github.com/aws/aws-sdk-go"
+  version = "1.15.89"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..ef7a04c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+Copyright (c) 2019 mobilus corp.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/s3fs.go b/s3fs.go
new file mode 100644
index 0000000..8fc73bb
--- /dev/null
+++ b/s3fs.go
@@ -0,0 +1,369 @@
+package s3fs
+
+import (
+	"errors"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"io"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+type (
+	S3FS struct {
+		sess   *session.Session
+		s3     *s3.S3
+		config *Config
+	}
+	Config struct {
+		NameSpace       string
+		Domain          string
+		Region          string
+		Bucket          string
+		EnableIAMAuth   bool
+		AccessKeyID     string
+		AccessSecretKey string
+	}
+	FileInfo struct {
+		Name string `json:"name"`
+		Path string `json:"path"`
+		Type int    `json:"type"`
+		Size int64  `json:"size,omitempty"`
+		//Raw  interface{} `json:"raw"`
+	}
+	CopyInfo struct {
+		Src  string
+		Dest string
+	}
+)
+
+const (
+	Directory int = 1 + iota
+	File
+)
+
+func New(config *Config) *S3FS {
+	var sess *session.Session
+	if config.Region == "" {
+		config.Region = endpoints.ApNortheast1RegionID
+	}
+	if config.EnableIAMAuth {
+		sess = session.Must(session.NewSessionWithOptions(session.Options{
+			Config: aws.Config{
+				Region:      aws.String(config.Region),
+				Credentials: credentials.NewStaticCredentials(config.AccessKeyID, config.AccessSecretKey, ""),
+			},
+			SharedConfigState: session.SharedConfigDisable,
+		}))
+	} else {
+		sess = session.Must(session.NewSessionWithOptions(session.Options{
+			Config: aws.Config{
+				Region: aws.String(config.Region),
+			},
+		}))
+	}
+	serv := s3.New(sess)
+
+	return &S3FS{
+		sess,
+		serv,
+		config,
+	}
+}
+
+func (this *S3FS) List(key string) *[]FileInfo {
+	fileList := make([]FileInfo, 0)
+	var continuationToken *string
+	for {
+		list, err := this.s3.ListObjectsV2(&s3.ListObjectsV2Input{
+			Bucket:            aws.String(this.config.Bucket),
+			Prefix:            aws.String(this.getKey(key)),
+			Delimiter:         aws.String("/"),
+			ContinuationToken: continuationToken,
+		})
+		if err != nil {
+			return nil
+		}
+		for _, val := range list.CommonPrefixes {
+			if *val.Prefix == this.getKey("") {
+				continue
+			}
+
+			k := strings.Split(*val.Prefix, "/")
+			name := k[len(k)-2]
+			path := "/" + strings.TrimPrefix(*val.Prefix, this.getKey(""))
+			fileInfo := FileInfo{
+				Type: Directory,
+				Name: name,
+				Path: path,
+				//Raw:  val,
+			}
+			fileList = append(fileList, fileInfo)
+		}
+		for _, val := range list.Contents {
+			if *val.Key == this.getKey("") {
+				continue
+			}
+			if *val.Key == this.getKey(key) {
+				continue
+			}
+
+			k := strings.Split(*val.Key, "/")
+			name := k[len(k)-1]
+			path := "/" + strings.TrimPrefix(*val.Key, this.getKey(""))
+			fileInfo := FileInfo{
+				Type: File,
+				Name: name,
+				Path: path,
+				Size: *val.Size,
+				//Raw:  val,
+			}
+			fileList = append(fileList, fileInfo)
+		}
+
+		if *list.IsTruncated {
+			continuationToken = list.NextContinuationToken
+		} else {
+			break
+		}
+	}
+
+	return &fileList
+}
+
+func (this *S3FS) MkDir(key string) error {
+	if !strings.HasSuffix(key, "/") {
+		key += "/"
+	}
+	_, err := this.s3.PutObject(&s3.PutObjectInput{
+		Bucket: aws.String(this.config.Bucket),
+		Key:    aws.String(this.getKey(key)),
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) Get(key string) (*io.ReadCloser, error) {
+	output, err := this.s3.GetObject(&s3.GetObjectInput{
+		Bucket: aws.String(this.config.Bucket),
+		Key:    aws.String(this.getKey(key)),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &output.Body, nil
+}
+
+func (this *S3FS) Put(key string, body io.ReadCloser, contentType string) error {
+	uploader := s3manager.NewUploader(this.sess)
+	_, err := uploader.Upload(&s3manager.UploadInput{
+		Bucket:      aws.String(this.config.Bucket),
+		Key:         aws.String(this.getKey(key)),
+		Body:        body,
+		ContentType: aws.String(contentType),
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) Delete(key string) error {
+	if strings.HasSuffix(key, "/") {
+		return this.BulkDelete(key)
+	} else {
+		return this.SingleDelete(key)
+	}
+}
+
+func (this *S3FS) SingleDelete(key string) error {
+	_, err := this.s3.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: aws.String(this.config.Bucket),
+		Key:    aws.String(this.getKey(key)),
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) BulkDelete(prefix string) error {
+	var continuationToken *string
+	for {
+		list, err := this.s3.ListObjectsV2(&s3.ListObjectsV2Input{
+			Bucket:            aws.String(this.config.Bucket),
+			Prefix:            aws.String(this.getKey(prefix)),
+			ContinuationToken: continuationToken,
+		})
+		if err != nil {
+			return err
+		}
+
+		objects := []*s3.ObjectIdentifier{}
+		for _, content := range list.Contents {
+			objects = append(objects, &s3.ObjectIdentifier{
+				Key: content.Key,
+			})
+		}
+
+		_, err = this.s3.DeleteObjects(&s3.DeleteObjectsInput{
+			Bucket: aws.String(this.config.Bucket),
+			Delete: &s3.Delete{
+				Objects: objects,
+			},
+		})
+
+		if err != nil {
+			return err
+		}
+		if *list.IsTruncated {
+			continuationToken = list.NextContinuationToken
+		} else {
+			return nil
+		}
+	}
+}
+
+func (this *S3FS) Copy(src string, dest string, metadata *map[string]*string) error {
+	if strings.HasSuffix(src, "/") {
+		return this.BulkCopy(src, dest, metadata)
+	} else {
+		return this.SingleCopy(src, dest, metadata)
+	}
+}
+
+func (this *S3FS) SingleCopy(src string, dest string, metadata *map[string]*string) error {
+	var err error
+	if metadata == nil {
+		_, err = this.s3.CopyObject(&s3.CopyObjectInput{
+			Bucket:     aws.String(this.config.Bucket),
+			CopySource: aws.String(url.QueryEscape(this.config.Bucket + "/" + this.getKey(src))),
+			Key:        aws.String(this.getKey(dest)),
+		})
+	} else {
+		_, err = this.s3.CopyObject(&s3.CopyObjectInput{
+			Bucket:            aws.String(this.config.Bucket),
+			CopySource:        aws.String(url.QueryEscape(this.config.Bucket + "/" + this.getKey(src))),
+			Key:               aws.String(this.getKey(dest)),
+			Metadata:          *metadata,
+			MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
+		})
+	}
+
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) BulkCopy(prefix string, dest string, metadata *map[string]*string) error {
+	var continuationToken *string
+	for {
+		list, err := this.s3.ListObjectsV2(&s3.ListObjectsV2Input{
+			Bucket:            aws.String(this.config.Bucket),
+			Prefix:            aws.String(this.getKey(prefix)),
+			ContinuationToken: continuationToken,
+		})
+		if err != nil {
+			return err
+		}
+
+		k := strings.Split(prefix, "/")
+		currentKey := k[len(k)-2]
+		baseKey := strings.TrimSuffix(prefix, currentKey+"/")
+
+		var result error
+		var mu sync.Mutex
+		wg := &sync.WaitGroup{}
+		for _, content := range list.Contents {
+			wg.Add(1)
+			go func(c s3.Object) {
+				defer wg.Done()
+				srcRel := strings.Replace(*c.Key, this.config.Domain, "", 1)
+				destRel := strings.Replace(dest, this.config.Domain, "", 1)
+				targetPath := destRel + strings.TrimPrefix(srcRel, baseKey)
+
+				var e error
+				if strings.HasSuffix(srcRel, "/") {
+					e = this.MkDir(targetPath)
+				} else {
+					e = this.SingleCopy(srcRel, targetPath, metadata)
+				}
+				if e != nil {
+					mu.Lock()
+					result = e
+					mu.Unlock()
+				}
+			}(*content)
+		}
+		wg.Wait()
+
+		if result != nil {
+			return errors.New("some files failed")
+		}
+
+		if *list.IsTruncated {
+			continuationToken = list.NextContinuationToken
+		} else {
+			return nil
+		}
+	}
+}
+
+func (this *S3FS) Move(src string, dest string) error {
+	if strings.HasSuffix(src, "/") {
+		return this.BulkMove(src, dest)
+	} else {
+		return this.SingleMove(src, dest)
+	}
+}
+
+func (this *S3FS) SingleMove(src string, dest string) error {
+	if err := this.Copy(src, dest, nil); err != nil {
+		return err
+	}
+	if err := this.Delete(src); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) BulkMove(prefix string, dest string) error {
+	if err := this.BulkCopy(prefix, dest, nil); err != nil {
+		return err
+	}
+	if err := this.BulkDelete(prefix); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (this *S3FS) Info(key string) *s3.HeadObjectOutput {
+	result, _ := this.s3.HeadObject(&s3.HeadObjectInput{
+		Bucket: aws.String(this.config.Bucket),
+		Key:    aws.String(this.getKey(key)),
+	})
+	return result
+}
+
+func (this *S3FS) getKey(key string) string {
+	k := ""
+	if this.config.NameSpace != "" {
+		k += this.config.NameSpace + "/"
+	}
+	if this.config.Domain != "" {
+		k += this.config.Domain + "/"
+	}
+	if strings.HasPrefix(key, "/") {
+		key = strings.TrimPrefix(key, "/")
+	}
+
+	return k + key
+}
diff --git a/s3fs_test.go b/s3fs_test.go
new file mode 100644
index 0000000..8fc1666
--- /dev/null
+++ b/s3fs_test.go
@@ -0,0 +1 @@
+package s3fs