@@ -0,0 +1,68 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import (
+	"context"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/models/migrations"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
+
+	"github.com/urfave/cli"
+)
+
+// CmdMigrateStorage represents the available migrate-storage sub-command.
+var CmdMigrateStorage = cli.Command{
+	Name:        "migrate-storage",
+	Usage:       "Migrate the storage",
+	Description: "This is a command for migrating storage.",
+	Action:      runMigrateStorage,
+	Flags: []cli.Flag{
+		cli.StringFlag{Name: "type", Usage: "Type of files to migrate; currently only 'attachments' is supported"},
+		cli.StringFlag{Name: "dst", Usage: "Destination directory for the new local storage"},
+	},
+}
+
+func migrateAttachments(dstStorage storage.ObjectStorage) error {
+	return models.IterateAttachment(func(attach *models.Attachment) error {
+		_, err := storage.Copy(dstStorage, attach.UUID, storage.Attachments, attach.RelativePath())
+		return err
+	})
+}
+
+func runMigrateStorage(ctx *cli.Context) error {
+	if err := initDB(); err != nil {
+		return err
+	}
+
+	log.Trace("AppPath: %s", setting.AppPath)
+	log.Trace("AppWorkPath: %s", setting.AppWorkPath)
+	log.Trace("Custom path: %s", setting.CustomPath)
+	log.Trace("Log path: %s", setting.LogRootPath)
+	setting.InitDBConfig()
+
+	if err := models.NewEngine(context.Background(), migrations.Migrate); err != nil {
+		log.Fatal("Failed to initialize ORM engine: %v", err)
+		return err
+	}
+
+	tp := ctx.String("type")
+
+	// TODO: init setting
+	if err := storage.Init(); err != nil {
+		return err
+	}
+
+	switch tp {
+	case "attachments":
+		dstStorage, err := storage.NewLocalStorage(ctx.String("dst"))
+		if err != nil {
+			return err
+		}
+		return migrateAttachments(dstStorage)
+	}
+
+	return nil
+}
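// Usage sketch (assuming the "type" and "dst" flags read via ctx.String above
// are registered as shown; the destination path is illustrative):
//
//   gitea migrate-storage --type attachments --dst /data/gitea/attachments-copy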
@@ -40,6 +40,7 @@ require (
 	github.com/go-enry/go-enry/v2 v2.3.0
 	github.com/go-git/go-billy/v5 v5.0.0
 	github.com/go-git/go-git/v5 v5.0.0
+	github.com/go-ini/ini v1.56.0 // indirect
 	github.com/go-openapi/jsonreference v0.19.3 // indirect
 	github.com/go-redis/redis v6.15.2+incompatible
 	github.com/go-sql-driver/mysql v1.4.1
@@ -72,6 +73,7 @@ require (
 	github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81
 	github.com/mgechev/revive v1.0.2
 	github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912
+	github.com/minio/minio-go v6.0.14+incompatible
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
 	github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
@@ -208,6 +208,8 @@ github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp
 github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
 github.com/go-git/go-git/v5 v5.0.0 h1:k5RWPm4iJwYtfWoxIJy4wJX9ON7ihPeZZYC1fLYDnpg=
 github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA=
+github.com/go-ini/ini v1.56.0 h1:6HjxSjqdmgnujDPhlzR4a44lxK3w03WPN8te0SoUSeM=
+github.com/go-ini/ini v1.56.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -469,6 +471,8 @@ github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg=
 github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
 github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912 h1:hJde9rA24hlTcAYSwJoXpDUyGtfKQ/jsofw+WaDqGrI=
 github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912/go.mod h1:8iwZnFn2CDDNZ0r6UXhF4xawGvzaqzCRa1n3/lO3W2w=
+github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
+github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
@@ -123,10 +123,7 @@ func TestGetAttachment(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			//Write empty file to be available for response
 			if tc.createFile {
-				localPath := models.AttachmentLocalPath(tc.uuid)
-				err := os.MkdirAll(path.Dir(localPath), os.ModePerm)
-				assert.NoError(t, err)
-				err = ioutil.WriteFile(localPath, []byte("hello world"), 0644)
+				err := SaveAttachment(tc.uuid, strings.NewReader("hello world"))
 				assert.NoError(t, err)
 			}
 			//Actual test
@@ -69,6 +69,7 @@ arguments - which can alternatively be run by running the subcommand web.`
 		cmd.CmdDoctor,
 		cmd.CmdManager,
 		cmd.Cmdembedded,
+		cmd.CmdMigrateStorage,
 	}
 
 	// Now adjust these commands to add our global configuration options
@@ -9,6 +9,7 @@ import (
 	"os"
 
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/timeutil"
 
 	"github.com/unknwon/com"
@@ -65,6 +66,18 @@ func RemoveAllWithNotice(title, path string) {
 	removeAllWithNotice(x, title, path)
 }
 
+// RemoveStorageWithNotice removes a file from the storage and
+// creates a system notice when an error occurs.
+func RemoveStorageWithNotice(bucket storage.ObjectStorage, title, path string) {
+	if err := bucket.Delete(path); err != nil {
+		desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
+		log.Warn(title+" [%s]: %v", path, err)
+		if err = createNotice(x, NoticeRepository, desc); err != nil {
+			log.Error("CreateRepositoryNotice: %v", err)
+		}
+	}
+}
+
 func removeAllWithNotice(e Engine, title, path string) {
 	if err := os.RemoveAll(path); err != nil {
 		desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
@@ -5,12 +5,13 @@
 package models
 
 import (
+	"bytes"
 	"fmt"
 	"io"
-	"os"
 	"path"
 
 	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
 	api "code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/timeutil"
@@ -55,22 +56,16 @@ func (a *Attachment) APIFormat() *api.Attachment {
 	}
 }
 
-// AttachmentLocalPath returns where attachment is stored in local file
-// system based on given UUID.
-func AttachmentLocalPath(uuid string) string {
-	return path.Join(setting.AttachmentPath, uuid[0:1], uuid[1:2], uuid)
-}
-
-// LocalPath returns where attachment is stored in local file system.
-func (a *Attachment) LocalPath() string {
-	return AttachmentLocalPath(a.UUID)
-}
-
 // DownloadURL returns the download url of the attached file
 func (a *Attachment) DownloadURL() string {
 	return fmt.Sprintf("%sattachments/%s", setting.AppURL, a.UUID)
 }
 
+// RelativePath returns the relative path of the attachment
+func (a *Attachment) RelativePath() string {
+	return path.Join(a.UUID[0:1], a.UUID[1:2], a.UUID)
+}
+
 // LinkedRepository returns the linked repo if any
 func (a *Attachment) LinkedRepository() (*Repository, UnitType, error) {
 	if a.IssueID != 0 {
@@ -99,29 +94,11 @@ func (a *Attachment) LinkedRepository() (*Repository, UnitType, error) {
 func NewAttachment(attach *Attachment, buf []byte, file io.Reader) (_ *Attachment, err error) {
 	attach.UUID = gouuid.NewV4().String()
 
-	localPath := attach.LocalPath()
-	if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
-		return nil, fmt.Errorf("MkdirAll: %v", err)
-	}
-
-	fw, err := os.Create(localPath)
+	size, err := storage.Attachments.Save(attach.RelativePath(), io.MultiReader(bytes.NewReader(buf), file))
 	if err != nil {
 		return nil, fmt.Errorf("Create: %v", err)
 	}
-	defer fw.Close()
-
-	if _, err = fw.Write(buf); err != nil {
-		return nil, fmt.Errorf("Write: %v", err)
-	} else if _, err = io.Copy(fw, file); err != nil {
-		return nil, fmt.Errorf("Copy: %v", err)
-	}
-
-	// Update file size
-	var fi os.FileInfo
-	if fi, err = fw.Stat(); err != nil {
-		return nil, fmt.Errorf("file size: %v", err)
-	}
-	attach.Size = fi.Size()
+	attach.Size = size
 
 	if _, err := x.Insert(attach); err != nil {
 		return nil, err
@@ -238,7 +215,7 @@ func DeleteAttachments(attachments []*Attachment, remove bool) (int, error) {
 	if remove {
 		for i, a := range attachments {
-			if err := os.Remove(a.LocalPath()); err != nil {
+			if err := storage.Attachments.Delete(a.RelativePath()); err != nil {
 				return i, err
 			}
 		}
@@ -290,3 +267,25 @@ func DeleteAttachmentsByRelease(releaseID int64) error {
 	_, err := x.Where("release_id = ?", releaseID).Delete(&Attachment{})
 	return err
 }
+
+// IterateAttachment iterates attachments in batches of 100
+func IterateAttachment(f func(attach *Attachment) error) error {
+	var start int
+	const batchSize = 100
+	for {
+		var attachments = make([]*Attachment, 0, batchSize)
+		if err := x.Limit(batchSize, start).Find(&attachments); err != nil {
+			return err
+		}
+		if len(attachments) == 0 {
+			return nil
+		}
+		start += len(attachments)
+
+		for _, attach := range attachments {
+			if err := f(attach); err != nil {
+				return err
+			}
+		}
+	}
+}
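// Usage sketch for IterateAttachment, mirroring migrateAttachments in the new
// migrate-storage command; dstStorage is assumed to be any ObjectStorage value:
//
//   err := models.IterateAttachment(func(a *models.Attachment) error {
//       _, err := storage.Copy(dstStorage, a.RelativePath(), storage.Attachments, a.RelativePath())
//       return err
//   })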
@@ -5,9 +5,8 @@
 package migrations
 
 import (
-	"os"
+	"code.gitea.io/gitea/modules/storage"
 
-	"code.gitea.io/gitea/models"
 	"xorm.io/builder"
 	"xorm.io/xorm"
 )
@@ -27,7 +26,7 @@ func removeAttachmentMissedRepo(x *xorm.Engine) error {
 	}
 
 	for i := 0; i < len(attachments); i++ {
-		os.RemoveAll(models.AttachmentLocalPath(attachments[i].UUID))
+		storage.Attachments.Delete(relativePath(attachments[i].UUID))
 	}
 
 	if len(attachments) < 50 {
@@ -5,16 +5,19 @@
 package migrations
 
 import (
-	"os"
+	"path"
 
-	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
 
 	"xorm.io/xorm"
 )
 
-func deleteOrphanedAttachments(x *xorm.Engine) error {
+func relativePath(uuid string) string {
+	return path.Join(uuid[0:1], uuid[1:2], uuid)
+}
+
+func deleteOrphanedAttachments(x *xorm.Engine) error {
 	type Attachment struct {
 		ID   int64  `xorm:"pk autoincr"`
 		UUID string `xorm:"uuid UNIQUE"`
@@ -47,12 +50,14 @@ func deleteOrphanedAttachments(x *xorm.Engine) error {
 		for _, attachment := range attachements {
 			ids = append(ids, attachment.ID)
 		}
-		if _, err := sess.In("id", ids).Delete(new(Attachment)); err != nil {
-			return err
+		if len(ids) > 0 {
+			if _, err := sess.In("id", ids).Delete(new(Attachment)); err != nil {
+				return err
+			}
 		}
 
 		for _, attachment := range attachements {
-			if err := os.RemoveAll(models.AttachmentLocalPath(attachment.UUID)); err != nil {
+			if err := storage.Attachments.Delete(relativePath(attachment.UUID)); err != nil {
 				return err
 			}
 		}
@@ -30,6 +30,7 @@ import (
 	"code.gitea.io/gitea/modules/markup"
 	"code.gitea.io/gitea/modules/options"
 	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
 	api "code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/timeutil"
 	"code.gitea.io/gitea/modules/util"
@@ -1548,7 +1549,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
 	}
 	releaseAttachments := make([]string, 0, len(attachments))
 	for i := 0; i < len(attachments); i++ {
-		releaseAttachments = append(releaseAttachments, attachments[i].LocalPath())
+		releaseAttachments = append(releaseAttachments, attachments[i].RelativePath())
 	}
 
 	if err = deleteBeans(sess,
@@ -1627,7 +1628,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
 	}
 	attachmentPaths := make([]string, 0, len(attachments))
 	for j := range attachments {
-		attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
+		attachmentPaths = append(attachmentPaths, attachments[j].RelativePath())
 	}
 
 	if _, err = sess.In("issue_id", deleteCond).
@@ -1715,12 +1716,12 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
 
 	// Remove issue attachment files.
 	for i := range attachmentPaths {
-		removeAllWithNotice(x, "Delete issue attachment", attachmentPaths[i])
+		RemoveStorageWithNotice(storage.Attachments, "Delete issue attachment", attachmentPaths[i])
 	}
 
 	// Remove release attachment files.
 	for i := range releaseAttachments {
-		removeAllWithNotice(x, "Delete release attachment", releaseAttachments[i])
+		RemoveStorageWithNotice(storage.Attachments, "Delete release attachment", releaseAttachments[i])
 	}
 
 	if len(repo.Avatar) > 0 {
@@ -13,7 +13,6 @@ import (
 	"net/http"
 	"net/url"
 	"os"
-	"path"
 	"path/filepath"
 	"strings"
 	"sync"
@@ -26,6 +25,7 @@ import (
 	"code.gitea.io/gitea/modules/repository"
 	repo_module "code.gitea.io/gitea/modules/repository"
 	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/structs"
 	"code.gitea.io/gitea/modules/timeutil"
@@ -275,18 +275,7 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error {
 			}
 			defer resp.Body.Close()
 
-			localPath := attach.LocalPath()
-			if err = os.MkdirAll(path.Dir(localPath), os.ModePerm); err != nil {
-				return fmt.Errorf("MkdirAll: %v", err)
-			}
-
-			fw, err := os.Create(localPath)
-			if err != nil {
-				return fmt.Errorf("Create: %v", err)
-			}
-			defer fw.Close()
-
-			_, err = io.Copy(fw, resp.Body)
+			_, err = storage.Attachments.Save(attach.RelativePath(), resp.Body)
 			return err
 		}()
 		if err != nil {
@@ -0,0 +1,62 @@
+package storage
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+)
+
+var (
+	_ ObjectStorage = &LocalStorage{}
+)
+
+// LocalStorage represents a local files storage
+type LocalStorage struct {
+	dir string
+}
+
+// NewLocalStorage returns a local files storage
+func NewLocalStorage(bucket string) (*LocalStorage, error) {
+	if err := os.MkdirAll(bucket, os.ModePerm); err != nil {
+		return nil, err
+	}
+
+	return &LocalStorage{
+		dir: bucket,
+	}, nil
+}
+
+// Open opens a file
+func (l *LocalStorage) Open(path string) (io.ReadCloser, error) {
+	f, err := os.Open(filepath.Join(l.dir, path))
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Save saves a file
+func (l *LocalStorage) Save(path string, r io.Reader) (int64, error) {
+	p := filepath.Join(l.dir, path)
+	if err := os.MkdirAll(filepath.Dir(p), os.ModePerm); err != nil {
+		return 0, err
+	}
+
+	// always override
+	if err := os.RemoveAll(p); err != nil {
+		return 0, err
+	}
+
+	f, err := os.Create(p)
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	return io.Copy(f, r)
+}
+
+// Delete deletes a file
+func (l *LocalStorage) Delete(path string) error {
+	p := filepath.Join(l.dir, path)
+	return os.Remove(p)
+}
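// Usage sketch for LocalStorage (the directory, path, and content are illustrative):
//
//   s, err := storage.NewLocalStorage("/tmp/attachments")
//   if err == nil {
//       _, _ = s.Save("a/b/uuid", strings.NewReader("hello"))
//       r, _ := s.Open("a/b/uuid")
//       defer r.Close()
//   }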
@@ -0,0 +1,59 @@
+package storage
+
+import (
+	"io"
+	"path"
+	"strings"
+
+	"github.com/minio/minio-go"
+)
+
+var (
+	_ ObjectStorage = &MinioStorage{}
+)
+
+// MinioStorage represents a minio bucket storage
+type MinioStorage struct {
+	client   *minio.Client
+	location string
+	bucket   string
+	basePath string
+}
+
+// NewMinioStorage returns a minio storage
+func NewMinioStorage(endpoint, accessKeyID, secretAccessKey, location, bucket, basePath string, useSSL bool) (*MinioStorage, error) {
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		return nil, err
+	}
+
+	return &MinioStorage{
+		location: location,
+		client:   minioClient,
+		bucket:   bucket,
+		basePath: basePath,
+	}, nil
+}
+
+// buildMinioPath prefixes the storage base path and strips any leading slash,
+// since minio object keys must not start with one.
+func (m *MinioStorage) buildMinioPath(p string) string {
+	return strings.TrimPrefix(path.Join(m.basePath, p), "/")
+}
+
+// Open opens a file
+func (m *MinioStorage) Open(path string) (io.ReadCloser, error) {
+	var opts = minio.GetObjectOptions{}
+	object, err := m.client.GetObject(m.bucket, m.buildMinioPath(path), opts)
+	if err != nil {
+		return nil, err
+	}
+	return object, nil
+}
+
+// Save saves a file to minio
+func (m *MinioStorage) Save(path string, r io.Reader) (int64, error) {
+	return m.client.PutObject(m.bucket, m.buildMinioPath(path), r, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+}
+
+// Delete deletes a file
+func (m *MinioStorage) Delete(path string) error {
+	return m.client.RemoveObject(m.bucket, m.buildMinioPath(path))
+}
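// Construction sketch for MinioStorage; the endpoint, credentials, and bucket
// are placeholders, and useSSL follows the minio-go convention:
//
//   m, err := storage.NewMinioStorage("play.min.io:9000", "accessKey", "secretKey",
//       "us-east-1", "gitea", "attachments", true)
//   if err == nil {
//       _, _ = m.Save("uuid", strings.NewReader("hello"))
//   }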
@@ -0,0 +1,42 @@
+package storage
+
+import (
+	"io"
+
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// ObjectStorage represents an object storage to handle a bucket and files
+type ObjectStorage interface {
+	Save(path string, r io.Reader) (int64, error)
+	Open(path string) (io.ReadCloser, error)
+	Delete(path string) error
+}
+
+// Copy copies a file from source ObjectStorage to dest ObjectStorage
+func Copy(dstStorage ObjectStorage, dstPath string, srcStorage ObjectStorage, srcPath string) (int64, error) {
+	f, err := srcStorage.Open(srcPath)
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	return dstStorage.Save(dstPath, f)
+}
+
+var (
+	// Attachments represents attachments storage
+	Attachments ObjectStorage
+)
+
+// Init initializes the storage
+func Init() error {
+	var err error
+	Attachments, err = NewLocalStorage(setting.AttachmentPath)
+	if err != nil {
+		return err
+	}
+	return nil
+}
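// Note: Init currently wires Attachments to local storage only. A hedged
// sketch of wiring it to minio instead, with every argument here a
// placeholder rather than an existing setting in this diff:
//
//   Attachments, err = NewMinioStorage(endpoint, accessKeyID, secretAccessKey,
//       location, bucket, basePath, useSSL)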
@@ -28,6 +28,7 @@ import (
 	"code.gitea.io/gitea/modules/options"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/ssh"
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/task"
 	"code.gitea.io/gitea/modules/webhook"
 	"code.gitea.io/gitea/services/mailer"
@@ -54,6 +55,9 @@ func checkRunMode() {
 // NewServices init new services
 func NewServices() {
 	setting.NewServices()
+	if err := storage.Init(); err != nil {
+		log.Fatal("storage init failed: %v", err)
+	}
 	mailer.NewContext()
 	_ = cache.NewContext()
 	notification.NewContext()
@@ -7,13 +7,13 @@ package repo
 import (
 	"fmt"
 	"net/http"
-	"os"
 	"strings"
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/context"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/setting"
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/upload"
 )
@@ -123,7 +123,7 @@ func GetAttachment(ctx *context.Context) {
 	}
 
 	//If we have matched and access to release or issue
-	fr, err := os.Open(attach.LocalPath())
+	fr, err := storage.Attachments.Open(attach.RelativePath())
 	if err != nil {
 		ctx.ServerError("Open", err)
 		return
@@ -6,7 +6,6 @@ package release
 
 import (
 	"fmt"
-	"os"
 	"strings"
 
 	"code.gitea.io/gitea/models"
@@ -14,6 +13,7 @@ import (
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/notification"
 	"code.gitea.io/gitea/modules/repository"
+	"code.gitea.io/gitea/modules/storage"
 	"code.gitea.io/gitea/modules/timeutil"
 )
@@ -161,7 +161,7 @@ func DeleteReleaseByID(id int64, doer *models.User, delTag bool) error {
 		for i := range rel.Attachments {
 			attachment := rel.Attachments[i]
-			if err := os.RemoveAll(attachment.LocalPath()); err != nil {
+			if err := storage.Attachments.Delete(attachment.RelativePath()); err != nil {
 				log.Error("Delete attachment %s of release %s failed: %v", attachment.UUID, rel.ID, err)
 			}
 		}
@@ -0,0 +1,6 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+Copyright 2014 Unknwon
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+	go test -v -cover -race
+
+bench:
+	go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+	go vet
+
+coverage:
+	go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
@@ -0,0 +1,43 @@
+# INI
+
+[GitHub Actions](https://github.com/go-ini/ini/actions?query=workflow%3AGo)
+[codecov](https://codecov.io/gh/go-ini/ini)
+[GoDoc](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.6**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- Mirror for mainland China: https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
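A minimal usage sketch of the vendored package (the file name, section, and key are illustrative):

```go
cfg, err := ini.Load("my.ini")
if err != nil {
	panic(err)
}
// Read a key with a fallback default.
fmt.Println(cfg.Section("server").Key("http_port").MustInt(8080))
```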
@@ -0,0 +1,9 @@
+coverage:
+  range: "60...95"
+  status:
+    project:
+      default:
+        threshold: 1%
+
+comment:
+  layout: 'diff, files'
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+var (
+	_ dataSource = (*sourceFile)(nil)
+	_ dataSource = (*sourceData)(nil)
+	_ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+	reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+	return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	case io.ReadCloser:
+		return &sourceReadCloser{s}, nil
+	case io.Reader:
+		return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+	}
+}
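// Usage sketch: Load and LoadSources accept any of the source kinds handled by
// parseDataSource above, e.g. a file path, raw bytes, or an io.Reader:
//
//   f, err := ini.LoadSources(ini.LoadOptions{}, "base.ini", []byte("key = value"))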
@@ -0,0 +1,25 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+const (
+	// Deprecated: Use "DefaultSection" instead.
+	DEFAULT_SECTION = DefaultSection
+)
+
+var (
+	// Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore = SnackCase
+)
@@ -0,0 +1,34 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+)
+
+// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
+type ErrDelimiterNotFound struct {
+	Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+	_, ok := err.(ErrDelimiterNotFound)
+	return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
| @@ -0,0 +1,509 @@ | |||||
| // Copyright 2017 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| import ( | |||||
| "bytes" | |||||
| "errors" | |||||
| "fmt" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "os" | |||||
| "strings" | |||||
| "sync" | |||||
| ) | |||||
| // File represents a combination of one or more INI files in memory. | |||||
| type File struct { | |||||
| options LoadOptions | |||||
| dataSources []dataSource | |||||
| // Should make things safe, but sometimes doesn't matter. | |||||
| BlockMode bool | |||||
| lock sync.RWMutex | |||||
| // To keep data in order. | |||||
| sectionList []string | |||||
| // To keep track of the index of a section with same name. | |||||
| // This meta list is only used with non-unique section names are allowed. | |||||
| sectionIndexes []int | |||||
| // Actual data is stored here. | |||||
| sections map[string][]*Section | |||||
| NameMapper | |||||
| ValueMapper | |||||
| } | |||||
| // newFile initializes File object with given data sources. | |||||
| func newFile(dataSources []dataSource, opts LoadOptions) *File { | |||||
| if len(opts.KeyValueDelimiters) == 0 { | |||||
| opts.KeyValueDelimiters = "=:" | |||||
| } | |||||
| if len(opts.KeyValueDelimiterOnWrite) == 0 { | |||||
| opts.KeyValueDelimiterOnWrite = "=" | |||||
| } | |||||
| return &File{ | |||||
| BlockMode: true, | |||||
| dataSources: dataSources, | |||||
| sections: make(map[string][]*Section), | |||||
| options: opts, | |||||
| } | |||||
| } | |||||
| // Empty returns an empty file object. | |||||
| func Empty(opts ...LoadOptions) *File { | |||||
| var opt LoadOptions | |||||
| if len(opts) > 0 { | |||||
| opt = opts[0] | |||||
| } | |||||
| // Ignore error here, we are sure our data is good. | |||||
| f, _ := LoadSources(opt, []byte("")) | |||||
| return f | |||||
| } | |||||
| // NewSection creates a new section. | |||||
| func (f *File) NewSection(name string) (*Section, error) { | |||||
| if len(name) == 0 { | |||||
| return nil, errors.New("empty section name") | |||||
| } | |||||
| if f.options.Insensitive && name != DefaultSection { | |||||
| name = strings.ToLower(name) | |||||
| } | |||||
| if f.BlockMode { | |||||
| f.lock.Lock() | |||||
| defer f.lock.Unlock() | |||||
| } | |||||
| if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { | |||||
| return f.sections[name][0], nil | |||||
| } | |||||
| f.sectionList = append(f.sectionList, name) | |||||
| // NOTE: Append to indexes must happen before appending to sections, | |||||
| // otherwise index will have off-by-one problem. | |||||
| f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) | |||||
| sec := newSection(f, name) | |||||
| f.sections[name] = append(f.sections[name], sec) | |||||
| return sec, nil | |||||
| } | |||||
| // NewRawSection creates a new section with an unparseable body. | |||||
| func (f *File) NewRawSection(name, body string) (*Section, error) { | |||||
| section, err := f.NewSection(name) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| section.isRawSection = true | |||||
| section.rawBody = body | |||||
| return section, nil | |||||
| } | |||||
| // NewSections creates a list of sections. | |||||
| func (f *File) NewSections(names ...string) (err error) { | |||||
| for _, name := range names { | |||||
| if _, err = f.NewSection(name); err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // GetSection returns section by given name. | |||||
| func (f *File) GetSection(name string) (*Section, error) { | |||||
| secs, err := f.SectionsByName(name) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return secs[0], err | |||||
| } | |||||
| // SectionsByName returns all sections with given name. | |||||
| func (f *File) SectionsByName(name string) ([]*Section, error) { | |||||
| if len(name) == 0 { | |||||
| name = DefaultSection | |||||
| } | |||||
| if f.options.Insensitive { | |||||
| name = strings.ToLower(name) | |||||
| } | |||||
| if f.BlockMode { | |||||
| f.lock.RLock() | |||||
| defer f.lock.RUnlock() | |||||
| } | |||||
| secs := f.sections[name] | |||||
| if len(secs) == 0 { | |||||
| return nil, fmt.Errorf("section %q does not exist", name) | |||||
| } | |||||
| return secs, nil | |||||
| } | |||||
| // Section assumes named section exists and returns a zero-value when not. | |||||
| func (f *File) Section(name string) *Section { | |||||
| sec, err := f.GetSection(name) | |||||
| if err != nil { | |||||
| // Note: It's OK here because the only possible error is empty section name, | |||||
| // but if it's empty, this piece of code won't be executed. | |||||
| sec, _ = f.NewSection(name) | |||||
| return sec | |||||
| } | |||||
| return sec | |||||
| } | |||||
| // SectionWithIndex assumes named section exists and returns a new section when not. | |||||
| func (f *File) SectionWithIndex(name string, index int) *Section { | |||||
| secs, err := f.SectionsByName(name) | |||||
| if err != nil || len(secs) <= index { | |||||
| // NOTE: It's OK here because the only possible error is empty section name, | |||||
| // but if it's empty, this piece of code won't be executed. | |||||
| newSec, _ := f.NewSection(name) | |||||
| return newSec | |||||
| } | |||||
| return secs[index] | |||||
| } | |||||
| // Sections returns a list of Section stored in the current instance. | |||||
| func (f *File) Sections() []*Section { | |||||
| if f.BlockMode { | |||||
| f.lock.RLock() | |||||
| defer f.lock.RUnlock() | |||||
| } | |||||
| sections := make([]*Section, len(f.sectionList)) | |||||
| for i, name := range f.sectionList { | |||||
| sections[i] = f.sections[name][f.sectionIndexes[i]] | |||||
| } | |||||
| return sections | |||||
| } | |||||
| // ChildSections returns a list of child sections of given section name. | |||||
| func (f *File) ChildSections(name string) []*Section { | |||||
| return f.Section(name).ChildSections() | |||||
| } | |||||
| // SectionStrings returns list of section names. | |||||
| func (f *File) SectionStrings() []string { | |||||
| list := make([]string, len(f.sectionList)) | |||||
| copy(list, f.sectionList) | |||||
| return list | |||||
| } | |||||
| // DeleteSection deletes a section or all sections with given name. | |||||
| func (f *File) DeleteSection(name string) { | |||||
| secs, err := f.SectionsByName(name) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| for i := 0; i < len(secs); i++ { | |||||
| // For non-unique sections, it is always needed to remove the first one so | |||||
| // in the next iteration, the subsequent section continue having index 0. | |||||
| // Ignoring the error as index 0 never returns an error. | |||||
| _ = f.DeleteSectionWithIndex(name, 0) | |||||
| } | |||||
| } | |||||
| // DeleteSectionWithIndex deletes a section with given name and index. | |||||
| func (f *File) DeleteSectionWithIndex(name string, index int) error { | |||||
| if !f.options.AllowNonUniqueSections && index != 0 { | |||||
| return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") | |||||
| } | |||||
| if len(name) == 0 { | |||||
| name = DefaultSection | |||||
| } | |||||
| if f.options.Insensitive { | |||||
| name = strings.ToLower(name) | |||||
| } | |||||
| if f.BlockMode { | |||||
| f.lock.Lock() | |||||
| defer f.lock.Unlock() | |||||
| } | |||||
| // Count occurrences of the sections | |||||
| occurrences := 0 | |||||
| sectionListCopy := make([]string, len(f.sectionList)) | |||||
| copy(sectionListCopy, f.sectionList) | |||||
| for i, s := range sectionListCopy { | |||||
| if s != name { | |||||
| continue | |||||
| } | |||||
| if occurrences == index { | |||||
| if len(f.sections[name]) <= 1 { | |||||
| delete(f.sections, name) // The last one in the map | |||||
| } else { | |||||
| f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) | |||||
| } | |||||
| // Fix section lists | |||||
| f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) | |||||
| f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) | |||||
| } else if occurrences > index { | |||||
| // Fix the indices of all following sections with this name. | |||||
| f.sectionIndexes[i-1]-- | |||||
| } | |||||
| occurrences++ | |||||
| } | |||||
| return nil | |||||
| } | |||||
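| // Illustrative usage sketch, not part of the upstream library; the file | |||||
| // name "app.ini" and section name "peer" are hypothetical. With | |||||
| // AllowNonUniqueSections enabled, individual occurrences of a repeated | |||||
| // section can be addressed and deleted by index: | |||||
| func exampleNonUniqueSections() error { | |||||
| f, err := LoadSources(LoadOptions{AllowNonUniqueSections: true}, "app.ini") | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Grab the second [peer] section; a new one is created when absent. | |||||
| _ = f.SectionWithIndex("peer", 1) | |||||
| // Delete only that occurrence; later occurrences shift down by one. | |||||
| return f.DeleteSectionWithIndex("peer", 1) | |||||
| } | |||||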
| func (f *File) reload(s dataSource) error { | |||||
| r, err := s.ReadCloser() | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| defer r.Close() | |||||
| return f.parse(r) | |||||
| } | |||||
| // Reload reloads and parses all data sources. | |||||
| func (f *File) Reload() (err error) { | |||||
| for _, s := range f.dataSources { | |||||
| if err = f.reload(s); err != nil { | |||||
| // In loose mode, we create an empty default section for nonexistent files. | |||||
| if os.IsNotExist(err) && f.options.Loose { | |||||
| _ = f.parse(bytes.NewBuffer(nil)) | |||||
| continue | |||||
| } | |||||
| return err | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Append appends one or more data sources and reloads automatically. | |||||
| func (f *File) Append(source interface{}, others ...interface{}) error { | |||||
| ds, err := parseDataSource(source) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| f.dataSources = append(f.dataSources, ds) | |||||
| for _, s := range others { | |||||
| ds, err = parseDataSource(s) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| f.dataSources = append(f.dataSources, ds) | |||||
| } | |||||
| return f.Reload() | |||||
| } | |||||
| func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { | |||||
| equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight | |||||
| if PrettyFormat || PrettyEqual { | |||||
| equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) | |||||
| } | |||||
| // Use a buffer to make sure the target is safe until encoding finishes. | |||||
| buf := bytes.NewBuffer(nil) | |||||
| for i, sname := range f.sectionList { | |||||
| sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) | |||||
| if len(sec.Comment) > 0 { | |||||
| // Support multiline comments | |||||
| lines := strings.Split(sec.Comment, LineBreak) | |||||
| for i := range lines { | |||||
| if lines[i][0] != '#' && lines[i][0] != ';' { | |||||
| lines[i] = "; " + lines[i] | |||||
| } else { | |||||
| lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) | |||||
| } | |||||
| if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| } | |||||
| if i > 0 || DefaultHeader { | |||||
| if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } else { | |||||
| // Write nothing if default section is empty | |||||
| if len(sec.keyList) == 0 { | |||||
| continue | |||||
| } | |||||
| } | |||||
| if sec.isRawSection { | |||||
| if _, err := buf.WriteString(sec.rawBody); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if PrettySection { | |||||
| // Put a line between sections | |||||
| if _, err := buf.WriteString(LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| continue | |||||
| } | |||||
| // Count and generate alignment length and buffer spaces using the | |||||
| // longest key. Keys may be modified if they contain certain characters so | |||||
| // we need to take that into account in our calculation. | |||||
| alignLength := 0 | |||||
| if PrettyFormat { | |||||
| for _, kname := range sec.keyList { | |||||
| keyLength := len(kname) | |||||
| // First case will surround key by ` and second by """ | |||||
| if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { | |||||
| keyLength += 2 | |||||
| } else if strings.Contains(kname, "`") { | |||||
| keyLength += 6 | |||||
| } | |||||
| if keyLength > alignLength { | |||||
| alignLength = keyLength | |||||
| } | |||||
| } | |||||
| } | |||||
| alignSpaces := bytes.Repeat([]byte(" "), alignLength) | |||||
| KeyList: | |||||
| for _, kname := range sec.keyList { | |||||
| key := sec.Key(kname) | |||||
| if len(key.Comment) > 0 { | |||||
| if len(indent) > 0 && sname != DefaultSection { | |||||
| buf.WriteString(indent) | |||||
| } | |||||
| // Support multiline comments | |||||
| lines := strings.Split(key.Comment, LineBreak) | |||||
| for i := range lines { | |||||
| if lines[i][0] != '#' && lines[i][0] != ';' { | |||||
| lines[i] = "; " + strings.TrimSpace(lines[i]) | |||||
| } else { | |||||
| lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) | |||||
| } | |||||
| if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| } | |||||
| if len(indent) > 0 && sname != DefaultSection { | |||||
| buf.WriteString(indent) | |||||
| } | |||||
| switch { | |||||
| case key.isAutoIncrement: | |||||
| kname = "-" | |||||
| case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): | |||||
| kname = "`" + kname + "`" | |||||
| case strings.Contains(kname, "`"): | |||||
| kname = `"""` + kname + `"""` | |||||
| } | |||||
| for _, val := range key.ValueWithShadows() { | |||||
| if _, err := buf.WriteString(kname); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if key.isBooleanType { | |||||
| if kname != sec.keyList[len(sec.keyList)-1] { | |||||
| buf.WriteString(LineBreak) | |||||
| } | |||||
| continue KeyList | |||||
| } | |||||
| // Write out alignment spaces before "=" sign | |||||
| if PrettyFormat { | |||||
| buf.Write(alignSpaces[:alignLength-len(kname)]) | |||||
| } | |||||
| // In case key value contains "\n", "`", "\"", "#" or ";" | |||||
| if strings.ContainsAny(val, "\n`") { | |||||
| val = `"""` + val + `"""` | |||||
| } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { | |||||
| val = "`" + val + "`" | |||||
| } | |||||
| if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| for _, val := range key.nestedValues { | |||||
| if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| } | |||||
| if PrettySection { | |||||
| // Put a line between sections | |||||
| if _, err := buf.WriteString(LineBreak); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| } | |||||
| return buf, nil | |||||
| } | |||||
| // WriteToIndent writes content into io.Writer with given indention. | |||||
| // If PrettyFormat has been set to be true, | |||||
| // it will align "=" sign with spaces under each section. | |||||
| func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { | |||||
| buf, err := f.writeToBuffer(indent) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return buf.WriteTo(w) | |||||
| } | |||||
| // WriteTo writes file content into io.Writer. | |||||
| func (f *File) WriteTo(w io.Writer) (int64, error) { | |||||
| return f.WriteToIndent(w, "") | |||||
| } | |||||
| // SaveToIndent writes content to file system with given value indention. | |||||
| func (f *File) SaveToIndent(filename, indent string) error { | |||||
| // Note: Because os.Create truncates the target file, | |||||
| // it is safer to save to a temporary location and rename after done. | |||||
| buf, err := f.writeToBuffer(indent) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return ioutil.WriteFile(filename, buf.Bytes(), 0666) | |||||
| } | |||||
| // SaveTo writes content to file system. | |||||
| func (f *File) SaveTo(filename string) error { | |||||
| return f.SaveToIndent(filename, "") | |||||
| } | |||||
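| // Illustrative usage sketch, not part of the upstream library; the target | |||||
| // file name is hypothetical. SaveToIndent persists with indented values, | |||||
| // while WriteTo streams the same serialized content to any io.Writer: | |||||
| func exampleSave(f *File) error { | |||||
| if err := f.SaveToIndent("my.ini", "\t"); err != nil { | |||||
| return err | |||||
| } | |||||
| var buf bytes.Buffer | |||||
| _, err := f.WriteTo(&buf) | |||||
| return err | |||||
| } | |||||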
| @@ -0,0 +1,24 @@ | |||||
| // Copyright 2019 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| func inSlice(str string, s []string) bool { | |||||
| for _, v := range s { | |||||
| if str == v { | |||||
| return true | |||||
| } | |||||
| } | |||||
| return false | |||||
| } | |||||
| @@ -0,0 +1,168 @@ | |||||
| // +build go1.6 | |||||
| // Copyright 2014 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| // Package ini provides INI file read and write functionality in Go. | |||||
| package ini | |||||
| import ( | |||||
| "os" | |||||
| "regexp" | |||||
| "runtime" | |||||
| "strings" | |||||
| ) | |||||
| const ( | |||||
| // DefaultSection is the name of the default section. You can use this constant or the string literal. | |||||
| // In most cases, an empty string is all you need to access the section. | |||||
| DefaultSection = "DEFAULT" | |||||
| // Maximum allowed depth when recursively substituting variable names. | |||||
| depthValues = 99 | |||||
| ) | |||||
| var ( | |||||
| // LineBreak is the delimiter to determine or compose a new line. | |||||
| // This variable will be changed to "\r\n" automatically on Windows at package init time. | |||||
| LineBreak = "\n" | |||||
| // Variable regexp pattern: %(variable)s | |||||
| varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) | |||||
| // DefaultHeader explicitly writes default section header. | |||||
| DefaultHeader = false | |||||
| // PrettySection indicates whether to put a line between sections. | |||||
| PrettySection = true | |||||
| // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output | |||||
| // or reduce all possible spaces for compact format. | |||||
| PrettyFormat = true | |||||
| // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. | |||||
| PrettyEqual = false | |||||
| // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. | |||||
| DefaultFormatLeft = "" | |||||
| // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. | |||||
| DefaultFormatRight = "" | |||||
| ) | |||||
| var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") | |||||
| func init() { | |||||
| if runtime.GOOS == "windows" && !inTest { | |||||
| LineBreak = "\r\n" | |||||
| } | |||||
| } | |||||
| // LoadOptions contains all customized options used for load data source(s). | |||||
| type LoadOptions struct { | |||||
| // Loose indicates whether the parser should ignore nonexistent files or return error. | |||||
| Loose bool | |||||
| // Insensitive indicates whether the parser forces all section and key names to lowercase. | |||||
| Insensitive bool | |||||
| // IgnoreContinuation indicates whether to ignore continuation lines while parsing. | |||||
| IgnoreContinuation bool | |||||
| // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. | |||||
| IgnoreInlineComment bool | |||||
| // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. | |||||
| SkipUnrecognizableLines bool | |||||
| // AllowBooleanKeys indicates whether to allow boolean type keys or treat them as keys with a missing value. | |||||
| // This type of key is mostly used in my.cnf. | |||||
| AllowBooleanKeys bool | |||||
| // AllowShadows indicates whether to keep track of keys with same name under same section. | |||||
| AllowShadows bool | |||||
| // AllowNestedValues indicates whether to allow AWS-like nested values. | |||||
| // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values | |||||
| AllowNestedValues bool | |||||
| // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. | |||||
| // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure | |||||
| // Relevant quote: Values can also span multiple lines, as long as they are indented deeper | |||||
| // than the first line of the value. | |||||
| AllowPythonMultilineValues bool | |||||
| // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. | |||||
| // Docs: https://docs.python.org/2/library/configparser.html | |||||
| // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. | |||||
| // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. | |||||
| SpaceBeforeInlineComment bool | |||||
| // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format | |||||
| // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" | |||||
| UnescapeValueDoubleQuotes bool | |||||
| // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format | |||||
| // when value is NOT surrounded by any quotes. | |||||
| // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all. | |||||
| UnescapeValueCommentSymbols bool | |||||
| // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise | |||||
| // conform to key/value pairs. Specify the names of those blocks here. | |||||
| UnparseableSections []string | |||||
| // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". | |||||
| KeyValueDelimiters string | |||||
| // KeyValueDelimiterOnWrite is the delimiter used to separate key and value on write. By default, it is "=". | |||||
| KeyValueDelimiterOnWrite string | |||||
| // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). | |||||
| PreserveSurroundedQuote bool | |||||
| // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). | |||||
| DebugFunc DebugFunc | |||||
| // ReaderBufferSize is the buffer size of the reader in bytes. | |||||
| ReaderBufferSize int | |||||
| // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. | |||||
| AllowNonUniqueSections bool | |||||
| } | |||||
| // DebugFunc is the type of function called to log parse events. | |||||
| type DebugFunc func(message string) | |||||
| // LoadSources allows the caller to apply customized options for loading from data source(s). | |||||
| func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { | |||||
| sources := make([]dataSource, len(others)+1) | |||||
| sources[0], err = parseDataSource(source) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| for i := range others { | |||||
| sources[i+1], err = parseDataSource(others[i]) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| f := newFile(sources, opts) | |||||
| if err = f.Reload(); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return f, nil | |||||
| } | |||||
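| // Illustrative usage sketch, not part of the upstream library; file names | |||||
| // and content are hypothetical. Options combine freely and sources may mix | |||||
| // file names with raw []byte data: | |||||
| func exampleLoadSources() (*File, error) { | |||||
| return LoadSources(LoadOptions{ | |||||
| Loose:                   true, // ignore nonexistent files | |||||
| Insensitive:             true, // lowercase section and key names | |||||
| SkipUnrecognizableLines: true, // drop lines that are not key/value pairs | |||||
| }, "base.ini", []byte("name = value"), "override.ini") | |||||
| } | |||||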
| // Load loads and parses from INI data sources. | |||||
| // Arguments can be a mix of file names (string) and raw data ([]byte). | |||||
| // It will return an error if the list contains nonexistent files. | |||||
| func Load(source interface{}, others ...interface{}) (*File, error) { | |||||
| return LoadSources(LoadOptions{}, source, others...) | |||||
| } | |||||
| // LooseLoad has exactly the same functionality as the Load function, | |||||
| // except it ignores nonexistent files instead of returning an error. | |||||
| func LooseLoad(source interface{}, others ...interface{}) (*File, error) { | |||||
| return LoadSources(LoadOptions{Loose: true}, source, others...) | |||||
| } | |||||
| // InsensitiveLoad has exactly the same functionality as the Load function, | |||||
| // except it forces all section and key names to be lowercased. | |||||
| func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { | |||||
| return LoadSources(LoadOptions{Insensitive: true}, source, others...) | |||||
| } | |||||
| // ShadowLoad has exactly the same functionality as the Load function, | |||||
| // except it allows having shadow keys. | |||||
| func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { | |||||
| return LoadSources(LoadOptions{AllowShadows: true}, source, others...) | |||||
| } | |||||
| @@ -0,0 +1,829 @@ | |||||
| // Copyright 2014 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| import ( | |||||
| "bytes" | |||||
| "errors" | |||||
| "fmt" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| // Key represents a key under a section. | |||||
| type Key struct { | |||||
| s *Section | |||||
| Comment string | |||||
| name string | |||||
| value string | |||||
| isAutoIncrement bool | |||||
| isBooleanType bool | |||||
| isShadow bool | |||||
| shadows []*Key | |||||
| nestedValues []string | |||||
| } | |||||
| // newKey simply returns a key object with given values. | |||||
| func newKey(s *Section, name, val string) *Key { | |||||
| return &Key{ | |||||
| s: s, | |||||
| name: name, | |||||
| value: val, | |||||
| } | |||||
| } | |||||
| func (k *Key) addShadow(val string) error { | |||||
| if k.isShadow { | |||||
| return errors.New("cannot add shadow to another shadow key") | |||||
| } else if k.isAutoIncrement || k.isBooleanType { | |||||
| return errors.New("cannot add shadow to auto-increment or boolean key") | |||||
| } | |||||
| // Deduplicate shadows based on their values. | |||||
| if k.value == val { | |||||
| return nil | |||||
| } | |||||
| for i := range k.shadows { | |||||
| if k.shadows[i].value == val { | |||||
| return nil | |||||
| } | |||||
| } | |||||
| shadow := newKey(k.s, k.name, val) | |||||
| shadow.isShadow = true | |||||
| k.shadows = append(k.shadows, shadow) | |||||
| return nil | |||||
| } | |||||
| // AddShadow adds a new shadow key to itself. | |||||
| func (k *Key) AddShadow(val string) error { | |||||
| if !k.s.f.options.AllowShadows { | |||||
| return errors.New("shadow key is not allowed") | |||||
| } | |||||
| return k.addShadow(val) | |||||
| } | |||||
| func (k *Key) addNestedValue(val string) error { | |||||
| if k.isAutoIncrement || k.isBooleanType { | |||||
| return errors.New("cannot add nested value to auto-increment or boolean key") | |||||
| } | |||||
| k.nestedValues = append(k.nestedValues, val) | |||||
| return nil | |||||
| } | |||||
| // AddNestedValue adds a nested value to the key. | |||||
| func (k *Key) AddNestedValue(val string) error { | |||||
| if !k.s.f.options.AllowNestedValues { | |||||
| return errors.New("nested value is not allowed") | |||||
| } | |||||
| return k.addNestedValue(val) | |||||
| } | |||||
| // ValueMapper represents a mapping function for values, e.g. os.ExpandEnv | |||||
| type ValueMapper func(string) string | |||||
| // Name returns name of key. | |||||
| func (k *Key) Name() string { | |||||
| return k.name | |||||
| } | |||||
| // Value returns the raw value of the key for performance purposes. | |||||
| func (k *Key) Value() string { | |||||
| return k.value | |||||
| } | |||||
| // ValueWithShadows returns raw values of key and its shadows if any. | |||||
| func (k *Key) ValueWithShadows() []string { | |||||
| if len(k.shadows) == 0 { | |||||
| return []string{k.value} | |||||
| } | |||||
| vals := make([]string, len(k.shadows)+1) | |||||
| vals[0] = k.value | |||||
| for i := range k.shadows { | |||||
| vals[i+1] = k.shadows[i].value | |||||
| } | |||||
| return vals | |||||
| } | |||||
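| // Illustrative usage sketch, not part of the upstream library; the content | |||||
| // is hypothetical. With shadows enabled, repeated keys are all kept and | |||||
| // ValueWithShadows returns the main value followed by its shadows: | |||||
| func exampleShadows() ([]string, error) { | |||||
| f, err := ShadowLoad([]byte("[remote]\nurl = one\nurl = two")) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Returns ["one", "two"]. | |||||
| return f.Section("remote").Key("url").ValueWithShadows(), nil | |||||
| } | |||||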
| // NestedValues returns nested values stored in the key. | |||||
| // The returned value may be nil if no nested values are stored in the key. | |||||
| func (k *Key) NestedValues() []string { | |||||
| return k.nestedValues | |||||
| } | |||||
| // transformValue takes a raw value and transforms it to its final string. | |||||
| func (k *Key) transformValue(val string) string { | |||||
| if k.s.f.ValueMapper != nil { | |||||
| val = k.s.f.ValueMapper(val) | |||||
| } | |||||
| // Fail fast if no indicator char is found for a recursive value | |||||
| if !strings.Contains(val, "%") { | |||||
| return val | |||||
| } | |||||
| for i := 0; i < depthValues; i++ { | |||||
| vr := varPattern.FindString(val) | |||||
| if len(vr) == 0 { | |||||
| break | |||||
| } | |||||
| // Take off leading '%(' and trailing ')s'. | |||||
| noption := vr[2 : len(vr)-2] | |||||
| // Search in the same section. | |||||
| // If not found or found the key itself, then search again in default section. | |||||
| nk, err := k.s.GetKey(noption) | |||||
| if err != nil || k == nk { | |||||
| nk, _ = k.s.f.Section("").GetKey(noption) | |||||
| if nk == nil { | |||||
| // Stop when no results are found in the default section, | |||||
| // and return the value as-is. | |||||
| break | |||||
| } | |||||
| } | |||||
| // Substitute the variable reference (including '%(' and ')s') with the new value. | |||||
| val = strings.Replace(val, vr, nk.value, -1) | |||||
| } | |||||
| return val | |||||
| } | |||||
| // String returns string representation of value. | |||||
| func (k *Key) String() string { | |||||
| return k.transformValue(k.value) | |||||
| } | |||||
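| // Illustrative usage sketch, not part of the upstream library; the content | |||||
| // is hypothetical. String resolves %(name)s references against the same | |||||
| // section first and then the default section: | |||||
| func exampleSubstitution() (string, error) { | |||||
| f, err := Load([]byte("root = /opt\n[paths]\ndata = %(root)s/data")) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| // Yields "/opt/data". | |||||
| return f.Section("paths").Key("data").String(), nil | |||||
| } | |||||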
| // Validate accepts a validate function which can | |||||
| // return a modified result as the key value. | |||||
| func (k *Key) Validate(fn func(string) string) string { | |||||
| return fn(k.String()) | |||||
| } | |||||
| // parseBool returns the boolean value represented by the string. | |||||
| // | |||||
| // It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, | |||||
| // 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. | |||||
| // Any other value returns an error. | |||||
| func parseBool(str string) (value bool, err error) { | |||||
| switch str { | |||||
| case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": | |||||
| return true, nil | |||||
| case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": | |||||
| return false, nil | |||||
| } | |||||
| return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) | |||||
| } | |||||
| // Bool returns bool type value. | |||||
| func (k *Key) Bool() (bool, error) { | |||||
| return parseBool(k.String()) | |||||
| } | |||||
| // Float64 returns float64 type value. | |||||
| func (k *Key) Float64() (float64, error) { | |||||
| return strconv.ParseFloat(k.String(), 64) | |||||
| } | |||||
| // Int returns int type value. | |||||
| func (k *Key) Int() (int, error) { | |||||
| v, err := strconv.ParseInt(k.String(), 0, 64) | |||||
| return int(v), err | |||||
| } | |||||
| // Int64 returns int64 type value. | |||||
| func (k *Key) Int64() (int64, error) { | |||||
| return strconv.ParseInt(k.String(), 0, 64) | |||||
| } | |||||
| // Uint returns uint type value. | |||||
| func (k *Key) Uint() (uint, error) { | |||||
| u, e := strconv.ParseUint(k.String(), 0, 64) | |||||
| return uint(u), e | |||||
| } | |||||
| // Uint64 returns uint64 type value. | |||||
| func (k *Key) Uint64() (uint64, error) { | |||||
| return strconv.ParseUint(k.String(), 0, 64) | |||||
| } | |||||
| // Duration returns time.Duration type value. | |||||
| func (k *Key) Duration() (time.Duration, error) { | |||||
| return time.ParseDuration(k.String()) | |||||
| } | |||||
| // TimeFormat parses with given format and returns time.Time type value. | |||||
| func (k *Key) TimeFormat(format string) (time.Time, error) { | |||||
| return time.Parse(format, k.String()) | |||||
| } | |||||
| // Time parses with RFC3339 format and returns time.Time type value. | |||||
| func (k *Key) Time() (time.Time, error) { | |||||
| return k.TimeFormat(time.RFC3339) | |||||
| } | |||||
| // MustString returns default value if key value is empty. | |||||
| func (k *Key) MustString(defaultVal string) string { | |||||
| val := k.String() | |||||
| if len(val) == 0 { | |||||
| k.value = defaultVal | |||||
| return defaultVal | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustBool always returns a value without error; | |||||
| // it returns false if an error occurs. | |||||
| func (k *Key) MustBool(defaultVal ...bool) bool { | |||||
| val, err := k.Bool() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatBool(defaultVal[0]) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustFloat64 always returns a value without error; | |||||
| // it returns 0.0 if an error occurs. | |||||
| func (k *Key) MustFloat64(defaultVal ...float64) float64 { | |||||
| val, err := k.Float64() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustInt always returns a value without error; | |||||
| // it returns 0 if an error occurs. | |||||
| func (k *Key) MustInt(defaultVal ...int) int { | |||||
| val, err := k.Int() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatInt(int64(defaultVal[0]), 10) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustInt64 always returns a value without error; | |||||
| // it returns 0 if an error occurs. | |||||
| func (k *Key) MustInt64(defaultVal ...int64) int64 { | |||||
| val, err := k.Int64() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatInt(defaultVal[0], 10) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustUint always returns a value without error; | |||||
| // it returns 0 if an error occurs. | |||||
| func (k *Key) MustUint(defaultVal ...uint) uint { | |||||
| val, err := k.Uint() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustUint64 always returns a value without error; | |||||
| // it returns 0 if an error occurs. | |||||
| func (k *Key) MustUint64(defaultVal ...uint64) uint64 { | |||||
| val, err := k.Uint64() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = strconv.FormatUint(defaultVal[0], 10) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustDuration always returns a value without error; | |||||
| // it returns the zero value if an error occurs. | |||||
| func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { | |||||
| val, err := k.Duration() | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = defaultVal[0].String() | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustTimeFormat always parses with the given format and returns a value without error; | |||||
| // it returns the zero value if an error occurs. | |||||
| func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { | |||||
| val, err := k.TimeFormat(format) | |||||
| if len(defaultVal) > 0 && err != nil { | |||||
| k.value = defaultVal[0].Format(format) | |||||
| return defaultVal[0] | |||||
| } | |||||
| return val | |||||
| } | |||||
| // MustTime always parses with RFC3339 format and returns a value without error; | |||||
| // it returns the zero value if an error occurs. | |||||
| func (k *Key) MustTime(defaultVal ...time.Time) time.Time { | |||||
| return k.MustTimeFormat(time.RFC3339, defaultVal...) | |||||
| } | |||||
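| // Illustrative usage sketch, not part of the upstream library; key names | |||||
| // are hypothetical. The Must* helpers swallow parse errors and, when a | |||||
| // default is supplied, also write it back as the key value: | |||||
| func exampleMust(s *Section) { | |||||
| port := s.Key("port").MustInt(8080) // 8080 when missing or unparseable | |||||
| debug := s.Key("debug").MustBool()  // zero value (false) on error | |||||
| _, _ = port, debug | |||||
| } | |||||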
| // In always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) In(defaultVal string, candidates []string) string { | |||||
| val := k.String() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InFloat64 always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { | |||||
| val := k.MustFloat64() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InInt always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InInt(defaultVal int, candidates []int) int { | |||||
| val := k.MustInt() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InInt64 always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { | |||||
| val := k.MustInt64() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InUint always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InUint(defaultVal uint, candidates []uint) uint { | |||||
| val := k.MustUint() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InUint64 always returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { | |||||
| val := k.MustUint64() | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InTimeFormat always parses with the given format and returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { | |||||
| val := k.MustTimeFormat(format) | |||||
| for _, cand := range candidates { | |||||
| if val == cand { | |||||
| return val | |||||
| } | |||||
| } | |||||
| return defaultVal | |||||
| } | |||||
| // InTime always parses with RFC3339 format and returns a value without error; | |||||
| // it returns the default value if an error occurs or the value doesn't fit into the candidates. | |||||
| func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { | |||||
| return k.InTimeFormat(time.RFC3339, defaultVal, candidates) | |||||
| } | |||||
| // RangeFloat64 checks if value is in given range inclusively, | |||||
| // and returns default value if it's not. | |||||
| func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { | |||||
| val := k.MustFloat64() | |||||
| if val < min || val > max { | |||||
| return defaultVal | |||||
| } | |||||
| return val | |||||
| } | |||||
| // RangeInt checks if value is in given range inclusively, | |||||
| // and returns default value if it's not. | |||||
| func (k *Key) RangeInt(defaultVal, min, max int) int { | |||||
| val := k.MustInt() | |||||
| if val < min || val > max { | |||||
| return defaultVal | |||||
| } | |||||
| return val | |||||
| } | |||||
| // RangeInt64 checks if value is in given range inclusively, | |||||
| // and returns default value if it's not. | |||||
| func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { | |||||
| val := k.MustInt64() | |||||
| if val < min || val > max { | |||||
| return defaultVal | |||||
| } | |||||
| return val | |||||
| } | |||||
| // RangeTimeFormat checks if value with given format is in given range inclusively, | |||||
| // and returns default value if it's not. | |||||
| func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { | |||||
| val := k.MustTimeFormat(format) | |||||
| if val.Unix() < min.Unix() || val.Unix() > max.Unix() { | |||||
| return defaultVal | |||||
| } | |||||
| return val | |||||
| } | |||||
| // RangeTime checks if value with RFC3339 format is in given range inclusively, | |||||
| // and returns default value if it's not. | |||||
| func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { | |||||
| return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) | |||||
| } | |||||
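| // Illustrative usage sketch, not part of the upstream library; the key | |||||
| // name is hypothetical. Range* helpers fall back to the default value | |||||
| // whenever the parsed value lies outside the inclusive range: | |||||
| func exampleRange(s *Section) int { | |||||
| return s.Key("port").RangeInt(8080, 1, 65535) | |||||
| } | |||||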
| // Strings returns list of string divided by given delimiter. | |||||
| func (k *Key) Strings(delim string) []string { | |||||
| str := k.String() | |||||
| if len(str) == 0 { | |||||
| return []string{} | |||||
| } | |||||
| runes := []rune(str) | |||||
| vals := make([]string, 0, 2) | |||||
| var buf bytes.Buffer | |||||
| escape := false | |||||
| idx := 0 | |||||
| for { | |||||
| if escape { | |||||
| escape = false | |||||
| if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { | |||||
| buf.WriteRune('\\') | |||||
| } | |||||
| buf.WriteRune(runes[idx]) | |||||
| } else { | |||||
| if runes[idx] == '\\' { | |||||
| escape = true | |||||
| } else if strings.HasPrefix(string(runes[idx:]), delim) { | |||||
| idx += len(delim) - 1 | |||||
| vals = append(vals, strings.TrimSpace(buf.String())) | |||||
| buf.Reset() | |||||
| } else { | |||||
| buf.WriteRune(runes[idx]) | |||||
| } | |||||
| } | |||||
| idx++ | |||||
| if idx == len(runes) { | |||||
| break | |||||
| } | |||||
| } | |||||
| if buf.Len() > 0 { | |||||
| vals = append(vals, strings.TrimSpace(buf.String())) | |||||
| } | |||||
| return vals | |||||
| } | |||||
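| // Illustrative usage sketch, not part of the upstream library; the content | |||||
| // is hypothetical. Strings honors backslash-escaped delimiters: | |||||
| func exampleStrings() ([]string, error) { | |||||
| f, err := Load([]byte(`kinds = a,b,c\,d`)) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Returns ["a", "b", "c,d"]; the escaped comma stays in the value. | |||||
| return f.Section("").Key("kinds").Strings(","), nil | |||||
| } | |||||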
| // StringsWithShadows returns list of string divided by given delimiter. | |||||
| // Shadows will also be appended if any. | |||||
| func (k *Key) StringsWithShadows(delim string) []string { | |||||
| vals := k.ValueWithShadows() | |||||
| results := make([]string, 0, len(vals)*2) | |||||
| for i := range vals { | |||||
| // Skip empty values. | |||||
| if len(vals[i]) == 0 { | |||||
| continue | |||||
| } | |||||
| results = append(results, strings.Split(vals[i], delim)...) | |||||
| } | |||||
| for i := range results { | |||||
| results[i] = k.transformValue(strings.TrimSpace(results[i])) | |||||
| } | |||||
| return results | |||||
| } | |||||
| // Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Float64s(delim string) []float64 { | |||||
| vals, _ := k.parseFloat64s(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Ints(delim string) []int { | |||||
| vals, _ := k.parseInts(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Int64s(delim string) []int64 { | |||||
| vals, _ := k.parseInt64s(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Uints(delim string) []uint { | |||||
| vals, _ := k.parseUints(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Uint64s(delim string) []uint64 { | |||||
| vals, _ := k.parseUint64s(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. | |||||
| func (k *Key) Bools(delim string) []bool { | |||||
| vals, _ := k.parseBools(k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // TimesFormat parses with given format and returns list of time.Time divided by given delimiter. | |||||
| // Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). | |||||
| func (k *Key) TimesFormat(format, delim string) []time.Time { | |||||
| vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) | |||||
| return vals | |||||
| } | |||||
| // Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. | |||||
| // Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). | |||||
| func (k *Key) Times(delim string) []time.Time { | |||||
| return k.TimesFormat(time.RFC3339, delim) | |||||
| } | |||||
| // ValidFloat64s returns list of float64 divided by given delimiter. If some value is not a float, then | |||||
| // it will not be included in the result list. | |||||
| func (k *Key) ValidFloat64s(delim string) []float64 { | |||||
| vals, _ := k.parseFloat64s(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidInts returns list of int divided by given delimiter. If some value is not an integer, then it will | |||||
| // not be included in the result list. | |||||
| func (k *Key) ValidInts(delim string) []int { | |||||
| vals, _ := k.parseInts(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidInt64s returns list of int64 divided by given delimiter. If some value is not a 64-bit integer, | |||||
| // then it will not be included in the result list. | |||||
| func (k *Key) ValidInt64s(delim string) []int64 { | |||||
| vals, _ := k.parseInt64s(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidUints returns list of uint divided by given delimiter. If some value is not an unsigned integer, | |||||
| // then it will not be included in the result list. | |||||
| func (k *Key) ValidUints(delim string) []uint { | |||||
| vals, _ := k.parseUints(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidUint64s returns list of uint64 divided by given delimiter. If some value is not a 64-bit unsigned | |||||
| // integer, then it will not be included in the result list. | |||||
| func (k *Key) ValidUint64s(delim string) []uint64 { | |||||
| vals, _ := k.parseUint64s(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidBools returns list of bool divided by given delimiter. If some value is not a boolean, then | |||||
| // it will not be included in the result list. | |||||
| func (k *Key) ValidBools(delim string) []bool { | |||||
| vals, _ := k.parseBools(k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. | |||||
| func (k *Key) ValidTimesFormat(format, delim string) []time.Time { | |||||
| vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) | |||||
| return vals | |||||
| } | |||||
| // ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. | |||||
| func (k *Key) ValidTimes(delim string) []time.Time { | |||||
| return k.ValidTimesFormat(time.RFC3339, delim) | |||||
| } | |||||
| // StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictFloat64s(delim string) ([]float64, error) { | |||||
| return k.parseFloat64s(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictInts returns list of int divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictInts(delim string) ([]int, error) { | |||||
| return k.parseInts(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictInt64s(delim string) ([]int64, error) { | |||||
| return k.parseInt64s(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictUints returns list of uint divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictUints(delim string) ([]uint, error) { | |||||
| return k.parseUints(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictUint64s(delim string) ([]uint64, error) { | |||||
| return k.parseUint64s(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictBools returns list of bool divided by given delimiter or error on first invalid input. | |||||
| func (k *Key) StrictBools(delim string) ([]bool, error) { | |||||
| return k.parseBools(k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter | |||||
| // or error on first invalid input. | |||||
| func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { | |||||
| return k.parseTimesFormat(format, k.Strings(delim), false, true) | |||||
| } | |||||
| // StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter | |||||
| // or error on first invalid input. | |||||
| func (k *Key) StrictTimes(delim string) ([]time.Time, error) { | |||||
| return k.StrictTimesFormat(time.RFC3339, delim) | |||||
| } | |||||
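| // Illustrative usage sketch, not part of the upstream library; the content | |||||
| // is hypothetical. The three list flavors differ only in how invalid | |||||
| // entries are handled: | |||||
| func exampleLists() { | |||||
| f, err := Load([]byte("nums = 1,x,3")) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| k := f.Section("").Key("nums") | |||||
| _ = k.Ints(",")      // [1, 0, 3]: invalid entries become zero values | |||||
| _ = k.ValidInts(",") // [1, 3]: invalid entries are dropped | |||||
| _, _ = k.StrictInts(",") // returns an error on the first invalid entry | |||||
| } | |||||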
| // parseBools transforms strings to bools. | |||||
| func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { | |||||
| vals := make([]bool, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := parseBool(str) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, val.(bool)) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // parseFloat64s transforms strings to float64s. | |||||
| func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { | |||||
| vals := make([]float64, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := strconv.ParseFloat(str, 64) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, val.(float64)) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // parseInts transforms strings to ints. | |||||
| func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { | |||||
| vals := make([]int, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := strconv.ParseInt(str, 0, 64) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, int(val.(int64))) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // parseInt64s transforms strings to int64s. | |||||
| func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { | |||||
| vals := make([]int64, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := strconv.ParseInt(str, 0, 64) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, val.(int64)) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // parseUints transforms strings to uints. | |||||
| func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { | |||||
| vals := make([]uint, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := strconv.ParseUint(str, 0, 64) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, uint(val.(uint64))) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // parseUint64s transforms strings to uint64s. | |||||
| func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { | |||||
| vals := make([]uint64, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := strconv.ParseUint(str, 0, 64) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, val.(uint64)) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // Parser is a function that parses a single string into a typed value. | |||||
| type Parser func(str string) (interface{}, error) | |||||
| // parseTimesFormat transforms strings to times in given format. | |||||
| func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { | |||||
| vals := make([]time.Time, 0, len(strs)) | |||||
| parser := func(str string) (interface{}, error) { | |||||
| val, err := time.Parse(format, str) | |||||
| return val, err | |||||
| } | |||||
| rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) | |||||
| if err == nil { | |||||
| for _, val := range rawVals { | |||||
| vals = append(vals, val.(time.Time)) | |||||
| } | |||||
| } | |||||
| return vals, err | |||||
| } | |||||
| // doParse transforms strings to different types using the given parser. | |||||
| func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { | |||||
| vals := make([]interface{}, 0, len(strs)) | |||||
| for _, str := range strs { | |||||
| val, err := parser(str) | |||||
| if err != nil && returnOnInvalid { | |||||
| return nil, err | |||||
| } | |||||
| if err == nil || addInvalid { | |||||
| vals = append(vals, val) | |||||
| } | |||||
| } | |||||
| return vals, nil | |||||
| } | |||||
| // SetValue changes key value. | |||||
| func (k *Key) SetValue(v string) { | |||||
| if k.s.f.BlockMode { | |||||
| k.s.f.lock.Lock() | |||||
| defer k.s.f.lock.Unlock() | |||||
| } | |||||
| k.value = v | |||||
| k.s.keysHash[k.name] = v | |||||
| } | |||||
| @@ -0,0 +1,535 @@ | |||||
| // Copyright 2015 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| import ( | |||||
| "bufio" | |||||
| "bytes" | |||||
| "fmt" | |||||
| "io" | |||||
| "regexp" | |||||
| "strconv" | |||||
| "strings" | |||||
| "unicode" | |||||
| ) | |||||
| const minReaderBufferSize = 4096 | |||||
| var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) | |||||
| type parserOptions struct { | |||||
| IgnoreContinuation bool | |||||
| IgnoreInlineComment bool | |||||
| AllowPythonMultilineValues bool | |||||
| SpaceBeforeInlineComment bool | |||||
| UnescapeValueDoubleQuotes bool | |||||
| UnescapeValueCommentSymbols bool | |||||
| PreserveSurroundedQuote bool | |||||
| DebugFunc DebugFunc | |||||
| ReaderBufferSize int | |||||
| } | |||||
| type parser struct { | |||||
| buf *bufio.Reader | |||||
| options parserOptions | |||||
| isEOF bool | |||||
| count int | |||||
| comment *bytes.Buffer | |||||
| } | |||||
| func (p *parser) debug(format string, args ...interface{}) { | |||||
| if p.options.DebugFunc != nil { | |||||
| p.options.DebugFunc(fmt.Sprintf(format, args...)) | |||||
| } | |||||
| } | |||||
| func newParser(r io.Reader, opts parserOptions) *parser { | |||||
| size := opts.ReaderBufferSize | |||||
| if size < minReaderBufferSize { | |||||
| size = minReaderBufferSize | |||||
| } | |||||
| return &parser{ | |||||
| buf: bufio.NewReaderSize(r, size), | |||||
| options: opts, | |||||
| count: 1, | |||||
| comment: &bytes.Buffer{}, | |||||
| } | |||||
| } | |||||
| // BOM handles the byte order mark (BOM) header of UTF-8, UTF-16 LE and UTF-16 BE encodings. | |||||
| // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding | |||||
| func (p *parser) BOM() error { | |||||
| mask, err := p.buf.Peek(2) | |||||
| if err != nil && err != io.EOF { | |||||
| return err | |||||
| } else if len(mask) < 2 { | |||||
| return nil | |||||
| } | |||||
| switch { | |||||
| case mask[0] == 254 && mask[1] == 255: | |||||
| fallthrough | |||||
| case mask[0] == 255 && mask[1] == 254: | |||||
| _, err = p.buf.Read(mask) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| case mask[0] == 239 && mask[1] == 187: | |||||
| mask, err := p.buf.Peek(3) | |||||
| if err != nil && err != io.EOF { | |||||
| return err | |||||
| } else if len(mask) < 3 { | |||||
| return nil | |||||
| } | |||||
| if mask[2] == 191 { | |||||
| _, err = p.buf.Read(mask) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func (p *parser) readUntil(delim byte) ([]byte, error) { | |||||
| data, err := p.buf.ReadBytes(delim) | |||||
| if err != nil { | |||||
| if err == io.EOF { | |||||
| p.isEOF = true | |||||
| } else { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| return data, nil | |||||
| } | |||||
| func cleanComment(in []byte) ([]byte, bool) { | |||||
| i := bytes.IndexAny(in, "#;") | |||||
| if i == -1 { | |||||
| return nil, false | |||||
| } | |||||
| return in[i:], true | |||||
| } | |||||
| func readKeyName(delimiters string, in []byte) (string, int, error) { | |||||
| line := string(in) | |||||
| // Check if key name surrounded by quotes. | |||||
| var keyQuote string | |||||
| if line[0] == '"' { | |||||
| if len(line) > 6 && string(line[0:3]) == `"""` { | |||||
| keyQuote = `"""` | |||||
| } else { | |||||
| keyQuote = `"` | |||||
| } | |||||
| } else if line[0] == '`' { | |||||
| keyQuote = "`" | |||||
| } | |||||
| // Get out key name | |||||
| var endIdx int | |||||
| if len(keyQuote) > 0 { | |||||
| startIdx := len(keyQuote) | |||||
| // FIXME: fail case -> """"""name"""=value | |||||
| pos := strings.Index(line[startIdx:], keyQuote) | |||||
| if pos == -1 { | |||||
| return "", -1, fmt.Errorf("missing closing key quote: %s", line) | |||||
| } | |||||
| pos += startIdx | |||||
| // Find key-value delimiter | |||||
| i := strings.IndexAny(line[pos+startIdx:], delimiters) | |||||
| if i < 0 { | |||||
| return "", -1, ErrDelimiterNotFound{line} | |||||
| } | |||||
| endIdx = pos + i | |||||
| return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil | |||||
| } | |||||
| endIdx = strings.IndexAny(line, delimiters) | |||||
| if endIdx < 0 { | |||||
| return "", -1, ErrDelimiterNotFound{line} | |||||
| } | |||||
| return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil | |||||
| } | |||||
| func (p *parser) readMultilines(line, val, valQuote string) (string, error) { | |||||
| for { | |||||
| data, err := p.readUntil('\n') | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| next := string(data) | |||||
| pos := strings.LastIndex(next, valQuote) | |||||
| if pos > -1 { | |||||
| val += next[:pos] | |||||
| comment, has := cleanComment([]byte(next[pos:])) | |||||
| if has { | |||||
| p.comment.Write(bytes.TrimSpace(comment)) | |||||
| } | |||||
| break | |||||
| } | |||||
| val += next | |||||
| if p.isEOF { | |||||
| return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) | |||||
| } | |||||
| } | |||||
| return val, nil | |||||
| } | |||||
| func (p *parser) readContinuationLines(val string) (string, error) { | |||||
| for { | |||||
| data, err := p.readUntil('\n') | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| next := strings.TrimSpace(string(data)) | |||||
| if len(next) == 0 { | |||||
| break | |||||
| } | |||||
| val += next | |||||
| if val[len(val)-1] != '\\' { | |||||
| break | |||||
| } | |||||
| val = val[:len(val)-1] | |||||
| } | |||||
| return val, nil | |||||
| } | |||||
| // hasSurroundedQuote checks whether the first and last characters | |||||
| // are the same kind of quote (\" or \'). | |||||
| // It returns false if any other part also contains that kind of quote. | |||||
| func hasSurroundedQuote(in string, quote byte) bool { | |||||
| return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && | |||||
| strings.IndexByte(in[1:], quote) == len(in)-2 | |||||
| } | |||||
| func (p *parser) readValue(in []byte, bufferSize int) (string, error) { | |||||
| line := strings.TrimLeftFunc(string(in), unicode.IsSpace) | |||||
| if len(line) == 0 { | |||||
| if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { | |||||
| return p.readPythonMultilines(line, bufferSize) | |||||
| } | |||||
| return "", nil | |||||
| } | |||||
| var valQuote string | |||||
| if len(line) > 3 && string(line[0:3]) == `"""` { | |||||
| valQuote = `"""` | |||||
| } else if line[0] == '`' { | |||||
| valQuote = "`" | |||||
| } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { | |||||
| valQuote = `"` | |||||
| } | |||||
| if len(valQuote) > 0 { | |||||
| startIdx := len(valQuote) | |||||
| pos := strings.LastIndex(line[startIdx:], valQuote) | |||||
| // Check for multi-line value | |||||
| if pos == -1 { | |||||
| return p.readMultilines(line, line[startIdx:], valQuote) | |||||
| } | |||||
| if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { | |||||
| return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil | |||||
| } | |||||
| return line[startIdx : pos+startIdx], nil | |||||
| } | |||||
| lastChar := line[len(line)-1] | |||||
| // Won't be able to reach here if value only contains whitespace | |||||
| line = strings.TrimSpace(line) | |||||
| trimmedLastChar := line[len(line)-1] | |||||
| // Check continuation lines when desired | |||||
| if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { | |||||
| return p.readContinuationLines(line[:len(line)-1]) | |||||
| } | |||||
| // Strip inline comments unless configured to ignore them | |||||
| if !p.options.IgnoreInlineComment { | |||||
| var i int | |||||
| if p.options.SpaceBeforeInlineComment { | |||||
| i = strings.Index(line, " #") | |||||
| if i == -1 { | |||||
| i = strings.Index(line, " ;") | |||||
| } | |||||
| } else { | |||||
| i = strings.IndexAny(line, "#;") | |||||
| } | |||||
| if i > -1 { | |||||
| p.comment.WriteString(line[i:]) | |||||
| line = strings.TrimSpace(line[:i]) | |||||
| } | |||||
| } | |||||
| // Trim single and double quotes | |||||
| if (hasSurroundedQuote(line, '\'') || | |||||
| hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { | |||||
| line = line[1 : len(line)-1] | |||||
| } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { | |||||
| if strings.Contains(line, `\;`) { | |||||
| line = strings.Replace(line, `\;`, ";", -1) | |||||
| } | |||||
| if strings.Contains(line, `\#`) { | |||||
| line = strings.Replace(line, `\#`, "#", -1) | |||||
| } | |||||
| } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { | |||||
| return p.readPythonMultilines(line, bufferSize) | |||||
| } | |||||
| return line, nil | |||||
| } | |||||
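| // readPythonMultilines collects continuation lines indented deeper than the | |||||
| // key itself, in the style of Python configuration files, e.g.: | |||||
| //	multiline = | |||||
| //	    first line | |||||
| //	    second line | |||||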
| func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { | |||||
| parserBufferPeekResult, _ := p.buf.Peek(bufferSize) | |||||
| peekBuffer := bytes.NewBuffer(parserBufferPeekResult) | |||||
| indentSize := 0 | |||||
| for { | |||||
| peekData, peekErr := peekBuffer.ReadBytes('\n') | |||||
| if peekErr != nil { | |||||
| if peekErr == io.EOF { | |||||
| p.debug("readPythonMultilines: io.EOF, peekData: %q, line: %q", string(peekData), line) | |||||
| return line, nil | |||||
| } | |||||
| p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) | |||||
| return "", peekErr | |||||
| } | |||||
| p.debug("readPythonMultilines: parsing %q", string(peekData)) | |||||
| peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) | |||||
| p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) | |||||
| for n, v := range peekMatches { | |||||
| p.debug(" %d: %q", n, v) | |||||
| } | |||||
| // Return if not a Python multiline value. | |||||
| if len(peekMatches) != 3 { | |||||
| p.debug("readPythonMultilines: end of value, got: %q", line) | |||||
| return line, nil | |||||
| } | |||||
| // Determine indent size and line prefix. | |||||
| currentIndentSize := len(peekMatches[1]) | |||||
| if indentSize < 1 { | |||||
| indentSize = currentIndentSize | |||||
| p.debug("readPythonMultilines: indent size is %d", indentSize) | |||||
| } | |||||
| // Make sure each line is indented at least as far as the first line. | |||||
| if currentIndentSize < indentSize { | |||||
| p.debug("readPythonMultilines: end of value, current indent: %d, expected indent: %d, line: %q", currentIndentSize, indentSize, line) | |||||
| return line, nil | |||||
| } | |||||
| // Advance the parser reader (buffer) in-sync with the peek buffer. | |||||
| _, err := p.buf.Discard(len(peekData)) | |||||
| if err != nil { | |||||
| p.debug("readPythonMultilines: failed to skip to the end, returning error") | |||||
| return "", err | |||||
| } | |||||
| // Handle indented empty line. | |||||
| line += "\n" + peekMatches[1][indentSize:] + peekMatches[2] | |||||
| } | |||||
| } | |||||
| // parse parses data from an io.Reader. | |||||
| func (f *File) parse(reader io.Reader) (err error) { | |||||
| p := newParser(reader, parserOptions{ | |||||
| IgnoreContinuation: f.options.IgnoreContinuation, | |||||
| IgnoreInlineComment: f.options.IgnoreInlineComment, | |||||
| AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, | |||||
| SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, | |||||
| UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, | |||||
| UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, | |||||
| PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, | |||||
| DebugFunc: f.options.DebugFunc, | |||||
| ReaderBufferSize: f.options.ReaderBufferSize, | |||||
| }) | |||||
| if err = p.BOM(); err != nil { | |||||
| return fmt.Errorf("BOM: %v", err) | |||||
| } | |||||
| // Ignore the error below because the default section name is never an empty string. | |||||
| name := DefaultSection | |||||
| if f.options.Insensitive { | |||||
| name = strings.ToLower(DefaultSection) | |||||
| } | |||||
| section, _ := f.NewSection(name) | |||||
| // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key | |||||
| var isLastValueEmpty bool | |||||
| var lastRegularKey *Key | |||||
| var line []byte | |||||
| var inUnparseableSection bool | |||||
| // NOTE: Iterate and increase `currentPeekSize` until | |||||
| // the size of the parser buffer is found. | |||||
| // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. | |||||
| parserBufferSize := 0 | |||||
| // NOTE: Peek 4kb at a time. | |||||
| currentPeekSize := minReaderBufferSize | |||||
| if f.options.AllowPythonMultilineValues { | |||||
| for { | |||||
| peekBytes, _ := p.buf.Peek(currentPeekSize) | |||||
| peekBytesLength := len(peekBytes) | |||||
| if parserBufferSize >= peekBytesLength { | |||||
| break | |||||
| } | |||||
| currentPeekSize *= 2 | |||||
| parserBufferSize = peekBytesLength | |||||
| } | |||||
| } | |||||
| for !p.isEOF { | |||||
| line, err = p.readUntil('\n') | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if f.options.AllowNestedValues && | |||||
| isLastValueEmpty && len(line) > 0 { | |||||
| if line[0] == ' ' || line[0] == '\t' { | |||||
| err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| continue | |||||
| } | |||||
| } | |||||
| line = bytes.TrimLeftFunc(line, unicode.IsSpace) | |||||
| if len(line) == 0 { | |||||
| continue | |||||
| } | |||||
| // Comments | |||||
| if line[0] == '#' || line[0] == ';' { | |||||
| // Note: we do not trim the trailing line break here; | |||||
| // it is needed when a second comment line is appended, | |||||
| // so the comment is cleaned once at the end, when it is assigned to a value. | |||||
| p.comment.Write(line) | |||||
| continue | |||||
| } | |||||
| // Section | |||||
| if line[0] == '[' { | |||||
| // Read to the next ']' (TODO: support quoted strings) | |||||
| closeIdx := bytes.LastIndexByte(line, ']') | |||||
| if closeIdx == -1 { | |||||
| return fmt.Errorf("unclosed section: %s", line) | |||||
| } | |||||
| name := string(line[1:closeIdx]) | |||||
| section, err = f.NewSection(name) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| comment, has := cleanComment(line[closeIdx+1:]) | |||||
| if has { | |||||
| p.comment.Write(comment) | |||||
| } | |||||
| section.Comment = strings.TrimSpace(p.comment.String()) | |||||
| // Reset auto-counter and comments | |||||
| p.comment.Reset() | |||||
| p.count = 1 | |||||
| inUnparseableSection = false | |||||
| for i := range f.options.UnparseableSections { | |||||
| if f.options.UnparseableSections[i] == name || | |||||
| (f.options.Insensitive && strings.EqualFold(f.options.UnparseableSections[i], name)) { | |||||
| inUnparseableSection = true | |||||
| continue | |||||
| } | |||||
| } | |||||
| continue | |||||
| } | |||||
| if inUnparseableSection { | |||||
| section.isRawSection = true | |||||
| section.rawBody += string(line) | |||||
| continue | |||||
| } | |||||
| kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) | |||||
| if err != nil { | |||||
| // Treat as a boolean key when desired; the whole line is the key name. | |||||
| if IsErrDelimiterNotFound(err) { | |||||
| switch { | |||||
| case f.options.AllowBooleanKeys: | |||||
| kname, err := p.readValue(line, parserBufferSize) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| key, err := section.NewBooleanKey(kname) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| key.Comment = strings.TrimSpace(p.comment.String()) | |||||
| p.comment.Reset() | |||||
| continue | |||||
| case f.options.SkipUnrecognizableLines: | |||||
| continue | |||||
| } | |||||
| } | |||||
| return err | |||||
| } | |||||
| // Auto-increment: a key named "-" becomes "#1", "#2", ... within the section. | |||||
| isAutoIncr := false | |||||
| if kname == "-" { | |||||
| isAutoIncr = true | |||||
| kname = "#" + strconv.Itoa(p.count) | |||||
| p.count++ | |||||
| } | |||||
| value, err := p.readValue(line[offset:], parserBufferSize) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| isLastValueEmpty = len(value) == 0 | |||||
| key, err := section.NewKey(kname, value) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| key.isAutoIncrement = isAutoIncr | |||||
| key.Comment = strings.TrimSpace(p.comment.String()) | |||||
| p.comment.Reset() | |||||
| lastRegularKey = key | |||||
| } | |||||
| return nil | |||||
| } | |||||
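| For orientation, here is a minimal, hedged sketch of how the parser behavior above surfaces through the public go-ini API (`ini.LoadSources` and `ini.LoadOptions` are the standard entry points; the INI content is illustrative): | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "fmt" | |||||
| "github.com/go-ini/ini" | |||||
| ) | |||||
| func main() { | |||||
| // These options correspond to the parser options handled above. | |||||
| cfg, err := ini.LoadSources(ini.LoadOptions{ | |||||
| AllowPythonMultilineValues: true, | |||||
| AllowBooleanKeys:           true, | |||||
| }, []byte("[server]\nverbose\naddr = 0.0.0.0:8080\n")) | |||||
| if err != nil { | |||||
| panic(err) | |||||
| } | |||||
| // "verbose" has no delimiter, so it was parsed as a boolean key. | |||||
| fmt.Println(cfg.Section("server").Key("verbose").MustBool(false)) // true | |||||
| fmt.Println(cfg.Section("server").Key("addr").String())           // 0.0.0.0:8080 | |||||
| } | |||||
| ``` | |||||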
| @@ -0,0 +1,256 @@ | |||||
| // Copyright 2014 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| import ( | |||||
| "errors" | |||||
| "fmt" | |||||
| "strings" | |||||
| ) | |||||
| // Section represents a config section. | |||||
| type Section struct { | |||||
| f *File | |||||
| Comment string | |||||
| name string | |||||
| keys map[string]*Key | |||||
| keyList []string | |||||
| keysHash map[string]string | |||||
| isRawSection bool | |||||
| rawBody string | |||||
| } | |||||
| func newSection(f *File, name string) *Section { | |||||
| return &Section{ | |||||
| f: f, | |||||
| name: name, | |||||
| keys: make(map[string]*Key), | |||||
| keyList: make([]string, 0, 10), | |||||
| keysHash: make(map[string]string), | |||||
| } | |||||
| } | |||||
| // Name returns name of Section. | |||||
| func (s *Section) Name() string { | |||||
| return s.name | |||||
| } | |||||
| // Body returns the raw body of the section if the section was marked as unparseable. | |||||
| // It still follows the other rules of the INI format surrounding leading/trailing whitespace. | |||||
| func (s *Section) Body() string { | |||||
| return strings.TrimSpace(s.rawBody) | |||||
| } | |||||
| // SetBody updates body content only if section is raw. | |||||
| func (s *Section) SetBody(body string) { | |||||
| if !s.isRawSection { | |||||
| return | |||||
| } | |||||
| s.rawBody = body | |||||
| } | |||||
| // NewKey creates a new key in the given section. | |||||
| func (s *Section) NewKey(name, val string) (*Key, error) { | |||||
| if len(name) == 0 { | |||||
| return nil, errors.New("error creating new key: empty key name") | |||||
| } else if s.f.options.Insensitive { | |||||
| name = strings.ToLower(name) | |||||
| } | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.Lock() | |||||
| defer s.f.lock.Unlock() | |||||
| } | |||||
| if inSlice(name, s.keyList) { | |||||
| if s.f.options.AllowShadows { | |||||
| if err := s.keys[name].addShadow(val); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } else { | |||||
| s.keys[name].value = val | |||||
| s.keysHash[name] = val | |||||
| } | |||||
| return s.keys[name], nil | |||||
| } | |||||
| s.keyList = append(s.keyList, name) | |||||
| s.keys[name] = newKey(s, name, val) | |||||
| s.keysHash[name] = val | |||||
| return s.keys[name], nil | |||||
| } | |||||
| // NewBooleanKey creates a new boolean-type key in the given section. | |||||
| func (s *Section) NewBooleanKey(name string) (*Key, error) { | |||||
| key, err := s.NewKey(name, "true") | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| key.isBooleanType = true | |||||
| return key, nil | |||||
| } | |||||
| // GetKey returns the key in the section with the given name. | |||||
| func (s *Section) GetKey(name string) (*Key, error) { | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.RLock() | |||||
| } | |||||
| if s.f.options.Insensitive { | |||||
| name = strings.ToLower(name) | |||||
| } | |||||
| key := s.keys[name] | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.RUnlock() | |||||
| } | |||||
| if key == nil { | |||||
| // Check if it is a child section; if so, walk up the parent chain | |||||
| // (e.g. "a.b.c" -> "a.b" -> "a") looking for the key. | |||||
| sname := s.name | |||||
| for { | |||||
| if i := strings.LastIndex(sname, "."); i > -1 { | |||||
| sname = sname[:i] | |||||
| sec, err := s.f.GetSection(sname) | |||||
| if err != nil { | |||||
| continue | |||||
| } | |||||
| return sec.GetKey(name) | |||||
| } | |||||
| break | |||||
| } | |||||
| return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) | |||||
| } | |||||
| return key, nil | |||||
| } | |||||
| // HasKey returns true if the section contains a key with the given name. | |||||
| func (s *Section) HasKey(name string) bool { | |||||
| key, _ := s.GetKey(name) | |||||
| return key != nil | |||||
| } | |||||
| // Deprecated: Use "HasKey" instead. | |||||
| func (s *Section) Haskey(name string) bool { | |||||
| return s.HasKey(name) | |||||
| } | |||||
| // HasValue returns true if the section contains the given raw value. | |||||
| func (s *Section) HasValue(value string) bool { | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.RLock() | |||||
| defer s.f.lock.RUnlock() | |||||
| } | |||||
| for _, k := range s.keys { | |||||
| if value == k.value { | |||||
| return true | |||||
| } | |||||
| } | |||||
| return false | |||||
| } | |||||
| // Key assumes the named key exists in the section and returns an empty key when it does not. | |||||
| func (s *Section) Key(name string) *Key { | |||||
| key, err := s.GetKey(name) | |||||
| if err != nil { | |||||
| // It's OK to ignore the error here because the only possible error is an | |||||
| // empty key name, and an empty name can never reach this point. | |||||
| key, _ = s.NewKey(name, "") | |||||
| return key | |||||
| } | |||||
| return key | |||||
| } | |||||
| // Keys returns the list of keys in the section. | |||||
| func (s *Section) Keys() []*Key { | |||||
| keys := make([]*Key, len(s.keyList)) | |||||
| for i := range s.keyList { | |||||
| keys[i] = s.Key(s.keyList[i]) | |||||
| } | |||||
| return keys | |||||
| } | |||||
| // ParentKeys returns the list of keys from all parent sections. | |||||
| func (s *Section) ParentKeys() []*Key { | |||||
| var parentKeys []*Key | |||||
| sname := s.name | |||||
| for { | |||||
| if i := strings.LastIndex(sname, "."); i > -1 { | |||||
| sname = sname[:i] | |||||
| sec, err := s.f.GetSection(sname) | |||||
| if err != nil { | |||||
| continue | |||||
| } | |||||
| parentKeys = append(parentKeys, sec.Keys()...) | |||||
| } else { | |||||
| break | |||||
| } | |||||
| } | |||||
| return parentKeys | |||||
| } | |||||
| // KeyStrings returns the list of key names in the section. | |||||
| func (s *Section) KeyStrings() []string { | |||||
| list := make([]string, len(s.keyList)) | |||||
| copy(list, s.keyList) | |||||
| return list | |||||
| } | |||||
| // KeysHash returns a copy of the hash of key names to values. | |||||
| func (s *Section) KeysHash() map[string]string { | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.RLock() | |||||
| defer s.f.lock.RUnlock() | |||||
| } | |||||
| hash := map[string]string{} | |||||
| for key, value := range s.keysHash { | |||||
| hash[key] = value | |||||
| } | |||||
| return hash | |||||
| } | |||||
| // DeleteKey deletes a key from section. | |||||
| func (s *Section) DeleteKey(name string) { | |||||
| if s.f.BlockMode { | |||||
| s.f.lock.Lock() | |||||
| defer s.f.lock.Unlock() | |||||
| } | |||||
| for i, k := range s.keyList { | |||||
| if k == name { | |||||
| s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) | |||||
| delete(s.keys, name) | |||||
| delete(s.keysHash, name) | |||||
| return | |||||
| } | |||||
| } | |||||
| } | |||||
| // ChildSections returns a list of child sections of current section. | |||||
| // For example, "[parent.child1]" and "[parent.child12]" are child sections | |||||
| // of section "[parent]". | |||||
| func (s *Section) ChildSections() []*Section { | |||||
| prefix := s.name + "." | |||||
| children := make([]*Section, 0, 3) | |||||
| for _, name := range s.f.sectionList { | |||||
| if strings.HasPrefix(name, prefix) { | |||||
| children = append(children, s.f.sections[name]...) | |||||
| } | |||||
| } | |||||
| return children | |||||
| } | |||||
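| A short usage sketch of the Section API defined above (the section and key names are invented for illustration): | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "fmt" | |||||
| "github.com/go-ini/ini" | |||||
| ) | |||||
| func main() { | |||||
| cfg := ini.Empty() | |||||
| // NewSection only errors on an empty name, so the error is ignored here. | |||||
| sec, _ := cfg.NewSection("database") | |||||
| _, _ = sec.NewKey("host", "127.0.0.1") | |||||
| _, _ = sec.NewKey("port", "5432") | |||||
| fmt.Println(sec.HasKey("host"))     // true | |||||
| fmt.Println(sec.KeyStrings())       // [host port] | |||||
| fmt.Println(sec.KeysHash()["port"]) // 5432 | |||||
| } | |||||
| ``` | |||||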
| @@ -0,0 +1,724 @@ | |||||
| // Copyright 2014 Unknwon | |||||
| // | |||||
| // Licensed under the Apache License, Version 2.0 (the "License"): you may | |||||
| // not use this file except in compliance with the License. You may obtain | |||||
| // a copy of the License at | |||||
| // | |||||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||||
| // | |||||
| // Unless required by applicable law or agreed to in writing, software | |||||
| // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | |||||
| // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | |||||
| // License for the specific language governing permissions and limitations | |||||
| // under the License. | |||||
| package ini | |||||
| import ( | |||||
| "bytes" | |||||
| "errors" | |||||
| "fmt" | |||||
| "reflect" | |||||
| "strings" | |||||
| "time" | |||||
| "unicode" | |||||
| ) | |||||
| // NameMapper represents an ini tag name mapper. | |||||
| type NameMapper func(string) string | |||||
| // Built-in name getters. | |||||
| var ( | |||||
| // SnackCase converts to format SNACK_CASE. | |||||
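| // For example, SnackCase("MaxSize") returns "MAX_SIZE". | |||||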
| SnackCase NameMapper = func(raw string) string { | |||||
| newstr := make([]rune, 0, len(raw)) | |||||
| for i, chr := range raw { | |||||
| if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { | |||||
| if i > 0 { | |||||
| newstr = append(newstr, '_') | |||||
| } | |||||
| } | |||||
| newstr = append(newstr, unicode.ToUpper(chr)) | |||||
| } | |||||
| return string(newstr) | |||||
| } | |||||
| // TitleUnderscore converts to format title_underscore. | |||||
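| // For example, TitleUnderscore("MaxSize") returns "max_size". | |||||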
| TitleUnderscore NameMapper = func(raw string) string { | |||||
| newstr := make([]rune, 0, len(raw)) | |||||
| for i, chr := range raw { | |||||
| if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { | |||||
| if i > 0 { | |||||
| newstr = append(newstr, '_') | |||||
| } | |||||
| chr -= 'A' - 'a' | |||||
| } | |||||
| newstr = append(newstr, chr) | |||||
| } | |||||
| return string(newstr) | |||||
| } | |||||
| ) | |||||
| func (s *Section) parseFieldName(raw, actual string) string { | |||||
| if len(actual) > 0 { | |||||
| return actual | |||||
| } | |||||
| if s.f.NameMapper != nil { | |||||
| return s.f.NameMapper(raw) | |||||
| } | |||||
| return raw | |||||
| } | |||||
| func parseDelim(actual string) string { | |||||
| if len(actual) > 0 { | |||||
| return actual | |||||
| } | |||||
| return "," | |||||
| } | |||||
| var reflectTime = reflect.TypeOf(time.Now()).Kind() | |||||
| // setSliceWithProperType sets the proper values into the slice based on its element type. | |||||
| func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { | |||||
| var strs []string | |||||
| if allowShadow { | |||||
| strs = key.StringsWithShadows(delim) | |||||
| } else { | |||||
| strs = key.Strings(delim) | |||||
| } | |||||
| numVals := len(strs) | |||||
| if numVals == 0 { | |||||
| return nil | |||||
| } | |||||
| var vals interface{} | |||||
| var err error | |||||
| sliceOf := field.Type().Elem().Kind() | |||||
| switch sliceOf { | |||||
| case reflect.String: | |||||
| vals = strs | |||||
| case reflect.Int: | |||||
| vals, err = key.parseInts(strs, true, false) | |||||
| case reflect.Int64: | |||||
| vals, err = key.parseInt64s(strs, true, false) | |||||
| case reflect.Uint: | |||||
| vals, err = key.parseUints(strs, true, false) | |||||
| case reflect.Uint64: | |||||
| vals, err = key.parseUint64s(strs, true, false) | |||||
| case reflect.Float64: | |||||
| vals, err = key.parseFloat64s(strs, true, false) | |||||
| case reflect.Bool: | |||||
| vals, err = key.parseBools(strs, true, false) | |||||
| case reflectTime: | |||||
| vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) | |||||
| default: | |||||
| return fmt.Errorf("unsupported type '[]%s'", sliceOf) | |||||
| } | |||||
| if err != nil && isStrict { | |||||
| return err | |||||
| } | |||||
| slice := reflect.MakeSlice(field.Type(), numVals, numVals) | |||||
| for i := 0; i < numVals; i++ { | |||||
| switch sliceOf { | |||||
| case reflect.String: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) | |||||
| case reflect.Int: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) | |||||
| case reflect.Int64: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) | |||||
| case reflect.Uint: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) | |||||
| case reflect.Uint64: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) | |||||
| case reflect.Float64: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) | |||||
| case reflect.Bool: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) | |||||
| case reflectTime: | |||||
| slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) | |||||
| } | |||||
| } | |||||
| field.Set(slice) | |||||
| return nil | |||||
| } | |||||
| func wrapStrictError(err error, isStrict bool) error { | |||||
| if isStrict { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // setWithProperType sets the proper value to the field based on its type; | |||||
| // it does not return an error on a failed parse, | |||||
| // because we want to keep the default value already assigned to the struct. | |||||
| func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { | |||||
| vt := t | |||||
| isPtr := t.Kind() == reflect.Ptr | |||||
| if isPtr { | |||||
| vt = t.Elem() | |||||
| } | |||||
| switch vt.Kind() { | |||||
| case reflect.String: | |||||
| stringVal := key.String() | |||||
| if isPtr { | |||||
| field.Set(reflect.ValueOf(&stringVal)) | |||||
| } else if len(stringVal) > 0 { | |||||
| field.SetString(key.String()) | |||||
| } | |||||
| case reflect.Bool: | |||||
| boolVal, err := key.Bool() | |||||
| if err != nil { | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| field.Set(reflect.ValueOf(&boolVal)) | |||||
| } else { | |||||
| field.SetBool(boolVal) | |||||
| } | |||||
| case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | |||||
| // ParseDuration will not return an error for `0`, so check the type name instead | |||||
| if vt.Name() == "Duration" { | |||||
| durationVal, err := key.Duration() | |||||
| if err != nil { | |||||
| if intVal, err := key.Int64(); err == nil { | |||||
| field.SetInt(intVal) | |||||
| return nil | |||||
| } | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| field.Set(reflect.ValueOf(&durationVal)) | |||||
| } else if int64(durationVal) > 0 { | |||||
| field.Set(reflect.ValueOf(durationVal)) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| intVal, err := key.Int64() | |||||
| if err != nil { | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| pv := reflect.New(t.Elem()) | |||||
| pv.Elem().SetInt(intVal) | |||||
| field.Set(pv) | |||||
| } else { | |||||
| field.SetInt(intVal) | |||||
| } | |||||
| // byte is an alias for uint8, so supporting uint8 breaks support for byte | |||||
| case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: | |||||
| durationVal, err := key.Duration() | |||||
| // Skip zero value | |||||
| if err == nil && uint64(durationVal) > 0 { | |||||
| if isPtr { | |||||
| field.Set(reflect.ValueOf(&durationVal)) | |||||
| } else { | |||||
| field.Set(reflect.ValueOf(durationVal)) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| uintVal, err := key.Uint64() | |||||
| if err != nil { | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| pv := reflect.New(t.Elem()) | |||||
| pv.Elem().SetUint(uintVal) | |||||
| field.Set(pv) | |||||
| } else { | |||||
| field.SetUint(uintVal) | |||||
| } | |||||
| case reflect.Float32, reflect.Float64: | |||||
| floatVal, err := key.Float64() | |||||
| if err != nil { | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| pv := reflect.New(t.Elem()) | |||||
| pv.Elem().SetFloat(floatVal) | |||||
| field.Set(pv) | |||||
| } else { | |||||
| field.SetFloat(floatVal) | |||||
| } | |||||
| case reflectTime: | |||||
| timeVal, err := key.Time() | |||||
| if err != nil { | |||||
| return wrapStrictError(err, isStrict) | |||||
| } | |||||
| if isPtr { | |||||
| field.Set(reflect.ValueOf(&timeVal)) | |||||
| } else { | |||||
| field.Set(reflect.ValueOf(timeVal)) | |||||
| } | |||||
| case reflect.Slice: | |||||
| return setSliceWithProperType(key, field, delim, allowShadow, isStrict) | |||||
| default: | |||||
| return fmt.Errorf("unsupported type %q", t) | |||||
| } | |||||
| return nil | |||||
| } | |||||
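| // parseTagOptions splits an `ini` struct tag of the form | |||||
| // `ini:"name,omitempty,allowshadow,nonunique"`; option positions are significant. | |||||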
| func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool) { | |||||
| opts := strings.SplitN(tag, ",", 4) | |||||
| rawName = opts[0] | |||||
| if len(opts) > 1 { | |||||
| omitEmpty = opts[1] == "omitempty" | |||||
| } | |||||
| if len(opts) > 2 { | |||||
| allowShadow = opts[2] == "allowshadow" | |||||
| } | |||||
| if len(opts) > 3 { | |||||
| allowNonUnique = opts[3] == "nonunique" | |||||
| } | |||||
| return rawName, omitEmpty, allowShadow, allowNonUnique | |||||
| } | |||||
| func (s *Section) mapToField(val reflect.Value, isStrict bool) error { | |||||
| if val.Kind() == reflect.Ptr { | |||||
| val = val.Elem() | |||||
| } | |||||
| typ := val.Type() | |||||
| for i := 0; i < typ.NumField(); i++ { | |||||
| field := val.Field(i) | |||||
| tpField := typ.Field(i) | |||||
| tag := tpField.Tag.Get("ini") | |||||
| if tag == "-" { | |||||
| continue | |||||
| } | |||||
| rawName, _, allowShadow, allowNonUnique := parseTagOptions(tag) | |||||
| fieldName := s.parseFieldName(tpField.Name, rawName) | |||||
| if len(fieldName) == 0 || !field.CanSet() { | |||||
| continue | |||||
| } | |||||
| isStruct := tpField.Type.Kind() == reflect.Struct | |||||
| isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct | |||||
| isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous | |||||
| if isAnonymous { | |||||
| field.Set(reflect.New(tpField.Type.Elem())) | |||||
| } | |||||
| if isAnonymous || isStruct || isStructPtr { | |||||
| if sec, err := s.f.GetSection(fieldName); err == nil { | |||||
| // Only set the field to non-nil struct value if we have a section for it. | |||||
| // Otherwise, we end up with a non-nil struct ptr even though there is no data. | |||||
| if isStructPtr && field.IsNil() { | |||||
| field.Set(reflect.New(tpField.Type.Elem())) | |||||
| } | |||||
| if err = sec.mapToField(field, isStrict); err != nil { | |||||
| return fmt.Errorf("map to field %q: %v", fieldName, err) | |||||
| } | |||||
| continue | |||||
| } | |||||
| } | |||||
| // Map non-unique sections | |||||
| if allowNonUnique && tpField.Type.Kind() == reflect.Slice { | |||||
| newField, err := s.mapToSlice(fieldName, field, isStrict) | |||||
| if err != nil { | |||||
| return fmt.Errorf("map to slice %q: %v", fieldName, err) | |||||
| } | |||||
| field.Set(newField) | |||||
| continue | |||||
| } | |||||
| if key, err := s.GetKey(fieldName); err == nil { | |||||
| delim := parseDelim(tpField.Tag.Get("delim")) | |||||
| if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { | |||||
| return fmt.Errorf("set field %q: %v", fieldName, err) | |||||
| } | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // mapToSlice maps all sections with the same name and returns the new value. | |||||
| // The type of the Value must be a slice. | |||||
| func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { | |||||
| secs, err := s.f.SectionsByName(secName) | |||||
| if err != nil { | |||||
| return reflect.Value{}, err | |||||
| } | |||||
| typ := val.Type().Elem() | |||||
| for _, sec := range secs { | |||||
| elem := reflect.New(typ) | |||||
| if err = sec.mapToField(elem, isStrict); err != nil { | |||||
| return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) | |||||
| } | |||||
| val = reflect.Append(val, elem.Elem()) | |||||
| } | |||||
| return val, nil | |||||
| } | |||||
| // mapTo maps a section to object v. | |||||
| func (s *Section) mapTo(v interface{}, isStrict bool) error { | |||||
| typ := reflect.TypeOf(v) | |||||
| val := reflect.ValueOf(v) | |||||
| if typ.Kind() == reflect.Ptr { | |||||
| typ = typ.Elem() | |||||
| val = val.Elem() | |||||
| } else { | |||||
| return errors.New("not a pointer to a struct") | |||||
| } | |||||
| if typ.Kind() == reflect.Slice { | |||||
| newField, err := s.mapToSlice(s.name, val, isStrict) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| val.Set(newField) | |||||
| return nil | |||||
| } | |||||
| return s.mapToField(val, isStrict) | |||||
| } | |||||
| // MapTo maps section to given struct. | |||||
| func (s *Section) MapTo(v interface{}) error { | |||||
| return s.mapTo(v, false) | |||||
| } | |||||
| // StrictMapTo maps section to given struct in strict mode, | |||||
| // which returns all possible error including value parsing error. | |||||
| func (s *Section) StrictMapTo(v interface{}) error { | |||||
| return s.mapTo(v, true) | |||||
| } | |||||
| // MapTo maps file to given struct. | |||||
| func (f *File) MapTo(v interface{}) error { | |||||
| return f.Section("").MapTo(v) | |||||
| } | |||||
| // StrictMapTo maps file to given struct in strict mode, | |||||
| // which returns all possible error including value parsing error. | |||||
| func (f *File) StrictMapTo(v interface{}) error { | |||||
| return f.Section("").StrictMapTo(v) | |||||
| } | |||||
| // MapToWithMapper maps data sources to given struct with name mapper. | |||||
| func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { | |||||
| cfg, err := Load(source, others...) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| cfg.NameMapper = mapper | |||||
| return cfg.MapTo(v) | |||||
| } | |||||
| // StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, | |||||
| // which returns all possible error including value parsing error. | |||||
| func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { | |||||
| cfg, err := Load(source, others...) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| cfg.NameMapper = mapper | |||||
| return cfg.StrictMapTo(v) | |||||
| } | |||||
| // MapTo maps data sources to given struct. | |||||
| func MapTo(v, source interface{}, others ...interface{}) error { | |||||
| return MapToWithMapper(v, nil, source, others...) | |||||
| } | |||||
| // StrictMapTo maps data sources to given struct in strict mode, | |||||
| // which returns all possible error including value parsing error. | |||||
| func StrictMapTo(v, source interface{}, others ...interface{}) error { | |||||
| return StrictMapToWithMapper(v, nil, source, others...) | |||||
| } | |||||
| // reflectSliceWithProperType does the opposite of setSliceWithProperType. | |||||
| func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { | |||||
| slice := field.Slice(0, field.Len()) | |||||
| if field.Len() == 0 { | |||||
| return nil | |||||
| } | |||||
| sliceOf := field.Type().Elem().Kind() | |||||
| if allowShadow { | |||||
| var keyWithShadows *Key | |||||
| for i := 0; i < field.Len(); i++ { | |||||
| var val string | |||||
| switch sliceOf { | |||||
| case reflect.String: | |||||
| val = slice.Index(i).String() | |||||
| case reflect.Int, reflect.Int64: | |||||
| val = fmt.Sprint(slice.Index(i).Int()) | |||||
| case reflect.Uint, reflect.Uint64: | |||||
| val = fmt.Sprint(slice.Index(i).Uint()) | |||||
| case reflect.Float64: | |||||
| val = fmt.Sprint(slice.Index(i).Float()) | |||||
| case reflect.Bool: | |||||
| val = fmt.Sprint(slice.Index(i).Bool()) | |||||
| case reflectTime: | |||||
| val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) | |||||
| default: | |||||
| return fmt.Errorf("unsupported type '[]%s'", sliceOf) | |||||
| } | |||||
| if i == 0 { | |||||
| keyWithShadows = newKey(key.s, key.name, val) | |||||
| } else { | |||||
| _ = keyWithShadows.AddShadow(val) | |||||
| } | |||||
| } | |||||
| key = keyWithShadows | |||||
| return nil | |||||
| } | |||||
| var buf bytes.Buffer | |||||
| for i := 0; i < field.Len(); i++ { | |||||
| switch sliceOf { | |||||
| case reflect.String: | |||||
| buf.WriteString(slice.Index(i).String()) | |||||
| case reflect.Int, reflect.Int64: | |||||
| buf.WriteString(fmt.Sprint(slice.Index(i).Int())) | |||||
| case reflect.Uint, reflect.Uint64: | |||||
| buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) | |||||
| case reflect.Float64: | |||||
| buf.WriteString(fmt.Sprint(slice.Index(i).Float())) | |||||
| case reflect.Bool: | |||||
| buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) | |||||
| case reflectTime: | |||||
| buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) | |||||
| default: | |||||
| return fmt.Errorf("unsupported type '[]%s'", sliceOf) | |||||
| } | |||||
| buf.WriteString(delim) | |||||
| } | |||||
| key.SetValue(buf.String()[:buf.Len()-len(delim)]) | |||||
| return nil | |||||
| } | |||||
| // reflectWithProperType does the opposite of setWithProperType. | |||||
| func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { | |||||
| switch t.Kind() { | |||||
| case reflect.String: | |||||
| key.SetValue(field.String()) | |||||
| case reflect.Bool: | |||||
| key.SetValue(fmt.Sprint(field.Bool())) | |||||
| case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | |||||
| key.SetValue(fmt.Sprint(field.Int())) | |||||
| case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | |||||
| key.SetValue(fmt.Sprint(field.Uint())) | |||||
| case reflect.Float32, reflect.Float64: | |||||
| key.SetValue(fmt.Sprint(field.Float())) | |||||
| case reflectTime: | |||||
| key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) | |||||
| case reflect.Slice: | |||||
| return reflectSliceWithProperType(key, field, delim, allowShadow) | |||||
| case reflect.Ptr: | |||||
| if !field.IsNil() { | |||||
| return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) | |||||
| } | |||||
| default: | |||||
| return fmt.Errorf("unsupported type %q", t) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // CR: copied from encoding/json/encode.go, modified to add time.Time support. | |||||
| // TODO: add more test coverage. | |||||
| func isEmptyValue(v reflect.Value) bool { | |||||
| switch v.Kind() { | |||||
| case reflect.Array, reflect.Map, reflect.Slice, reflect.String: | |||||
| return v.Len() == 0 | |||||
| case reflect.Bool: | |||||
| return !v.Bool() | |||||
| case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | |||||
| return v.Int() == 0 | |||||
| case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: | |||||
| return v.Uint() == 0 | |||||
| case reflect.Float32, reflect.Float64: | |||||
| return v.Float() == 0 | |||||
| case reflect.Interface, reflect.Ptr: | |||||
| return v.IsNil() | |||||
| case reflectTime: | |||||
| t, ok := v.Interface().(time.Time) | |||||
| return ok && t.IsZero() | |||||
| } | |||||
| return false | |||||
| } | |||||
| // StructReflector is the interface implemented by struct types that can extract themselves into INI objects. | |||||
| type StructReflector interface { | |||||
| ReflectINIStruct(*File) error | |||||
| } | |||||
| func (s *Section) reflectFrom(val reflect.Value) error { | |||||
| if val.Kind() == reflect.Ptr { | |||||
| val = val.Elem() | |||||
| } | |||||
| typ := val.Type() | |||||
| for i := 0; i < typ.NumField(); i++ { | |||||
| if !val.Field(i).CanInterface() { | |||||
| continue | |||||
| } | |||||
| field := val.Field(i) | |||||
| tpField := typ.Field(i) | |||||
| tag := tpField.Tag.Get("ini") | |||||
| if tag == "-" { | |||||
| continue | |||||
| } | |||||
| rawName, omitEmpty, allowShadow, allowNonUnique := parseTagOptions(tag) | |||||
| if omitEmpty && isEmptyValue(field) { | |||||
| continue | |||||
| } | |||||
| if r, ok := field.Interface().(StructReflector); ok { | |||||
| return r.ReflectINIStruct(s.f) | |||||
| } | |||||
| fieldName := s.parseFieldName(tpField.Name, rawName) | |||||
| if len(fieldName) == 0 || !field.CanSet() { | |||||
| continue | |||||
| } | |||||
| if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || | |||||
| (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { | |||||
| // Note: The only possible error here is that the section doesn't exist. | |||||
| sec, err := s.f.GetSection(fieldName) | |||||
| if err != nil { | |||||
| // Note: fieldName can never be empty here, ignore error. | |||||
| sec, _ = s.f.NewSection(fieldName) | |||||
| } | |||||
| // Add comment from comment tag | |||||
| if len(sec.Comment) == 0 { | |||||
| sec.Comment = tpField.Tag.Get("comment") | |||||
| } | |||||
| if err = sec.reflectFrom(field); err != nil { | |||||
| return fmt.Errorf("reflect from field %q: %v", fieldName, err) | |||||
| } | |||||
| continue | |||||
| } | |||||
| if allowNonUnique && tpField.Type.Kind() == reflect.Slice { | |||||
| slice := field.Slice(0, field.Len()) | |||||
| if field.Len() == 0 { | |||||
| return nil | |||||
| } | |||||
| sliceOf := field.Type().Elem().Kind() | |||||
| for i := 0; i < field.Len(); i++ { | |||||
| if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { | |||||
| return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) | |||||
| } | |||||
| sec, err := s.f.NewSection(fieldName) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Add comment from comment tag | |||||
| if len(sec.Comment) == 0 { | |||||
| sec.Comment = tpField.Tag.Get("comment") | |||||
| } | |||||
| if err := sec.reflectFrom(slice.Index(i)); err != nil { | |||||
| return fmt.Errorf("reflect from field %q: %v", fieldName, err) | |||||
| } | |||||
| } | |||||
| continue | |||||
| } | |||||
| // Note: Same reasoning as for sections above. | |||||
| key, err := s.GetKey(fieldName) | |||||
| if err != nil { | |||||
| key, _ = s.NewKey(fieldName, "") | |||||
| } | |||||
| // Add comment from comment tag | |||||
| if len(key.Comment) == 0 { | |||||
| key.Comment = tpField.Tag.Get("comment") | |||||
| } | |||||
| delim := parseDelim(tpField.Tag.Get("delim")) | |||||
| if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { | |||||
| return fmt.Errorf("reflect field %q: %v", fieldName, err) | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // ReflectFrom reflects the section from the given struct. It overwrites existing values. | |||||
| func (s *Section) ReflectFrom(v interface{}) error { | |||||
| typ := reflect.TypeOf(v) | |||||
| val := reflect.ValueOf(v) | |||||
| if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && | |||||
| (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { | |||||
| // Clear sections to make sure none exists before adding the new ones | |||||
| s.f.DeleteSection(s.name) | |||||
| if typ.Kind() == reflect.Ptr { | |||||
| sec, err := s.f.NewSection(s.name) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return sec.reflectFrom(val.Elem()) | |||||
| } | |||||
| slice := val.Slice(0, val.Len()) | |||||
| sliceOf := val.Type().Elem().Kind() | |||||
| if sliceOf != reflect.Ptr { | |||||
| return fmt.Errorf("not a slice of pointers") | |||||
| } | |||||
| for i := 0; i < slice.Len(); i++ { | |||||
| sec, err := s.f.NewSection(s.name) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| err = sec.reflectFrom(slice.Index(i)) | |||||
| if err != nil { | |||||
| return fmt.Errorf("reflect from %dth field: %v", i, err) | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| if typ.Kind() == reflect.Ptr { | |||||
| val = val.Elem() | |||||
| } else { | |||||
| return errors.New("not a pointer to a struct") | |||||
| } | |||||
| return s.reflectFrom(val) | |||||
| } | |||||
| // ReflectFrom reflects file from given struct. | |||||
| func (f *File) ReflectFrom(v interface{}) error { | |||||
| return f.Section("").ReflectFrom(v) | |||||
| } | |||||
| // ReflectFromWithMapper reflects data sources from given struct with name mapper. | |||||
| func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { | |||||
| cfg.NameMapper = mapper | |||||
| return cfg.ReflectFrom(v) | |||||
| } | |||||
| // ReflectFrom reflects data sources from given struct. | |||||
| func ReflectFrom(cfg *File, v interface{}) error { | |||||
| return ReflectFromWithMapper(cfg, v, nil) | |||||
| } | |||||
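| To make the two mapping directions concrete, here is a hedged round-trip sketch using the `MapTo` and `ReflectFrom` entry points defined above (the struct and INI content are invented for illustration): | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "fmt" | |||||
| "github.com/go-ini/ini" | |||||
| ) | |||||
| type Server struct { | |||||
| Host string   `ini:"host"` | |||||
| Port int      `ini:"port"` | |||||
| Tags []string `ini:"tags,omitempty" delim:","` | |||||
| } | |||||
| func main() { | |||||
| // INI -> struct: keys land in the default section. | |||||
| var s Server | |||||
| if err := ini.MapTo(&s, []byte("host = example.com\nport = 8080\ntags = a,b,c\n")); err != nil { | |||||
| panic(err) | |||||
| } | |||||
| fmt.Printf("%+v\n", s) // {Host:example.com Port:8080 Tags:[a b c]} | |||||
| // struct -> INI. | |||||
| cfg := ini.Empty() | |||||
| if err := ini.ReflectFrom(cfg, &s); err != nil { | |||||
| panic(err) | |||||
| } | |||||
| fmt.Println(cfg.Section("").Key("host").String()) // example.com | |||||
| } | |||||
| ``` | |||||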
| @@ -0,0 +1,3 @@ | |||||
| *~ | |||||
| *.test | |||||
| validator | |||||
| @@ -0,0 +1,28 @@ | |||||
| sudo: false | |||||
| language: go | |||||
| os: | |||||
| - linux | |||||
| env: | |||||
| - ARCH=x86_64 | |||||
| - ARCH=i686 | |||||
| go: | |||||
| - 1.11.x | |||||
| - tip | |||||
| matrix: | |||||
| fast_finish: true | |||||
| allow_failures: | |||||
| - go: tip | |||||
| addons: | |||||
| apt: | |||||
| packages: | |||||
| - devscripts | |||||
| script: | |||||
| - diff -au <(gofmt -d .) <(printf "") | |||||
| - diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "") | |||||
| - make | |||||
| @@ -0,0 +1,23 @@ | |||||
| ### Developer Guidelines | |||||
| ``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: | |||||
| * Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. | |||||
| - Fork it | |||||
| - Create your feature branch (git checkout -b my-new-feature) | |||||
| - Commit your changes (git commit -am 'Add some feature') | |||||
| - Push to the branch (git push origin my-new-feature) | |||||
| - Create new Pull Request | |||||
| * When you're ready to create a pull request, be sure to: | |||||
| - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. | |||||
| - Run `go fmt` | |||||
| - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. | |||||
| - Make sure `go test -race ./...` and `go build` complete successfully. | |||||
| NOTE: `go test` runs functional tests and requires you to have an AWS S3 account. Set the credentials as the environment variables | |||||
| ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, use ``go test -short -race ./...`` (see the example after this list). | |||||
| * Read the [Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) wiki page from the Go project | |||||
| - the `minio-go` project strictly conforms to Go style | |||||
| - if you happen to observe offending code, please feel free to send a pull request | |||||
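| For example, a typical local test run might look like this (the credential values below are placeholders, not real keys): | |||||
| ```sh | |||||
| export ACCESS_KEY=my-access-key   # placeholder | |||||
| export SECRET_KEY=my-secret-key   # placeholder | |||||
| go test -short -race ./... | |||||
| ``` | |||||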
| @@ -0,0 +1,202 @@ | |||||
| Apache License | |||||
| Version 2.0, January 2004 | |||||
| http://www.apache.org/licenses/ | |||||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||||
| 1. Definitions. | |||||
| "License" shall mean the terms and conditions for use, reproduction, | |||||
| and distribution as defined by Sections 1 through 9 of this document. | |||||
| "Licensor" shall mean the copyright owner or entity authorized by | |||||
| the copyright owner that is granting the License. | |||||
| "Legal Entity" shall mean the union of the acting entity and all | |||||
| other entities that control, are controlled by, or are under common | |||||
| control with that entity. For the purposes of this definition, | |||||
| "control" means (i) the power, direct or indirect, to cause the | |||||
| direction or management of such entity, whether by contract or | |||||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||||
| "You" (or "Your") shall mean an individual or Legal Entity | |||||
| exercising permissions granted by this License. | |||||
| "Source" form shall mean the preferred form for making modifications, | |||||
| including but not limited to software source code, documentation | |||||
| source, and configuration files. | |||||
| "Object" form shall mean any form resulting from mechanical | |||||
| transformation or translation of a Source form, including but | |||||
| not limited to compiled object code, generated documentation, | |||||
| and conversions to other media types. | |||||
| "Work" shall mean the work of authorship, whether in Source or | |||||
| Object form, made available under the License, as indicated by a | |||||
| copyright notice that is included in or attached to the work | |||||
| (an example is provided in the Appendix below). | |||||
| "Derivative Works" shall mean any work, whether in Source or Object | |||||
| form, that is based on (or derived from) the Work and for which the | |||||
| editorial revisions, annotations, elaborations, or other modifications | |||||
| represent, as a whole, an original work of authorship. For the purposes | |||||
| of this License, Derivative Works shall not include works that remain | |||||
| separable from, or merely link (or bind by name) to the interfaces of, | |||||
| the Work and Derivative Works thereof. | |||||
| "Contribution" shall mean any work of authorship, including | |||||
| the original version of the Work and any modifications or additions | |||||
| to that Work or Derivative Works thereof, that is intentionally | |||||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||||
| or by an individual or Legal Entity authorized to submit on behalf of | |||||
| the copyright owner. For the purposes of this definition, "submitted" | |||||
| means any form of electronic, verbal, or written communication sent | |||||
| to the Licensor or its representatives, including but not limited to | |||||
| communication on electronic mailing lists, source code control systems, | |||||
| and issue tracking systems that are managed by, or on behalf of, the | |||||
| Licensor for the purpose of discussing and improving the Work, but | |||||
| excluding communication that is conspicuously marked or otherwise | |||||
| designated in writing by the copyright owner as "Not a Contribution." | |||||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||||
| on behalf of whom a Contribution has been received by Licensor and | |||||
| subsequently incorporated within the Work. | |||||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||||
| this License, each Contributor hereby grants to You a perpetual, | |||||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||||
| copyright license to reproduce, prepare Derivative Works of, | |||||
| publicly display, publicly perform, sublicense, and distribute the | |||||
| Work and such Derivative Works in Source or Object form. | |||||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||||
| this License, each Contributor hereby grants to You a perpetual, | |||||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||||
| (except as stated in this section) patent license to make, have made, | |||||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||||
| where such license applies only to those patent claims licensable | |||||
| by such Contributor that are necessarily infringed by their | |||||
| Contribution(s) alone or by combination of their Contribution(s) | |||||
| with the Work to which such Contribution(s) was submitted. If You | |||||
| institute patent litigation against any entity (including a | |||||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||||
| or a Contribution incorporated within the Work constitutes direct | |||||
| or contributory patent infringement, then any patent licenses | |||||
| granted to You under this License for that Work shall terminate | |||||
| as of the date such litigation is filed. | |||||
| 4. Redistribution. You may reproduce and distribute copies of the | |||||
| Work or Derivative Works thereof in any medium, with or without | |||||
| modifications, and in Source or Object form, provided that You | |||||
| meet the following conditions: | |||||
| (a) You must give any other recipients of the Work or | |||||
| Derivative Works a copy of this License; and | |||||
| (b) You must cause any modified files to carry prominent notices | |||||
| stating that You changed the files; and | |||||
| (c) You must retain, in the Source form of any Derivative Works | |||||
| that You distribute, all copyright, patent, trademark, and | |||||
| attribution notices from the Source form of the Work, | |||||
| excluding those notices that do not pertain to any part of | |||||
| the Derivative Works; and | |||||
| (d) If the Work includes a "NOTICE" text file as part of its | |||||
| distribution, then any Derivative Works that You distribute must | |||||
| include a readable copy of the attribution notices contained | |||||
| within such NOTICE file, excluding those notices that do not | |||||
| pertain to any part of the Derivative Works, in at least one | |||||
| of the following places: within a NOTICE text file distributed | |||||
| as part of the Derivative Works; within the Source form or | |||||
| documentation, if provided along with the Derivative Works; or, | |||||
| within a display generated by the Derivative Works, if and | |||||
| wherever such third-party notices normally appear. The contents | |||||
| of the NOTICE file are for informational purposes only and | |||||
| do not modify the License. You may add Your own attribution | |||||
| notices within Derivative Works that You distribute, alongside | |||||
| or as an addendum to the NOTICE text from the Work, provided | |||||
| that such additional attribution notices cannot be construed | |||||
| as modifying the License. | |||||
| You may add Your own copyright statement to Your modifications and | |||||
| may provide additional or different license terms and conditions | |||||
| for use, reproduction, or distribution of Your modifications, or | |||||
| for any such Derivative Works as a whole, provided Your use, | |||||
| reproduction, and distribution of the Work otherwise complies with | |||||
| the conditions stated in this License. | |||||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||||
| any Contribution intentionally submitted for inclusion in the Work | |||||
| by You to the Licensor shall be under the terms and conditions of | |||||
| this License, without any additional terms or conditions. | |||||
| Notwithstanding the above, nothing herein shall supersede or modify | |||||
| the terms of any separate license agreement you may have executed | |||||
| with Licensor regarding such Contributions. | |||||
| 6. Trademarks. This License does not grant permission to use the trade | |||||
| names, trademarks, service marks, or product names of the Licensor, | |||||
| except as required for reasonable and customary use in describing the | |||||
| origin of the Work and reproducing the content of the NOTICE file. | |||||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||||
| agreed to in writing, Licensor provides the Work (and each | |||||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||||
| implied, including, without limitation, any warranties or conditions | |||||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||||
| appropriateness of using or redistributing the Work and assume any | |||||
| risks associated with Your exercise of permissions under this License. | |||||
| 8. Limitation of Liability. In no event and under no legal theory, | |||||
| whether in tort (including negligence), contract, or otherwise, | |||||
| unless required by applicable law (such as deliberate and grossly | |||||
| negligent acts) or agreed to in writing, shall any Contributor be | |||||
| liable to You for damages, including any direct, indirect, special, | |||||
| incidental, or consequential damages of any character arising as a | |||||
| result of this License or out of the use or inability to use the | |||||
| Work (including but not limited to damages for loss of goodwill, | |||||
| work stoppage, computer failure or malfunction, or any and all | |||||
| other commercial damages or losses), even if such Contributor | |||||
| has been advised of the possibility of such damages. | |||||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||||
| the Work or Derivative Works thereof, You may choose to offer, | |||||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||||
| or other liability obligations and/or rights consistent with this | |||||
| License. However, in accepting such obligations, You may act only | |||||
| on Your own behalf and on Your sole responsibility, not on behalf | |||||
| of any other Contributor, and only if You agree to indemnify, | |||||
| defend, and hold each Contributor harmless for any liability | |||||
| incurred by, or claims asserted against, such Contributor by reason | |||||
| of your accepting any such warranty or additional liability. | |||||
| END OF TERMS AND CONDITIONS | |||||
| APPENDIX: How to apply the Apache License to your work. | |||||
| To apply the Apache License to your work, attach the following | |||||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||||
| replaced with your own identifying information. (Don't include | |||||
| the brackets!) The text should be enclosed in the appropriate | |||||
| comment syntax for the file format. We also recommend that a | |||||
| file or class name and description of purpose be included on the | |||||
| same "printed page" as the copyright notice for easier | |||||
| identification within third-party archives. | |||||
| Copyright [yyyy] [name of copyright owner] | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| @@ -0,0 +1,35 @@ | |||||
| # For maintainers only | |||||
| ## Responsibilities | |||||
| Please read [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) | |||||
| ### Making new releases | |||||
| Tag and sign your release commit. Note that this step requires you to have access to Minio's trusted private key. | |||||
| ```sh | |||||
| $ export GNUPGHOME=/media/${USER}/minio/trusted | |||||
| $ git tag -s 4.0.0 | |||||
| $ git push | |||||
| $ git push --tags | |||||
| ``` | |||||
| ### Update version | |||||
| Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version. | |||||
| ```sh | |||||
| $ grep libraryVersion api.go | |||||
| libraryVersion = "4.0.1" | |||||
| ``` | |||||
| Commit your changes: | |||||
| ```sh | |||||
| $ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>" | |||||
| ``` | |||||
| ### Announce | |||||
| Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections, `highlights` and `changelog`: Highlights is a bulleted list of the salient features in this release, and Changelog contains the list of all commits since the last release. | |||||
| To generate the `changelog`: | |||||
| ```sh | |||||
| $ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag> | |||||
| ``` | |||||
| @@ -0,0 +1,15 @@ | |||||
| all: checks | |||||
| checks: | |||||
| @go get -t ./... | |||||
| @go vet ./... | |||||
| @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... | |||||
| @go get github.com/dustin/go-humanize/... | |||||
| @go get github.com/sirupsen/logrus/... | |||||
| @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go | |||||
| @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done | |||||
| @go get -u github.com/a8m/mark/... | |||||
| @go get -u github.com/minio/cli/... | |||||
| @go get -u golang.org/x/tools/cmd/goimports | |||||
| @go get -u github.com/gernest/wow/... | |||||
| @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl | |||||
| @@ -0,0 +1,2 @@ | |||||
| minio-go | |||||
| Copyright 2015-2017 Minio, Inc. | |||||
| @@ -0,0 +1,239 @@ | |||||
| # Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [](https://slack.minio.io) [](https://sourcegraph.com/github.com/minio/minio-go?badge) [](https://github.com/minio/minio-go/blob/master/LICENSE) | |||||
| The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. | |||||
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and walk through a simple file uploader example. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
| This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). | |||||
## Download from GitHub
| ```sh | |||||
| go get -u github.com/minio/minio-go | |||||
| ``` | |||||
| ## Initialize Minio Client | |||||
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
| | Parameter | Description| | |||||
| | :--- | :--- | | |||||
| | endpoint | URL to object storage service. | | |||||
| | accessKeyID | Access key is the user ID that uniquely identifies your account. | | |||||
| | secretAccessKey | Secret key is the password to your account. | | |||||
| | secure | Set this value to 'true' to enable secure (HTTPS) access. | | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "github.com/minio/minio-go" | |||||
| "log" | |||||
| ) | |||||
| func main() { | |||||
| endpoint := "play.minio.io:9000" | |||||
| accessKeyID := "Q3AM3UQ867SPQQA43P2F" | |||||
| secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | |||||
| useSSL := true | |||||
| // Initialize minio client object. | |||||
| minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) | |||||
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
log.Printf("%#v\n", minioClient) // minioClient is now set up
| } | |||||
| ``` | |||||
| ## Quick Start Example - File Uploader | |||||
| This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. | |||||
| We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. | |||||
| ### FileUploader.go | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "github.com/minio/minio-go" | |||||
| "log" | |||||
| ) | |||||
| func main() { | |||||
| endpoint := "play.minio.io:9000" | |||||
| accessKeyID := "Q3AM3UQ867SPQQA43P2F" | |||||
| secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | |||||
| useSSL := true | |||||
| // Initialize minio client object. | |||||
| minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) | |||||
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
| // Make a new bucket called mymusic. | |||||
| bucketName := "mymusic" | |||||
| location := "us-east-1" | |||||
| err = minioClient.MakeBucket(bucketName, location) | |||||
| if err != nil { | |||||
| // Check to see if we already own this bucket (which happens if you run this twice) | |||||
| exists, err := minioClient.BucketExists(bucketName) | |||||
| if err == nil && exists { | |||||
| log.Printf("We already own %s\n", bucketName) | |||||
| } else { | |||||
| log.Fatalln(err) | |||||
| } | |||||
| } else { | |||||
| log.Printf("Successfully created %s\n", bucketName) | |||||
| } | |||||
| // Upload the zip file | |||||
| objectName := "golden-oldies.zip" | |||||
| filePath := "/tmp/golden-oldies.zip" | |||||
| contentType := "application/zip" | |||||
| // Upload the zip file with FPutObject | |||||
n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
| log.Printf("Successfully uploaded %s of size %d\n", objectName, n) | |||||
| } | |||||
| ``` | |||||
| ### Run FileUploader | |||||
| ```sh | |||||
| go run file-uploader.go | |||||
| 2016/08/13 17:03:28 Successfully created mymusic | |||||
| 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 | |||||
| mc ls play/mymusic/ | |||||
| [2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip | |||||
| ``` | |||||
| ## API Reference | |||||
| The full API Reference is available here. | |||||
| * [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference) | |||||
| ### API Reference : Bucket Operations | |||||
| * [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) | |||||
| * [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) | |||||
| * [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) | |||||
| * [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) | |||||
| * [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) | |||||
| * [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) | |||||
| * [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) | |||||
| ### API Reference : Bucket policy Operations | |||||
| * [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) | |||||
| * [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) | |||||
| ### API Reference : Bucket notification Operations | |||||
| * [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) | |||||
| * [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) | |||||
| * [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) | |||||
| * [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) | |||||
| ### API Reference : File Object Operations | |||||
| * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) | |||||
| * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject) | |||||
| * [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) | |||||
| * [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) | |||||
| ### API Reference : Object Operations | |||||
| * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) | |||||
| * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) | |||||
| * [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) | |||||
| * [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) | |||||
| * [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) | |||||
| * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) | |||||
| * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) | |||||
| * [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) | |||||
| * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) | |||||
| * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) | |||||
| * [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent) | |||||
| ### API Reference : Presigned Operations | |||||
| * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) | |||||
| * [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) | |||||
| * [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) | |||||
| * [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) | |||||
| ### API Reference : Client custom settings | |||||
| * [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) | |||||
| * [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) | |||||
| * [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) | |||||
| * [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) | |||||
| ## Full Examples | |||||
| ### Full Examples : Bucket Operations | |||||
| * [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) | |||||
| * [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) | |||||
| * [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) | |||||
| * [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) | |||||
| * [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) | |||||
| * [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) | |||||
| * [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) | |||||
| ### Full Examples : Bucket policy Operations | |||||
| * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) | |||||
| * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) | |||||
| * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) | |||||
| ### Full Examples : Bucket lifecycle Operations | |||||
| * [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) | |||||
| * [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) | |||||
| ### Full Examples : Bucket notification Operations | |||||
| * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) | |||||
| * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) | |||||
| * [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) | |||||
| * [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension) | |||||
| ### Full Examples : File Object Operations | |||||
| * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) | |||||
| * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) | |||||
| * [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) | |||||
| * [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) | |||||
| ### Full Examples : Object Operations | |||||
| * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) | |||||
| * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) | |||||
| * [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) | |||||
| * [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) | |||||
| * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) | |||||
| * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) | |||||
| * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) | |||||
| * [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) | |||||
| * [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) | |||||
| ### Full Examples : Encrypted Object Operations | |||||
| * [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) | |||||
| * [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) | |||||
| * [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) | |||||
| ### Full Examples : Presigned Operations | |||||
| * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) | |||||
| * [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) | |||||
| * [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) | |||||
| * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) | |||||
| ## Explore Further | |||||
| * [Complete Documentation](https://docs.minio.io) | |||||
| * [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) | |||||
| * [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app) | |||||
| ## Contribute | |||||
| [Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) | |||||
| [](https://travis-ci.org/minio/minio-go) | |||||
| [](https://ci.appveyor.com/project/harshavardhana/minio-go) | |||||
| ## License | |||||
| This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information. | |||||
| @@ -0,0 +1,245 @@ | |||||
# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [](https://slack.minio.io) [](https://sourcegraph.com/github.com/minio/minio-go?badge)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
**Supported cloud storage providers:**
| - AWS Signature Version 4 | |||||
| - Amazon S3 | |||||
| - Minio | |||||
| - AWS Signature Version 2 | |||||
- Google Cloud Storage (compatibility mode)
| - Openstack Swift + Swift3 middleware | |||||
| - Ceph Object Gateway | |||||
| - Riak CS | |||||
This guide shows you how to install the Minio client SDK, connect to Minio, and walk through a file upload example. For the complete list of APIs and examples, please refer to the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
## Download from GitHub
| ```sh | |||||
| go get -u github.com/minio/minio-go | |||||
| ``` | |||||
## Initialize Minio Client
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
| Parameter | Description |
| :--- | :--- |
| endpoint | URL to the object storage service. |
| accessKeyID | Access key is the user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
| secure | Set this value to 'true' to enable secure (HTTPS) access. |
| ```go | |||||
| package main | |||||
| import ( | |||||
| "github.com/minio/minio-go" | |||||
| "log" | |||||
| ) | |||||
| func main() { | |||||
| endpoint := "play.minio.io:9000" | |||||
| accessKeyID := "Q3AM3UQ867SPQQA43P2F" | |||||
| secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | |||||
| useSSL := true | |||||
// Initialize the minio client object.
| minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) | |||||
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
log.Printf("%#v\n", minioClient) // minioClient is now set up
| } | |||||
| ``` | |||||
## Quick Start Example - File Uploader
This example program connects to an object storage server, creates a bucket, and uploads a file to it.
We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. The access credentials shown in this example are open to the public.
| ### FileUploader.go | |||||
| ```go | |||||
| package main | |||||
| import ( | |||||
| "github.com/minio/minio-go" | |||||
| "log" | |||||
| ) | |||||
| func main() { | |||||
| endpoint := "play.minio.io:9000" | |||||
| accessKeyID := "Q3AM3UQ867SPQQA43P2F" | |||||
| secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" | |||||
| useSSL := true | |||||
// Initialize the minio client object.
| minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) | |||||
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
// Make a new bucket called mymusic.
| bucketName := "mymusic" | |||||
| location := "us-east-1" | |||||
| err = minioClient.MakeBucket(bucketName, location) | |||||
| if err != nil { | |||||
// Check if the bucket already exists (which happens if you run this twice).
| exists, err := minioClient.BucketExists(bucketName) | |||||
| if err == nil && exists { | |||||
| log.Printf("We already own %s\n", bucketName) | |||||
| } else { | |||||
| log.Fatalln(err) | |||||
| } | |||||
| } | |||||
| log.Printf("Successfully created %s\n", bucketName) | |||||
| // 上传一个zip文件。 | |||||
| objectName := "golden-oldies.zip" | |||||
| filePath := "/tmp/golden-oldies.zip" | |||||
| contentType := "application/zip" | |||||
// Upload the zip file with FPutObject.
n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
| if err != nil { | |||||
| log.Fatalln(err) | |||||
| } | |||||
| log.Printf("Successfully uploaded %s of size %d\n", objectName, n) | |||||
| } | |||||
| ``` | |||||
### Run FileUploader
| ```sh | |||||
| go run file-uploader.go | |||||
| 2016/08/13 17:03:28 Successfully created mymusic | |||||
| 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 | |||||
| mc ls play/mymusic/ | |||||
| [2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip | |||||
| ``` | |||||
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
| * [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) | |||||
| * [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) | |||||
| * [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) | |||||
| * [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) | |||||
| * [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) | |||||
| * [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) | |||||
| * [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) | |||||
### API Reference : Bucket policy Operations
| * [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) | |||||
| * [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) | |||||
### API Reference : Bucket notification Operations
| * [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) | |||||
| * [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) | |||||
| * [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) | |||||
| * [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) | |||||
### API Reference : File Object Operations
| * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) | |||||
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
| * [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) | |||||
| * [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) | |||||
### API Reference : Object Operations
| * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) | |||||
| * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) | |||||
| * [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) | |||||
| * [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) | |||||
| * [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) | |||||
| * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) | |||||
| * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) | |||||
| * [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) | |||||
| * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) | |||||
| * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) | |||||
### API Reference : Encrypted Object Operations
| * [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) | |||||
| * [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) | |||||
### API Reference : Presigned Operations
| * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) | |||||
| * [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) | |||||
| * [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) | |||||
| * [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) | |||||
### API Reference : Client custom settings
| * [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) | |||||
| * [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) | |||||
| * [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) | |||||
| * [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) | |||||
## Full Examples
### Full Examples : Bucket Operations
| * [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) | |||||
| * [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) | |||||
| * [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) | |||||
| * [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) | |||||
| * [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) | |||||
| * [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) | |||||
| * [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) | |||||
### Full Examples : Bucket policy Operations
| * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) | |||||
| * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) | |||||
| * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) | |||||
### Full Examples : Bucket notification Operations
| * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) | |||||
| * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) | |||||
| * [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) | |||||
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
### Full Examples : File Object Operations
| * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) | |||||
| * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) | |||||
| * [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) | |||||
| * [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) | |||||
### Full Examples : Object Operations
| * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) | |||||
| * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) | |||||
| * [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) | |||||
| * [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) | |||||
| * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) | |||||
| * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) | |||||
| * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) | |||||
| * [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) | |||||
| * [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) | |||||
### Full Examples : Encrypted Object Operations
| * [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) | |||||
| * [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) | |||||
| * [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) | |||||
### Full Examples : Presigned Operations
| * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) | |||||
| * [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) | |||||
| * [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) | |||||
| * [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) | |||||
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
| [](https://travis-ci.org/minio/minio-go) | |||||
| [](https://ci.appveyor.com/project/harshavardhana/minio-go) | |||||
| @@ -0,0 +1,565 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017, 2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "fmt" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // DestinationInfo - type with information about the object to be | |||||
| // created via server-side copy requests, using the Compose API. | |||||
| type DestinationInfo struct { | |||||
| bucket, object string | |||||
| encryption encrypt.ServerSide | |||||
| // if no user-metadata is provided, it is copied from source | |||||
// (when there is only one source object in the compose
| // request) | |||||
| userMetadata map[string]string | |||||
| } | |||||
| // NewDestinationInfo - creates a compose-object/copy-source | |||||
| // destination info object. | |||||
| // | |||||
// `sse` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
| // | |||||
| // `userMeta` is the user-metadata key-value pairs to be set on the | |||||
| // destination. The keys are automatically prefixed with `x-amz-meta-` | |||||
| // if needed. If nil is passed, and if only a single source (of any | |||||
| // size) is provided in the ComposeObject call, then metadata from the | |||||
| // source is copied to the destination. | |||||
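//
// A minimal usage sketch (assuming no SSE and no custom metadata):
//
//	dst, err := NewDestinationInfo("mybucket", "composed-object", nil, nil)
//	if err != nil {
//		// handle invalid bucket or object name
//	}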
| func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { | |||||
| // Input validation. | |||||
| if err = s3utils.CheckValidBucketName(bucket); err != nil { | |||||
| return d, err | |||||
| } | |||||
| if err = s3utils.CheckValidObjectName(object); err != nil { | |||||
| return d, err | |||||
| } | |||||
| // Process custom-metadata to remove a `x-amz-meta-` prefix if | |||||
| // present and validate that keys are distinct (after this | |||||
| // prefix removal). | |||||
| m := make(map[string]string) | |||||
| for k, v := range userMeta { | |||||
| if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { | |||||
| k = k[len("x-amz-meta-"):] | |||||
| } | |||||
| if _, ok := m[k]; ok { | |||||
| return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)) | |||||
| } | |||||
| m[k] = v | |||||
| } | |||||
| return DestinationInfo{ | |||||
| bucket: bucket, | |||||
| object: object, | |||||
| encryption: sse, | |||||
| userMetadata: m, | |||||
| }, nil | |||||
| } | |||||
| // getUserMetaHeadersMap - construct appropriate key-value pairs to send | |||||
| // as headers from metadata map to pass into copy-object request. For | |||||
| // single part copy-object (i.e. non-multipart object), enable the | |||||
| // withCopyDirectiveHeader to set the `x-amz-metadata-directive` to | |||||
| // `REPLACE`, so that metadata headers from the source are not copied | |||||
| // over. | |||||
| func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string { | |||||
| if len(d.userMetadata) == 0 { | |||||
| return nil | |||||
| } | |||||
| r := make(map[string]string) | |||||
| if withCopyDirectiveHeader { | |||||
| r["x-amz-metadata-directive"] = "REPLACE" | |||||
| } | |||||
| for k, v := range d.userMetadata { | |||||
| if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { | |||||
| r[k] = v | |||||
| } else { | |||||
| r["x-amz-meta-"+k] = v | |||||
| } | |||||
| } | |||||
| return r | |||||
| } | |||||
| // SourceInfo - represents a source object to be copied, using | |||||
| // server-side copying APIs. | |||||
| type SourceInfo struct { | |||||
| bucket, object string | |||||
| start, end int64 | |||||
| encryption encrypt.ServerSide | |||||
| // Headers to send with the upload-part-copy request involving | |||||
| // this source object. | |||||
| Headers http.Header | |||||
| } | |||||
| // NewSourceInfo - create a compose-object/copy-object source info | |||||
| // object. | |||||
| // | |||||
// `sse` is the decryption key info for server-side-encryption with
// customer provided key. It may be nil if the source is not
// encrypted.
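//
// Usage sketch (unencrypted source, optionally restricted to the
// first MiB of the object):
//
//	src := NewSourceInfo("mybucket", "myobject", nil)
//	if err := src.SetRange(0, 1024*1024-1); err != nil {
//		// handle invalid range
//	}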
| func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { | |||||
| r := SourceInfo{ | |||||
| bucket: bucket, | |||||
| object: object, | |||||
| start: -1, // range is unspecified by default | |||||
| encryption: sse, | |||||
| Headers: make(http.Header), | |||||
| } | |||||
| // Set the source header | |||||
| r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) | |||||
| return r | |||||
| } | |||||
| // SetRange - Set the start and end offset of the source object to be | |||||
| // copied. If this method is not called, the whole source object is | |||||
| // copied. | |||||
| func (s *SourceInfo) SetRange(start, end int64) error { | |||||
| if start > end || start < 0 { | |||||
| return ErrInvalidArgument("start must be non-negative, and start must be at most end.") | |||||
| } | |||||
| // Note that 0 <= start <= end | |||||
| s.start, s.end = start, end | |||||
| return nil | |||||
| } | |||||
| // SetMatchETagCond - Set ETag match condition. The object is copied | |||||
| // only if the etag of the source matches the value given here. | |||||
| func (s *SourceInfo) SetMatchETagCond(etag string) error { | |||||
| if etag == "" { | |||||
| return ErrInvalidArgument("ETag cannot be empty.") | |||||
| } | |||||
| s.Headers.Set("x-amz-copy-source-if-match", etag) | |||||
| return nil | |||||
| } | |||||
| // SetMatchETagExceptCond - Set the ETag match exception | |||||
| // condition. The object is copied only if the etag of the source is | |||||
| // not the value given here. | |||||
| func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { | |||||
| if etag == "" { | |||||
| return ErrInvalidArgument("ETag cannot be empty.") | |||||
| } | |||||
| s.Headers.Set("x-amz-copy-source-if-none-match", etag) | |||||
| return nil | |||||
| } | |||||
| // SetModifiedSinceCond - Set the modified since condition. | |||||
| func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { | |||||
| if modTime.IsZero() { | |||||
| return ErrInvalidArgument("Input time cannot be 0.") | |||||
| } | |||||
| s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) | |||||
| return nil | |||||
| } | |||||
| // SetUnmodifiedSinceCond - Set the unmodified since condition. | |||||
| func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { | |||||
| if modTime.IsZero() { | |||||
| return ErrInvalidArgument("Input time cannot be 0.") | |||||
| } | |||||
| s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) | |||||
| return nil | |||||
| } | |||||
// Helper to fetch the size, etag and user metadata of an object using a StatObject call.
| func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { | |||||
| // Get object info - need size and etag here. Also, decryption | |||||
| // headers are added to the stat request if given. | |||||
| var objInfo ObjectInfo | |||||
| opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} | |||||
| objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) | |||||
| if err != nil { | |||||
| err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) | |||||
| } else { | |||||
| size = objInfo.Size | |||||
| etag = objInfo.ETag | |||||
| userMeta = make(map[string]string) | |||||
| for k, v := range objInfo.Metadata { | |||||
| if strings.HasPrefix(k, "x-amz-meta-") { | |||||
| if len(v) > 0 { | |||||
| userMeta[k] = v[0] | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| return | |||||
| } | |||||
// Low-level implementation of the CopyObject API; supports copies of up to 5GiB only.
| func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, | |||||
| metadata map[string]string) (ObjectInfo, error) { | |||||
| // Build headers. | |||||
| headers := make(http.Header) | |||||
| // Set all the metadata headers. | |||||
| for k, v := range metadata { | |||||
| headers.Set(k, v) | |||||
| } | |||||
| // Set the source header | |||||
| headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) | |||||
// Send copy-object request
| resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ | |||||
| bucketName: destBucket, | |||||
| objectName: destObject, | |||||
| customHeader: headers, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| // Check if we got an error response. | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) | |||||
| } | |||||
| cpObjRes := copyObjectResult{} | |||||
| err = xmlDecoder(resp.Body, &cpObjRes) | |||||
| if err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| objInfo := ObjectInfo{ | |||||
| Key: destObject, | |||||
| ETag: strings.Trim(cpObjRes.ETag, "\""), | |||||
| LastModified: cpObjRes.LastModified, | |||||
| } | |||||
| return objInfo, nil | |||||
| } | |||||
| func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, | |||||
| partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { | |||||
| headers := make(http.Header) | |||||
| // Set source | |||||
| headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) | |||||
| if startOffset < 0 { | |||||
| return p, ErrInvalidArgument("startOffset must be non-negative") | |||||
| } | |||||
| if length >= 0 { | |||||
| headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) | |||||
| } | |||||
| for k, v := range metadata { | |||||
| headers.Set(k, v) | |||||
| } | |||||
| queryValues := make(url.Values) | |||||
| queryValues.Set("partNumber", strconv.Itoa(partID)) | |||||
| queryValues.Set("uploadId", uploadID) | |||||
| resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ | |||||
| bucketName: destBucket, | |||||
| objectName: destObject, | |||||
| customHeader: headers, | |||||
| queryValues: queryValues, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return | |||||
| } | |||||
| // Check if we got an error response. | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return p, httpRespToErrorResponse(resp, destBucket, destObject) | |||||
| } | |||||
| // Decode copy-part response on success. | |||||
| cpObjRes := copyObjectResult{} | |||||
| err = xmlDecoder(resp.Body, &cpObjRes) | |||||
| if err != nil { | |||||
| return p, err | |||||
| } | |||||
| p.PartNumber, p.ETag = partID, cpObjRes.ETag | |||||
| return p, nil | |||||
| } | |||||
| // uploadPartCopy - helper function to create a part in a multipart | |||||
| // upload via an upload-part-copy request | |||||
| // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html | |||||
| func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, | |||||
| headers http.Header) (p CompletePart, err error) { | |||||
| // Build query parameters | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("partNumber", strconv.Itoa(partNumber)) | |||||
| urlValues.Set("uploadId", uploadID) | |||||
| // Send upload-part-copy request | |||||
| resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ | |||||
| bucketName: bucket, | |||||
| objectName: object, | |||||
| customHeader: headers, | |||||
| queryValues: urlValues, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return p, err | |||||
| } | |||||
| // Check if we got an error response. | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return p, httpRespToErrorResponse(resp, bucket, object) | |||||
| } | |||||
| // Decode copy-part response on success. | |||||
| cpObjRes := copyObjectResult{} | |||||
| err = xmlDecoder(resp.Body, &cpObjRes) | |||||
| if err != nil { | |||||
| return p, err | |||||
| } | |||||
| p.PartNumber, p.ETag = partNumber, cpObjRes.ETag | |||||
| return p, nil | |||||
| } | |||||
| // ComposeObjectWithProgress - creates an object using server-side copying of | |||||
| // existing objects. It takes a list of source objects (with optional | |||||
| // offsets) and concatenates them into a new object using only | |||||
| // server-side copying operations. Optionally takes progress reader hook | |||||
| // for applications to look at current progress. | |||||
| func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error { | |||||
| if len(srcs) < 1 || len(srcs) > maxPartsCount { | |||||
| return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") | |||||
| } | |||||
| ctx := context.Background() | |||||
| srcSizes := make([]int64, len(srcs)) | |||||
| var totalSize, size, totalParts int64 | |||||
| var srcUserMeta map[string]string | |||||
| etags := make([]string, len(srcs)) | |||||
| var err error | |||||
| for i, src := range srcs { | |||||
| size, etags[i], srcUserMeta, err = src.getProps(c) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Error out if client side encryption is used in this source object when | |||||
| // more than one source objects are given. | |||||
| if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { | |||||
| return ErrInvalidArgument( | |||||
| fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) | |||||
| } | |||||
| // Check if a segment is specified, and if so, is the | |||||
| // segment within object bounds? | |||||
| if src.start != -1 { | |||||
| // Since range is specified, | |||||
| // 0 <= src.start <= src.end | |||||
| // so only invalid case to check is: | |||||
| if src.end >= size { | |||||
| return ErrInvalidArgument( | |||||
| fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", | |||||
| i, src.start, src.end, size)) | |||||
| } | |||||
| size = src.end - src.start + 1 | |||||
| } | |||||
| // Only the last source may be less than `absMinPartSize` | |||||
| if size < absMinPartSize && i < len(srcs)-1 { | |||||
| return ErrInvalidArgument( | |||||
| fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) | |||||
| } | |||||
| // Is data to copy too large? | |||||
| totalSize += size | |||||
| if totalSize > maxMultipartPutObjectSize { | |||||
| return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) | |||||
| } | |||||
| // record source size | |||||
| srcSizes[i] = size | |||||
| // calculate parts needed for current source | |||||
| totalParts += partsRequired(size) | |||||
| // Do we need more parts than we are allowed? | |||||
| if totalParts > maxPartsCount { | |||||
| return ErrInvalidArgument(fmt.Sprintf( | |||||
| "Your proposed compose object requires more than %d parts", maxPartsCount)) | |||||
| } | |||||
| } | |||||
// Single source object case (i.e. when only one source is
// involved, it is copied wholly and is at most 5GiB in size;
// empty files are also supported).
| if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { | |||||
| return c.CopyObjectWithProgress(dst, srcs[0], progress) | |||||
| } | |||||
| // Now, handle multipart-copy cases. | |||||
| // 1. Ensure that the object has not been changed while | |||||
| // we are copying data. | |||||
| for i, src := range srcs { | |||||
| if src.Headers.Get("x-amz-copy-source-if-match") == "" { | |||||
| src.SetMatchETagCond(etags[i]) | |||||
| } | |||||
| } | |||||
| // 2. Initiate a new multipart upload. | |||||
| // Set user-metadata on the destination object. If no | |||||
| // user-metadata is specified, and there is only one source, | |||||
| // (only) then metadata from source is copied. | |||||
| userMeta := dst.getUserMetaHeadersMap(false) | |||||
| metaMap := userMeta | |||||
| if len(userMeta) == 0 && len(srcs) == 1 { | |||||
| metaMap = srcUserMeta | |||||
| } | |||||
| metaHeaders := make(map[string]string) | |||||
| for k, v := range metaMap { | |||||
| metaHeaders[k] = v | |||||
| } | |||||
| uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // 3. Perform copy part uploads | |||||
| objParts := []CompletePart{} | |||||
| partIndex := 1 | |||||
| for i, src := range srcs { | |||||
| h := src.Headers | |||||
| if src.encryption != nil { | |||||
| encrypt.SSECopy(src.encryption).Marshal(h) | |||||
| } | |||||
| // Add destination encryption headers | |||||
| if dst.encryption != nil { | |||||
| dst.encryption.Marshal(h) | |||||
| } | |||||
| // calculate start/end indices of parts after | |||||
| // splitting. | |||||
| startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) | |||||
| for j, start := range startIdx { | |||||
| end := endIdx[j] | |||||
| // Add (or reset) source range header for | |||||
| // upload part copy request. | |||||
| h.Set("x-amz-copy-source-range", | |||||
| fmt.Sprintf("bytes=%d-%d", start, end)) | |||||
| // make upload-part-copy request | |||||
| complPart, err := c.uploadPartCopy(ctx, dst.bucket, | |||||
| dst.object, uploadID, partIndex, h) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if progress != nil { | |||||
| io.CopyN(ioutil.Discard, progress, end-start+1) | |||||
| } | |||||
| objParts = append(objParts, complPart) | |||||
| partIndex++ | |||||
| } | |||||
| } | |||||
| // 4. Make final complete-multipart request. | |||||
| _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, | |||||
| completeMultipartUpload{Parts: objParts}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // ComposeObject - creates an object using server-side copying of | |||||
| // existing objects. It takes a list of source objects (with optional | |||||
| // offsets) and concatenates them into a new object using only | |||||
| // server-side copying operations. | |||||
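//
// Usage sketch, assuming a configured Client c and existing source
// objects:
//
//	srcs := []SourceInfo{
//		NewSourceInfo("srcbucket", "part-1", nil),
//		NewSourceInfo("srcbucket", "part-2", nil),
//	}
//	dst, err := NewDestinationInfo("dstbucket", "combined", nil, nil)
//	if err != nil {
//		// handle error
//	}
//	err = c.ComposeObject(dst, srcs)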
| func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { | |||||
| return c.ComposeObjectWithProgress(dst, srcs, nil) | |||||
| } | |||||
// partsRequired computes the number of parts needed to copy `size`
// bytes, using a part size of maxMultipartPutObjectSize / (maxPartsCount - 1).
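// For example, with the limits referenced above (10000 parts, 5TiB
// maximum object size), the effective part size is roughly 524MiB,
// so a 1GiB source requires two parts.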
| func partsRequired(size int64) int64 { | |||||
| maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) | |||||
| r := size / int64(maxPartSize) | |||||
| if size%int64(maxPartSize) > 0 { | |||||
| r++ | |||||
| } | |||||
| return r | |||||
| } | |||||
| // calculateEvenSplits - computes splits for a source and returns | |||||
// start and end index slices. Splits happen evenly so that no part is
// smaller than 5MiB, which would fail the multipart request unless it
// is the last part.
| func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { | |||||
| if size == 0 { | |||||
| return | |||||
| } | |||||
| reqParts := partsRequired(size) | |||||
| startIndex = make([]int64, reqParts) | |||||
| endIndex = make([]int64, reqParts) | |||||
| // Compute number of required parts `k`, as: | |||||
| // | |||||
| // k = ceiling(size / copyPartSize) | |||||
| // | |||||
| // Now, distribute the `size` bytes in the source into | |||||
| // k parts as evenly as possible: | |||||
| // | |||||
| // r parts sized (q+1) bytes, and | |||||
| // (k - r) parts sized q bytes, where | |||||
| // | |||||
| // size = q * k + r (by simple division of size by k, | |||||
| // so that 0 <= r < k) | |||||
| // | |||||
| start := src.start | |||||
| if start == -1 { | |||||
| start = 0 | |||||
| } | |||||
| quot, rem := size/reqParts, size%reqParts | |||||
| nextStart := start | |||||
| for j := int64(0); j < reqParts; j++ { | |||||
| curPartSize := quot | |||||
| if j < rem { | |||||
| curPartSize++ | |||||
| } | |||||
| cStart := nextStart | |||||
| cEnd := cStart + curPartSize - 1 | |||||
| nextStart = cEnd + 1 | |||||
| startIndex[j], endIndex[j] = cStart, cEnd | |||||
| } | |||||
| return | |||||
| } | |||||
| @@ -0,0 +1,84 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "net/http" | |||||
| "time" | |||||
| ) | |||||
| // BucketInfo container for bucket metadata. | |||||
| type BucketInfo struct { | |||||
| // The name of the bucket. | |||||
| Name string `json:"name"` | |||||
| // Date the bucket was created. | |||||
| CreationDate time.Time `json:"creationDate"` | |||||
| } | |||||
| // ObjectInfo container for object metadata. | |||||
| type ObjectInfo struct { | |||||
| // An ETag is optionally set to md5sum of an object. In case of multipart objects, | |||||
| // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of | |||||
| // each parts concatenated into one string. | |||||
| ETag string `json:"etag"` | |||||
| Key string `json:"name"` // Name of the object | |||||
| LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. | |||||
| Size int64 `json:"size"` // Size in bytes of the object. | |||||
| ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. | |||||
| // Collection of additional metadata on the object. | |||||
| // eg: x-amz-meta-*, content-encoding etc. | |||||
| Metadata http.Header `json:"metadata" xml:"-"` | |||||
| // Owner name. | |||||
| Owner struct { | |||||
| DisplayName string `json:"name"` | |||||
| ID string `json:"id"` | |||||
| } `json:"owner"` | |||||
| // The class of storage used to store the object. | |||||
| StorageClass string `json:"storageClass"` | |||||
| // Error | |||||
| Err error `json:"-"` | |||||
| } | |||||
| // ObjectMultipartInfo container for multipart object metadata. | |||||
| type ObjectMultipartInfo struct { | |||||
| // Date and time at which the multipart upload was initiated. | |||||
| Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` | |||||
| Initiator initiator | |||||
| Owner owner | |||||
| // The type of storage to use for the object. Defaults to 'STANDARD'. | |||||
| StorageClass string | |||||
| // Key of the object for which the multipart upload was initiated. | |||||
| Key string | |||||
| // Size in bytes of the object. | |||||
| Size int64 | |||||
| // Upload ID that identifies the multipart upload. | |||||
| UploadID string `xml:"UploadId"` | |||||
| // Error | |||||
| Err error | |||||
| } | |||||
| @@ -0,0 +1,282 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "encoding/xml" | |||||
| "fmt" | |||||
| "net/http" | |||||
| ) | |||||
| /* **** SAMPLE ERROR RESPONSE **** | |||||
| <?xml version="1.0" encoding="UTF-8"?> | |||||
| <Error> | |||||
| <Code>AccessDenied</Code> | |||||
| <Message>Access Denied</Message> | |||||
| <BucketName>bucketName</BucketName> | |||||
| <Key>objectName</Key> | |||||
| <RequestId>F19772218238A85A</RequestId> | |||||
| <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId> | |||||
| </Error> | |||||
| */ | |||||
| // ErrorResponse - Is the typed error returned by all API operations. | |||||
| // ErrorResponse struct should be comparable since it is compared inside | |||||
| // golang http API (https://github.com/golang/go/issues/29768) | |||||
| type ErrorResponse struct { | |||||
| XMLName xml.Name `xml:"Error" json:"-"` | |||||
| Code string | |||||
| Message string | |||||
| BucketName string | |||||
| Key string | |||||
| RequestID string `xml:"RequestId"` | |||||
| HostID string `xml:"HostId"` | |||||
| // Region where the bucket is located. This header is returned | |||||
| // only in HEAD bucket and ListObjects response. | |||||
| Region string | |||||
| // Underlying HTTP status code for the returned error | |||||
| StatusCode int `xml:"-" json:"-"` | |||||
| } | |||||
| // ToErrorResponse - Returns parsed ErrorResponse struct from body and | |||||
| // http headers. | |||||
| // | |||||
| // For example: | |||||
| // | |||||
| // import s3 "github.com/minio/minio-go" | |||||
| // ... | |||||
| // ... | |||||
//     reader, err := s3.GetObject(...)
| // if err != nil { | |||||
| // resp := s3.ToErrorResponse(err) | |||||
| // } | |||||
| // ... | |||||
| func ToErrorResponse(err error) ErrorResponse { | |||||
| switch err := err.(type) { | |||||
| case ErrorResponse: | |||||
| return err | |||||
| default: | |||||
| return ErrorResponse{} | |||||
| } | |||||
| } | |||||
| // Error - Returns S3 error string. | |||||
| func (e ErrorResponse) Error() string { | |||||
| if e.Message == "" { | |||||
| msg, ok := s3ErrorResponseMap[e.Code] | |||||
| if !ok { | |||||
| msg = fmt.Sprintf("Error response code %s.", e.Code) | |||||
| } | |||||
| return msg | |||||
| } | |||||
| return e.Message | |||||
| } | |||||
| // Common string for errors to report issue location in unexpected | |||||
| // cases. | |||||
| const ( | |||||
| reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." | |||||
| ) | |||||
| // httpRespToErrorResponse returns a new encoded ErrorResponse | |||||
| // structure as error. | |||||
| func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { | |||||
| if resp == nil { | |||||
| msg := "Response is empty. " + reportIssue | |||||
| return ErrInvalidArgument(msg) | |||||
| } | |||||
| errResp := ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| } | |||||
| err := xmlDecoder(resp.Body, &errResp) | |||||
// XML decoding failed (e.g. no response body); fall back to HTTP status code and headers.
| if err != nil { | |||||
| switch resp.StatusCode { | |||||
| case http.StatusNotFound: | |||||
| if objectName == "" { | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: "NoSuchBucket", | |||||
| Message: "The specified bucket does not exist.", | |||||
| BucketName: bucketName, | |||||
| } | |||||
| } else { | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: "NoSuchKey", | |||||
| Message: "The specified key does not exist.", | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| } | |||||
| case http.StatusForbidden: | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: "AccessDenied", | |||||
| Message: "Access Denied.", | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| case http.StatusConflict: | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: "Conflict", | |||||
| Message: "Bucket not empty.", | |||||
| BucketName: bucketName, | |||||
| } | |||||
| case http.StatusPreconditionFailed: | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: "PreconditionFailed", | |||||
| Message: s3ErrorResponseMap["PreconditionFailed"], | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| default: | |||||
| errResp = ErrorResponse{ | |||||
| StatusCode: resp.StatusCode, | |||||
| Code: resp.Status, | |||||
| Message: resp.Status, | |||||
| BucketName: bucketName, | |||||
| } | |||||
| } | |||||
| } | |||||
| // Save hostID, requestID and region information | |||||
| // from headers if not available through error XML. | |||||
| if errResp.RequestID == "" { | |||||
| errResp.RequestID = resp.Header.Get("x-amz-request-id") | |||||
| } | |||||
| if errResp.HostID == "" { | |||||
| errResp.HostID = resp.Header.Get("x-amz-id-2") | |||||
| } | |||||
| if errResp.Region == "" { | |||||
| errResp.Region = resp.Header.Get("x-amz-bucket-region") | |||||
| } | |||||
| if errResp.Code == "InvalidRegion" && errResp.Region != "" { | |||||
| errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) | |||||
| } | |||||
| return errResp | |||||
| } | |||||
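| // A minimal sketch of inspecting the decoded error, assuming a configured | |||||
| // Client 'c'; the bucket and object names are illustrative: | |||||
| // | |||||
| //   _, err := c.StatObject("mybucket", "photo.jpg", StatObjectOptions{}) | |||||
| //   if err != nil { | |||||
| //       errResp := ToErrorResponse(err) | |||||
| //       fmt.Printf("code=%s status=%d request-id=%s\n", | |||||
| //           errResp.Code, errResp.StatusCode, errResp.RequestID) | |||||
| //   } | |||||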
| // ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. | |||||
| func ErrTransferAccelerationBucket(bucketName string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "InvalidArgument", | |||||
| Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", | |||||
| BucketName: bucketName, | |||||
| } | |||||
| } | |||||
| // ErrEntityTooLarge - Input size is larger than supported maximum. | |||||
| func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { | |||||
| msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "EntityTooLarge", | |||||
| Message: msg, | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| } | |||||
| // ErrEntityTooSmall - Input size is smaller than supported minimum. | |||||
| func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { | |||||
| msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "EntityTooSmall", | |||||
| Message: msg, | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| } | |||||
| // ErrUnexpectedEOF - Unexpected end of file reached. | |||||
| func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { | |||||
| msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "UnexpectedEOF", | |||||
| Message: msg, | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| } | |||||
| } | |||||
| // ErrInvalidBucketName - Invalid bucket name response. | |||||
| func ErrInvalidBucketName(message string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "InvalidBucketName", | |||||
| Message: message, | |||||
| RequestID: "minio", | |||||
| } | |||||
| } | |||||
| // ErrInvalidObjectName - Invalid object name response. | |||||
| func ErrInvalidObjectName(message string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusNotFound, | |||||
| Code: "NoSuchKey", | |||||
| Message: message, | |||||
| RequestID: "minio", | |||||
| } | |||||
| } | |||||
| // ErrInvalidObjectPrefix - Invalid object prefix response is | |||||
| // similar to object name response. | |||||
| var ErrInvalidObjectPrefix = ErrInvalidObjectName | |||||
| // ErrInvalidArgument - Invalid argument response. | |||||
| func ErrInvalidArgument(message string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusBadRequest, | |||||
| Code: "InvalidArgument", | |||||
| Message: message, | |||||
| RequestID: "minio", | |||||
| } | |||||
| } | |||||
| // ErrNoSuchBucketPolicy - No Such Bucket Policy response | |||||
| // The specified bucket does not have a bucket policy. | |||||
| func ErrNoSuchBucketPolicy(message string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusNotFound, | |||||
| Code: "NoSuchBucketPolicy", | |||||
| Message: message, | |||||
| RequestID: "minio", | |||||
| } | |||||
| } | |||||
| // ErrAPINotSupported - API not supported response | |||||
| // The specified API call is not supported | |||||
| func ErrAPINotSupported(message string) error { | |||||
| return ErrorResponse{ | |||||
| StatusCode: http.StatusNotImplemented, | |||||
| Code: "APINotSupported", | |||||
| Message: message, | |||||
| RequestID: "minio", | |||||
| } | |||||
| } | |||||
| @@ -0,0 +1,77 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // GetBucketLifecycle - get bucket lifecycle. | |||||
| func (c Client) GetBucketLifecycle(bucketName string) (string, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return "", err | |||||
| } | |||||
| bucketLifecycle, err := c.getBucketLifecycle(bucketName) | |||||
| if err != nil { | |||||
| errResponse := ToErrorResponse(err) | |||||
| if errResponse.Code == "NoSuchLifecycleConfiguration" { | |||||
| return "", nil | |||||
| } | |||||
| return "", err | |||||
| } | |||||
| return bucketLifecycle, nil | |||||
| } | |||||
| // Request server for current bucket lifecycle. | |||||
| func (c Client) getBucketLifecycle(bucketName string) (string, error) { | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("lifecycle", "") | |||||
| // Execute GET on bucket to get lifecycle. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return "", httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| lifecycle := string(bucketLifecycleBuf) | |||||
| return lifecycle, err | |||||
| } | |||||
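| // A minimal usage sketch, assuming a configured Client 'c'; the bucket name | |||||
| // is illustrative. An empty string with a nil error means no lifecycle | |||||
| // configuration is set on the bucket: | |||||
| // | |||||
| //   lifecycle, err := c.GetBucketLifecycle("mybucket") | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   fmt.Println("lifecycle XML:", lifecycle) | |||||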
| @@ -0,0 +1,136 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "net/http" | |||||
| "net/url" | |||||
| ) | |||||
| type accessControlPolicy struct { | |||||
| Owner struct { | |||||
| ID string `xml:"ID"` | |||||
| DisplayName string `xml:"DisplayName"` | |||||
| } `xml:"Owner"` | |||||
| AccessControlList struct { | |||||
| Grant []struct { | |||||
| Grantee struct { | |||||
| ID string `xml:"ID"` | |||||
| DisplayName string `xml:"DisplayName"` | |||||
| URI string `xml:"URI"` | |||||
| } `xml:"Grantee"` | |||||
| Permission string `xml:"Permission"` | |||||
| } `xml:"Grant"` | |||||
| } `xml:"AccessControlList"` | |||||
| } | |||||
| // GetObjectACL - get the object's ACL, returned as metadata on the ObjectInfo. | |||||
| func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: url.Values{ | |||||
| "acl": []string{""}, | |||||
| }, | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| defer closeResponse(resp) | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return nil, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| res := &accessControlPolicy{} | |||||
| if err := xmlDecoder(resp.Body, res); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| cannedACL := getCannedACL(res) | |||||
| if cannedACL != "" { | |||||
| objInfo.Metadata.Add("X-Amz-Acl", cannedACL) | |||||
| return &objInfo, nil | |||||
| } | |||||
| grantACL := getAmzGrantACL(res) | |||||
| for k, v := range grantACL { | |||||
| objInfo.Metadata[k] = v | |||||
| } | |||||
| return &objInfo, nil | |||||
| } | |||||
| func getCannedACL(aCPolicy *accessControlPolicy) string { | |||||
| grants := aCPolicy.AccessControlList.Grant | |||||
| switch { | |||||
| case len(grants) == 1: | |||||
| if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { | |||||
| return "private" | |||||
| } | |||||
| case len(grants) == 2: | |||||
| for _, g := range grants { | |||||
| if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { | |||||
| return "authenticated-read" | |||||
| } | |||||
| if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { | |||||
| return "public-read" | |||||
| } | |||||
| if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { | |||||
| return "bucket-owner-read" | |||||
| } | |||||
| } | |||||
| case len(grants) == 3: | |||||
| for _, g := range grants { | |||||
| if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { | |||||
| return "public-read-write" | |||||
| } | |||||
| } | |||||
| } | |||||
| return "" | |||||
| } | |||||
| func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { | |||||
| grants := aCPolicy.AccessControlList.Grant | |||||
| res := map[string][]string{} | |||||
| for _, g := range grants { | |||||
| switch { | |||||
| case g.Permission == "READ": | |||||
| res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) | |||||
| case g.Permission == "WRITE": | |||||
| res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) | |||||
| case g.Permission == "READ_ACP": | |||||
| res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) | |||||
| case g.Permission == "WRITE_ACP": | |||||
| res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) | |||||
| case g.Permission == "FULL_CONTROL": | |||||
| res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) | |||||
| } | |||||
| } | |||||
| return res | |||||
| } | |||||
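| // A minimal usage sketch, assuming a configured Client 'c'; names are | |||||
| // illustrative. Canned ACLs surface under the X-Amz-Acl metadata key, | |||||
| // grant-style ACLs under X-Amz-Grant-* keys: | |||||
| // | |||||
| //   info, err := c.GetObjectACL("mybucket", "photo.jpg") | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   fmt.Println("canned ACL:", info.Metadata.Get("X-Amz-Acl")) | |||||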
| @@ -0,0 +1,26 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import "context" | |||||
| // GetObjectWithContext - returns a seekable, readable object. | |||||
| // The options can be used to specify the GET request further. | |||||
| func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { | |||||
| return c.getObjectWithContext(ctx, bucketName, objectName, opts) | |||||
| } | |||||
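| // A minimal usage sketch, assuming a configured Client 'c'; names and the | |||||
| // timeout are illustrative. Cancelling the context aborts the in-flight | |||||
| // HTTP requests behind the returned Object: | |||||
| // | |||||
| //   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) | |||||
| //   defer cancel() | |||||
| //   obj, err := c.GetObjectWithContext(ctx, "mybucket", "photo.jpg", GetObjectOptions{}) | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   defer obj.Close() | |||||
| //   if _, err = io.Copy(os.Stdout, obj); err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||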
| @@ -0,0 +1,125 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io" | |||||
| "os" | |||||
| "path/filepath" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // FGetObjectWithContext - download contents of an object to a local file. | |||||
| // The options can be used to specify the GET request further. | |||||
| func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { | |||||
| return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) | |||||
| } | |||||
| // FGetObject - download contents of an object to a local file. | |||||
| func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { | |||||
| return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) | |||||
| } | |||||
| // fGetObjectWithContext - fgetObject wrapper function with context | |||||
| func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Verify if destination already exists. | |||||
| st, err := os.Stat(filePath) | |||||
| if err == nil { | |||||
| // If the destination exists and is a directory. | |||||
| if st.IsDir() { | |||||
| return ErrInvalidArgument("fileName is a directory.") | |||||
| } | |||||
| } | |||||
| // Proceed if the file does not exist; return for all other errors. | |||||
| if err != nil { | |||||
| if !os.IsNotExist(err) { | |||||
| return err | |||||
| } | |||||
| } | |||||
| // Extract top level directory. | |||||
| objectDir, _ := filepath.Split(filePath) | |||||
| if objectDir != "" { | |||||
| // Create any missing top level directories. | |||||
| if err := os.MkdirAll(objectDir, 0700); err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| // Stat the object to learn its ETag, used to name the temporary part file. | |||||
| objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Write to a temporary file "<filePath><ETag>.part.minio" before saving. | |||||
| filePartPath := filePath + objectStat.ETag + ".part.minio" | |||||
| // If exists, open in append mode. If not create it as a part file. | |||||
| filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Issue Stat to get the current offset. | |||||
| st, err = filePart.Stat() | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Initialize get object request headers to set the | |||||
| // appropriate range offsets to read from. | |||||
| if st.Size() > 0 { | |||||
| opts.SetRange(st.Size(), 0) | |||||
| } | |||||
| // Seek to current position for incoming reader. | |||||
| objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Write to the part file. | |||||
| if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { | |||||
| return err | |||||
| } | |||||
| // Close the file before rename, this is specifically needed for Windows users. | |||||
| if err = filePart.Close(); err != nil { | |||||
| return err | |||||
| } | |||||
| // Safely completed. Now commit by renaming to actual filename. | |||||
| if err = os.Rename(filePartPath, filePath); err != nil { | |||||
| return err | |||||
| } | |||||
| // Return. | |||||
| return nil | |||||
| } | |||||
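| // A minimal usage sketch, assuming a configured Client 'c'; names are | |||||
| // illustrative. Because the download is staged in "<filePath><ETag>.part.minio" | |||||
| // and renamed on success, rerunning an interrupted transfer resumes from | |||||
| // the bytes already written: | |||||
| // | |||||
| //   err := c.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg", GetObjectOptions{}) | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||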
| @@ -0,0 +1,659 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "errors" | |||||
| "fmt" | |||||
| "io" | |||||
| "net/http" | |||||
| "strings" | |||||
| "sync" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // GetObject - returns a seekable, readable object. | |||||
| func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { | |||||
| return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) | |||||
| } | |||||
| // GetObject wrapper function that accepts a request context | |||||
| func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| var httpReader io.ReadCloser | |||||
| var objectInfo ObjectInfo | |||||
| var err error | |||||
| // Create request channel. | |||||
| reqCh := make(chan getRequest) | |||||
| // Create response channel. | |||||
| resCh := make(chan getResponse) | |||||
| // Create done channel. | |||||
| doneCh := make(chan struct{}) | |||||
| // This routine feeds partial object data as and when the caller reads. | |||||
| go func() { | |||||
| defer close(reqCh) | |||||
| defer close(resCh) | |||||
| // Used to verify if etag of object has changed since last read. | |||||
| var etag string | |||||
| // Loop through the incoming control messages and read data. | |||||
| for { | |||||
| select { | |||||
| // When the done channel is closed exit our routine. | |||||
| case <-doneCh: | |||||
| // Close the http response body before returning. | |||||
| // This ends the connection with the server. | |||||
| if httpReader != nil { | |||||
| httpReader.Close() | |||||
| } | |||||
| return | |||||
| // Gather incoming request. | |||||
| case req := <-reqCh: | |||||
| // If this is the first request we may not need to do a getObject request yet. | |||||
| if req.isFirstReq { | |||||
| // First request is a Read/ReadAt. | |||||
| if req.isReadOp { | |||||
| // Differentiate between wanting the whole object and just a range. | |||||
| if req.isReadAt { | |||||
| // If this is a ReadAt request only get the specified range. | |||||
| // Range is set with respect to the offset and length of the buffer requested. | |||||
| // Do not set objectInfo from the first readAt request because it will not get | |||||
| // the whole object. | |||||
| opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) | |||||
| } else if req.Offset > 0 { | |||||
| opts.SetRange(req.Offset, 0) | |||||
| } | |||||
| httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| resCh <- getResponse{Error: err} | |||||
| return | |||||
| } | |||||
| etag = objectInfo.ETag | |||||
| // Read at least req.Buffer bytes; if we cannot, we have | |||||
| // reached our EOF. | |||||
| size, err := io.ReadFull(httpReader, req.Buffer) | |||||
| if size > 0 && err == io.ErrUnexpectedEOF { | |||||
| // If an EOF happens after reading some but not | |||||
| // all the bytes ReadFull returns ErrUnexpectedEOF | |||||
| err = io.EOF | |||||
| } | |||||
| // Send back the first response. | |||||
| resCh <- getResponse{ | |||||
| objectInfo: objectInfo, | |||||
| Size: int(size), | |||||
| Error: err, | |||||
| didRead: true, | |||||
| } | |||||
| } else { | |||||
| // First request is a Stat or Seek call. | |||||
| // Only need to run a StatObject until an actual Read or ReadAt request comes through. | |||||
| // Remove the Range header if already set, so stat operations return the original object size. | |||||
| delete(opts.headers, "Range") | |||||
| objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) | |||||
| if err != nil { | |||||
| resCh <- getResponse{ | |||||
| Error: err, | |||||
| } | |||||
| // Exit the go-routine. | |||||
| return | |||||
| } | |||||
| etag = objectInfo.ETag | |||||
| // Send back the first response. | |||||
| resCh <- getResponse{ | |||||
| objectInfo: objectInfo, | |||||
| } | |||||
| } | |||||
| } else if req.settingObjectInfo { // Request is just to get objectInfo. | |||||
| // Remove the Range header if already set, so stat operations return the original object size. | |||||
| delete(opts.headers, "Range") | |||||
| if etag != "" { | |||||
| opts.SetMatchETag(etag) | |||||
| } | |||||
| objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) | |||||
| if err != nil { | |||||
| resCh <- getResponse{ | |||||
| Error: err, | |||||
| } | |||||
| // Exit the goroutine. | |||||
| return | |||||
| } | |||||
| // Send back the objectInfo. | |||||
| resCh <- getResponse{ | |||||
| objectInfo: objectInfo, | |||||
| } | |||||
| } else { | |||||
| // Offset changes fetch the object again at the new offset. | |||||
| // Because the httpReader may not be set by the first | |||||
| // request (if it was a stat or seek), check whether the | |||||
| // object has been read before, and only initialize a new | |||||
| // reader when one does not already exist. | |||||
| // All readAt requests are new requests. | |||||
| if req.DidOffsetChange || !req.beenRead { | |||||
| if etag != "" { | |||||
| opts.SetMatchETag(etag) | |||||
| } | |||||
| if httpReader != nil { | |||||
| // Close previously opened http reader. | |||||
| httpReader.Close() | |||||
| } | |||||
| // If this request is a readAt only get the specified range. | |||||
| if req.isReadAt { | |||||
| // Range is set with respect to the offset and length of the buffer requested. | |||||
| opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) | |||||
| } else if req.Offset > 0 { // Range is set with respect to the offset. | |||||
| opts.SetRange(req.Offset, 0) | |||||
| } | |||||
| httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| resCh <- getResponse{ | |||||
| Error: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| } | |||||
| // Read at least req.Buffer bytes; if we cannot, we have | |||||
| // reached our EOF. | |||||
| size, err := io.ReadFull(httpReader, req.Buffer) | |||||
| if err == io.ErrUnexpectedEOF { | |||||
| // If an EOF happens after reading some but not | |||||
| // all the bytes ReadFull returns ErrUnexpectedEOF | |||||
| err = io.EOF | |||||
| } | |||||
| // Reply back how much was read. | |||||
| resCh <- getResponse{ | |||||
| Size: int(size), | |||||
| Error: err, | |||||
| didRead: true, | |||||
| objectInfo: objectInfo, | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| }() | |||||
| // Create a newObject through the information sent back by reqCh. | |||||
| return newObject(reqCh, resCh, doneCh), nil | |||||
| } | |||||
| // get request message container to communicate with internal | |||||
| // go-routine. | |||||
| type getRequest struct { | |||||
| Buffer []byte | |||||
| Offset int64 // readAt offset. | |||||
| DidOffsetChange bool // Tracks the offset changes for Seek requests. | |||||
| beenRead bool // Determines if this is the first time an object is being read. | |||||
| isReadAt bool // Determines if this request is a request to a specific range | |||||
| isReadOp bool // Determines if this request is a Read or Read/At request. | |||||
| isFirstReq bool // Determines if this request is the first time an object is being accessed. | |||||
| settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. | |||||
| } | |||||
| // get response message container to reply back for the request. | |||||
| type getResponse struct { | |||||
| Size int | |||||
| Error error | |||||
| didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. | |||||
| objectInfo ObjectInfo // Used for the first request. | |||||
| } | |||||
| // Object represents an open object. It implements | |||||
| // Reader, ReaderAt, Seeker, Closer for a HTTP stream. | |||||
| type Object struct { | |||||
| // Mutex. | |||||
| mutex *sync.Mutex | |||||
| // User allocated and defined. | |||||
| reqCh chan<- getRequest | |||||
| resCh <-chan getResponse | |||||
| doneCh chan<- struct{} | |||||
| currOffset int64 | |||||
| objectInfo ObjectInfo | |||||
| // Ask lower level to initiate data fetching based on currOffset | |||||
| seekData bool | |||||
| // Keeps track of closed call. | |||||
| isClosed bool | |||||
| // Keeps track of if this is the first call. | |||||
| isStarted bool | |||||
| // Previous error saved for future calls. | |||||
| prevErr error | |||||
| // Keeps track of if this object has been read yet. | |||||
| beenRead bool | |||||
| // Keeps track of if objectInfo has been set yet. | |||||
| objectInfoSet bool | |||||
| } | |||||
| // doGetRequest - sends a request on the object's reqCh and blocks on resCh. | |||||
| // Returns the size of the buffer read, if anything was read, as well | |||||
| // as any error encountered. For all first requests sent on the object | |||||
| // it is also responsible for sending back the objectInfo. | |||||
| func (o *Object) doGetRequest(request getRequest) (getResponse, error) { | |||||
| o.reqCh <- request | |||||
| response := <-o.resCh | |||||
| // Return any error to the top level. | |||||
| if response.Error != nil { | |||||
| return response, response.Error | |||||
| } | |||||
| // This was the first request. | |||||
| if !o.isStarted { | |||||
| // The object has been operated on. | |||||
| o.isStarted = true | |||||
| } | |||||
| // Set the objectInfo if the request was not readAt | |||||
| // and it hasn't been set before. | |||||
| if !o.objectInfoSet && !request.isReadAt { | |||||
| o.objectInfo = response.objectInfo | |||||
| o.objectInfoSet = true | |||||
| } | |||||
| // Set beenRead only if it has not been set before. | |||||
| if !o.beenRead { | |||||
| o.beenRead = response.didRead | |||||
| } | |||||
| // Data is ready on the wire; no need to re-initiate the connection at the lower level. | |||||
| o.seekData = false | |||||
| return response, nil | |||||
| } | |||||
| // setOffset - handles the setting of offsets for | |||||
| // Read/ReadAt/Seek requests. | |||||
| func (o *Object) setOffset(bytesRead int64) error { | |||||
| // Update the currentOffset. | |||||
| o.currOffset += bytesRead | |||||
| if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { | |||||
| return io.EOF | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Read reads up to len(b) bytes into b. It returns the number of | |||||
| // bytes read (0 <= n <= len(b)) and any error encountered. Returns | |||||
| // io.EOF upon end of file. | |||||
| func (o *Object) Read(b []byte) (n int, err error) { | |||||
| if o == nil { | |||||
| return 0, ErrInvalidArgument("Object is nil") | |||||
| } | |||||
| // Locking. | |||||
| o.mutex.Lock() | |||||
| defer o.mutex.Unlock() | |||||
| // prevErr is previous error saved from previous operation. | |||||
| if o.prevErr != nil || o.isClosed { | |||||
| return 0, o.prevErr | |||||
| } | |||||
| // Create a new request. | |||||
| readReq := getRequest{ | |||||
| isReadOp: true, | |||||
| beenRead: o.beenRead, | |||||
| Buffer: b, | |||||
| } | |||||
| // Alert that this is the first request. | |||||
| if !o.isStarted { | |||||
| readReq.isFirstReq = true | |||||
| } | |||||
| // Ask to establish a new data fetch routine based on seekData flag | |||||
| readReq.DidOffsetChange = o.seekData | |||||
| readReq.Offset = o.currOffset | |||||
| // Send and receive from the first request. | |||||
| response, err := o.doGetRequest(readReq) | |||||
| if err != nil && err != io.EOF { | |||||
| // Save the error for future calls. | |||||
| o.prevErr = err | |||||
| return response.Size, err | |||||
| } | |||||
| // Bytes read. | |||||
| bytesRead := int64(response.Size) | |||||
| // Set the new offset. | |||||
| oerr := o.setOffset(bytesRead) | |||||
| if oerr != nil { | |||||
| // Save the error for future calls. | |||||
| o.prevErr = oerr | |||||
| return response.Size, oerr | |||||
| } | |||||
| // Return the response. | |||||
| return response.Size, err | |||||
| } | |||||
| // Stat returns the ObjectInfo structure describing Object. | |||||
| func (o *Object) Stat() (ObjectInfo, error) { | |||||
| if o == nil { | |||||
| return ObjectInfo{}, ErrInvalidArgument("Object is nil") | |||||
| } | |||||
| // Locking. | |||||
| o.mutex.Lock() | |||||
| defer o.mutex.Unlock() | |||||
| if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { | |||||
| return ObjectInfo{}, o.prevErr | |||||
| } | |||||
| // This is the first request. | |||||
| if !o.isStarted || !o.objectInfoSet { | |||||
| // Send the request and get the response. | |||||
| _, err := o.doGetRequest(getRequest{ | |||||
| isFirstReq: !o.isStarted, | |||||
| settingObjectInfo: !o.objectInfoSet, | |||||
| }) | |||||
| if err != nil { | |||||
| o.prevErr = err | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| } | |||||
| return o.objectInfo, nil | |||||
| } | |||||
| // ReadAt reads len(b) bytes from the File starting at byte offset | |||||
| // off. It returns the number of bytes read and the error, if any. | |||||
| // ReadAt always returns a non-nil error when n < len(b). At end of | |||||
| // file, that error is io.EOF. | |||||
| func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { | |||||
| if o == nil { | |||||
| return 0, ErrInvalidArgument("Object is nil") | |||||
| } | |||||
| // Locking. | |||||
| o.mutex.Lock() | |||||
| defer o.mutex.Unlock() | |||||
| // prevErr is error which was saved in previous operation. | |||||
| if o.prevErr != nil || o.isClosed { | |||||
| return 0, o.prevErr | |||||
| } | |||||
| // Can only compare offsets to size when size has been set. | |||||
| if o.objectInfoSet { | |||||
| // If the offset is negative, or greater than or equal | |||||
| // to the object size, we return io.EOF. | |||||
| if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { | |||||
| return 0, io.EOF | |||||
| } | |||||
| } | |||||
| // Create the new readAt request. | |||||
| readAtReq := getRequest{ | |||||
| isReadOp: true, | |||||
| isReadAt: true, | |||||
| DidOffsetChange: true, // Offset always changes. | |||||
| beenRead: o.beenRead, // Set if this is the first request to try and read. | |||||
| Offset: offset, // Set the offset. | |||||
| Buffer: b, | |||||
| } | |||||
| // Alert that this is the first request. | |||||
| if !o.isStarted { | |||||
| readAtReq.isFirstReq = true | |||||
| } | |||||
| // Send and receive from the first request. | |||||
| response, err := o.doGetRequest(readAtReq) | |||||
| if err != nil && err != io.EOF { | |||||
| // Save the error. | |||||
| o.prevErr = err | |||||
| return response.Size, err | |||||
| } | |||||
| // Bytes read. | |||||
| bytesRead := int64(response.Size) | |||||
| // There is no valid objectInfo yet | |||||
| // to compare against for EOF. | |||||
| if !o.objectInfoSet { | |||||
| // Update the currentOffset. | |||||
| o.currOffset += bytesRead | |||||
| } else { | |||||
| // If this was not the first request update | |||||
| // the offsets and compare against objectInfo | |||||
| // for EOF. | |||||
| oerr := o.setOffset(bytesRead) | |||||
| if oerr != nil { | |||||
| o.prevErr = oerr | |||||
| return response.Size, oerr | |||||
| } | |||||
| } | |||||
| return response.Size, err | |||||
| } | |||||
| // Seek sets the offset for the next Read or Write to offset, | |||||
| // interpreted according to whence: 0 means relative to the | |||||
| // origin of the file, 1 means relative to the current offset, | |||||
| // and 2 means relative to the end. | |||||
| // Seek returns the new offset and an error, if any. | |||||
| // | |||||
| // Seeking to a negative offset is an error. Seeking to any positive | |||||
| // offset is legal; subsequent I/O operations succeed as long as the | |||||
| // underlying object is not closed. | |||||
| func (o *Object) Seek(offset int64, whence int) (n int64, err error) { | |||||
| if o == nil { | |||||
| return 0, ErrInvalidArgument("Object is nil") | |||||
| } | |||||
| // Locking. | |||||
| o.mutex.Lock() | |||||
| defer o.mutex.Unlock() | |||||
| if o.prevErr != nil { | |||||
| // Seeking at EOF is legal; allow only io.EOF and return any other saved error. | |||||
| if o.prevErr != io.EOF { | |||||
| return 0, o.prevErr | |||||
| } | |||||
| } | |||||
| // Negative offset is valid for whence of '2'. | |||||
| if offset < 0 && whence != 2 { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) | |||||
| } | |||||
| // This is the first request. So before anything else | |||||
| // get the ObjectInfo. | |||||
| if !o.isStarted || !o.objectInfoSet { | |||||
| // Create the new Seek request. | |||||
| seekReq := getRequest{ | |||||
| isReadOp: false, | |||||
| Offset: offset, | |||||
| isFirstReq: true, | |||||
| } | |||||
| // Send and receive from the seek request. | |||||
| _, err := o.doGetRequest(seekReq) | |||||
| if err != nil { | |||||
| // Save the error. | |||||
| o.prevErr = err | |||||
| return 0, err | |||||
| } | |||||
| } | |||||
| // Switch through whence. | |||||
| switch whence { | |||||
| default: | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) | |||||
| case 0: | |||||
| if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { | |||||
| return 0, io.EOF | |||||
| } | |||||
| o.currOffset = offset | |||||
| case 1: | |||||
| if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { | |||||
| return 0, io.EOF | |||||
| } | |||||
| o.currOffset += offset | |||||
| case 2: | |||||
| // If we don't know the object size return an error for io.SeekEnd | |||||
| if o.objectInfo.Size < 0 { | |||||
| return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") | |||||
| } | |||||
| // Seeking to positive offset is valid for whence '2', but | |||||
| // since we are backing a Reader we have reached 'EOF' if | |||||
| // offset is positive. | |||||
| if offset > 0 { | |||||
| return 0, io.EOF | |||||
| } | |||||
| // Seeking to a position before the start of the object is not allowed. | |||||
| if o.objectInfo.Size+offset < 0 { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) | |||||
| } | |||||
| o.currOffset = o.objectInfo.Size + offset | |||||
| } | |||||
| // Reset the saved error since the seek succeeded; let Read | |||||
| // and ReadAt decide what happens next. | |||||
| if o.prevErr == io.EOF { | |||||
| o.prevErr = nil | |||||
| } | |||||
| // Ask lower level to fetch again from source | |||||
| o.seekData = true | |||||
| // Return the effective offset. | |||||
| return o.currOffset, nil | |||||
| } | |||||
| // Close - closes the object. The first call succeeds and shuts down the | |||||
| // fetch routine; subsequent Close() calls return an error. | |||||
| func (o *Object) Close() (err error) { | |||||
| if o == nil { | |||||
| return ErrInvalidArgument("Object is nil") | |||||
| } | |||||
| // Locking. | |||||
| o.mutex.Lock() | |||||
| defer o.mutex.Unlock() | |||||
| // If already closed, return the saved error. | |||||
| if o.isClosed { | |||||
| return o.prevErr | |||||
| } | |||||
| // Close successfully. | |||||
| close(o.doneCh) | |||||
| // Save for future operations. | |||||
| errMsg := "Object is already closed. Bad file descriptor." | |||||
| o.prevErr = errors.New(errMsg) | |||||
| // Save here that we closed done channel successfully. | |||||
| o.isClosed = true | |||||
| return nil | |||||
| } | |||||
| // newObject instantiates a new *minio.Object*. | |||||
| // Its ObjectInfo is populated lazily by the first request (see doGetRequest). | |||||
| func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { | |||||
| return &Object{ | |||||
| mutex: &sync.Mutex{}, | |||||
| reqCh: reqCh, | |||||
| resCh: resCh, | |||||
| doneCh: doneCh, | |||||
| } | |||||
| } | |||||
| // getObject - retrieve object from Object Storage. | |||||
| // | |||||
| // Additionally this function also takes range arguments to download the specified | |||||
| // range bytes of an object. Setting offset and length = 0 will download the full object. | |||||
| // | |||||
| // For more information about the HTTP Range header, see | |||||
| // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. | |||||
| func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { | |||||
| // Validate input arguments. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return nil, ObjectInfo{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, ObjectInfo{}, err | |||||
| } | |||||
| // Execute GET on objectName. | |||||
| resp, err := c.executeMethod(ctx, "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| customHeader: opts.Header(), | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, ObjectInfo{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { | |||||
| return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Trim the surrounding double quotes from the ETag. | |||||
| md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") | |||||
| md5sum = strings.TrimSuffix(md5sum, "\"") | |||||
| // Parse the date. | |||||
| date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) | |||||
| if err != nil { | |||||
| msg := "Last-Modified time format not recognized. " + reportIssue | |||||
| return nil, ObjectInfo{}, ErrorResponse{ | |||||
| Code: "InternalError", | |||||
| Message: msg, | |||||
| RequestID: resp.Header.Get("x-amz-request-id"), | |||||
| HostID: resp.Header.Get("x-amz-id-2"), | |||||
| Region: resp.Header.Get("x-amz-bucket-region"), | |||||
| } | |||||
| } | |||||
| // Get content-type. | |||||
| contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) | |||||
| if contentType == "" { | |||||
| contentType = "application/octet-stream" | |||||
| } | |||||
| objectStat := ObjectInfo{ | |||||
| ETag: md5sum, | |||||
| Key: objectName, | |||||
| Size: resp.ContentLength, | |||||
| LastModified: date, | |||||
| ContentType: contentType, | |||||
| // Extract only the relevant header keys describing the object. | |||||
| // following function filters out a list of standard set of keys | |||||
| // which are not part of object metadata. | |||||
| Metadata: extractObjMetadata(resp.Header), | |||||
| } | |||||
| // Do not close the body here; the caller will close it. | |||||
| return resp.Body, objectStat, nil | |||||
| } | |||||
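| // A minimal sketch of the lazy-fetch semantics above, assuming a configured | |||||
| // Client 'c'; names are illustrative. Seek only records the new offset and | |||||
| // raises the seekData flag; the next Read issues a fresh ranged GET: | |||||
| // | |||||
| //   obj, err := c.GetObject("mybucket", "photo.jpg", GetObjectOptions{}) | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   defer obj.Close() | |||||
| //   if _, err = obj.Seek(1024, 0); err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   buf := make([]byte, 512) | |||||
| //   n, _ := obj.Read(buf) // fetches bytes 1024-1535 with a Range request | |||||
| //   fmt.Println("read", n, "bytes") | |||||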
| @@ -0,0 +1,128 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "fmt" | |||||
| "net/http" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| ) | |||||
| // GetObjectOptions are used to specify additional headers or options | |||||
| // during GET requests. | |||||
| type GetObjectOptions struct { | |||||
| headers map[string]string | |||||
| ServerSideEncryption encrypt.ServerSide | |||||
| } | |||||
| // StatObjectOptions are used to specify additional headers or options | |||||
| // during GET info/stat requests. | |||||
| type StatObjectOptions struct { | |||||
| GetObjectOptions | |||||
| } | |||||
| // Header returns the http.Header representation of the GET options. | |||||
| func (o GetObjectOptions) Header() http.Header { | |||||
| headers := make(http.Header, len(o.headers)) | |||||
| for k, v := range o.headers { | |||||
| headers.Set(k, v) | |||||
| } | |||||
| if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { | |||||
| o.ServerSideEncryption.Marshal(headers) | |||||
| } | |||||
| return headers | |||||
| } | |||||
| // Set adds a key value pair to the options. The | |||||
| // key-value pair will be part of the HTTP GET request | |||||
| // headers. | |||||
| func (o *GetObjectOptions) Set(key, value string) { | |||||
| if o.headers == nil { | |||||
| o.headers = make(map[string]string) | |||||
| } | |||||
| o.headers[http.CanonicalHeaderKey(key)] = value | |||||
| } | |||||
| // SetMatchETag - set match etag. | |||||
| func (o *GetObjectOptions) SetMatchETag(etag string) error { | |||||
| if etag == "" { | |||||
| return ErrInvalidArgument("ETag cannot be empty.") | |||||
| } | |||||
| o.Set("If-Match", "\""+etag+"\"") | |||||
| return nil | |||||
| } | |||||
| // SetMatchETagExcept - set match etag except. | |||||
| func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { | |||||
| if etag == "" { | |||||
| return ErrInvalidArgument("ETag cannot be empty.") | |||||
| } | |||||
| o.Set("If-None-Match", "\""+etag+"\"") | |||||
| return nil | |||||
| } | |||||
| // SetUnmodified - set unmodified time since. | |||||
| func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { | |||||
| if modTime.IsZero() { | |||||
| return ErrInvalidArgument("Modified since cannot be empty.") | |||||
| } | |||||
| o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) | |||||
| return nil | |||||
| } | |||||
| // SetModified - set modified time since. | |||||
| func (o *GetObjectOptions) SetModified(modTime time.Time) error { | |||||
| if modTime.IsZero() { | |||||
| return ErrInvalidArgument("Modified since cannot be empty.") | |||||
| } | |||||
| o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) | |||||
| return nil | |||||
| } | |||||
| // SetRange - set the start and end offset of the object to be read. | |||||
| // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. | |||||
| func (o *GetObjectOptions) SetRange(start, end int64) error { | |||||
| switch { | |||||
| case start == 0 && end < 0: | |||||
| // Read last '-end' bytes. `bytes=-N`. | |||||
| o.Set("Range", fmt.Sprintf("bytes=%d", end)) | |||||
| case 0 < start && end == 0: | |||||
| // Read everything starting from offset | |||||
| // 'start'. `bytes=N-`. | |||||
| o.Set("Range", fmt.Sprintf("bytes=%d-", start)) | |||||
| case 0 <= start && start <= end: | |||||
| // Read everything starting at 'start' till the | |||||
| // 'end'. `bytes=N-M` | |||||
| o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) | |||||
| default: | |||||
| // All other cases such as | |||||
| // bytes=-3- | |||||
| // bytes=5-3 | |||||
| // bytes=-2-4 | |||||
| // bytes=-3-0 | |||||
| // bytes=-3--2 | |||||
| // are invalid. | |||||
| return ErrInvalidArgument( | |||||
| fmt.Sprintf( | |||||
| "Invalid range specified: start=%d end=%d", | |||||
| start, end)) | |||||
| } | |||||
| return nil | |||||
| } | |||||
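| // A few illustrative calls and the Range header each produces (each call | |||||
| // overwrites the previous value): | |||||
| // | |||||
| //   var opts GetObjectOptions | |||||
| //   opts.SetRange(0, 99)  // "bytes=0-99"  - the first 100 bytes | |||||
| //   opts.SetRange(100, 0) // "bytes=100-"  - everything from offset 100 | |||||
| //   opts.SetRange(0, -10) // "bytes=-10"   - the last 10 bytes | |||||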
| @@ -0,0 +1,78 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // GetBucketPolicy - get bucket policy at a given path. | |||||
| func (c Client) GetBucketPolicy(bucketName string) (string, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return "", err | |||||
| } | |||||
| bucketPolicy, err := c.getBucketPolicy(bucketName) | |||||
| if err != nil { | |||||
| errResponse := ToErrorResponse(err) | |||||
| if errResponse.Code == "NoSuchBucketPolicy" { | |||||
| return "", nil | |||||
| } | |||||
| return "", err | |||||
| } | |||||
| return bucketPolicy, nil | |||||
| } | |||||
| // Request server for current bucket policy. | |||||
| func (c Client) getBucketPolicy(bucketName string) (string, error) { | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("policy", "") | |||||
| // Execute GET on bucket to fetch the policy. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return "", httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| policy := string(bucketPolicyBuf) | |||||
| return policy, err | |||||
| } | |||||
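| // A minimal usage sketch, assuming a configured Client 'c'; the bucket name | |||||
| // is illustrative. An empty string with a nil error means no policy is set: | |||||
| // | |||||
| //   policy, err := c.GetBucketPolicy("mybucket") | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   fmt.Println("policy JSON:", policy) | |||||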
| @@ -0,0 +1,715 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "errors" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // ListBuckets lists all buckets owned by this authenticated user. | |||||
| // | |||||
| // This call requires explicit authentication, no anonymous requests are | |||||
| // allowed for listing buckets. | |||||
| // | |||||
| // api := client.New(....) | |||||
| // for message := range api.ListBuckets() { | |||||
| // fmt.Println(message) | |||||
| // } | |||||
| // | |||||
| func (c Client) ListBuckets() ([]BucketInfo, error) { | |||||
| // Execute GET on service. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return nil, httpRespToErrorResponse(resp, "", "") | |||||
| } | |||||
| } | |||||
| listAllMyBucketsResult := listAllMyBucketsResult{} | |||||
| err = xmlDecoder(resp.Body, &listAllMyBucketsResult) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return listAllMyBucketsResult.Buckets.Bucket, nil | |||||
| } | |||||
| /// Bucket Read Operations. | |||||
| // ListObjectsV2 lists all objects matching the objectPrefix from | |||||
| // the specified bucket. If recursion is enabled it lists | |||||
| // all subdirectories and all their contents. | |||||
| // | |||||
| // Your input parameters are just bucketName, objectPrefix, recursive | |||||
| // and a done channel for proactively closing the internal go | |||||
| // routine. If you set recursive to 'true' this function | |||||
| // returns all the objects in the given bucket name and object | |||||
| // prefix. | |||||
| // | |||||
| // api := client.New(....) | |||||
| // // Create a done channel. | |||||
| // doneCh := make(chan struct{}) | |||||
| // defer close(doneCh) | |||||
| // // Recursively list all objects in 'mytestbucket' | |||||
| // recursive := true | |||||
| // for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { | |||||
| // fmt.Println(message) | |||||
| // } | |||||
| // | |||||
| func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { | |||||
| // Allocate new list objects channel. | |||||
| objectStatCh := make(chan ObjectInfo, 1) | |||||
| // Default listing is delimited at "/" | |||||
| delimiter := "/" | |||||
| if recursive { | |||||
| // If recursive we do not delimit. | |||||
| delimiter = "" | |||||
| } | |||||
| // Return object owner information by default | |||||
| fetchOwner := true | |||||
| // Validate bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| defer close(objectStatCh) | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectStatCh | |||||
| } | |||||
| // Validate incoming object prefix. | |||||
| if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | |||||
| defer close(objectStatCh) | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectStatCh | |||||
| } | |||||
| // Initiate list objects goroutine here. | |||||
| go func(objectStatCh chan<- ObjectInfo) { | |||||
| defer close(objectStatCh) | |||||
| // Save continuationToken for next request. | |||||
| var continuationToken string | |||||
| for { | |||||
| // Get the list of objects, a maximum of 1000 per request. | |||||
| result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "") | |||||
| if err != nil { | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| // If contents are available loop through and send over channel. | |||||
| for _, object := range result.Contents { | |||||
| select { | |||||
| // Send object content. | |||||
| case objectStatCh <- object: | |||||
| // If receives done from the caller, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // Send all common prefixes if any. | |||||
| // NOTE: prefixes are only present if the request is delimited. | |||||
| for _, obj := range result.CommonPrefixes { | |||||
| select { | |||||
| // Send object prefixes. | |||||
| case objectStatCh <- ObjectInfo{ | |||||
| Key: obj.Prefix, | |||||
| Size: 0, | |||||
| }: | |||||
| // If receives done from the caller, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // If continuation token present, save it for next request. | |||||
| if result.NextContinuationToken != "" { | |||||
| continuationToken = result.NextContinuationToken | |||||
| } | |||||
| // Listing ends when the result is not truncated; return right here. | |||||
| if !result.IsTruncated { | |||||
| return | |||||
| } | |||||
| } | |||||
| }(objectStatCh) | |||||
| return objectStatCh | |||||
| } | |||||
| // listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. | |||||
| // | |||||
| // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. | |||||
| // request parameters :- | |||||
| // --------- | |||||
| // ?continuation-token - Used to continue iterating over a set of objects | |||||
| // ?delimiter - A delimiter is a character you use to group keys. | |||||
| // ?prefix - Limits the response to keys that begin with the specified prefix. | |||||
| // ?max-keys - Sets the maximum number of keys returned in the response body. | |||||
| // ?start-after - Specifies the key to start after when listing objects in a bucket. | |||||
| func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { | |||||
| // Validate bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ListBucketV2Result{}, err | |||||
| } | |||||
| // Validate object prefix. | |||||
| if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | |||||
| return ListBucketV2Result{}, err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| // Always set list-type in ListObjects V2 | |||||
| urlValues.Set("list-type", "2") | |||||
| // Set object prefix; an empty prefix value is okay. | |||||
| urlValues.Set("prefix", objectPrefix) | |||||
| // Set delimiter; an empty delimiter value is okay. | |||||
| urlValues.Set("delimiter", delimiter) | |||||
| // Set continuation token | |||||
| if continuationToken != "" { | |||||
| urlValues.Set("continuation-token", continuationToken) | |||||
| } | |||||
| // Fetch owner when listing | |||||
| if fetchOwner { | |||||
| urlValues.Set("fetch-owner", "true") | |||||
| } | |||||
| // maxkeys should default to 1000 or less. | |||||
| if maxkeys == 0 || maxkeys > 1000 { | |||||
| maxkeys = 1000 | |||||
| } | |||||
| // Set max keys. | |||||
| urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) | |||||
| // Set start-after | |||||
| if startAfter != "" { | |||||
| urlValues.Set("start-after", startAfter) | |||||
| } | |||||
| // Execute GET on bucket to list objects. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ListBucketV2Result{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| // Decode the ListObjectsV2 XML response. | |||||
| listBucketResult := ListBucketV2Result{} | |||||
| if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { | |||||
| return listBucketResult, err | |||||
| } | |||||
| // This is an additional verification check to make | |||||
| // sure proper responses are received. | |||||
| if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { | |||||
| return listBucketResult, errors.New("Truncated response should have continuation token set") | |||||
| } | |||||
| // Success. | |||||
| return listBucketResult, nil | |||||
| } | |||||
| // ListObjects - (List Objects) - List some objects or all recursively. | |||||
| // | |||||
| // ListObjects lists all objects matching the objectPrefix from | |||||
| // the specified bucket. If recursion is enabled it lists | |||||
| // all subdirectories and all their contents. | |||||
| // | |||||
| // The input parameters are bucketName, objectPrefix, recursive | |||||
| // and a done channel for proactively closing the internal | |||||
| // goroutine. If recursive is set to 'true' this function returns | |||||
| // all the objects under the given bucket name and object | |||||
| // prefix. | |||||
| // | |||||
| // api := client.New(....) | |||||
| // // Create a done channel. | |||||
| // doneCh := make(chan struct{}) | |||||
| // defer close(doneCh) | |||||
| // // Recursively list all objects in 'mytestbucket' | |||||
| // recursive := true | |||||
| // for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { | |||||
| // fmt.Println(message) | |||||
| // } | |||||
| // | |||||
| func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { | |||||
| // Allocate new list objects channel. | |||||
| objectStatCh := make(chan ObjectInfo, 1) | |||||
| // Default listing is delimited at "/" | |||||
| delimiter := "/" | |||||
| if recursive { | |||||
| // If recursive we do not delimit. | |||||
| delimiter = "" | |||||
| } | |||||
| // Validate bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| defer close(objectStatCh) | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectStatCh | |||||
| } | |||||
| // Validate incoming object prefix. | |||||
| if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | |||||
| defer close(objectStatCh) | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectStatCh | |||||
| } | |||||
| // Initiate list objects goroutine here. | |||||
| go func(objectStatCh chan<- ObjectInfo) { | |||||
| defer close(objectStatCh) | |||||
| // Save marker for next request. | |||||
| var marker string | |||||
| for { | |||||
| // Get a list of objects, a maximum of 1000 per request. | |||||
| result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) | |||||
| if err != nil { | |||||
| objectStatCh <- ObjectInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| // If contents are available loop through and send over channel. | |||||
| for _, object := range result.Contents { | |||||
| // Save the marker. | |||||
| marker = object.Key | |||||
| select { | |||||
| // Send object content. | |||||
| case objectStatCh <- object: | |||||
| // If the caller signals done, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // Send all common prefixes if any. | |||||
| // NOTE: prefixes are only present if the request is delimited. | |||||
| for _, obj := range result.CommonPrefixes { | |||||
| object := ObjectInfo{} | |||||
| object.Key = obj.Prefix | |||||
| object.Size = 0 | |||||
| select { | |||||
| // Send object prefixes. | |||||
| case objectStatCh <- object: | |||||
| // If the caller signals done, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // If next marker present, save it for next request. | |||||
| if result.NextMarker != "" { | |||||
| marker = result.NextMarker | |||||
| } | |||||
| // Listing ends when the result is not truncated; return right here. | |||||
| if !result.IsTruncated { | |||||
| return | |||||
| } | |||||
| } | |||||
| }(objectStatCh) | |||||
| return objectStatCh | |||||
| } | |||||
| // listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket. | |||||
| // | |||||
| // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. | |||||
| // request parameters :- | |||||
| // --------- | |||||
| // ?marker - Specifies the key to start with when listing objects in a bucket. | |||||
| // ?delimiter - A delimiter is a character you use to group keys. | |||||
| // ?prefix - Limits the response to keys that begin with the specified prefix. | |||||
| // ?max-keys - Sets the maximum number of keys returned in the response body. | |||||
| func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { | |||||
| // Validate bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ListBucketResult{}, err | |||||
| } | |||||
| // Validate object prefix. | |||||
| if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | |||||
| return ListBucketResult{}, err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| // Set object prefix; an empty prefix value is okay. | |||||
| urlValues.Set("prefix", objectPrefix) | |||||
| // Set delimiter; an empty delimiter value is okay. | |||||
| urlValues.Set("delimiter", delimiter) | |||||
| // Set object marker. | |||||
| if objectMarker != "" { | |||||
| urlValues.Set("marker", objectMarker) | |||||
| } | |||||
| // maxkeys should default to 1000 or less. | |||||
| if maxkeys == 0 || maxkeys > 1000 { | |||||
| maxkeys = 1000 | |||||
| } | |||||
| // Set max keys. | |||||
| urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) | |||||
| // Execute GET on bucket to list objects. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ListBucketResult{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| // Decode the ListObjects XML response. | |||||
| listBucketResult := ListBucketResult{} | |||||
| err = xmlDecoder(resp.Body, &listBucketResult) | |||||
| if err != nil { | |||||
| return listBucketResult, err | |||||
| } | |||||
| return listBucketResult, nil | |||||
| } | |||||
| // ListIncompleteUploads - List incompletely uploaded multipart objects. | |||||
| // | |||||
| // ListIncompleteUploads lists all incomplete uploads matching the | |||||
| // objectPrefix from the specified bucket. If recursion is enabled | |||||
| // it lists all subdirectories and all their contents. | |||||
| // | |||||
| // The input parameters are bucketName, objectPrefix, recursive | |||||
| // and a done channel to proactively close the internal goroutine. | |||||
| // If recursive is set to 'true' this function returns all | |||||
| // the multipart objects under the given bucket name. | |||||
| // | |||||
| // api := client.New(....) | |||||
| // // Create a done channel. | |||||
| // doneCh := make(chan struct{}) | |||||
| // defer close(doneCh) | |||||
| // // Recursively list all incomplete uploads in 'mytestbucket' | |||||
| // recursive := true | |||||
| // for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) { | |||||
| // fmt.Println(message) | |||||
| // } | |||||
| // | |||||
| func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { | |||||
| // Turn on size aggregation of individual parts. | |||||
| isAggregateSize := true | |||||
| return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) | |||||
| } | |||||
| // listIncompleteUploads lists all incomplete uploads. | |||||
| func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { | |||||
| // Allocate channel for multipart uploads. | |||||
| objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) | |||||
| // Delimiter is set to "/" by default. | |||||
| delimiter := "/" | |||||
| if recursive { | |||||
| // If recursive do not delimit. | |||||
| delimiter = "" | |||||
| } | |||||
| // Validate bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| defer close(objectMultipartStatCh) | |||||
| objectMultipartStatCh <- ObjectMultipartInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectMultipartStatCh | |||||
| } | |||||
| // Validate incoming object prefix. | |||||
| if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { | |||||
| defer close(objectMultipartStatCh) | |||||
| objectMultipartStatCh <- ObjectMultipartInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return objectMultipartStatCh | |||||
| } | |||||
| go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { | |||||
| defer close(objectMultipartStatCh) | |||||
| // object and upload ID marker for future requests. | |||||
| var objectMarker string | |||||
| var uploadIDMarker string | |||||
| for { | |||||
| // list all multipart uploads. | |||||
| result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) | |||||
| if err != nil { | |||||
| objectMultipartStatCh <- ObjectMultipartInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| // Save objectMarker and uploadIDMarker for next request. | |||||
| objectMarker = result.NextKeyMarker | |||||
| uploadIDMarker = result.NextUploadIDMarker | |||||
| // Send all multipart uploads. | |||||
| for _, obj := range result.Uploads { | |||||
| // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. | |||||
| if aggregateSize { | |||||
| // Get total multipart size. | |||||
| obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) | |||||
| if err != nil { | |||||
| objectMultipartStatCh <- ObjectMultipartInfo{ | |||||
| Err: err, | |||||
| } | |||||
| continue | |||||
| } | |||||
| } | |||||
| select { | |||||
| // Send individual uploads here. | |||||
| case objectMultipartStatCh <- obj: | |||||
| // If the done channel is closed, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // Send all common prefixes if any. | |||||
| // NOTE: prefixes are only present if the request is delimited. | |||||
| for _, obj := range result.CommonPrefixes { | |||||
| object := ObjectMultipartInfo{} | |||||
| object.Key = obj.Prefix | |||||
| object.Size = 0 | |||||
| select { | |||||
| // Send delimited prefixes here. | |||||
| case objectMultipartStatCh <- object: | |||||
| // If the done channel is closed, return here. | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // Listing ends when the result is not truncated; return right here. | |||||
| if !result.IsTruncated { | |||||
| return | |||||
| } | |||||
| } | |||||
| }(objectMultipartStatCh) | |||||
| // return. | |||||
| return objectMultipartStatCh | |||||
| } | |||||
| // listMultipartUploads - (List Multipart Uploads). | |||||
| // - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. | |||||
| // | |||||
| // You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. | |||||
| // request parameters. :- | |||||
| // --------- | |||||
| // ?key-marker - Specifies the multipart upload after which listing should begin. | |||||
| // ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. | |||||
| // ?delimiter - A delimiter is a character you use to group keys. | |||||
| // ?prefix - Limits the response to keys that begin with the specified prefix. | |||||
| // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. | |||||
| func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { | |||||
| // Get resources properly escaped and lined up before using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| // Set uploads. | |||||
| urlValues.Set("uploads", "") | |||||
| // Set object key marker. | |||||
| if keyMarker != "" { | |||||
| urlValues.Set("key-marker", keyMarker) | |||||
| } | |||||
| // Set upload id marker. | |||||
| if uploadIDMarker != "" { | |||||
| urlValues.Set("upload-id-marker", uploadIDMarker) | |||||
| } | |||||
| // Set object prefix; an empty prefix value is okay. | |||||
| urlValues.Set("prefix", prefix) | |||||
| // Set delimiter; an empty delimiter value is okay. | |||||
| urlValues.Set("delimiter", delimiter) | |||||
| // maxUploads should be 1000 or less. | |||||
| if maxUploads == 0 || maxUploads > 1000 { | |||||
| maxUploads = 1000 | |||||
| } | |||||
| // Set max-uploads. | |||||
| urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) | |||||
| // Execute GET on bucketName to list multipart uploads. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ListMultipartUploadsResult{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| // Decode response body. | |||||
| listMultipartUploadsResult := ListMultipartUploadsResult{} | |||||
| err = xmlDecoder(resp.Body, &listMultipartUploadsResult) | |||||
| if err != nil { | |||||
| return listMultipartUploadsResult, err | |||||
| } | |||||
| return listMultipartUploadsResult, nil | |||||
| } | |||||
| // listObjectParts list all object parts recursively. | |||||
| func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { | |||||
| // Part number marker for the next batch of request. | |||||
| var nextPartNumberMarker int | |||||
| partsInfo = make(map[int]ObjectPart) | |||||
| for { | |||||
| // Get a list of uploaded parts, a maximum of 1000 per request. | |||||
| listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Append to parts info. | |||||
| for _, part := range listObjPartsResult.ObjectParts { | |||||
| // Trim off the odd double quotes from ETag in the beginning and end. | |||||
| part.ETag = strings.TrimPrefix(part.ETag, "\"") | |||||
| part.ETag = strings.TrimSuffix(part.ETag, "\"") | |||||
| partsInfo[part.PartNumber] = part | |||||
| } | |||||
| // Keep part number marker, for the next iteration. | |||||
| nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker | |||||
| // Listing ends when the result is not truncated; return right here. | |||||
| if !listObjPartsResult.IsTruncated { | |||||
| break | |||||
| } | |||||
| } | |||||
| // Return all the parts. | |||||
| return partsInfo, nil | |||||
| } | |||||
| // findUploadIDs lists all incomplete uploads and finds the uploadIDs matching the given object name. | |||||
| func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) { | |||||
| var uploadIDs []string | |||||
| // Make list incomplete uploads recursive. | |||||
| isRecursive := true | |||||
| // Turn off size aggregation of individual parts, in this request. | |||||
| isAggregateSize := false | |||||
| // Create done channel to cleanup the routine. | |||||
| doneCh := make(chan struct{}) | |||||
| defer close(doneCh) | |||||
| // List all incomplete uploads. | |||||
| for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { | |||||
| if mpUpload.Err != nil { | |||||
| return nil, mpUpload.Err | |||||
| } | |||||
| if objectName == mpUpload.Key { | |||||
| uploadIDs = append(uploadIDs, mpUpload.UploadID) | |||||
| } | |||||
| } | |||||
| // Return all the matching upload ids. | |||||
| return uploadIDs, nil | |||||
| } | |||||
| // getTotalMultipartSize - calculate the total uploaded size for a given multipart object. | |||||
| func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) { | |||||
| // Iterate over all parts and aggregate the size. | |||||
| partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| for _, partInfo := range partsInfo { | |||||
| size += partInfo.Size | |||||
| } | |||||
| return size, nil | |||||
| } | |||||
| // listObjectPartsQuery (List Parts query) | |||||
| // - lists some or all (up to 1000) parts that have been uploaded | |||||
| // for a specific multipart upload | |||||
| // | |||||
| // You can use the request parameters as selection criteria to return | |||||
| // a subset of the uploads in a bucket, request parameters :- | |||||
| // --------- | |||||
| // ?part-number-marker - Specifies the part after which listing should | |||||
| // begin. | |||||
| // ?max-parts - Maximum parts to be listed per request. | |||||
| func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { | |||||
| // Get resources properly escaped and lined up before using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| // Set part number marker. | |||||
| urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) | |||||
| // Set upload id. | |||||
| urlValues.Set("uploadId", uploadID) | |||||
| // maxParts should be 1000 or less. | |||||
| if maxParts == 0 || maxParts > 1000 { | |||||
| maxParts = 1000 | |||||
| } | |||||
| // Set max parts. | |||||
| urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) | |||||
| // Execute GET on objectName to get list of parts. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ListObjectPartsResult{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Decode list object parts XML. | |||||
| listObjectPartsResult := ListObjectPartsResult{} | |||||
| err = xmlDecoder(resp.Body, &listObjectPartsResult) | |||||
| if err != nil { | |||||
| return listObjectPartsResult, err | |||||
| } | |||||
| return listObjectPartsResult, nil | |||||
| } | |||||
| @@ -0,0 +1,228 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bufio" | |||||
| "context" | |||||
| "encoding/json" | |||||
| "io" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // GetBucketNotification - get bucket notification at a given path. | |||||
| func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return BucketNotification{}, err | |||||
| } | |||||
| notification, err := c.getBucketNotification(bucketName) | |||||
| if err != nil { | |||||
| return BucketNotification{}, err | |||||
| } | |||||
| return notification, nil | |||||
| } | |||||
| // Request server for notification rules. | |||||
| func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) { | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("notification", "") | |||||
| // Execute GET on bucket to list objects. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return BucketNotification{}, err | |||||
| } | |||||
| return processBucketNotificationResponse(bucketName, resp) | |||||
| } | |||||
| // processes the GetNotification http response from the server. | |||||
| func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| errResponse := httpRespToErrorResponse(resp, bucketName, "") | |||||
| return BucketNotification{}, errResponse | |||||
| } | |||||
| var bucketNotification BucketNotification | |||||
| err := xmlDecoder(resp.Body, &bucketNotification) | |||||
| if err != nil { | |||||
| return BucketNotification{}, err | |||||
| } | |||||
| return bucketNotification, nil | |||||
| } | |||||
| // identity represents the user id; this is a compliance field. | |||||
| type identity struct { | |||||
| PrincipalID string `json:"principalId"` | |||||
| } | |||||
| // Notification event bucket metadata. | |||||
| type bucketMeta struct { | |||||
| Name string `json:"name"` | |||||
| OwnerIdentity identity `json:"ownerIdentity"` | |||||
| ARN string `json:"arn"` | |||||
| } | |||||
| // Notification event object metadata. | |||||
| type objectMeta struct { | |||||
| Key string `json:"key"` | |||||
| Size int64 `json:"size,omitempty"` | |||||
| ETag string `json:"eTag,omitempty"` | |||||
| VersionID string `json:"versionId,omitempty"` | |||||
| Sequencer string `json:"sequencer"` | |||||
| } | |||||
| // Notification event server specific metadata. | |||||
| type eventMeta struct { | |||||
| SchemaVersion string `json:"s3SchemaVersion"` | |||||
| ConfigurationID string `json:"configurationId"` | |||||
| Bucket bucketMeta `json:"bucket"` | |||||
| Object objectMeta `json:"object"` | |||||
| } | |||||
| // sourceInfo represents information on the client that | |||||
| // triggered the event notification. | |||||
| type sourceInfo struct { | |||||
| Host string `json:"host"` | |||||
| Port string `json:"port"` | |||||
| UserAgent string `json:"userAgent"` | |||||
| } | |||||
| // NotificationEvent represents an Amazon S3 bucket notification event. | |||||
| type NotificationEvent struct { | |||||
| EventVersion string `json:"eventVersion"` | |||||
| EventSource string `json:"eventSource"` | |||||
| AwsRegion string `json:"awsRegion"` | |||||
| EventTime string `json:"eventTime"` | |||||
| EventName string `json:"eventName"` | |||||
| UserIdentity identity `json:"userIdentity"` | |||||
| RequestParameters map[string]string `json:"requestParameters"` | |||||
| ResponseElements map[string]string `json:"responseElements"` | |||||
| S3 eventMeta `json:"s3"` | |||||
| Source sourceInfo `json:"source"` | |||||
| } | |||||
| // NotificationInfo - represents the collection of notification events; it | |||||
| // also reports any errors encountered while listening for bucket notifications. | |||||
| type NotificationInfo struct { | |||||
| Records []NotificationEvent | |||||
| Err error | |||||
| } | |||||
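| // An incoming record decodes from JSON shaped by the structs above, roughly | |||||
| // as follows (illustrative values only): | |||||
| // | |||||
| //   {"Records":[{"eventVersion":"2.0","eventSource":"minio:s3", | |||||
| //     "awsRegion":"us-east-1","eventName":"s3:ObjectCreated:Put", | |||||
| //     "s3":{"bucket":{"name":"mybucket"},"object":{"key":"photo.jpg","size":1024}}}]} | |||||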
| // ListenBucketNotification - listen on bucket notifications. | |||||
| func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo { | |||||
| notificationInfoCh := make(chan NotificationInfo, 1) | |||||
| // Start a goroutine to read the notification stream line by line. | |||||
| go func(notificationInfoCh chan<- NotificationInfo) { | |||||
| defer close(notificationInfoCh) | |||||
| // Validate the bucket name. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| notificationInfoCh <- NotificationInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| // Listening for bucket notifications is specific to minio server endpoints; | |||||
| // reject Amazon S3 and Google Cloud Storage endpoints here. | |||||
| if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { | |||||
| notificationInfoCh <- NotificationInfo{ | |||||
| Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), | |||||
| } | |||||
| return | |||||
| } | |||||
| // Continuously run and listen on bucket notification. | |||||
| // Create a done channel to control the retry goroutine. | |||||
| retryDoneCh := make(chan struct{}, 1) | |||||
| // Indicate to our routine to exit cleanly upon return. | |||||
| defer close(retryDoneCh) | |||||
| // Wait on the jitter retry loop. | |||||
| for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("prefix", prefix) | |||||
| urlValues.Set("suffix", suffix) | |||||
| urlValues["events"] = events | |||||
| // Execute GET on bucket to list objects. | |||||
| resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| if err != nil { | |||||
| notificationInfoCh <- NotificationInfo{ | |||||
| Err: err, | |||||
| } | |||||
| return | |||||
| } | |||||
| // Validate http response, upon error return quickly. | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| errResponse := httpRespToErrorResponse(resp, bucketName, "") | |||||
| notificationInfoCh <- NotificationInfo{ | |||||
| Err: errResponse, | |||||
| } | |||||
| return | |||||
| } | |||||
| // Initialize a new bufio scanner, to read line by line. | |||||
| bio := bufio.NewScanner(resp.Body) | |||||
| // Close the response body. | |||||
| defer resp.Body.Close() | |||||
| // Unmarshal each line; lines that fail to decode are skipped. | |||||
| for bio.Scan() { | |||||
| var notificationInfo NotificationInfo | |||||
| if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { | |||||
| continue | |||||
| } | |||||
| // Send notificationInfo | |||||
| select { | |||||
| case notificationInfoCh <- notificationInfo: | |||||
| case <-doneCh: | |||||
| return | |||||
| } | |||||
| } | |||||
| // Look for any underlying errors. | |||||
| if err = bio.Err(); err != nil { | |||||
| // For an unexpected connection drop from server, we close the body | |||||
| // and re-connect. | |||||
| if err == io.ErrUnexpectedEOF { | |||||
| resp.Body.Close() | |||||
| } | |||||
| } | |||||
| } | |||||
| }(notificationInfoCh) | |||||
| // Returns the notification info channel, for caller to start reading from. | |||||
| return notificationInfoCh | |||||
| } | |||||
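| // Usage sketch (illustrative, assuming a configured Client named 'api' backed | |||||
| // by a minio server endpoint; event names are standard S3 event strings): | |||||
| // | |||||
| //   doneCh := make(chan struct{}) | |||||
| //   defer close(doneCh) | |||||
| //   events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"} | |||||
| //   for info := range api.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) { | |||||
| //       if info.Err != nil { | |||||
| //           log.Fatalln(info.Err) | |||||
| //       } | |||||
| //       for _, event := range info.Records { | |||||
| //           fmt.Println(event.EventName, event.S3.Object.Key) | |||||
| //       } | |||||
| //   } | |||||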
| @@ -0,0 +1,215 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "errors" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3signer" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // presignURL - Returns a presigned URL for an input 'method'. | |||||
| // Expires maximum is 7 days, i.e. 604800 seconds, and minimum is 1 second. | |||||
| func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | |||||
| // Input validation. | |||||
| if method == "" { | |||||
| return nil, ErrInvalidArgument("method cannot be empty.") | |||||
| } | |||||
| if err = s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if err = isValidExpiry(expires); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Convert expires into seconds. | |||||
| expireSeconds := int64(expires / time.Second) | |||||
| reqMetadata := requestMetadata{ | |||||
| presignURL: true, | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| expires: expireSeconds, | |||||
| queryValues: reqParams, | |||||
| } | |||||
| // Instantiate a new request. | |||||
| // Since expires is set newRequest will presign the request. | |||||
| var req *http.Request | |||||
| if req, err = c.newRequest(method, reqMetadata); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return req.URL, nil | |||||
| } | |||||
| // PresignedGetObject - Returns a presigned URL to access an object's | |||||
| // data without credentials. The URL can have a maximum expiry of | |||||
| // up to 7 days and a minimum of 1 second. Additionally you can override | |||||
| // a set of response headers using the query parameters. | |||||
| func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return c.presignURL("GET", bucketName, objectName, expires, reqParams) | |||||
| } | |||||
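| // Usage sketch (illustrative, assuming a configured Client named 'api'; the | |||||
| // reqParams below override standard S3 response headers): | |||||
| // | |||||
| //   reqParams := make(url.Values) | |||||
| //   reqParams.Set("response-content-disposition", "attachment; filename=\"photo.jpg\"") | |||||
| //   presignedURL, err := api.PresignedGetObject("mybucket", "photo.jpg", time.Hour, reqParams) | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   fmt.Println(presignedURL) | |||||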
| // PresignedHeadObject - Returns a presigned URL to access object | |||||
| // metadata without credentials. The URL can have a maximum expiry of | |||||
| // up to 7 days and a minimum of 1 second. Additionally you can override | |||||
| // a set of response headers using the query parameters. | |||||
| func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return c.presignURL("HEAD", bucketName, objectName, expires, reqParams) | |||||
| } | |||||
| // PresignedPutObject - Returns a presigned URL to upload an object | |||||
| // without credentials. The URL can have a maximum expiry of up to 7 days | |||||
| // and a minimum of 1 second. | |||||
| func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return c.presignURL("PUT", bucketName, objectName, expires, nil) | |||||
| } | |||||
| // Presign - returns a presigned URL for any http method of your choice | |||||
| // along with custom request params. The URL can have a maximum expiry of | |||||
| // up to 7 days and a minimum of 1 second. | |||||
| func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { | |||||
| return c.presignURL(method, bucketName, objectName, expires, reqParams) | |||||
| } | |||||
| // PresignedPostPolicy - Returns a POST URL and form data to upload an object. | |||||
| func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) { | |||||
| // Validate input arguments. | |||||
| if p.expiration.IsZero() { | |||||
| return nil, nil, errors.New("Expiration time must be specified") | |||||
| } | |||||
| if _, ok := p.formData["key"]; !ok { | |||||
| return nil, nil, errors.New("object key must be specified") | |||||
| } | |||||
| if _, ok := p.formData["bucket"]; !ok { | |||||
| return nil, nil, errors.New("bucket name must be specified") | |||||
| } | |||||
| bucketName := p.formData["bucket"] | |||||
| // Fetch the bucket location. | |||||
| location, err := c.getBucketLocation(bucketName) | |||||
| if err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) | |||||
| u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) | |||||
| if err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| // Get credentials from the configured credentials provider. | |||||
| credValues, err := c.credsProvider.Get() | |||||
| if err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| var ( | |||||
| signerType = credValues.SignerType | |||||
| sessionToken = credValues.SessionToken | |||||
| accessKeyID = credValues.AccessKeyID | |||||
| secretAccessKey = credValues.SecretAccessKey | |||||
| ) | |||||
| if signerType.IsAnonymous() { | |||||
| return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") | |||||
| } | |||||
| // Keep time. | |||||
| t := time.Now().UTC() | |||||
| // For signature version '2' handle here. | |||||
| if signerType.IsV2() { | |||||
| policyBase64 := p.base64() | |||||
| p.formData["policy"] = policyBase64 | |||||
| // For Google endpoint set this value to be 'GoogleAccessId'. | |||||
| if s3utils.IsGoogleEndpoint(*c.endpointURL) { | |||||
| p.formData["GoogleAccessId"] = accessKeyID | |||||
| } else { | |||||
| // For all other endpoints set this value to be 'AWSAccessKeyId'. | |||||
| p.formData["AWSAccessKeyId"] = accessKeyID | |||||
| } | |||||
| // Sign the policy. | |||||
| p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) | |||||
| return u, p.formData, nil | |||||
| } | |||||
| // Add date policy. | |||||
| if err = p.addNewPolicy(policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$x-amz-date", | |||||
| value: t.Format(iso8601DateFormat), | |||||
| }); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| // Add algorithm policy. | |||||
| if err = p.addNewPolicy(policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$x-amz-algorithm", | |||||
| value: signV4Algorithm, | |||||
| }); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| // Add a credential policy. | |||||
| credential := s3signer.GetCredential(accessKeyID, location, t) | |||||
| if err = p.addNewPolicy(policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$x-amz-credential", | |||||
| value: credential, | |||||
| }); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| if sessionToken != "" { | |||||
| if err = p.addNewPolicy(policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$x-amz-security-token", | |||||
| value: sessionToken, | |||||
| }); err != nil { | |||||
| return nil, nil, err | |||||
| } | |||||
| } | |||||
| // Get base64 encoded policy. | |||||
| policyBase64 := p.base64() | |||||
| // Fill in the form data. | |||||
| p.formData["policy"] = policyBase64 | |||||
| p.formData["x-amz-algorithm"] = signV4Algorithm | |||||
| p.formData["x-amz-credential"] = credential | |||||
| p.formData["x-amz-date"] = t.Format(iso8601DateFormat) | |||||
| if sessionToken != "" { | |||||
| p.formData["x-amz-security-token"] = sessionToken | |||||
| } | |||||
| p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) | |||||
| return u, p.formData, nil | |||||
| } | |||||
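| // Usage sketch (illustrative, assuming a configured Client named 'api'; the | |||||
| // PostPolicy setters come from this library's post-policy helpers): | |||||
| // | |||||
| //   policy := minio.NewPostPolicy() | |||||
| //   policy.SetBucket("mybucket") | |||||
| //   policy.SetKey("photo.jpg") | |||||
| //   policy.SetExpires(time.Now().UTC().Add(10 * time.Minute)) | |||||
| //   url, formData, err := api.PresignedPostPolicy(policy) | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   // POST formData plus the file to 'url' as multipart/form-data. | |||||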
| @@ -0,0 +1,306 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "encoding/xml" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| /// Bucket operations | |||||
| // MakeBucket creates a new bucket with bucketName. | |||||
| // | |||||
| // Location is an optional argument; by default all buckets are | |||||
| // created in the US Standard Region. | |||||
| // | |||||
| // For more supported Amazon S3 regions - http://docs.aws.amazon.com/general/latest/gr/rande.html | |||||
| // For more supported Google Cloud Storage regions - https://cloud.google.com/storage/docs/bucket-locations | |||||
| func (c Client) MakeBucket(bucketName string, location string) (err error) { | |||||
| defer func() { | |||||
| // Save the location into cache on a successful makeBucket response. | |||||
| if err == nil { | |||||
| c.bucketLocCache.Set(bucketName, location) | |||||
| } | |||||
| }() | |||||
| // Validate the input arguments. | |||||
| if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // If location is empty, treat it as the default region 'us-east-1'. | |||||
| if location == "" { | |||||
| location = "us-east-1" | |||||
| // For custom-region clients, default | |||||
| // to the custom region instead of 'us-east-1'. | |||||
| if c.region != "" { | |||||
| location = c.region | |||||
| } | |||||
| } | |||||
| // PUT bucket request metadata. | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| bucketLocation: location, | |||||
| } | |||||
| // If location is not 'us-east-1' create bucket location config. | |||||
| if location != "us-east-1" && location != "" { | |||||
| createBucketConfig := createBucketConfiguration{} | |||||
| createBucketConfig.Location = location | |||||
| var createBucketConfigBytes []byte | |||||
| createBucketConfigBytes, err = xml.Marshal(createBucketConfig) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) | |||||
| reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) | |||||
| reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) | |||||
| reqMetadata.contentLength = int64(len(createBucketConfigBytes)) | |||||
| } | |||||
| // Execute PUT to create a new bucket. | |||||
| resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| // Success. | |||||
| return nil | |||||
| } | |||||
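| // Usage sketch (illustrative, assuming a configured Client named 'api'): | |||||
| // | |||||
| //   if err := api.MakeBucket("mybucket", "us-east-1"); err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||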
| // SetBucketPolicy set the access permissions on an existing bucket. | |||||
| func (c Client) SetBucketPolicy(bucketName, policy string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // If policy is empty then delete the bucket policy. | |||||
| if policy == "" { | |||||
| return c.removeBucketPolicy(bucketName) | |||||
| } | |||||
| // Save the updated policies. | |||||
| return c.putBucketPolicy(bucketName, policy) | |||||
| } | |||||
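| // Usage sketch (illustrative, assuming a configured Client named 'api'; the | |||||
| // policy argument is a raw JSON policy document, here a read-only example): | |||||
| // | |||||
| //   policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow", | |||||
| //     "Principal":{"AWS":["*"]},"Action":["s3:GetObject"], | |||||
| //     "Resource":["arn:aws:s3:::mybucket/*"]}]}` | |||||
| //   if err := api.SetBucketPolicy("mybucket", policy); err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||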
| // Saves a new bucket policy. | |||||
| func (c Client) putBucketPolicy(bucketName, policy string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("policy", "") | |||||
| // Content-length is mandatory for put policy request | |||||
| policyReader := strings.NewReader(policy) | |||||
| b, err := ioutil.ReadAll(policyReader) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentBody: policyReader, | |||||
| contentLength: int64(len(b)), | |||||
| } | |||||
| // Execute PUT to upload a new bucket policy. | |||||
| resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusNoContent { | |||||
| return httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Removes all policies on a bucket. | |||||
| func (c Client) removeBucketPolicy(bucketName string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("policy", "") | |||||
| // Execute DELETE on objectName. | |||||
| resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // SetBucketLifecycle set the lifecycle on an existing bucket. | |||||
| func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // If lifecycle is empty then delete it. | |||||
| if lifecycle == "" { | |||||
| return c.removeBucketLifecycle(bucketName) | |||||
| } | |||||
| // Save the updated lifecycle. | |||||
| return c.putBucketLifecycle(bucketName, lifecycle) | |||||
| } | |||||
| // Saves a new bucket lifecycle. | |||||
| func (c Client) putBucketLifecycle(bucketName, lifecycle string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("lifecycle", "") | |||||
| // Content-length is mandatory for put lifecycle request | |||||
| lifecycleReader := strings.NewReader(lifecycle) | |||||
| b, err := ioutil.ReadAll(lifecycleReader) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentBody: lifecycleReader, | |||||
| contentLength: int64(len(b)), | |||||
| contentMD5Base64: sumMD5Base64(b), | |||||
| } | |||||
| // Execute PUT to upload a new bucket lifecycle. | |||||
| resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // Remove lifecycle from a bucket. | |||||
| func (c Client) removeBucketLifecycle(bucketName string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("lifecycle", "") | |||||
| // Execute DELETE on objectName. | |||||
| resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // SetBucketNotification saves a new bucket notification. | |||||
| func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Get resources properly escaped and lined up before | |||||
| // using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("notification", "") | |||||
| notifBytes, err := xml.Marshal(bucketNotification) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| notifBuffer := bytes.NewReader(notifBytes) | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentBody: notifBuffer, | |||||
| contentLength: int64(len(notifBytes)), | |||||
| contentMD5Base64: sumMD5Base64(notifBytes), | |||||
| contentSHA256Hex: sum256Hex(notifBytes), | |||||
| } | |||||
| // Execute PUT to upload a new bucket notification. | |||||
| resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // RemoveAllBucketNotification - Removes the bucket notification, clearing all previously specified config. | |||||
| func (c Client) RemoveAllBucketNotification(bucketName string) error { | |||||
| return c.SetBucketNotification(bucketName, BucketNotification{}) | |||||
| } | |||||
| @@ -0,0 +1,111 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io" | |||||
| "math" | |||||
| "os" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // Verify if reader is *minio.Object | |||||
| func isObject(reader io.Reader) (ok bool) { | |||||
| _, ok = reader.(*Object) | |||||
| return | |||||
| } | |||||
| // Verify if reader is a generic ReaderAt | |||||
| func isReadAt(reader io.Reader) (ok bool) { | |||||
| _, ok = reader.(io.ReaderAt) | |||||
| if ok { | |||||
| var v *os.File | |||||
| v, ok = reader.(*os.File) | |||||
| if ok { | |||||
| // Stdin, Stdout and Stderr all have the *os.File type, | |||||
| // which happens to also be io.ReaderAt compatible, so | |||||
| // we need special conditions for them to | |||||
| // be ignored by this function. | |||||
| for _, f := range []string{ | |||||
| "/dev/stdin", | |||||
| "/dev/stdout", | |||||
| "/dev/stderr", | |||||
| } { | |||||
| if f == v.Name() { | |||||
| ok = false | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| return | |||||
| } | |||||
| // optimalPartInfo - calculate the optimal part info for a given | |||||
| // object size. | |||||
| // | |||||
| // NOTE: The assumption here is that for any object uploaded to any S3-compatible | |||||
| // object storage, the following parameters are treated as constants. | |||||
| // | |||||
| // maxPartsCount - 10000 | |||||
| // minPartSize - 64MiB | |||||
| // maxMultipartPutObjectSize - 5TiB | |||||
| // | |||||
| func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { | |||||
| // If object size is '-1', set it to the 5TiB maximum. | |||||
| if objectSize == -1 { | |||||
| objectSize = maxMultipartPutObjectSize | |||||
| } | |||||
| // object size is larger than supported maximum. | |||||
| if objectSize > maxMultipartPutObjectSize { | |||||
| err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") | |||||
| return | |||||
| } | |||||
| // Use floats for part size for all calculations to avoid | |||||
| // overflows during float64 to int64 conversions. | |||||
| partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount)) | |||||
| partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize | |||||
| // Total parts count. | |||||
| totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) | |||||
| // Part size. | |||||
| partSize = int64(partSizeFlt) | |||||
| // Last part size. | |||||
| lastPartSize = objectSize - int64(totalPartsCount-1)*partSize | |||||
| return totalPartsCount, partSize, lastPartSize, nil | |||||
| } | |||||
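| // Worked example under the constants above: for a 16GiB object, | |||||
| // ceil(16GiB/10000) is well below minPartSize, so it rounds up to a single | |||||
| // 64MiB slab: partSize = 64MiB, totalPartsCount = 256, lastPartSize = 64MiB. | |||||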
| // newUploadID - initiate a new multipart upload request | |||||
| // and fetch a new upload id for the object. | |||||
| func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return "", err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return "", err | |||||
| } | |||||
| // Initiate multipart upload for an object. | |||||
| initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| return initMultipartUploadResult.UploadID, nil | |||||
| } | |||||
| @@ -0,0 +1,33 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io" | |||||
| ) | |||||
| // PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. | |||||
| func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, | |||||
| opts PutObjectOptions) (n int64, err error) { | |||||
| err = opts.validate() | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) | |||||
| } | |||||
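| // Usage sketch (illustrative, assuming a configured Client named 'api'): | |||||
| // | |||||
| //   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) | |||||
| //   defer cancel() | |||||
| //   file, err := os.Open("photo.jpg") | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   defer file.Close() | |||||
| //   stat, err := file.Stat() | |||||
| //   if err != nil { | |||||
| //       log.Fatalln(err) | |||||
| //   } | |||||
| //   n, err := api.PutObjectWithContext(ctx, "mybucket", "photo.jpg", file, | |||||
| //       stat.Size(), minio.PutObjectOptions{ContentType: "image/jpeg"}) | |||||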
| @@ -0,0 +1,83 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017, 2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| ) | |||||
| // CopyObject - copy a source object into a new object | |||||
| func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { | |||||
| return c.CopyObjectWithProgress(dst, src, nil) | |||||
| } | |||||
| // CopyObjectWithProgress - copy a source object into a new object, optionally takes | |||||
| // progress bar input to notify current progress. | |||||
| func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error { | |||||
| header := make(http.Header) | |||||
| for k, v := range src.Headers { | |||||
| header[k] = v | |||||
| } | |||||
| var err error | |||||
| var size int64 | |||||
| // If a progress bar is specified, the size is needed as well, so initiate a StatObject request. | |||||
| if progress != nil { | |||||
| size, _, _, err = src.getProps(c) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| if src.encryption != nil { | |||||
| encrypt.SSECopy(src.encryption).Marshal(header) | |||||
| } | |||||
| if dst.encryption != nil { | |||||
| dst.encryption.Marshal(header) | |||||
| } | |||||
| for k, v := range dst.getUserMetaHeadersMap(true) { | |||||
| header.Set(k, v) | |||||
| } | |||||
| resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ | |||||
| bucketName: dst.bucket, | |||||
| objectName: dst.object, | |||||
| customHeader: header, | |||||
| }) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| defer closeResponse(resp) | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return httpRespToErrorResponse(resp, dst.bucket, dst.object) | |||||
| } | |||||
| // Update the progress properly after successful copy. | |||||
| if progress != nil { | |||||
| io.CopyN(ioutil.Discard, progress, size) | |||||
| } | |||||
| return nil | |||||
| } | |||||
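| // Usage sketch (illustrative): server-side copy between buckets. The | |||||
| // NewSourceInfo/NewDestinationInfo constructors are assumed from the | |||||
| // package API; bucket and object names are hypothetical. | |||||
| // | |||||
| //   src := minio.NewSourceInfo("src-bucket", "src-object", nil) | |||||
| //   dst, err := minio.NewDestinationInfo("dst-bucket", "dst-object", nil, nil) | |||||
| //   if err == nil { | |||||
| //       err = client.CopyObject(dst, src) | |||||
| //   } | |||||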
| @@ -0,0 +1,64 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "mime" | |||||
| "os" | |||||
| "path/filepath" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. | |||||
| func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Open the referenced file. | |||||
| fileReader, err := os.Open(filePath) | |||||
| // If any error occurs, fail quickly here. | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| defer fileReader.Close() | |||||
| // Save the file stat. | |||||
| fileStat, err := fileReader.Stat() | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Save the file size. | |||||
| fileSize := fileStat.Size() | |||||
| // Set contentType based on filepath extension if not given or default | |||||
| // value of "application/octet-stream" if the extension has no associated type. | |||||
| if opts.ContentType == "" { | |||||
| if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { | |||||
| opts.ContentType = "application/octet-stream" | |||||
| } | |||||
| } | |||||
| return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) | |||||
| } | |||||
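| // Usage sketch (illustrative): ContentType is left empty so it is | |||||
| // inferred from the file extension, falling back to | |||||
| // "application/octet-stream". Client, paths and names are assumptions. | |||||
| // | |||||
| //   n, err := client.FPutObjectWithContext(ctx, "mybucket", "report.pdf", | |||||
| //       "/tmp/report.pdf", minio.PutObjectOptions{}) | |||||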
| @@ -0,0 +1,27 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| ) | |||||
| // FPutObject - Create an object in a bucket, with contents from file at filePath | |||||
| func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { | |||||
| return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) | |||||
| } | |||||
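| // Usage sketch (illustrative): the non-context variant defers to | |||||
| // context.Background(); names are assumptions. | |||||
| // | |||||
| //   n, err := client.FPutObject("mybucket", "photo.png", "/tmp/photo.png", | |||||
| //       minio.PutObjectOptions{}) | |||||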
| @@ -0,0 +1,372 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "encoding/base64" | |||||
| "encoding/hex" | |||||
| "encoding/xml" | |||||
| "fmt" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "runtime/debug" | |||||
| "sort" | |||||
| "strconv" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, | |||||
| opts PutObjectOptions) (n int64, err error) { | |||||
| n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) | |||||
| if err != nil { | |||||
| errResp := ToErrorResponse(err) | |||||
| // Check whether multipart functionality is unavailable; if so, | |||||
| // fall back to a single PutObject operation. | |||||
| if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { | |||||
| // Verify if size of reader is greater than '5GiB'. | |||||
| if size > maxSinglePutObjectSize { | |||||
| return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) | |||||
| } | |||||
| // Fall back to uploading as single PutObject operation. | |||||
| return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| } | |||||
| return n, err | |||||
| } | |||||
| func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err = s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Total data read and written to the server; should equal | |||||
| // 'size' at the end of the call. | |||||
| var totalUploadedSize int64 | |||||
| // Complete multipart upload. | |||||
| var complMultipartUpload completeMultipartUpload | |||||
| // Calculate the optimal parts info for a given size. | |||||
| totalPartsCount, partSize, _, err := optimalPartInfo(-1) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Initiate a new multipart upload. | |||||
| uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| defer func() { | |||||
| if err != nil { | |||||
| c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | |||||
| } | |||||
| }() | |||||
| // Part number always starts with '1'. | |||||
| partNumber := 1 | |||||
| // Initialize parts uploaded map. | |||||
| partsInfo := make(map[int]ObjectPart) | |||||
| // Create a buffer. | |||||
| buf := make([]byte, partSize) | |||||
| defer debug.FreeOSMemory() | |||||
| for partNumber <= totalPartsCount { | |||||
| // Choose hash algorithms to be calculated by hashCopyN, | |||||
| // avoid sha256 with non-v4 signature request or | |||||
| // HTTPS connection. | |||||
| hashAlgos, hashSums := c.hashMaterials() | |||||
| length, rErr := io.ReadFull(reader, buf) | |||||
| if rErr == io.EOF { | |||||
| break | |||||
| } | |||||
| if rErr != nil && rErr != io.ErrUnexpectedEOF { | |||||
| return 0, rErr | |||||
| } | |||||
| // Compute hash sums over the part data just read into buf. | |||||
| for k, v := range hashAlgos { | |||||
| v.Write(buf[:length]) | |||||
| hashSums[k] = v.Sum(nil) | |||||
| } | |||||
| // Update progress reader appropriately to the latest offset | |||||
| // as we read from the source. | |||||
| rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) | |||||
| // Checksums. | |||||
| var ( | |||||
| md5Base64 string | |||||
| sha256Hex string | |||||
| ) | |||||
| if hashSums["md5"] != nil { | |||||
| md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) | |||||
| } | |||||
| if hashSums["sha256"] != nil { | |||||
| sha256Hex = hex.EncodeToString(hashSums["sha256"]) | |||||
| } | |||||
| // Proceed to upload the part. | |||||
| var objPart ObjectPart | |||||
| objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, | |||||
| md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) | |||||
| if err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Save successfully uploaded part metadata. | |||||
| partsInfo[partNumber] = objPart | |||||
| // Save successfully uploaded size. | |||||
| totalUploadedSize += int64(length) | |||||
| // Increment part number. | |||||
| partNumber++ | |||||
| // For unknown size, break once the reader returns EOF; | |||||
| // we do not have to upload up to totalPartsCount. | |||||
| if rErr == io.EOF { | |||||
| break | |||||
| } | |||||
| } | |||||
| // Loop over total uploaded parts to save them in | |||||
| // Parts array before completing the multipart request. | |||||
| for i := 1; i < partNumber; i++ { | |||||
| part, ok := partsInfo[i] | |||||
| if !ok { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | |||||
| } | |||||
| complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | |||||
| ETag: part.ETag, | |||||
| PartNumber: part.PartNumber, | |||||
| }) | |||||
| } | |||||
| // Sort all completed parts. | |||||
| sort.Sort(completedParts(complMultipartUpload.Parts)) | |||||
| if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Return final size. | |||||
| return totalUploadedSize, nil | |||||
| } | |||||
| // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. | |||||
| func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return initiateMultipartUploadResult{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return initiateMultipartUploadResult{}, err | |||||
| } | |||||
| // Initialize url queries. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("uploads", "") | |||||
| // Build custom headers (ContentType etc.) from the options. | |||||
| customHeader := opts.Header() | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| customHeader: customHeader, | |||||
| } | |||||
| // Execute POST on an objectName to initiate multipart upload. | |||||
| resp, err := c.executeMethod(ctx, "POST", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return initiateMultipartUploadResult{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Decode xml for new multipart upload. | |||||
| initiateMultipartUploadResult := initiateMultipartUploadResult{} | |||||
| err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) | |||||
| if err != nil { | |||||
| return initiateMultipartUploadResult, err | |||||
| } | |||||
| return initiateMultipartUploadResult, nil | |||||
| } | |||||
| // uploadPart - Uploads a part in a multipart upload. | |||||
| func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, | |||||
| partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ObjectPart{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return ObjectPart{}, err | |||||
| } | |||||
| if size > maxPartSize { | |||||
| return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) | |||||
| } | |||||
| if size <= -1 { | |||||
| return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) | |||||
| } | |||||
| if partNumber <= 0 { | |||||
| return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") | |||||
| } | |||||
| if uploadID == "" { | |||||
| return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") | |||||
| } | |||||
| // Get resources properly escaped and lined up before using them in http request. | |||||
| urlValues := make(url.Values) | |||||
| // Set part number. | |||||
| urlValues.Set("partNumber", strconv.Itoa(partNumber)) | |||||
| // Set upload id. | |||||
| urlValues.Set("uploadId", uploadID) | |||||
| // Set encryption headers, if any. | |||||
| customHeader := make(http.Header) | |||||
| // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html | |||||
| // Server-side encryption is supported by the S3 Multipart Upload actions. | |||||
| // Unless you are using a customer-provided encryption key, you don't need | |||||
| // to specify the encryption parameters in each UploadPart request. | |||||
| if sse != nil && sse.Type() == encrypt.SSEC { | |||||
| sse.Marshal(customHeader) | |||||
| } | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| customHeader: customHeader, | |||||
| contentBody: reader, | |||||
| contentLength: size, | |||||
| contentMD5Base64: md5Base64, | |||||
| contentSHA256Hex: sha256Hex, | |||||
| } | |||||
| // Execute PUT on each part. | |||||
| resp, err := c.executeMethod(ctx, "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ObjectPart{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Once successfully uploaded, return completed part. | |||||
| objPart := ObjectPart{} | |||||
| objPart.Size = size | |||||
| objPart.PartNumber = partNumber | |||||
| // Trim off the odd double quotes from ETag in the beginning and end. | |||||
| objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") | |||||
| objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") | |||||
| return objPart, nil | |||||
| } | |||||
| // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. | |||||
| func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, | |||||
| complete completeMultipartUpload) (completeMultipartUploadResult, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return completeMultipartUploadResult{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return completeMultipartUploadResult{}, err | |||||
| } | |||||
| // Initialize url queries. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("uploadId", uploadID) | |||||
| // Marshal complete multipart body. | |||||
| completeMultipartUploadBytes, err := xml.Marshal(complete) | |||||
| if err != nil { | |||||
| return completeMultipartUploadResult{}, err | |||||
| } | |||||
| // Wrap the complete multipart upload body in a reader. | |||||
| completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| contentBody: completeMultipartUploadBuffer, | |||||
| contentLength: int64(len(completeMultipartUploadBytes)), | |||||
| contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), | |||||
| } | |||||
| // Execute POST to complete multipart upload for an objectName. | |||||
| resp, err := c.executeMethod(ctx, "POST", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return completeMultipartUploadResult{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Read resp.Body into a []byte to parse for an Error response inside the body. | |||||
| var b []byte | |||||
| b, err = ioutil.ReadAll(resp.Body) | |||||
| if err != nil { | |||||
| return completeMultipartUploadResult{}, err | |||||
| } | |||||
| // Decode completed multipart upload response on success. | |||||
| completeMultipartUploadResult := completeMultipartUploadResult{} | |||||
| err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) | |||||
| if err != nil { | |||||
| // xml parsing failure due to the presence of an ill-formed xml fragment | |||||
| return completeMultipartUploadResult, err | |||||
| } else if completeMultipartUploadResult.Bucket == "" { | |||||
| // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied. | |||||
| // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values | |||||
| // of the members. | |||||
| // Decode completed multipart upload response on failure | |||||
| completeMultipartUploadErr := ErrorResponse{} | |||||
| err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) | |||||
| if err != nil { | |||||
| // xml parsing failure due to the presence of an ill-formed xml fragment | |||||
| return completeMultipartUploadResult, err | |||||
| } | |||||
| return completeMultipartUploadResult, completeMultipartUploadErr | |||||
| } | |||||
| return completeMultipartUploadResult, nil | |||||
| } | |||||
| @@ -0,0 +1,417 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "fmt" | |||||
| "io" | |||||
| "net/http" | |||||
| "sort" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // putObjectMultipartStream - upload a large object using | |||||
| // multipart upload and streaming signature for signing payload. | |||||
| // Comprehensive put object operation involving multipart uploads. | |||||
| // | |||||
| // The following code handles these types of readers: | |||||
| // | |||||
| // - *minio.Object | |||||
| // - Any reader which has a method 'ReadAt()' | |||||
| // | |||||
| func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, | |||||
| reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { | |||||
| if !isObject(reader) && isReadAt(reader) { | |||||
| // The reader implements ReadAt and is not a *minio.Object, so use the parallel uploader. | |||||
| n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) | |||||
| } else { | |||||
| n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| if err != nil { | |||||
| errResp := ToErrorResponse(err) | |||||
| // Check whether multipart functionality is unavailable; if so, | |||||
| // fall back to a single PutObject operation. | |||||
| if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { | |||||
| // Verify if size of reader is greater than '5GiB'. | |||||
| if size > maxSinglePutObjectSize { | |||||
| return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) | |||||
| } | |||||
| // Fall back to uploading as single PutObject operation. | |||||
| return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| } | |||||
| return n, err | |||||
| } | |||||
| // uploadedPartRes - the response received from a part upload. | |||||
| type uploadedPartRes struct { | |||||
| Error error // Any error encountered while uploading the part. | |||||
| PartNum int // Number of the part uploaded. | |||||
| Size int64 // Size of the part uploaded. | |||||
| Part *ObjectPart | |||||
| } | |||||
| type uploadPartReq struct { | |||||
| PartNum int // Number of the part to upload. | |||||
| Part *ObjectPart // Metadata of the uploaded part, set on success. | |||||
| } | |||||
| // putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB. | |||||
| // Supports all readers which implement the io.ReaderAt interface | |||||
| // (ReadAt method). | |||||
| // | |||||
| // NOTE: This function is meant to be used for all readers which | |||||
| // implement io.ReaderAt, which allows resuming multipart uploads by | |||||
| // reading at an offset and avoids re-reading data that was already | |||||
| // uploaded. Internally this function uses temporary files for staging | |||||
| // the data; these temporary files are cleaned up automatically when | |||||
| // the caller, i.e. the http client, closes the stream after uploading | |||||
| // all the contents successfully. | |||||
| func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, | |||||
| reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err = s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Calculate the optimal parts info for a given size. | |||||
| totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Initiate a new multipart upload. | |||||
| uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Aborts the multipart upload in progress, if the | |||||
| // function returns any error, since we do not resume | |||||
| // we should purge the parts which have been uploaded | |||||
| // to relinquish storage space. | |||||
| defer func() { | |||||
| if err != nil { | |||||
| c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | |||||
| } | |||||
| }() | |||||
| // Total data read and written to the server; should equal 'size' at the end of the call. | |||||
| var totalUploadedSize int64 | |||||
| // Complete multipart upload. | |||||
| var complMultipartUpload completeMultipartUpload | |||||
| // Declare a channel that sends the next part number to be uploaded. | |||||
| // Buffered to 10000 because that's the maximum number of parts allowed | |||||
| // by S3. | |||||
| uploadPartsCh := make(chan uploadPartReq, 10000) | |||||
| // Declare a channel that sends back the response of a part upload. | |||||
| // Buffered to 10000 because that's the maximum number of parts allowed | |||||
| // by S3. | |||||
| uploadedPartsCh := make(chan uploadedPartRes, 10000) | |||||
| // Used for readability, lastPartNumber is always totalPartsCount. | |||||
| lastPartNumber := totalPartsCount | |||||
| // Send each part number to the channel to be processed. | |||||
| for p := 1; p <= totalPartsCount; p++ { | |||||
| uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} | |||||
| } | |||||
| close(uploadPartsCh) | |||||
| // Start workers that receive part numbers from the channel, allowing getNumThreads() parallel uploads. | |||||
| for w := 1; w <= opts.getNumThreads(); w++ { | |||||
| go func(partSize int64) { | |||||
| // Each worker will draw from the part channel and upload in parallel. | |||||
| for uploadReq := range uploadPartsCh { | |||||
| // Calculate the offset and size for this part. For all but | |||||
| // the last part, the offset is a multiple of partSize. | |||||
| readOffset := int64(uploadReq.PartNum-1) * partSize | |||||
| // As a special case if partNumber is lastPartNumber, we | |||||
| // calculate the offset based on the last part size. | |||||
| if uploadReq.PartNum == lastPartNumber { | |||||
| readOffset = (size - lastPartSize) | |||||
| partSize = lastPartSize | |||||
| } | |||||
| // Get a section reader on a particular offset. | |||||
| sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) | |||||
| // Proceed to upload the part. | |||||
| var objPart ObjectPart | |||||
| objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, | |||||
| sectionReader, uploadReq.PartNum, | |||||
| "", "", partSize, opts.ServerSideEncryption) | |||||
| if err != nil { | |||||
| uploadedPartsCh <- uploadedPartRes{ | |||||
| Size: 0, | |||||
| Error: err, | |||||
| } | |||||
| // Exit the goroutine. | |||||
| return | |||||
| } | |||||
| // Save successfully uploaded part metadata. | |||||
| uploadReq.Part = &objPart | |||||
| // Send successful part info through the channel. | |||||
| uploadedPartsCh <- uploadedPartRes{ | |||||
| Size: objPart.Size, | |||||
| PartNum: uploadReq.PartNum, | |||||
| Part: uploadReq.Part, | |||||
| Error: nil, | |||||
| } | |||||
| } | |||||
| }(partSize) | |||||
| } | |||||
| // Gather the responses as they occur and update any | |||||
| // progress bar. | |||||
| for u := 1; u <= totalPartsCount; u++ { | |||||
| uploadRes := <-uploadedPartsCh | |||||
| if uploadRes.Error != nil { | |||||
| return totalUploadedSize, uploadRes.Error | |||||
| } | |||||
| // Retrieve each uploaded part and store it to be completed. | |||||
| part := uploadRes.Part | |||||
| if part == nil { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) | |||||
| } | |||||
| // Update the totalUploadedSize. | |||||
| totalUploadedSize += uploadRes.Size | |||||
| // Store the parts to be completed in order. | |||||
| complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | |||||
| ETag: part.ETag, | |||||
| PartNumber: part.PartNumber, | |||||
| }) | |||||
| } | |||||
| // Verify if we uploaded all the data. | |||||
| if totalUploadedSize != size { | |||||
| return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) | |||||
| } | |||||
| // Sort all completed parts. | |||||
| sort.Sort(completedParts(complMultipartUpload.Parts)) | |||||
| _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) | |||||
| if err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Return final size. | |||||
| return totalUploadedSize, nil | |||||
| } | |||||
| func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, | |||||
| reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err = s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Calculate the optimal parts info for a given size. | |||||
| totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Initiates a new multipart request | |||||
| uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Aborts the multipart upload if the function returns | |||||
| // any error, since we do not resume we should purge | |||||
| // the parts which have been uploaded to relinquish | |||||
| // storage space. | |||||
| defer func() { | |||||
| if err != nil { | |||||
| c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | |||||
| } | |||||
| }() | |||||
| // Total data read and written to the server; should equal 'size' at the end of the call. | |||||
| var totalUploadedSize int64 | |||||
| // Initialize parts uploaded map. | |||||
| partsInfo := make(map[int]ObjectPart) | |||||
| // Part number always starts with '1'. | |||||
| var partNumber int | |||||
| for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { | |||||
| // Update progress reader appropriately to the latest offset | |||||
| // as we read from the source. | |||||
| hookReader := newHook(reader, opts.Progress) | |||||
| // Proceed to upload the part. | |||||
| if partNumber == totalPartsCount { | |||||
| partSize = lastPartSize | |||||
| } | |||||
| var objPart ObjectPart | |||||
| objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, | |||||
| io.LimitReader(hookReader, partSize), | |||||
| partNumber, "", "", partSize, opts.ServerSideEncryption) | |||||
| if err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Save successfully uploaded part metadata. | |||||
| partsInfo[partNumber] = objPart | |||||
| // Save successfully uploaded size. | |||||
| totalUploadedSize += partSize | |||||
| } | |||||
| // Verify if we uploaded all the data. | |||||
| if size > 0 { | |||||
| if totalUploadedSize != size { | |||||
| return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // Complete multipart upload. | |||||
| var complMultipartUpload completeMultipartUpload | |||||
| // Loop over total uploaded parts to save them in | |||||
| // Parts array before completing the multipart request. | |||||
| for i := 1; i < partNumber; i++ { | |||||
| part, ok := partsInfo[i] | |||||
| if !ok { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | |||||
| } | |||||
| complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | |||||
| ETag: part.ETag, | |||||
| PartNumber: part.PartNumber, | |||||
| }) | |||||
| } | |||||
| // Sort all completed parts. | |||||
| sort.Sort(completedParts(complMultipartUpload.Parts)) | |||||
| _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) | |||||
| if err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Return final size. | |||||
| return totalUploadedSize, nil | |||||
| } | |||||
| // putObjectNoChecksum is a special function used for Google Cloud Storage, | |||||
| // since Google's multipart API is not S3 compatible. | |||||
| func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Size -1 is only supported on Google Cloud Storage; we error | |||||
| // out in all other situations. | |||||
| if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { | |||||
| return 0, ErrEntityTooSmall(size, bucketName, objectName) | |||||
| } | |||||
| if size > 0 { | |||||
| if isReadAt(reader) && !isObject(reader) { | |||||
| seeker, _ := reader.(io.Seeker) | |||||
| offset, err := seeker.Seek(0, io.SeekCurrent) | |||||
| if err != nil { | |||||
| return 0, ErrInvalidArgument(err.Error()) | |||||
| } | |||||
| reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) | |||||
| } | |||||
| } | |||||
| // Update progress reader appropriately to the latest offset as we | |||||
| // read from the source. | |||||
| readSeeker := newHook(reader, opts.Progress) | |||||
| // This function does not calculate sha256 and md5sum for payload. | |||||
| // Execute put object. | |||||
| st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if st.Size != size { | |||||
| return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) | |||||
| } | |||||
| return size, nil | |||||
| } | |||||
| // putObjectDo - executes the put object http operation. | |||||
| // NOTE: You must have WRITE permissions on a bucket to add an object to it. | |||||
| func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| // Set headers. | |||||
| customHeader := opts.Header() | |||||
| // Populate request metadata. | |||||
| reqMetadata := requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| customHeader: customHeader, | |||||
| contentBody: reader, | |||||
| contentLength: size, | |||||
| contentMD5Base64: md5Base64, | |||||
| contentSHA256Hex: sha256Hex, | |||||
| } | |||||
| // Execute PUT on an objectName. | |||||
| resp, err := c.executeMethod(ctx, "PUT", reqMetadata) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| var objInfo ObjectInfo | |||||
| // Trim off the odd double quotes from ETag in the beginning and end. | |||||
| objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") | |||||
| objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") | |||||
| // A success here means data was written to server successfully. | |||||
| objInfo.Size = size | |||||
| // Return here. | |||||
| return objInfo, nil | |||||
| } | |||||
| @@ -0,0 +1,267 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "fmt" | |||||
| "io" | |||||
| "net/http" | |||||
| "runtime/debug" | |||||
| "sort" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| "golang.org/x/net/http/httpguts" | |||||
| ) | |||||
| // PutObjectOptions represents options specified by user for PutObject call | |||||
| type PutObjectOptions struct { | |||||
| UserMetadata map[string]string | |||||
| Progress io.Reader | |||||
| ContentType string | |||||
| ContentEncoding string | |||||
| ContentDisposition string | |||||
| ContentLanguage string | |||||
| CacheControl string | |||||
| ServerSideEncryption encrypt.ServerSide | |||||
| NumThreads uint | |||||
| StorageClass string | |||||
| WebsiteRedirectLocation string | |||||
| } | |||||
| // getNumThreads - gets the number of threads to be used in the multipart | |||||
| // put object operation | |||||
| func (opts PutObjectOptions) getNumThreads() (numThreads int) { | |||||
| if opts.NumThreads > 0 { | |||||
| numThreads = int(opts.NumThreads) | |||||
| } else { | |||||
| numThreads = totalWorkers | |||||
| } | |||||
| return | |||||
| } | |||||
| // Header - constructs the headers from metadata entered by user in | |||||
| // PutObjectOptions struct | |||||
| func (opts PutObjectOptions) Header() (header http.Header) { | |||||
| header = make(http.Header) | |||||
| if opts.ContentType != "" { | |||||
| header["Content-Type"] = []string{opts.ContentType} | |||||
| } else { | |||||
| header["Content-Type"] = []string{"application/octet-stream"} | |||||
| } | |||||
| if opts.ContentEncoding != "" { | |||||
| header["Content-Encoding"] = []string{opts.ContentEncoding} | |||||
| } | |||||
| if opts.ContentDisposition != "" { | |||||
| header["Content-Disposition"] = []string{opts.ContentDisposition} | |||||
| } | |||||
| if opts.ContentLanguage != "" { | |||||
| header["Content-Language"] = []string{opts.ContentLanguage} | |||||
| } | |||||
| if opts.CacheControl != "" { | |||||
| header["Cache-Control"] = []string{opts.CacheControl} | |||||
| } | |||||
| if opts.ServerSideEncryption != nil { | |||||
| opts.ServerSideEncryption.Marshal(header) | |||||
| } | |||||
| if opts.StorageClass != "" { | |||||
| header[amzStorageClass] = []string{opts.StorageClass} | |||||
| } | |||||
| if opts.WebsiteRedirectLocation != "" { | |||||
| header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} | |||||
| } | |||||
| for k, v := range opts.UserMetadata { | |||||
| if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { | |||||
| header["X-Amz-Meta-"+k] = []string{v} | |||||
| } else { | |||||
| header[k] = []string{v} | |||||
| } | |||||
| } | |||||
| return | |||||
| } | |||||
| // validate() checks if the UserMetadata map has standard headers and raises an error if so. | |||||
| func (opts PutObjectOptions) validate() (err error) { | |||||
| for k, v := range opts.UserMetadata { | |||||
| if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { | |||||
| return ErrInvalidArgument(k + " unsupported user defined metadata name") | |||||
| } | |||||
| if !httpguts.ValidHeaderFieldValue(v) { | |||||
| return ErrInvalidArgument(v + " unsupported user defined metadata value") | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
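| // Options sketch (illustrative): user metadata keys that are not already | |||||
| // amz/standard headers are sent as "X-Amz-Meta-" headers by Header(), | |||||
| // while validate() rejects reserved names. Values below are assumptions. | |||||
| // | |||||
| //   opts := minio.PutObjectOptions{ | |||||
| //       ContentType:  "text/plain", | |||||
| //       StorageClass: "REDUCED_REDUNDANCY", | |||||
| //       UserMetadata: map[string]string{"origin": "gitea"}, | |||||
| //   } | |||||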
| // completedParts is a collection of parts sortable by their part numbers. | |||||
| // used for sorting the uploaded parts before completing the multipart request. | |||||
| type completedParts []CompletePart | |||||
| func (a completedParts) Len() int { return len(a) } | |||||
| func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | |||||
| func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } | |||||
| // PutObject creates an object in a bucket. | |||||
| // | |||||
| // You must have WRITE permissions on a bucket to create an object. | |||||
| // | |||||
| // - For size smaller than 64MiB PutObject automatically does a | |||||
| // single atomic Put operation. | |||||
| // - For size larger than 64MiB PutObject automatically does a | |||||
| // multipart Put operation. | |||||
| // - For size input as -1 PutObject does a multipart Put operation | |||||
| // until the input stream reaches EOF. The maximum object size that | |||||
| // can be uploaded through this operation is 5TiB. | |||||
| func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, | |||||
| opts PutObjectOptions) (n int64, err error) { | |||||
| return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) | |||||
| } | |||||
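| // Usage sketch (illustrative): a single call covers both paths; the | |||||
| // library picks single-put or multipart based on the size thresholds | |||||
| // described above. The file path and names are assumptions. | |||||
| // | |||||
| //   f, err := os.Open("/tmp/large.bin") | |||||
| //   if err != nil { return } | |||||
| //   defer f.Close() | |||||
| //   st, _ := f.Stat() | |||||
| //   n, err := client.PutObject("mybucket", "large.bin", f, st.Size(), | |||||
| //       minio.PutObjectOptions{}) | |||||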
| func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { | |||||
| // Check for largest object size allowed. | |||||
| if size > int64(maxMultipartPutObjectSize) { | |||||
| return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) | |||||
| } | |||||
| // NOTE: Streaming signature is not supported by GCS. | |||||
| if s3utils.IsGoogleEndpoint(*c.endpointURL) { | |||||
| // Do not compute MD5 for Google Cloud Storage. | |||||
| return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| if c.overrideSignerType.IsV2() { | |||||
| if size >= 0 && size < minPartSize { | |||||
| return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| if size < 0 { | |||||
| return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) | |||||
| } | |||||
| if size < minPartSize { | |||||
| return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| // For all sizes greater than 64MiB do multipart. | |||||
| return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) | |||||
| } | |||||
| func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { | |||||
| // Input validation. | |||||
| if err = s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| if err = s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Total data read and written to the server; should equal | |||||
| // 'size' at the end of the call. | |||||
| var totalUploadedSize int64 | |||||
| // Complete multipart upload. | |||||
| var complMultipartUpload completeMultipartUpload | |||||
| // Calculate the optimal parts info for a given size. | |||||
| totalPartsCount, partSize, _, err := optimalPartInfo(-1) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| // Initiate a new multipart upload. | |||||
| uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| defer func() { | |||||
| if err != nil { | |||||
| c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) | |||||
| } | |||||
| }() | |||||
| // Part number always starts with '1'. | |||||
| partNumber := 1 | |||||
| // Initialize parts uploaded map. | |||||
| partsInfo := make(map[int]ObjectPart) | |||||
| // Create a buffer. | |||||
| buf := make([]byte, partSize) | |||||
| defer debug.FreeOSMemory() | |||||
| for partNumber <= totalPartsCount { | |||||
| length, rErr := io.ReadFull(reader, buf) | |||||
| if rErr == io.EOF && partNumber > 1 { | |||||
| break | |||||
| } | |||||
| if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { | |||||
| return 0, rErr | |||||
| } | |||||
| // Update progress reader appropriately to the latest offset | |||||
| // as we read from the source. | |||||
| rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) | |||||
| // Proceed to upload the part. | |||||
| var objPart ObjectPart | |||||
| objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, | |||||
| "", "", int64(length), opts.ServerSideEncryption) | |||||
| if err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Save successfully uploaded part metadata. | |||||
| partsInfo[partNumber] = objPart | |||||
| // Save successfully uploaded size. | |||||
| totalUploadedSize += int64(length) | |||||
| // Increment part number. | |||||
| partNumber++ | |||||
| // For unknown size, break once the reader returns EOF; | |||||
| // we do not have to upload up to totalPartsCount. | |||||
| if rErr == io.EOF { | |||||
| break | |||||
| } | |||||
| } | |||||
| // Loop over total uploaded parts to save them in | |||||
| // Parts array before completing the multipart request. | |||||
| for i := 1; i < partNumber; i++ { | |||||
| part, ok := partsInfo[i] | |||||
| if !ok { | |||||
| return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) | |||||
| } | |||||
| complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ | |||||
| ETag: part.ETag, | |||||
| PartNumber: part.PartNumber, | |||||
| }) | |||||
| } | |||||
| // Sort all completed parts. | |||||
| sort.Sort(completedParts(complMultipartUpload.Parts)) | |||||
| if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { | |||||
| return totalUploadedSize, err | |||||
| } | |||||
| // Return final size. | |||||
| return totalUploadedSize, nil | |||||
| } | |||||
| @@ -0,0 +1,303 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "encoding/xml" | |||||
| "io" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // RemoveBucket deletes the named bucket. | |||||
| // | |||||
| // All objects (including all object versions and delete markers) | |||||
| // in the bucket must be deleted before successfully attempting this request. | |||||
| func (c Client) RemoveBucket(bucketName string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Execute DELETE on bucket. | |||||
| resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusNoContent { | |||||
| return httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| // Remove the location from cache on a successful delete. | |||||
| c.bucketLocCache.Delete(bucketName) | |||||
| return nil | |||||
| } | |||||
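| // Usage sketch (illustrative): the bucket must already be empty, so a | |||||
| // typical flow removes all objects first (see RemoveObjects below). | |||||
| // Names are assumptions. | |||||
| // | |||||
| //   if err := client.RemoveBucket("old-bucket"); err != nil { | |||||
| //       log.Println("remove bucket failed:", err) | |||||
| //   } | |||||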
| // RemoveObject removes an object from a bucket. | |||||
| func (c Client) RemoveObject(bucketName, objectName string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Execute DELETE on objectName. | |||||
| resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| // If some unexpected error happened and max retries were reached, let the client know. | |||||
| if resp.StatusCode != http.StatusNoContent { | |||||
| return httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
| // DeleteObject always responds with http '204' even for | |||||
| // objects which do not exist. So no need to handle them | |||||
| // specifically. | |||||
| return nil | |||||
| } | |||||
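| // Usage sketch (illustrative): removing a single object; a nil error is | |||||
| // returned even when the object does not exist, per the note above. | |||||
| // | |||||
| //   err := client.RemoveObject("mybucket", "stale-object") | |||||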
| // RemoveObjectError - container for a Multi-Object Delete S3 API error. | |||||
| type RemoveObjectError struct { | |||||
| ObjectName string | |||||
| Err error | |||||
| } | |||||
| // generateRemoveMultiObjectsRequest - generate the XML body for a Multi-Object Delete request. | |||||
| func generateRemoveMultiObjectsRequest(objects []string) []byte { | |||||
| rmObjects := []deleteObject{} | |||||
| for _, obj := range objects { | |||||
| rmObjects = append(rmObjects, deleteObject{Key: obj}) | |||||
| } | |||||
| xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true}) | |||||
| return xmlBytes | |||||
| } | |||||
| // processRemoveMultiObjectsResponse - parse the Multi-Object Delete response | |||||
| // and report the per-object failures on the error channel. | |||||
| func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) { | |||||
| // Parse multi delete XML response | |||||
| rmResult := &deleteMultiObjectsResult{} | |||||
| err := xmlDecoder(body, rmResult) | |||||
| if err != nil { | |||||
| errorCh <- RemoveObjectError{ObjectName: "", Err: err} | |||||
| return | |||||
| } | |||||
| // Fill deletion that returned an error. | |||||
| for _, obj := range rmResult.UnDeletedObjects { | |||||
| errorCh <- RemoveObjectError{ | |||||
| ObjectName: obj.Key, | |||||
| Err: ErrorResponse{ | |||||
| Code: obj.Code, | |||||
| Message: obj.Message, | |||||
| }, | |||||
| } | |||||
| } | |||||
| } | |||||
| // RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation. | |||||
| func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { | |||||
| errorCh := make(chan RemoveObjectError, 1) | |||||
| // Validate if bucket name is valid. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| defer close(errorCh) | |||||
| errorCh <- RemoveObjectError{ | |||||
| Err: err, | |||||
| } | |||||
| return errorCh | |||||
| } | |||||
| // Validate objects channel to be properly allocated. | |||||
| if objectsCh == nil { | |||||
| defer close(errorCh) | |||||
| errorCh <- RemoveObjectError{ | |||||
| Err: ErrInvalidArgument("Objects channel cannot be nil"), | |||||
| } | |||||
| return errorCh | |||||
| } | |||||
| // Generate and call MultiDelete S3 requests based on entries received from objectsCh | |||||
| go func(errorCh chan<- RemoveObjectError) { | |||||
| maxEntries := 1000 | |||||
| finish := false | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("delete", "") | |||||
| // Close error channel when Multi delete finishes. | |||||
| defer close(errorCh) | |||||
| // Loop over entries in batches of 1000 and issue Multi-Object Delete requests. | |||||
| for { | |||||
| if finish { | |||||
| break | |||||
| } | |||||
| count := 0 | |||||
| var batch []string | |||||
| // Try to gather 1000 entries | |||||
| for object := range objectsCh { | |||||
| batch = append(batch, object) | |||||
| if count++; count >= maxEntries { | |||||
| break | |||||
| } | |||||
| } | |||||
| if count == 0 { | |||||
| // Multi Objects Delete API doesn't accept empty object list, quit immediately | |||||
| break | |||||
| } | |||||
| if count < maxEntries { | |||||
| // We didn't have 1000 entries, so this is the last batch | |||||
| finish = true | |||||
| } | |||||
| // Generate remove multi objects XML request | |||||
| removeBytes := generateRemoveMultiObjectsRequest(batch) | |||||
| // Execute POST on the bucket to delete the batch of objects. | |||||
| resp, err := c.executeMethod(ctx, "POST", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| queryValues: urlValues, | |||||
| contentBody: bytes.NewReader(removeBytes), | |||||
| contentLength: int64(len(removeBytes)), | |||||
| contentMD5Base64: sumMD5Base64(removeBytes), | |||||
| contentSHA256Hex: sum256Hex(removeBytes), | |||||
| }) | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| e := httpRespToErrorResponse(resp, bucketName, "") | |||||
| errorCh <- RemoveObjectError{ObjectName: "", Err: e} | |||||
| } | |||||
| } | |||||
| if err != nil { | |||||
| for _, b := range batch { | |||||
| errorCh <- RemoveObjectError{ObjectName: b, Err: err} | |||||
| } | |||||
| continue | |||||
| } | |||||
| // Process multiobjects remove xml response | |||||
| processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) | |||||
| closeResponse(resp) | |||||
| } | |||||
| }(errorCh) | |||||
| return errorCh | |||||
| } | |||||
| // RemoveObjects removes multiple objects from a bucket. | |||||
| // The list of objects to remove are received from objectsCh. | |||||
| // Remove failures are sent back via error channel. | |||||
| func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { | |||||
| return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) | |||||
| } | |||||
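| // Usage sketch (illustrative): feed keys into a channel and drain the | |||||
| // error channel; the feeding goroutine must close objectsCh so the | |||||
| // delete loop can finish. Names are assumptions. | |||||
| // | |||||
| //   objectsCh := make(chan string) | |||||
| //   go func() { | |||||
| //       defer close(objectsCh) | |||||
| //       for _, key := range keysToDelete { | |||||
| //           objectsCh <- key | |||||
| //       } | |||||
| //   }() | |||||
| //   for rErr := range client.RemoveObjects("mybucket", objectsCh) { | |||||
| //       log.Println("failed to remove", rErr.ObjectName, rErr.Err) | |||||
| //   } | |||||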
| // RemoveIncompleteUpload aborts a partially uploaded object. | |||||
| func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Find multipart upload ids of the object to be aborted. | |||||
| uploadIDs, err := c.findUploadIDs(bucketName, objectName) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| for _, uploadID := range uploadIDs { | |||||
| // abort incomplete multipart upload, based on the upload id passed. | |||||
| err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
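| // Usage sketch (illustrative): abort any dangling multipart uploads for | |||||
| // an object before re-uploading it. Names are assumptions. | |||||
| // | |||||
| //   if err := client.RemoveIncompleteUpload("mybucket", "big.iso"); err != nil { | |||||
| //       log.Println("abort failed:", err) | |||||
| //   } | |||||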
| // abortMultipartUpload aborts a multipart upload for the given | |||||
| // uploadID; all previously uploaded parts are deleted. | |||||
| func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return err | |||||
| } | |||||
| // Initialize url queries. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("uploadId", uploadID) | |||||
| // Execute DELETE on multipart upload. | |||||
| resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusNoContent { | |||||
| // Abort has no response body, handle it for any errors. | |||||
| var errorResponse ErrorResponse | |||||
| switch resp.StatusCode { | |||||
| case http.StatusNotFound: | |||||
// This is needed specifically for abort and it cannot
// be folded into the default case.
| errorResponse = ErrorResponse{ | |||||
| Code: "NoSuchUpload", | |||||
| Message: "The specified multipart upload does not exist.", | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| RequestID: resp.Header.Get("x-amz-request-id"), | |||||
| HostID: resp.Header.Get("x-amz-id-2"), | |||||
| Region: resp.Header.Get("x-amz-bucket-region"), | |||||
| } | |||||
| default: | |||||
| return httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| return errorResponse | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,245 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "encoding/xml" | |||||
| "time" | |||||
| ) | |||||
| // listAllMyBucketsResult container for listBuckets response. | |||||
| type listAllMyBucketsResult struct { | |||||
| // Container for one or more buckets. | |||||
| Buckets struct { | |||||
| Bucket []BucketInfo | |||||
| } | |||||
| Owner owner | |||||
| } | |||||
| // owner container for bucket owner information. | |||||
| type owner struct { | |||||
| DisplayName string | |||||
| ID string | |||||
| } | |||||
| // CommonPrefix container for prefix response. | |||||
| type CommonPrefix struct { | |||||
| Prefix string | |||||
| } | |||||
| // ListBucketV2Result container for listObjects response version 2. | |||||
| type ListBucketV2Result struct { | |||||
| // A response can contain CommonPrefixes only if you have | |||||
| // specified a delimiter. | |||||
| CommonPrefixes []CommonPrefix | |||||
| // Metadata about each object returned. | |||||
| Contents []ObjectInfo | |||||
| Delimiter string | |||||
| // Encoding type used to encode object keys in the response. | |||||
| EncodingType string | |||||
| // A flag that indicates whether or not ListObjects returned all of the results | |||||
| // that satisfied the search criteria. | |||||
| IsTruncated bool | |||||
| MaxKeys int64 | |||||
| Name string | |||||
| // Hold the token that will be sent in the next request to fetch the next group of keys | |||||
| NextContinuationToken string | |||||
| ContinuationToken string | |||||
| Prefix string | |||||
| // FetchOwner and StartAfter are currently not used | |||||
| FetchOwner string | |||||
| StartAfter string | |||||
| } | |||||
| // ListBucketResult container for listObjects response. | |||||
| type ListBucketResult struct { | |||||
| // A response can contain CommonPrefixes only if you have | |||||
| // specified a delimiter. | |||||
| CommonPrefixes []CommonPrefix | |||||
| // Metadata about each object returned. | |||||
| Contents []ObjectInfo | |||||
| Delimiter string | |||||
| // Encoding type used to encode object keys in the response. | |||||
| EncodingType string | |||||
| // A flag that indicates whether or not ListObjects returned all of the results | |||||
| // that satisfied the search criteria. | |||||
| IsTruncated bool | |||||
| Marker string | |||||
| MaxKeys int64 | |||||
| Name string | |||||
| // When response is truncated (the IsTruncated element value in | |||||
| // the response is true), you can use the key name in this field | |||||
| // as marker in the subsequent request to get next set of objects. | |||||
// Object storage lists objects in alphabetical order. Note: This
// element is returned only if you have the delimiter request
// parameter specified. If the response does not include NextMarker
// and it is truncated, you can use the value of the last Key in
// the response as the marker in the subsequent request to get the
// next set of object keys.
| NextMarker string | |||||
| Prefix string | |||||
| } | |||||
| // ListMultipartUploadsResult container for ListMultipartUploads response | |||||
| type ListMultipartUploadsResult struct { | |||||
| Bucket string | |||||
| KeyMarker string | |||||
| UploadIDMarker string `xml:"UploadIdMarker"` | |||||
| NextKeyMarker string | |||||
| NextUploadIDMarker string `xml:"NextUploadIdMarker"` | |||||
| EncodingType string | |||||
| MaxUploads int64 | |||||
| IsTruncated bool | |||||
| Uploads []ObjectMultipartInfo `xml:"Upload"` | |||||
| Prefix string | |||||
| Delimiter string | |||||
| // A response can contain CommonPrefixes only if you specify a delimiter. | |||||
| CommonPrefixes []CommonPrefix | |||||
| } | |||||
| // initiator container for who initiated multipart upload. | |||||
| type initiator struct { | |||||
| ID string | |||||
| DisplayName string | |||||
| } | |||||
| // copyObjectResult container for copy object response. | |||||
| type copyObjectResult struct { | |||||
| ETag string | |||||
| LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" | |||||
| } | |||||
| // ObjectPart container for particular part of an object. | |||||
| type ObjectPart struct { | |||||
| // Part number identifies the part. | |||||
| PartNumber int | |||||
| // Date and time the part was uploaded. | |||||
| LastModified time.Time | |||||
| // Entity tag returned when the part was uploaded, usually md5sum | |||||
| // of the part. | |||||
| ETag string | |||||
| // Size of the uploaded part data. | |||||
| Size int64 | |||||
| } | |||||
| // ListObjectPartsResult container for ListObjectParts response. | |||||
| type ListObjectPartsResult struct { | |||||
| Bucket string | |||||
| Key string | |||||
| UploadID string `xml:"UploadId"` | |||||
| Initiator initiator | |||||
| Owner owner | |||||
| StorageClass string | |||||
| PartNumberMarker int | |||||
| NextPartNumberMarker int | |||||
| MaxParts int | |||||
| // Indicates whether the returned list of parts is truncated. | |||||
| IsTruncated bool | |||||
| ObjectParts []ObjectPart `xml:"Part"` | |||||
| EncodingType string | |||||
| } | |||||
| // initiateMultipartUploadResult container for InitiateMultiPartUpload | |||||
| // response. | |||||
| type initiateMultipartUploadResult struct { | |||||
| Bucket string | |||||
| Key string | |||||
| UploadID string `xml:"UploadId"` | |||||
| } | |||||
| // completeMultipartUploadResult container for completed multipart | |||||
| // upload response. | |||||
| type completeMultipartUploadResult struct { | |||||
| Location string | |||||
| Bucket string | |||||
| Key string | |||||
| ETag string | |||||
| } | |||||
| // CompletePart sub container lists individual part numbers and their | |||||
| // md5sum, part of completeMultipartUpload. | |||||
| type CompletePart struct { | |||||
| XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` | |||||
| // Part number identifies the part. | |||||
| PartNumber int | |||||
| ETag string | |||||
| } | |||||
| // completeMultipartUpload container for completing multipart upload. | |||||
| type completeMultipartUpload struct { | |||||
| XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` | |||||
| Parts []CompletePart `xml:"Part"` | |||||
| } | |||||
| // createBucketConfiguration container for bucket configuration. | |||||
| type createBucketConfiguration struct { | |||||
| XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` | |||||
| Location string `xml:"LocationConstraint"` | |||||
| } | |||||
| // deleteObject container for Delete element in MultiObjects Delete XML request | |||||
| type deleteObject struct { | |||||
| Key string | |||||
| VersionID string `xml:"VersionId,omitempty"` | |||||
| } | |||||
| // deletedObject container for Deleted element in MultiObjects Delete XML response | |||||
| type deletedObject struct { | |||||
| Key string | |||||
| VersionID string `xml:"VersionId,omitempty"` | |||||
| // These fields are ignored. | |||||
| DeleteMarker bool | |||||
| DeleteMarkerVersionID string | |||||
| } | |||||
| // nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response | |||||
| type nonDeletedObject struct { | |||||
| Key string | |||||
| Code string | |||||
| Message string | |||||
| } | |||||
| // deletedMultiObjects container for MultiObjects Delete XML request | |||||
| type deleteMultiObjects struct { | |||||
| XMLName xml.Name `xml:"Delete"` | |||||
| Quiet bool | |||||
| Objects []deleteObject `xml:"Object"` | |||||
| } | |||||
| // deletedMultiObjectsResult container for MultiObjects Delete XML response | |||||
| type deleteMultiObjectsResult struct { | |||||
| XMLName xml.Name `xml:"DeleteResult"` | |||||
| DeletedObjects []deletedObject `xml:"Deleted"` | |||||
| UnDeletedObjects []nonDeletedObject `xml:"Error"` | |||||
| } | |||||
| @@ -0,0 +1,532 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * (C) 2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "encoding/binary" | |||||
| "encoding/xml" | |||||
| "errors" | |||||
| "fmt" | |||||
| "hash" | |||||
| "hash/crc32" | |||||
| "io" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // CSVFileHeaderInfo - is the parameter for whether to utilize headers. | |||||
| type CSVFileHeaderInfo string | |||||
| // Constants for file header info. | |||||
| const ( | |||||
| CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" | |||||
	CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
	CSVFileHeaderInfoUse    CSVFileHeaderInfo = "USE"
| ) | |||||
| // SelectCompressionType - is the parameter for what type of compression is | |||||
| // present | |||||
| type SelectCompressionType string | |||||
| // Constants for compression types under select API. | |||||
| const ( | |||||
| SelectCompressionNONE SelectCompressionType = "NONE" | |||||
	SelectCompressionGZIP SelectCompressionType = "GZIP"
	SelectCompressionBZIP SelectCompressionType = "BZIP2"
| ) | |||||
| // CSVQuoteFields - is the parameter for how CSV fields are quoted. | |||||
| type CSVQuoteFields string | |||||
| // Constants for csv quote styles. | |||||
| const ( | |||||
| CSVQuoteFieldsAlways CSVQuoteFields = "Always" | |||||
	CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
| ) | |||||
// QueryExpressionType - defines the syntax of the expression;
// currently only SQL is supported.
| type QueryExpressionType string | |||||
| // Constants for expression type. | |||||
| const ( | |||||
| QueryExpressionTypeSQL QueryExpressionType = "SQL" | |||||
| ) | |||||
| // JSONType determines json input serialization type. | |||||
| type JSONType string | |||||
| // Constants for JSONTypes. | |||||
| const ( | |||||
| JSONDocumentType JSONType = "DOCUMENT" | |||||
	JSONLinesType JSONType = "LINES"
| ) | |||||
| // ParquetInputOptions parquet input specific options | |||||
| type ParquetInputOptions struct{} | |||||
| // CSVInputOptions csv input specific options | |||||
| type CSVInputOptions struct { | |||||
| FileHeaderInfo CSVFileHeaderInfo | |||||
| RecordDelimiter string | |||||
| FieldDelimiter string | |||||
| QuoteCharacter string | |||||
| QuoteEscapeCharacter string | |||||
| Comments string | |||||
| } | |||||
| // CSVOutputOptions csv output specific options | |||||
| type CSVOutputOptions struct { | |||||
| QuoteFields CSVQuoteFields | |||||
| RecordDelimiter string | |||||
| FieldDelimiter string | |||||
| QuoteCharacter string | |||||
| QuoteEscapeCharacter string | |||||
| } | |||||
| // JSONInputOptions json input specific options | |||||
| type JSONInputOptions struct { | |||||
| Type JSONType | |||||
| } | |||||
| // JSONOutputOptions - json output specific options | |||||
| type JSONOutputOptions struct { | |||||
| RecordDelimiter string | |||||
| } | |||||
| // SelectObjectInputSerialization - input serialization parameters | |||||
| type SelectObjectInputSerialization struct { | |||||
| CompressionType SelectCompressionType | |||||
| Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` | |||||
| CSV *CSVInputOptions `xml:"CSV,omitempty"` | |||||
| JSON *JSONInputOptions `xml:"JSON,omitempty"` | |||||
| } | |||||
| // SelectObjectOutputSerialization - output serialization parameters. | |||||
| type SelectObjectOutputSerialization struct { | |||||
| CSV *CSVOutputOptions `xml:"CSV,omitempty"` | |||||
| JSON *JSONOutputOptions `xml:"JSON,omitempty"` | |||||
| } | |||||
| // SelectObjectOptions - represents the input select body | |||||
| type SelectObjectOptions struct { | |||||
| XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` | |||||
| ServerSideEncryption encrypt.ServerSide `xml:"-"` | |||||
| Expression string | |||||
| ExpressionType QueryExpressionType | |||||
| InputSerialization SelectObjectInputSerialization | |||||
| OutputSerialization SelectObjectOutputSerialization | |||||
| RequestProgress struct { | |||||
| Enabled bool | |||||
| } | |||||
| } | |||||
| // Header returns the http.Header representation of the SelectObject options. | |||||
| func (o SelectObjectOptions) Header() http.Header { | |||||
| headers := make(http.Header) | |||||
| if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { | |||||
| o.ServerSideEncryption.Marshal(headers) | |||||
| } | |||||
| return headers | |||||
| } | |||||
| // SelectObjectType - is the parameter which defines what type of object the | |||||
| // operation is being performed on. | |||||
| type SelectObjectType string | |||||
| // Constants for input data types. | |||||
| const ( | |||||
| SelectObjectTypeCSV SelectObjectType = "CSV" | |||||
	SelectObjectTypeJSON    SelectObjectType = "JSON"
	SelectObjectTypeParquet SelectObjectType = "Parquet"
| ) | |||||
| // preludeInfo is used for keeping track of necessary information from the | |||||
| // prelude. | |||||
| type preludeInfo struct { | |||||
| totalLen uint32 | |||||
| headerLen uint32 | |||||
| } | |||||
| // SelectResults is used for the streaming responses from the server. | |||||
| type SelectResults struct { | |||||
| pipeReader *io.PipeReader | |||||
| resp *http.Response | |||||
| stats *StatsMessage | |||||
| progress *ProgressMessage | |||||
| } | |||||
| // ProgressMessage is a struct for progress xml message. | |||||
| type ProgressMessage struct { | |||||
| XMLName xml.Name `xml:"Progress" json:"-"` | |||||
| StatsMessage | |||||
| } | |||||
| // StatsMessage is a struct for stat xml message. | |||||
| type StatsMessage struct { | |||||
| XMLName xml.Name `xml:"Stats" json:"-"` | |||||
| BytesScanned int64 | |||||
| BytesProcessed int64 | |||||
| BytesReturned int64 | |||||
| } | |||||
| // messageType represents the type of message. | |||||
| type messageType string | |||||
| const ( | |||||
| errorMsg messageType = "error" | |||||
	commonMsg messageType = "event"
| ) | |||||
| // eventType represents the type of event. | |||||
| type eventType string | |||||
| // list of event-types returned by Select API. | |||||
| const ( | |||||
| endEvent eventType = "End" | |||||
	recordsEvent  eventType = "Records"
	progressEvent eventType = "Progress"
	statsEvent    eventType = "Stats"
| ) | |||||
| // contentType represents content type of event. | |||||
| type contentType string | |||||
| const ( | |||||
| xmlContent contentType = "text/xml" | |||||
| ) | |||||
// SelectObjectContent is an implementation of the AWS S3 API documented at http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html.
| func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| selectReqBytes, err := xml.Marshal(opts) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("select", "") | |||||
| urlValues.Set("select-type", "2") | |||||
| // Execute POST on bucket/object. | |||||
| resp, err := c.executeMethod(ctx, "POST", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| queryValues: urlValues, | |||||
| customHeader: opts.Header(), | |||||
| contentMD5Base64: sumMD5Base64(selectReqBytes), | |||||
| contentSHA256Hex: sum256Hex(selectReqBytes), | |||||
| contentBody: bytes.NewReader(selectReqBytes), | |||||
| contentLength: int64(len(selectReqBytes)), | |||||
| }) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| if resp.StatusCode != http.StatusOK { | |||||
return nil, httpRespToErrorResponse(resp, bucketName, objectName)
| } | |||||
| pipeReader, pipeWriter := io.Pipe() | |||||
| streamer := &SelectResults{ | |||||
| resp: resp, | |||||
| stats: &StatsMessage{}, | |||||
| progress: &ProgressMessage{}, | |||||
| pipeReader: pipeReader, | |||||
| } | |||||
| streamer.start(pipeWriter) | |||||
| return streamer, nil | |||||
| } | |||||
| // Close - closes the underlying response body and the stream reader. | |||||
| func (s *SelectResults) Close() error { | |||||
| defer closeResponse(s.resp) | |||||
| return s.pipeReader.Close() | |||||
| } | |||||
| // Read - is a reader compatible implementation for SelectObjectContent records. | |||||
| func (s *SelectResults) Read(b []byte) (n int, err error) { | |||||
| return s.pipeReader.Read(b) | |||||
| } | |||||
| // Stats - information about a request's stats when processing is complete. | |||||
| func (s *SelectResults) Stats() *StatsMessage { | |||||
| return s.stats | |||||
| } | |||||
| // Progress - information about the progress of a request. | |||||
| func (s *SelectResults) Progress() *ProgressMessage { | |||||
| return s.progress | |||||
| } | |||||
// start spawns a goroutine that decodes the response body's event stream
// into individual events and forwards record payloads through the pipe.
| func (s *SelectResults) start(pipeWriter *io.PipeWriter) { | |||||
| go func() { | |||||
| for { | |||||
| var prelude preludeInfo | |||||
| var headers = make(http.Header) | |||||
| var err error | |||||
| // Create CRC code | |||||
| crc := crc32.New(crc32.IEEETable) | |||||
| crcReader := io.TeeReader(s.resp.Body, crc) | |||||
// Extract the prelude (12 bytes) into a struct to extract relevant information.
| prelude, err = processPrelude(crcReader, crc) | |||||
| if err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| // Extract the headers(variable bytes) into a struct to extract relevant information | |||||
| if prelude.headerLen > 0 { | |||||
| if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| } | |||||
| // Get the actual payload length so that the appropriate amount of | |||||
| // bytes can be read or parsed. | |||||
| payloadLen := prelude.PayloadLen() | |||||
| m := messageType(headers.Get("message-type")) | |||||
| switch m { | |||||
| case errorMsg: | |||||
| pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message"))) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| case commonMsg: | |||||
| // Get content-type of the payload. | |||||
| c := contentType(headers.Get("content-type")) | |||||
| // Get event type of the payload. | |||||
| e := eventType(headers.Get("event-type")) | |||||
| // Handle all supported events. | |||||
| switch e { | |||||
| case endEvent: | |||||
| pipeWriter.Close() | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| case recordsEvent: | |||||
| if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| case progressEvent: | |||||
| switch c { | |||||
| case xmlContent: | |||||
| if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| default: | |||||
| pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| case statsEvent: | |||||
| switch c { | |||||
| case xmlContent: | |||||
| if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| default: | |||||
| pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| } | |||||
| } | |||||
| // Ensures that the full message's CRC is correct and | |||||
| // that the message is not corrupted | |||||
| if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { | |||||
| pipeWriter.CloseWithError(err) | |||||
| closeResponse(s.resp) | |||||
| return | |||||
| } | |||||
| } | |||||
| }() | |||||
| } | |||||
| // PayloadLen is a function that calculates the length of the payload. | |||||
| func (p preludeInfo) PayloadLen() int64 { | |||||
| return int64(p.totalLen - p.headerLen - 16) | |||||
| } | |||||
| // processPrelude is the function that reads the 12 bytes of the prelude and | |||||
| // ensures the CRC is correct while also extracting relevant information into | |||||
// the struct.
| func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { | |||||
| var err error | |||||
| var pInfo = preludeInfo{} | |||||
| // reads total length of the message (first 4 bytes) | |||||
| pInfo.totalLen, err = extractUint32(prelude) | |||||
| if err != nil { | |||||
| return pInfo, err | |||||
| } | |||||
| // reads total header length of the message (2nd 4 bytes) | |||||
| pInfo.headerLen, err = extractUint32(prelude) | |||||
| if err != nil { | |||||
| return pInfo, err | |||||
| } | |||||
| // checks that the CRC is correct (3rd 4 bytes) | |||||
| preCRC := crc.Sum32() | |||||
| if err := checkCRC(prelude, preCRC); err != nil { | |||||
| return pInfo, err | |||||
| } | |||||
| return pInfo, nil | |||||
| } | |||||
| // extracts the relevant information from the Headers. | |||||
| func extractHeader(body io.Reader, myHeaders http.Header) error { | |||||
| for { | |||||
// extracts the first part of the header, the header type name.
| headerTypeName, err := extractHeaderType(body) | |||||
| if err != nil { | |||||
| // Since end of file, we have read all of our headers | |||||
| if err == io.EOF { | |||||
| break | |||||
| } | |||||
| return err | |||||
| } | |||||
// reads the header value type byte (always 7, i.e. string) and ignores it.
| extractUint8(body) | |||||
| headerValueName, err := extractHeaderValue(body) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| myHeaders.Set(headerTypeName, headerValueName) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // extractHeaderType extracts the first half of the header message, the header type. | |||||
| func extractHeaderType(body io.Reader) (string, error) { | |||||
// extracts a 1 byte integer (the header name length)
| headerNameLen, err := extractUint8(body) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| // extracts the string with the appropriate number of bytes | |||||
| headerName, err := extractString(body, int(headerNameLen)) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| return strings.TrimPrefix(headerName, ":"), nil | |||||
| } | |||||
// extractHeaderValue extracts the second half of the header message, the
// header value.
| func extractHeaderValue(body io.Reader) (string, error) { | |||||
| bodyLen, err := extractUint16(body) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| bodyName, err := extractString(body, int(bodyLen)) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| return bodyName, nil | |||||
| } | |||||
// extractString extracts a string of a given number of bytes from the reader.
func extractString(source io.Reader, lenBytes int) (string, error) {
	myVal := make([]byte, lenBytes)
	// io.ReadFull turns a short read into an error instead of
	// silently truncating the extracted string.
	_, err := io.ReadFull(source, myVal)
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| return string(myVal), nil | |||||
| } | |||||
| // extractUint32 extracts a 4 byte integer from the byte array. | |||||
| func extractUint32(r io.Reader) (uint32, error) { | |||||
| buf := make([]byte, 4) | |||||
| _, err := io.ReadFull(r, buf) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return binary.BigEndian.Uint32(buf), nil | |||||
| } | |||||
| // extractUint16 extracts a 2 byte integer from the byte array. | |||||
| func extractUint16(r io.Reader) (uint16, error) { | |||||
| buf := make([]byte, 2) | |||||
| _, err := io.ReadFull(r, buf) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return binary.BigEndian.Uint16(buf), nil | |||||
| } | |||||
| // extractUint8 extracts a 1 byte integer from the byte array. | |||||
| func extractUint8(r io.Reader) (uint8, error) { | |||||
| buf := make([]byte, 1) | |||||
| _, err := io.ReadFull(r, buf) | |||||
| if err != nil { | |||||
| return 0, err | |||||
| } | |||||
| return buf[0], nil | |||||
| } | |||||
| // checkCRC ensures that the CRC matches with the one from the reader. | |||||
| func checkCRC(r io.Reader, expect uint32) error { | |||||
| msgCRC, err := extractUint32(r) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| if msgCRC != expect { | |||||
| return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) | |||||
| } | |||||
| return nil | |||||
| } | |||||
| @@ -0,0 +1,185 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "net/http" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
// BucketExists verifies whether the bucket exists and you have permission to access it.
| func (c Client) BucketExists(bucketName string) (bool, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return false, err | |||||
| } | |||||
| // Execute HEAD on bucketName. | |||||
| resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| if ToErrorResponse(err).Code == "NoSuchBucket" { | |||||
| return false, nil | |||||
| } | |||||
| return false, err | |||||
| } | |||||
| if resp != nil { | |||||
| resperr := httpRespToErrorResponse(resp, bucketName, "") | |||||
| if ToErrorResponse(resperr).Code == "NoSuchBucket" { | |||||
| return false, nil | |||||
| } | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return false, httpRespToErrorResponse(resp, bucketName, "") | |||||
| } | |||||
| } | |||||
| return true, nil | |||||
| } | |||||
// List of header keys to be filtered out, usually
// from all S3 API http responses.
| var defaultFilterKeys = []string{ | |||||
| "Connection", | |||||
| "Transfer-Encoding", | |||||
| "Accept-Ranges", | |||||
| "Date", | |||||
| "Server", | |||||
| "Vary", | |||||
| "x-amz-bucket-region", | |||||
| "x-amz-request-id", | |||||
| "x-amz-id-2", | |||||
| "Content-Security-Policy", | |||||
| "X-Xss-Protection", | |||||
| // Add new headers to be ignored. | |||||
| } | |||||
| // Extract only necessary metadata header key/values by | |||||
| // filtering them out with a list of custom header keys. | |||||
| func extractObjMetadata(header http.Header) http.Header { | |||||
| filterKeys := append([]string{ | |||||
| "ETag", | |||||
| "Content-Length", | |||||
| "Last-Modified", | |||||
| "Content-Type", | |||||
| }, defaultFilterKeys...) | |||||
| return filterHeader(header, filterKeys) | |||||
| } | |||||
// StatObject verifies whether the object exists and you have permission to access it.
| func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| return c.statObject(context.Background(), bucketName, objectName, opts) | |||||
| } | |||||
| // Lower level API for statObject supporting pre-conditions and range headers. | |||||
| func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { | |||||
| // Input validation. | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| if err := s3utils.CheckValidObjectName(objectName); err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| // Execute HEAD on objectName. | |||||
| resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ | |||||
| bucketName: bucketName, | |||||
| objectName: objectName, | |||||
| contentSHA256Hex: emptySHA256Hex, | |||||
| customHeader: opts.Header(), | |||||
| }) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return ObjectInfo{}, err | |||||
| } | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { | |||||
| return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) | |||||
| } | |||||
| } | |||||
// Trim the surrounding double quotes from the ETag.
| md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") | |||||
| md5sum = strings.TrimSuffix(md5sum, "\"") | |||||
// Parse Content-Length if it exists.
| var size int64 = -1 | |||||
| contentLengthStr := resp.Header.Get("Content-Length") | |||||
| if contentLengthStr != "" { | |||||
| size, err = strconv.ParseInt(contentLengthStr, 10, 64) | |||||
| if err != nil { | |||||
| // Content-Length is not valid | |||||
| return ObjectInfo{}, ErrorResponse{ | |||||
| Code: "InternalError", | |||||
| Message: "Content-Length is invalid. " + reportIssue, | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| RequestID: resp.Header.Get("x-amz-request-id"), | |||||
| HostID: resp.Header.Get("x-amz-id-2"), | |||||
| Region: resp.Header.Get("x-amz-bucket-region"), | |||||
| } | |||||
| } | |||||
| } | |||||
// Parse Last-Modified, which uses the HTTP time format.
| date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) | |||||
| if err != nil { | |||||
| return ObjectInfo{}, ErrorResponse{ | |||||
| Code: "InternalError", | |||||
| Message: "Last-Modified time format is invalid. " + reportIssue, | |||||
| BucketName: bucketName, | |||||
| Key: objectName, | |||||
| RequestID: resp.Header.Get("x-amz-request-id"), | |||||
| HostID: resp.Header.Get("x-amz-id-2"), | |||||
| Region: resp.Header.Get("x-amz-bucket-region"), | |||||
| } | |||||
| } | |||||
| // Fetch content type if any present. | |||||
| contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) | |||||
| if contentType == "" { | |||||
| contentType = "application/octet-stream" | |||||
| } | |||||
| // Save object metadata info. | |||||
| return ObjectInfo{ | |||||
| ETag: md5sum, | |||||
| Key: objectName, | |||||
| Size: size, | |||||
| LastModified: date, | |||||
| ContentType: contentType, | |||||
| // Extract only the relevant header keys describing the object. | |||||
| // following function filters out a list of standard set of keys | |||||
| // which are not part of object metadata. | |||||
| Metadata: extractObjMetadata(resp.Header), | |||||
| }, nil | |||||
| } | |||||
| @@ -0,0 +1,898 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "bytes" | |||||
| "context" | |||||
| "crypto/md5" | |||||
| "crypto/sha256" | |||||
| "errors" | |||||
| "fmt" | |||||
| "hash" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "math/rand" | |||||
| "net" | |||||
| "net/http" | |||||
| "net/http/cookiejar" | |||||
| "net/http/httputil" | |||||
| "net/url" | |||||
| "os" | |||||
| "runtime" | |||||
| "strings" | |||||
| "sync" | |||||
| "time" | |||||
| "golang.org/x/net/publicsuffix" | |||||
| "github.com/minio/minio-go/pkg/credentials" | |||||
| "github.com/minio/minio-go/pkg/s3signer" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // Client implements Amazon S3 compatible methods. | |||||
| type Client struct { | |||||
| /// Standard options. | |||||
| // Parsed endpoint url provided by the user. | |||||
| endpointURL *url.URL | |||||
| // Holds various credential providers. | |||||
| credsProvider *credentials.Credentials | |||||
| // Custom signerType value overrides all credentials. | |||||
| overrideSignerType credentials.SignatureType | |||||
| // User supplied. | |||||
| appInfo struct { | |||||
| appName string | |||||
| appVersion string | |||||
| } | |||||
| // Indicate whether we are using https or not | |||||
| secure bool | |||||
| // Needs allocation. | |||||
| httpClient *http.Client | |||||
| bucketLocCache *bucketLocationCache | |||||
| // Advanced functionality. | |||||
| isTraceEnabled bool | |||||
| traceOutput io.Writer | |||||
| // S3 specific accelerated endpoint. | |||||
| s3AccelerateEndpoint string | |||||
| // Region endpoint | |||||
| region string | |||||
| // Random seed. | |||||
| random *rand.Rand | |||||
| // lookup indicates type of url lookup supported by server. If not specified, | |||||
| // default to Auto. | |||||
| lookup BucketLookupType | |||||
| } | |||||
| // Options for New method | |||||
| type Options struct { | |||||
| Creds *credentials.Credentials | |||||
| Secure bool | |||||
| Region string | |||||
| BucketLookup BucketLookupType | |||||
| // Add future fields here | |||||
| } | |||||
| // Global constants. | |||||
| const ( | |||||
| libraryName = "minio-go" | |||||
| libraryVersion = "v6.0.14" | |||||
| ) | |||||
// User Agent should always follow the below style.
| // Please open an issue to discuss any new changes here. | |||||
| // | |||||
| // Minio (OS; ARCH) LIB/VER APP/VER | |||||
| const ( | |||||
| libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") " | |||||
| libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion | |||||
| ) | |||||
| // BucketLookupType is type of url lookup supported by server. | |||||
| type BucketLookupType int | |||||
// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
| const ( | |||||
| BucketLookupAuto BucketLookupType = iota | |||||
| BucketLookupDNS | |||||
| BucketLookupPath | |||||
| ) | |||||
| // NewV2 - instantiate minio client with Amazon S3 signature version | |||||
| // '2' compatibility. | |||||
| func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { | |||||
| creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") | |||||
| clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| clnt.overrideSignerType = credentials.SignatureV2 | |||||
| return clnt, nil | |||||
| } | |||||
| // NewV4 - instantiate minio client with Amazon S3 signature version | |||||
| // '4' compatibility. | |||||
| func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { | |||||
| creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") | |||||
| clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| clnt.overrideSignerType = credentials.SignatureV4 | |||||
| return clnt, nil | |||||
| } | |||||
| // New - instantiate minio client, adds automatic verification of signature. | |||||
| func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { | |||||
| creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") | |||||
| clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Google cloud storage should be set to signature V2, force it if not. | |||||
| if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { | |||||
| clnt.overrideSignerType = credentials.SignatureV2 | |||||
| } | |||||
| // If Amazon S3 set to signature v4. | |||||
| if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { | |||||
| clnt.overrideSignerType = credentials.SignatureV4 | |||||
| } | |||||
| return clnt, nil | |||||
| } | |||||
| // NewWithCredentials - instantiate minio client with credentials provider | |||||
| // for retrieving credentials from various credentials provider such as | |||||
| // IAM, File, Env etc. | |||||
| func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { | |||||
| return privateNew(endpoint, creds, secure, region, BucketLookupAuto) | |||||
| } | |||||
| // NewWithRegion - instantiate minio client, with region configured. Unlike New(), | |||||
| // NewWithRegion avoids bucket-location lookup operations and it is slightly faster. | |||||
// Use this function when your application deals with a single region.
| func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { | |||||
| creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") | |||||
| return privateNew(endpoint, creds, secure, region, BucketLookupAuto) | |||||
| } | |||||
| // NewWithOptions - instantiate minio client with options | |||||
| func NewWithOptions(endpoint string, opts *Options) (*Client, error) { | |||||
| return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup) | |||||
| } | |||||
| // lockedRandSource provides protected rand source, implements rand.Source interface. | |||||
| type lockedRandSource struct { | |||||
| lk sync.Mutex | |||||
| src rand.Source | |||||
| } | |||||
| // Int63 returns a non-negative pseudo-random 63-bit integer as an int64. | |||||
| func (r *lockedRandSource) Int63() (n int64) { | |||||
| r.lk.Lock() | |||||
| n = r.src.Int63() | |||||
| r.lk.Unlock() | |||||
| return | |||||
| } | |||||
| // Seed uses the provided seed value to initialize the generator to a | |||||
| // deterministic state. | |||||
| func (r *lockedRandSource) Seed(seed int64) { | |||||
| r.lk.Lock() | |||||
| r.src.Seed(seed) | |||||
| r.lk.Unlock() | |||||
| } | |||||
// Redirect requests by re-signing the request.
| func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { | |||||
| if len(via) >= 5 { | |||||
| return errors.New("stopped after 5 redirects") | |||||
| } | |||||
| if len(via) == 0 { | |||||
| return nil | |||||
| } | |||||
| lastRequest := via[len(via)-1] | |||||
| var reAuth bool | |||||
| for attr, val := range lastRequest.Header { | |||||
| // if hosts do not match do not copy Authorization header | |||||
| if attr == "Authorization" && req.Host != lastRequest.Host { | |||||
| reAuth = true | |||||
| continue | |||||
| } | |||||
| if _, ok := req.Header[attr]; !ok { | |||||
| req.Header[attr] = val | |||||
| } | |||||
| } | |||||
| *c.endpointURL = *req.URL | |||||
| value, err := c.credsProvider.Get() | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| var ( | |||||
| signerType = value.SignerType | |||||
| accessKeyID = value.AccessKeyID | |||||
| secretAccessKey = value.SecretAccessKey | |||||
| sessionToken = value.SessionToken | |||||
| region = c.region | |||||
| ) | |||||
| // Custom signer set then override the behavior. | |||||
| if c.overrideSignerType != credentials.SignatureDefault { | |||||
| signerType = c.overrideSignerType | |||||
| } | |||||
| // If signerType returned by credentials helper is anonymous, | |||||
| // then do not sign regardless of signerType override. | |||||
| if value.SignerType == credentials.SignatureAnonymous { | |||||
| signerType = credentials.SignatureAnonymous | |||||
| } | |||||
| if reAuth { | |||||
| // Check if there is no region override, if not get it from the URL if possible. | |||||
| if region == "" { | |||||
| region = s3utils.GetRegionFromURL(*c.endpointURL) | |||||
| } | |||||
| switch { | |||||
| case signerType.IsV2(): | |||||
| return errors.New("signature V2 cannot support redirection") | |||||
| case signerType.IsV4(): | |||||
| req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) | |||||
| } | |||||
| } | |||||
| return nil | |||||
| } | |||||
| func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { | |||||
| // construct endpoint. | |||||
| endpointURL, err := getEndpointURL(endpoint, secure) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Initialize cookies to preserve server sent cookies if any and replay | |||||
| // them upon each request. | |||||
| jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // instantiate new Client. | |||||
| clnt := new(Client) | |||||
| // Save the credentials. | |||||
| clnt.credsProvider = creds | |||||
| // Remember whether we are using https or not | |||||
| clnt.secure = secure | |||||
| // Save endpoint URL, user agent for future uses. | |||||
| clnt.endpointURL = endpointURL | |||||
| // Instantiate http client and bucket location cache. | |||||
| clnt.httpClient = &http.Client{ | |||||
| Jar: jar, | |||||
| Transport: DefaultTransport, | |||||
| CheckRedirect: clnt.redirectHeaders, | |||||
| } | |||||
| // Sets custom region, if region is empty bucket location cache is used automatically. | |||||
| if region == "" { | |||||
| region = s3utils.GetRegionFromURL(*clnt.endpointURL) | |||||
| } | |||||
| clnt.region = region | |||||
| // Instantiate bucket location cache. | |||||
| clnt.bucketLocCache = newBucketLocationCache() | |||||
| // Introduce a new locked random seed. | |||||
| clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) | |||||
| // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined | |||||
| // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. | |||||
| clnt.lookup = lookup | |||||
| // Return. | |||||
| return clnt, nil | |||||
| } | |||||
| // SetAppInfo - add application details to user agent. | |||||
| func (c *Client) SetAppInfo(appName string, appVersion string) { | |||||
// if app name and version are not set, we do not set a new user agent.
| if appName != "" && appVersion != "" { | |||||
| c.appInfo = struct { | |||||
| appName string | |||||
| appVersion string | |||||
| }{} | |||||
| c.appInfo.appName = appName | |||||
| c.appInfo.appVersion = appVersion | |||||
| } | |||||
| } | |||||
| // SetCustomTransport - set new custom transport. | |||||
| func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { | |||||
| // Set this to override default transport | |||||
| // ``http.DefaultTransport``. | |||||
| // | |||||
| // This transport is usually needed for debugging OR to add your | |||||
// own custom TLS certificates on the client transport. For custom
// CAs and certs which are not part of the standard certificate
// authorities, follow this example:
| // | |||||
| // tr := &http.Transport{ | |||||
| // TLSClientConfig: &tls.Config{RootCAs: pool}, | |||||
| // DisableCompression: true, | |||||
| // } | |||||
| // api.SetCustomTransport(tr) | |||||
| // | |||||
| if c.httpClient != nil { | |||||
| c.httpClient.Transport = customHTTPTransport | |||||
| } | |||||
| } | |||||
| // TraceOn - enable HTTP tracing. | |||||
| func (c *Client) TraceOn(outputStream io.Writer) { | |||||
| // if outputStream is nil then default to os.Stdout. | |||||
| if outputStream == nil { | |||||
| outputStream = os.Stdout | |||||
| } | |||||
| // Sets a new output stream. | |||||
| c.traceOutput = outputStream | |||||
| // Enable tracing. | |||||
| c.isTraceEnabled = true | |||||
| } | |||||
| // TraceOff - disable HTTP tracing. | |||||
| func (c *Client) TraceOff() { | |||||
| // Disable tracing. | |||||
| c.isTraceEnabled = false | |||||
| } | |||||
| // SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your | |||||
// requests. This feature is specific to S3; for all other endpoints this
// function does nothing. To read further details on S3 transfer acceleration
// please visit -
| // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | |||||
| func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { | |||||
| if s3utils.IsAmazonEndpoint(*c.endpointURL) { | |||||
| c.s3AccelerateEndpoint = accelerateEndpoint | |||||
| } | |||||
| } | |||||
| // Hash materials provides relevant initialized hash algo writers | |||||
| // based on the expected signature type. | |||||
| // | |||||
| // - For signature v4 request if the connection is insecure compute only sha256. | |||||
| // - For signature v4 request if the connection is secure compute only md5. | |||||
| // - For anonymous request compute md5. | |||||
| func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) { | |||||
| hashSums = make(map[string][]byte) | |||||
| hashAlgos = make(map[string]hash.Hash) | |||||
| if c.overrideSignerType.IsV4() { | |||||
| if c.secure { | |||||
| hashAlgos["md5"] = md5.New() | |||||
| } else { | |||||
| hashAlgos["sha256"] = sha256.New() | |||||
| } | |||||
| } else { | |||||
| if c.overrideSignerType.IsAnonymous() { | |||||
| hashAlgos["md5"] = md5.New() | |||||
| } | |||||
| } | |||||
| return hashAlgos, hashSums | |||||
| } | |||||
| // requestMetadata - is container for all the values to make a request. | |||||
| type requestMetadata struct { | |||||
| // If set newRequest presigns the URL. | |||||
| presignURL bool | |||||
| // User supplied. | |||||
| bucketName string | |||||
| objectName string | |||||
| queryValues url.Values | |||||
| customHeader http.Header | |||||
| expires int64 | |||||
| // Generated by our internal code. | |||||
| bucketLocation string | |||||
| contentBody io.Reader | |||||
| contentLength int64 | |||||
| contentMD5Base64 string // carries base64 encoded md5sum | |||||
| contentSHA256Hex string // carries hex encoded sha256sum | |||||
| } | |||||
| // dumpHTTP - dump HTTP request and response. | |||||
| func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { | |||||
| // Starts http dump. | |||||
| _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Filter out Signature field from Authorization header. | |||||
| origAuth := req.Header.Get("Authorization") | |||||
| if origAuth != "" { | |||||
| req.Header.Set("Authorization", redactSignature(origAuth)) | |||||
| } | |||||
| // Only display request header. | |||||
| reqTrace, err := httputil.DumpRequestOut(req, false) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Write request to trace output. | |||||
| _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Only display response header. | |||||
| var respTrace []byte | |||||
| // For errors we make sure to dump response body as well. | |||||
| if resp.StatusCode != http.StatusOK && | |||||
| resp.StatusCode != http.StatusPartialContent && | |||||
| resp.StatusCode != http.StatusNoContent { | |||||
| respTrace, err = httputil.DumpResponse(resp, true) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } else { | |||||
| respTrace, err = httputil.DumpResponse(resp, false) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| } | |||||
| // Write response to trace output. | |||||
| _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Ends the http dump. | |||||
| _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| // Returns success. | |||||
| return nil | |||||
| } | |||||
| // do - execute http request. | |||||
| func (c Client) do(req *http.Request) (*http.Response, error) { | |||||
| resp, err := c.httpClient.Do(req) | |||||
| if err != nil { | |||||
| // Handle this specifically for now until future Golang versions fix this issue properly. | |||||
| if urlErr, ok := err.(*url.Error); ok { | |||||
| if strings.Contains(urlErr.Err.Error(), "EOF") { | |||||
| return nil, &url.Error{ | |||||
| Op: urlErr.Op, | |||||
| URL: urlErr.URL, | |||||
| Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), | |||||
| } | |||||
| } | |||||
| } | |||||
| return nil, err | |||||
| } | |||||
// Response cannot be nil; report an error if that is the case.
| if resp == nil { | |||||
| msg := "Response is empty. " + reportIssue | |||||
| return nil, ErrInvalidArgument(msg) | |||||
| } | |||||
| // If trace is enabled, dump http request and response. | |||||
| if c.isTraceEnabled { | |||||
| err = c.dumpHTTP(req, resp) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| return resp, nil | |||||
| } | |||||
| // List of success status. | |||||
| var successStatus = []int{ | |||||
| http.StatusOK, | |||||
| http.StatusNoContent, | |||||
| http.StatusPartialContent, | |||||
| } | |||||
| // executeMethod - instantiates a given method, and retries the | |||||
| // request upon any error up to maxRetries attempts in a binomially | |||||
// delayed manner using a standard backoff algorithm.
| func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { | |||||
| var isRetryable bool // Indicates if request can be retried. | |||||
| var bodySeeker io.Seeker // Extracted seeker from io.Reader. | |||||
| var reqRetry = MaxRetry // Indicates how many times we can retry the request | |||||
| if metadata.contentBody != nil { | |||||
| // Check if body is seekable then it is retryable. | |||||
| bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) | |||||
| switch bodySeeker { | |||||
| case os.Stdin, os.Stdout, os.Stderr: | |||||
| isRetryable = false | |||||
| } | |||||
| // Retry only when reader is seekable | |||||
| if !isRetryable { | |||||
| reqRetry = 1 | |||||
| } | |||||
| // Figure out if the body can be closed - if yes | |||||
| // we will definitely close it upon the function | |||||
| // return. | |||||
| bodyCloser, ok := metadata.contentBody.(io.Closer) | |||||
| if ok { | |||||
| defer bodyCloser.Close() | |||||
| } | |||||
| } | |||||
| // Create a done channel to control 'newRetryTimer' go routine. | |||||
| doneCh := make(chan struct{}, 1) | |||||
| // Indicate to our routine to exit cleanly upon return. | |||||
| defer close(doneCh) | |||||
// Blank identifier is kept here on purpose since 'range' without
| // blank identifiers is only supported since go1.4 | |||||
| // https://golang.org/doc/go1.4#forrange. | |||||
| for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { | |||||
| // Retry executes the following function body if request has an | |||||
| // error until maxRetries have been exhausted, retry attempts are | |||||
| // performed after waiting for a given period of time in a | |||||
| // binomial fashion. | |||||
| if isRetryable { | |||||
| // Seek back to beginning for each attempt. | |||||
| if _, err = bodySeeker.Seek(0, 0); err != nil { | |||||
| // If seek failed, no need to retry. | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| // Instantiate a new request. | |||||
| var req *http.Request | |||||
| req, err = c.newRequest(method, metadata) | |||||
| if err != nil { | |||||
| errResponse := ToErrorResponse(err) | |||||
| if isS3CodeRetryable(errResponse.Code) { | |||||
| continue // Retry. | |||||
| } | |||||
| return nil, err | |||||
| } | |||||
| // Add context to request | |||||
| req = req.WithContext(ctx) | |||||
| // Initiate the request. | |||||
| res, err = c.do(req) | |||||
| if err != nil { | |||||
| // For supported http requests errors verify. | |||||
| if isHTTPReqErrorRetryable(err) { | |||||
| continue // Retry. | |||||
| } | |||||
// For other errors, return here; no need to retry.
| return nil, err | |||||
| } | |||||
| // For any known successful http status, return quickly. | |||||
| for _, httpStatus := range successStatus { | |||||
| if httpStatus == res.StatusCode { | |||||
| return res, nil | |||||
| } | |||||
| } | |||||
| // Read the body to be saved later. | |||||
| errBodyBytes, err := ioutil.ReadAll(res.Body) | |||||
| // res.Body should be closed | |||||
| closeResponse(res) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Save the body. | |||||
| errBodySeeker := bytes.NewReader(errBodyBytes) | |||||
| res.Body = ioutil.NopCloser(errBodySeeker) | |||||
// For errors, verify whether the response is retryable; otherwise fail quickly.
| errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) | |||||
| // Save the body back again. | |||||
| errBodySeeker.Seek(0, 0) // Seek back to starting point. | |||||
| res.Body = ioutil.NopCloser(errBodySeeker) | |||||
// If the bucket region is set in the error response and the error
// code indicates an invalid region, we can retry the request with
// the new region.
//
// Additionally, we should only retry if both bucketLocation and the
// custom region are empty.
| if metadata.bucketLocation == "" && c.region == "" { | |||||
| if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" { | |||||
| if metadata.bucketName != "" && errResponse.Region != "" { | |||||
// Consult the location cache only when bucketName is present.
if _, ok := c.bucketLocCache.Get(metadata.bucketName); ok {
| c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) | |||||
| continue // Retry. | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| // Verify if error response code is retryable. | |||||
| if isS3CodeRetryable(errResponse.Code) { | |||||
| continue // Retry. | |||||
| } | |||||
| // Verify if http status code is retryable. | |||||
| if isHTTPStatusRetryable(res.StatusCode) { | |||||
| continue // Retry. | |||||
| } | |||||
| // For all other cases break out of the retry loop. | |||||
| break | |||||
| } | |||||
| return res, err | |||||
| } | |||||
| // newRequest - instantiate a new HTTP request for a given method. | |||||
| func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { | |||||
// If no method is supplied, default to 'POST'.
| if method == "" { | |||||
| method = "POST" | |||||
| } | |||||
| location := metadata.bucketLocation | |||||
| if location == "" { | |||||
| if metadata.bucketName != "" { | |||||
| // Gather location only if bucketName is present. | |||||
| location, err = c.getBucketLocation(metadata.bucketName) | |||||
| if err != nil { | |||||
| if ToErrorResponse(err).Code != "AccessDenied" { | |||||
| return nil, err | |||||
| } | |||||
| } | |||||
| // Upon AccessDenied error on fetching bucket location, default | |||||
| // to possible locations based on endpoint URL. This can usually | |||||
| // happen when GetBucketLocation() is disabled using IAM policies. | |||||
| } | |||||
| if location == "" { | |||||
| location = getDefaultLocation(*c.endpointURL, c.region) | |||||
| } | |||||
| } | |||||
// Check whether the target URL supports virtual host style.
| isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) | |||||
| // Construct a new target URL. | |||||
| targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Initialize a new HTTP request for the method. | |||||
| req, err = http.NewRequest(method, targetURL.String(), nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Get credentials from the configured credentials provider. | |||||
| value, err := c.credsProvider.Get() | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| var ( | |||||
| signerType = value.SignerType | |||||
| accessKeyID = value.AccessKeyID | |||||
| secretAccessKey = value.SecretAccessKey | |||||
| sessionToken = value.SessionToken | |||||
| ) | |||||
// If a custom signer type is set, it overrides the default behavior.
| if c.overrideSignerType != credentials.SignatureDefault { | |||||
| signerType = c.overrideSignerType | |||||
| } | |||||
| // If signerType returned by credentials helper is anonymous, | |||||
| // then do not sign regardless of signerType override. | |||||
| if value.SignerType == credentials.SignatureAnonymous { | |||||
| signerType = credentials.SignatureAnonymous | |||||
| } | |||||
// Generate a presigned URL if needed and return right away.
| if metadata.expires != 0 && metadata.presignURL { | |||||
| if signerType.IsAnonymous() { | |||||
| return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") | |||||
| } | |||||
| if signerType.IsV2() { | |||||
| // Presign URL with signature v2. | |||||
| req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) | |||||
| } else if signerType.IsV4() { | |||||
| // Presign URL with signature v4. | |||||
| req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) | |||||
| } | |||||
| return req, nil | |||||
| } | |||||
| // Set 'User-Agent' header for the request. | |||||
| c.setUserAgent(req) | |||||
| // Set all headers. | |||||
| for k, v := range metadata.customHeader { | |||||
| req.Header.Set(k, v[0]) | |||||
| } | |||||
// Go net/http notoriously closes the request body:
// "The request Body, if non-nil, will be closed by the underlying Transport, even on errors."
// This can cause underlying *os.File seekers to fail; avoid that by
// wrapping the body in a no-op closer.
| if metadata.contentLength == 0 { | |||||
| req.Body = nil | |||||
| } else { | |||||
| req.Body = ioutil.NopCloser(metadata.contentBody) | |||||
| } | |||||
| // Set incoming content-length. | |||||
| req.ContentLength = metadata.contentLength | |||||
| if req.ContentLength <= -1 { | |||||
| // For unknown content length, we upload using transfer-encoding: chunked. | |||||
| req.TransferEncoding = []string{"chunked"} | |||||
| } | |||||
| // set md5Sum for content protection. | |||||
| if len(metadata.contentMD5Base64) > 0 { | |||||
| req.Header.Set("Content-Md5", metadata.contentMD5Base64) | |||||
| } | |||||
| // For anonymous requests just return. | |||||
| if signerType.IsAnonymous() { | |||||
| return req, nil | |||||
| } | |||||
| switch { | |||||
| case signerType.IsV2(): | |||||
| // Add signature version '2' authorization header. | |||||
| req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) | |||||
| case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: | |||||
// The streaming signature is used by default for a PUT object request.
// Additionally, we check whether the client was initialized as secure;
// if so, the streaming signature is not needed.
| req = s3signer.StreamingSignV4(req, accessKeyID, | |||||
| secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) | |||||
| default: | |||||
| // Set sha256 sum for signature calculation only with signature version '4'. | |||||
| shaHeader := unsignedPayload | |||||
| if metadata.contentSHA256Hex != "" { | |||||
| shaHeader = metadata.contentSHA256Hex | |||||
| } | |||||
| req.Header.Set("X-Amz-Content-Sha256", shaHeader) | |||||
| // Add signature version '4' authorization header. | |||||
| req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) | |||||
| } | |||||
| // Return request. | |||||
| return req, nil | |||||
| } | |||||
// setUserAgent - sets the 'User-Agent' header on the request.
| func (c Client) setUserAgent(req *http.Request) { | |||||
| req.Header.Set("User-Agent", libraryUserAgent) | |||||
| if c.appInfo.appName != "" && c.appInfo.appVersion != "" { | |||||
| req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) | |||||
| } | |||||
| } | |||||
// makeTargetURL makes a new target URL.
| func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { | |||||
| host := c.endpointURL.Host | |||||
| // For Amazon S3 endpoint, try to fetch location based endpoint. | |||||
| if s3utils.IsAmazonEndpoint(*c.endpointURL) { | |||||
| if c.s3AccelerateEndpoint != "" && bucketName != "" { | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | |||||
| // Disable transfer acceleration for non-compliant bucket names. | |||||
| if strings.Contains(bucketName, ".") { | |||||
| return nil, ErrTransferAccelerationBucket(bucketName) | |||||
| } | |||||
| // If transfer acceleration is requested set new host. | |||||
| // For more details about enabling transfer acceleration read here. | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html | |||||
| host = c.s3AccelerateEndpoint | |||||
| } else { | |||||
| // Do not change the host if the endpoint URL is a FIPS S3 endpoint. | |||||
| if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { | |||||
| // Fetch new host based on the bucket location. | |||||
| host = getS3Endpoint(bucketLocation) | |||||
| } | |||||
| } | |||||
| } | |||||
| // Save scheme. | |||||
| scheme := c.endpointURL.Scheme | |||||
// Strip ports 80 and 443 so we won't send them in the Host header.
// Browsers and curl automatically strip :80 and :443 from generated
// presigned URLs, which would otherwise cause a signature mismatch.
| if h, p, err := net.SplitHostPort(host); err == nil { | |||||
| if scheme == "http" && p == "80" || scheme == "https" && p == "443" { | |||||
| host = h | |||||
| } | |||||
| } | |||||
| urlStr := scheme + "://" + host + "/" | |||||
| // Make URL only if bucketName is available, otherwise use the | |||||
| // endpoint URL. | |||||
| if bucketName != "" { | |||||
// If the endpoint supports virtual host style, always use it.
// Currently only S3 and Google Cloud Storage support virtual
// host style.
| if isVirtualHostStyle { | |||||
| urlStr = scheme + "://" + bucketName + "." + host + "/" | |||||
| if objectName != "" { | |||||
| urlStr = urlStr + s3utils.EncodePath(objectName) | |||||
| } | |||||
| } else { | |||||
// If not, fall back to path style.
| urlStr = urlStr + bucketName + "/" | |||||
| if objectName != "" { | |||||
| urlStr = urlStr + s3utils.EncodePath(objectName) | |||||
| } | |||||
| } | |||||
| } | |||||
| // If there are any query values, add them to the end. | |||||
| if len(queryValues) > 0 { | |||||
| urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) | |||||
| } | |||||
| return url.Parse(urlStr) | |||||
| } | |||||
| // returns true if virtual hosted style requests are to be used. | |||||
| func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { | |||||
| if bucketName == "" { | |||||
| return false | |||||
| } | |||||
| if c.lookup == BucketLookupDNS { | |||||
| return true | |||||
| } | |||||
| if c.lookup == BucketLookupPath { | |||||
| return false | |||||
| } | |||||
// Default to virtual host style only for Amazon/Google storage. In all
// other cases use path style requests.
| return s3utils.IsVirtualHostSupported(url, bucketName) | |||||
| } | |||||
| @@ -0,0 +1,39 @@ | |||||
| # version format | |||||
| version: "{build}" | |||||
| # Operating system (build VM template) | |||||
| os: Windows Server 2012 R2 | |||||
| clone_folder: c:\gopath\src\github.com\minio\minio-go | |||||
| # environment variables | |||||
| environment: | |||||
| GOPATH: c:\gopath | |||||
| GO15VENDOREXPERIMENT: 1 | |||||
| # scripts that run after cloning repository | |||||
| install: | |||||
| - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% | |||||
| - go version | |||||
| - go env | |||||
| - go get -u golang.org/x/lint/golint | |||||
| - go get -u github.com/remyoudompheng/go-misc/deadcode | |||||
| - go get -u github.com/gordonklaus/ineffassign | |||||
| - go get -u golang.org/x/crypto/argon2 | |||||
| - go get -t ./... | |||||
| # to run your custom scripts instead of automatic MSBuild | |||||
| build_script: | |||||
| - go vet ./... | |||||
| - gofmt -s -l . | |||||
| - golint -set_exit_status github.com/minio/minio-go... | |||||
| - deadcode | |||||
| - ineffassign . | |||||
| - go test -short -v | |||||
| - go test -short -race -v | |||||
| # to disable automatic tests | |||||
| test: off | |||||
| # to disable deployment | |||||
| deploy: off | |||||
| @@ -0,0 +1,221 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "net/http" | |||||
| "net/url" | |||||
| "path" | |||||
| "sync" | |||||
| "github.com/minio/minio-go/pkg/credentials" | |||||
| "github.com/minio/minio-go/pkg/s3signer" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // bucketLocationCache - Provides simple mechanism to hold bucket | |||||
| // locations in memory. | |||||
| type bucketLocationCache struct { | |||||
| // mutex is used for handling the concurrent | |||||
| // read/write requests for cache. | |||||
| sync.RWMutex | |||||
| // items holds the cached bucket locations. | |||||
| items map[string]string | |||||
| } | |||||
| // newBucketLocationCache - Provides a new bucket location cache to be | |||||
| // used internally with the client object. | |||||
| func newBucketLocationCache() *bucketLocationCache { | |||||
| return &bucketLocationCache{ | |||||
| items: make(map[string]string), | |||||
| } | |||||
| } | |||||
| // Get - Returns a value of a given key if it exists. | |||||
| func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { | |||||
| r.RLock() | |||||
| defer r.RUnlock() | |||||
| location, ok = r.items[bucketName] | |||||
| return | |||||
| } | |||||
| // Set - Will persist a value into cache. | |||||
| func (r *bucketLocationCache) Set(bucketName string, location string) { | |||||
| r.Lock() | |||||
| defer r.Unlock() | |||||
| r.items[bucketName] = location | |||||
| } | |||||
| // Delete - Deletes a bucket name from cache. | |||||
| func (r *bucketLocationCache) Delete(bucketName string) { | |||||
| r.Lock() | |||||
| defer r.Unlock() | |||||
| delete(r.items, bucketName) | |||||
| } | |||||
// GetBucketLocation - gets the location for the bucket name from the
// location cache; if absent, fetches it freshly with a new request.
| func (c Client) GetBucketLocation(bucketName string) (string, error) { | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return "", err | |||||
| } | |||||
| return c.getBucketLocation(bucketName) | |||||
| } | |||||
// getBucketLocation - Gets the location for the bucketName from the location
// map cache; if absent, fetches it freshly with a new request.
| func (c Client) getBucketLocation(bucketName string) (string, error) { | |||||
| if err := s3utils.CheckValidBucketName(bucketName); err != nil { | |||||
| return "", err | |||||
| } | |||||
// If the region is set, there is no need to fetch the bucket location.
| if c.region != "" { | |||||
| return c.region, nil | |||||
| } | |||||
| if location, ok := c.bucketLocCache.Get(bucketName); ok { | |||||
| return location, nil | |||||
| } | |||||
| // Initialize a new request. | |||||
| req, err := c.getBucketLocationRequest(bucketName) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| // Initiate the request. | |||||
| resp, err := c.do(req) | |||||
| defer closeResponse(resp) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| location, err := processBucketLocationResponse(resp, bucketName) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| c.bucketLocCache.Set(bucketName, location) | |||||
| return location, nil | |||||
| } | |||||
| // processes the getBucketLocation http response from the server. | |||||
| func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { | |||||
| if resp != nil { | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| err = httpRespToErrorResponse(resp, bucketName, "") | |||||
| errResp := ToErrorResponse(err) | |||||
| // For access denied error, it could be an anonymous | |||||
| // request. Move forward and let the top level callers | |||||
| // succeed if possible based on their policy. | |||||
| if errResp.Code == "AccessDenied" { | |||||
| return "us-east-1", nil | |||||
| } | |||||
| return "", err | |||||
| } | |||||
| } | |||||
| // Extract location. | |||||
| var locationConstraint string | |||||
| err = xmlDecoder(resp.Body, &locationConstraint) | |||||
| if err != nil { | |||||
| return "", err | |||||
| } | |||||
| location := locationConstraint | |||||
// An empty location defaults to 'us-east-1'.
| if location == "" { | |||||
| location = "us-east-1" | |||||
| } | |||||
// A location of 'EU' is converted to the meaningful 'eu-west-1'.
| if location == "EU" { | |||||
| location = "eu-west-1" | |||||
| } | |||||
// Return the resolved location; the caller caches it.
| return location, nil | |||||
| } | |||||
// getBucketLocationRequest - Creates a new getBucketLocation request.
| func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { | |||||
| // Set location query. | |||||
| urlValues := make(url.Values) | |||||
| urlValues.Set("location", "") | |||||
// GetBucketLocation requests are always sent path style.
| targetURL := c.endpointURL | |||||
| targetURL.Path = path.Join(bucketName, "") + "/" | |||||
| targetURL.RawQuery = urlValues.Encode() | |||||
| // Get a new HTTP request for the method. | |||||
| req, err := http.NewRequest("GET", targetURL.String(), nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| // Set UserAgent for the request. | |||||
| c.setUserAgent(req) | |||||
| // Get credentials from the configured credentials provider. | |||||
| value, err := c.credsProvider.Get() | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| var ( | |||||
| signerType = value.SignerType | |||||
| accessKeyID = value.AccessKeyID | |||||
| secretAccessKey = value.SecretAccessKey | |||||
| sessionToken = value.SessionToken | |||||
| ) | |||||
// If a custom signer type is set, it overrides the default behavior.
| if c.overrideSignerType != credentials.SignatureDefault { | |||||
| signerType = c.overrideSignerType | |||||
| } | |||||
| // If signerType returned by credentials helper is anonymous, | |||||
| // then do not sign regardless of signerType override. | |||||
| if value.SignerType == credentials.SignatureAnonymous { | |||||
| signerType = credentials.SignatureAnonymous | |||||
| } | |||||
| if signerType.IsAnonymous() { | |||||
| return req, nil | |||||
| } | |||||
| if signerType.IsV2() { | |||||
// GetBucketLocation calls should always be path style
| isVirtualHost := false | |||||
| req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) | |||||
| return req, nil | |||||
| } | |||||
| // Set sha256 sum for signature calculation only with signature version '4'. | |||||
| contentSha256 := emptySHA256Hex | |||||
| if c.secure { | |||||
| contentSha256 = unsignedPayload | |||||
| } | |||||
| req.Header.Set("X-Amz-Content-Sha256", contentSha256) | |||||
| req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") | |||||
| return req, nil | |||||
| } | |||||
| @@ -0,0 +1,273 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "encoding/xml" | |||||
| "github.com/minio/minio-go/pkg/set" | |||||
| ) | |||||
// NotificationEventType is an S3 notification event associated with the bucket notification configuration
| type NotificationEventType string | |||||
// The role of each event type is described at:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
| const ( | |||||
| ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*" | |||||
| ObjectCreatedPut = "s3:ObjectCreated:Put" | |||||
| ObjectCreatedPost = "s3:ObjectCreated:Post" | |||||
| ObjectCreatedCopy = "s3:ObjectCreated:Copy" | |||||
| ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" | |||||
| ObjectAccessedGet = "s3:ObjectAccessed:Get" | |||||
| ObjectAccessedHead = "s3:ObjectAccessed:Head" | |||||
| ObjectAccessedAll = "s3:ObjectAccessed:*" | |||||
| ObjectRemovedAll = "s3:ObjectRemoved:*" | |||||
| ObjectRemovedDelete = "s3:ObjectRemoved:Delete" | |||||
| ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" | |||||
| ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" | |||||
| ) | |||||
| // FilterRule - child of S3Key, a tag in the notification xml which | |||||
| // carries suffix/prefix filters | |||||
| type FilterRule struct { | |||||
| Name string `xml:"Name"` | |||||
| Value string `xml:"Value"` | |||||
| } | |||||
| // S3Key - child of Filter, a tag in the notification xml which | |||||
| // carries suffix/prefix filters | |||||
| type S3Key struct { | |||||
| FilterRules []FilterRule `xml:"FilterRule,omitempty"` | |||||
| } | |||||
| // Filter - a tag in the notification xml structure which carries | |||||
| // suffix/prefix filters | |||||
| type Filter struct { | |||||
| S3Key S3Key `xml:"S3Key,omitempty"` | |||||
| } | |||||
// Arn - holds ARN information that will be sent to the web service;
// an ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
| type Arn struct { | |||||
| Partition string | |||||
| Service string | |||||
| Region string | |||||
| AccountID string | |||||
| Resource string | |||||
| } | |||||
| // NewArn creates new ARN based on the given partition, service, region, account id and resource | |||||
| func NewArn(partition, service, region, accountID, resource string) Arn { | |||||
| return Arn{Partition: partition, | |||||
| Service: service, | |||||
| Region: region, | |||||
| AccountID: accountID, | |||||
| Resource: resource} | |||||
| } | |||||
// String returns the string format of the ARN
| func (arn Arn) String() string { | |||||
| return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource | |||||
| } | |||||
| // NotificationConfig - represents one single notification configuration | |||||
| // such as topic, queue or lambda configuration. | |||||
| type NotificationConfig struct { | |||||
| ID string `xml:"Id,omitempty"` | |||||
| Arn Arn `xml:"-"` | |||||
| Events []NotificationEventType `xml:"Event"` | |||||
| Filter *Filter `xml:"Filter,omitempty"` | |||||
| } | |||||
| // NewNotificationConfig creates one notification config and sets the given ARN | |||||
| func NewNotificationConfig(arn Arn) NotificationConfig { | |||||
| return NotificationConfig{Arn: arn, Filter: &Filter{}} | |||||
| } | |||||
// AddEvents adds one or more events to the current notification config
| func (t *NotificationConfig) AddEvents(events ...NotificationEventType) { | |||||
| t.Events = append(t.Events, events...) | |||||
| } | |||||
| // AddFilterSuffix sets the suffix configuration to the current notification config | |||||
| func (t *NotificationConfig) AddFilterSuffix(suffix string) { | |||||
| if t.Filter == nil { | |||||
| t.Filter = &Filter{} | |||||
| } | |||||
| newFilterRule := FilterRule{Name: "suffix", Value: suffix} | |||||
// Replace any existing suffix rule, otherwise append a new one
| for index := range t.Filter.S3Key.FilterRules { | |||||
| if t.Filter.S3Key.FilterRules[index].Name == "suffix" { | |||||
| t.Filter.S3Key.FilterRules[index] = newFilterRule | |||||
| return | |||||
| } | |||||
| } | |||||
| t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) | |||||
| } | |||||
| // AddFilterPrefix sets the prefix configuration to the current notification config | |||||
| func (t *NotificationConfig) AddFilterPrefix(prefix string) { | |||||
| if t.Filter == nil { | |||||
| t.Filter = &Filter{} | |||||
| } | |||||
| newFilterRule := FilterRule{Name: "prefix", Value: prefix} | |||||
// Replace any existing prefix rule, otherwise append a new one
| for index := range t.Filter.S3Key.FilterRules { | |||||
| if t.Filter.S3Key.FilterRules[index].Name == "prefix" { | |||||
| t.Filter.S3Key.FilterRules[index] = newFilterRule | |||||
| return | |||||
| } | |||||
| } | |||||
| t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) | |||||
| } | |||||
| // TopicConfig carries one single topic notification configuration | |||||
| type TopicConfig struct { | |||||
| NotificationConfig | |||||
| Topic string `xml:"Topic"` | |||||
| } | |||||
| // QueueConfig carries one single queue notification configuration | |||||
| type QueueConfig struct { | |||||
| NotificationConfig | |||||
| Queue string `xml:"Queue"` | |||||
| } | |||||
| // LambdaConfig carries one single cloudfunction notification configuration | |||||
| type LambdaConfig struct { | |||||
| NotificationConfig | |||||
| Lambda string `xml:"CloudFunction"` | |||||
| } | |||||
| // BucketNotification - the struct that represents the whole XML to be sent to the web service | |||||
| type BucketNotification struct { | |||||
| XMLName xml.Name `xml:"NotificationConfiguration"` | |||||
| LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` | |||||
| TopicConfigs []TopicConfig `xml:"TopicConfiguration"` | |||||
| QueueConfigs []QueueConfig `xml:"QueueConfiguration"` | |||||
| } | |||||
| // AddTopic adds a given topic config to the general bucket notification config | |||||
| func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { | |||||
| newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} | |||||
| for _, n := range b.TopicConfigs { | |||||
| // If new config matches existing one | |||||
| if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { | |||||
| existingConfig := set.NewStringSet() | |||||
| for _, v := range n.Events { | |||||
| existingConfig.Add(string(v)) | |||||
| } | |||||
| newConfig := set.NewStringSet() | |||||
| for _, v := range topicConfig.Events { | |||||
| newConfig.Add(string(v)) | |||||
| } | |||||
| if !newConfig.Intersection(existingConfig).IsEmpty() { | |||||
| return false | |||||
| } | |||||
| } | |||||
| } | |||||
| b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) | |||||
| return true | |||||
| } | |||||
| // AddQueue adds a given queue config to the general bucket notification config | |||||
| func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { | |||||
| newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} | |||||
| for _, n := range b.QueueConfigs { | |||||
| if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { | |||||
| existingConfig := set.NewStringSet() | |||||
| for _, v := range n.Events { | |||||
| existingConfig.Add(string(v)) | |||||
| } | |||||
| newConfig := set.NewStringSet() | |||||
| for _, v := range queueConfig.Events { | |||||
| newConfig.Add(string(v)) | |||||
| } | |||||
| if !newConfig.Intersection(existingConfig).IsEmpty() { | |||||
| return false | |||||
| } | |||||
| } | |||||
| } | |||||
| b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) | |||||
| return true | |||||
| } | |||||
| // AddLambda adds a given lambda config to the general bucket notification config | |||||
| func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { | |||||
| newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} | |||||
| for _, n := range b.LambdaConfigs { | |||||
| if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { | |||||
| existingConfig := set.NewStringSet() | |||||
| for _, v := range n.Events { | |||||
| existingConfig.Add(string(v)) | |||||
| } | |||||
| newConfig := set.NewStringSet() | |||||
| for _, v := range lambdaConfig.Events { | |||||
| newConfig.Add(string(v)) | |||||
| } | |||||
| if !newConfig.Intersection(existingConfig).IsEmpty() { | |||||
| return false | |||||
| } | |||||
| } | |||||
| } | |||||
| b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) | |||||
| return true | |||||
| } | |||||
| // RemoveTopicByArn removes all topic configurations that match the exact specified ARN | |||||
| func (b *BucketNotification) RemoveTopicByArn(arn Arn) { | |||||
| var topics []TopicConfig | |||||
| for _, topic := range b.TopicConfigs { | |||||
| if topic.Topic != arn.String() { | |||||
| topics = append(topics, topic) | |||||
| } | |||||
| } | |||||
| b.TopicConfigs = topics | |||||
| } | |||||
| // RemoveQueueByArn removes all queue configurations that match the exact specified ARN | |||||
| func (b *BucketNotification) RemoveQueueByArn(arn Arn) { | |||||
| var queues []QueueConfig | |||||
| for _, queue := range b.QueueConfigs { | |||||
| if queue.Queue != arn.String() { | |||||
| queues = append(queues, queue) | |||||
| } | |||||
| } | |||||
| b.QueueConfigs = queues | |||||
| } | |||||
| // RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN | |||||
| func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { | |||||
| var lambdas []LambdaConfig | |||||
| for _, lambda := range b.LambdaConfigs { | |||||
| if lambda.Lambda != arn.String() { | |||||
| lambdas = append(lambdas, lambda) | |||||
| } | |||||
| } | |||||
| b.LambdaConfigs = lambdas | |||||
| } | |||||
| @@ -0,0 +1,62 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| /// Multipart upload defaults. | |||||
| // absMinPartSize - absolute minimum part size (5 MiB) below which | |||||
| // a part in a multipart upload may not be uploaded. | |||||
| const absMinPartSize = 1024 * 1024 * 5 | |||||
| // minPartSize - minimum part size 64MiB per object after which | |||||
| // putObject behaves internally as multipart. | |||||
| const minPartSize = 1024 * 1024 * 64 | |||||
| // maxPartsCount - maximum number of parts for a single multipart session. | |||||
| const maxPartsCount = 10000 | |||||
| // maxPartSize - maximum part size 5GiB for a single multipart upload | |||||
| // operation. | |||||
| const maxPartSize = 1024 * 1024 * 1024 * 5 | |||||
| // maxSinglePutObjectSize - maximum size 5GiB of object per PUT | |||||
| // operation. | |||||
| const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 | |||||
| // maxMultipartPutObjectSize - maximum size 5TiB of object for | |||||
| // Multipart operation. | |||||
| const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 | |||||
| // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when | |||||
| // we don't want to sign the request payload | |||||
| const unsignedPayload = "UNSIGNED-PAYLOAD" | |||||
| // Total number of parallel workers used for multipart operation. | |||||
| const totalWorkers = 4 | |||||
| // Signature related constants. | |||||
| const ( | |||||
| signV4Algorithm = "AWS4-HMAC-SHA256" | |||||
| iso8601DateFormat = "20060102T150405Z" | |||||
| ) | |||||
| // Storage class header constant. | |||||
| const amzStorageClass = "X-Amz-Storage-Class" | |||||
| // Website redirect location header constant | |||||
| const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" | |||||
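// A minimal sketch (not part of the library) showing how the limits above
// interact: a hypothetical 100 GiB object uploaded at the 64 MiB minimum
// part size needs 1600 parts, comfortably below maxPartsCount.
func examplePartCount() int64 {
	objectSize := int64(100) * 1024 * 1024 * 1024 // 100 GiB, hypothetical
	parts := objectSize / minPartSize
	if objectSize%minPartSize != 0 {
		parts++ // account for a final short part
	}
	return parts // 1600
}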
| @@ -0,0 +1,153 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "context" | |||||
| "io" | |||||
| "strings" | |||||
| "github.com/minio/minio-go/pkg/encrypt" | |||||
| ) | |||||
| // Core - Inherits Client and adds new methods to expose the low level S3 APIs. | |||||
| type Core struct { | |||||
| *Client | |||||
| } | |||||
// NewCore - Returns a new initialized Core client. It should only be used
// under special conditions, such as needing access to lower-level primitives
// in order to write your own wrappers.
| func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { | |||||
| var s3Client Core | |||||
| client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| s3Client.Client = client | |||||
| return &s3Client, nil | |||||
| } | |||||
// ListObjects - List all the objects at a prefix; optionally, marker and
// delimiter can be used to further filter the results.
| func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { | |||||
| return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) | |||||
| } | |||||
| // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses | |||||
| // continuationToken instead of marker to support iteration over the results. | |||||
| func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { | |||||
| return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) | |||||
| } | |||||
| // CopyObject - copies an object from source object to destination object on server side. | |||||
| func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { | |||||
| return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) | |||||
| } | |||||
| // CopyObjectPart - creates a part in a multipart upload by copying (a | |||||
| // part of) an existing object. | |||||
| func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, | |||||
| partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { | |||||
| return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID, | |||||
| partID, startOffset, length, metadata) | |||||
| } | |||||
| // PutObject - Upload object. Uploads using single PUT call. | |||||
| func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { | |||||
| opts := PutObjectOptions{} | |||||
| m := make(map[string]string) | |||||
| for k, v := range metadata { | |||||
switch strings.ToLower(k) {
case "content-encoding":
	opts.ContentEncoding = v
case "content-disposition":
	opts.ContentDisposition = v
case "content-language":
	opts.ContentLanguage = v
case "content-type":
	opts.ContentType = v
case "cache-control":
	opts.CacheControl = v
case strings.ToLower(amzWebsiteRedirectLocation):
	opts.WebsiteRedirectLocation = v
default:
	m[k] = v
}
| } | |||||
| opts.UserMetadata = m | |||||
| opts.ServerSideEncryption = sse | |||||
| return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) | |||||
| } | |||||
| // NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. | |||||
| func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { | |||||
| result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) | |||||
| return result.UploadID, err | |||||
| } | |||||
| // ListMultipartUploads - List incomplete uploads. | |||||
| func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { | |||||
| return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) | |||||
| } | |||||
| // PutObjectPart - Upload an object part. | |||||
| func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { | |||||
| return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) | |||||
| } | |||||
// ListObjectParts - List uploaded parts of an incomplete upload.
| func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { | |||||
| return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts) | |||||
| } | |||||
| // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. | |||||
| func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) { | |||||
| res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ | |||||
| Parts: parts, | |||||
| }) | |||||
| return res.ETag, err | |||||
| } | |||||
| // AbortMultipartUpload - Abort an incomplete upload. | |||||
| func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { | |||||
| return c.abortMultipartUpload(context.Background(), bucket, object, uploadID) | |||||
| } | |||||
| // GetBucketPolicy - fetches bucket access policy for a given bucket. | |||||
| func (c Core) GetBucketPolicy(bucket string) (string, error) { | |||||
| return c.getBucketPolicy(bucket) | |||||
| } | |||||
| // PutBucketPolicy - applies a new bucket access policy for a given bucket. | |||||
| func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error { | |||||
| return c.putBucketPolicy(bucket, bucketPolicy) | |||||
| } | |||||
| // GetObject is a lower level API implemented to support reading | |||||
| // partial objects and also downloading objects with special conditions | |||||
| // matching etag, modtime etc. | |||||
| func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { | |||||
| return c.getObject(context.Background(), bucketName, objectName, opts) | |||||
| } | |||||
| // StatObject is a lower level API implemented to support special | |||||
| // conditions matching etag, modtime on a request. | |||||
| func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { | |||||
| return c.statObject(context.Background(), bucketName, objectName, opts) | |||||
| } | |||||
| @@ -0,0 +1,71 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import "io" | |||||
// hookReader hooks an additional reader into the source stream. It is
// useful for building progress bars: on each Read operation the second
// reader is notified of the exact number of bytes read from the
// primary source.
| type hookReader struct { | |||||
| source io.Reader | |||||
| hook io.Reader | |||||
| } | |||||
// Seek implements io.Seeker. It seeks the source if the source implements
// io.Seeker; otherwise it falls back to seeking the hook, if possible.
| func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { | |||||
// If the source implements io.Seeker, use it.
| sourceSeeker, ok := hr.source.(io.Seeker) | |||||
| if ok { | |||||
| return sourceSeeker.Seek(offset, whence) | |||||
| } | |||||
// Otherwise, if the hook implements io.Seeker, use it.
| hookSeeker, ok := hr.hook.(io.Seeker) | |||||
| if ok { | |||||
| return hookSeeker.Seek(offset, whence) | |||||
| } | |||||
| return n, nil | |||||
| } | |||||
// Read implements io.Reader. It always reads from the source; the 'n'
// bytes read are then reported through the hook. Returns an error for
// all conditions other than io.EOF.
| func (hr *hookReader) Read(b []byte) (n int, err error) { | |||||
| n, err = hr.source.Read(b) | |||||
| if err != nil && err != io.EOF { | |||||
| return n, err | |||||
| } | |||||
| // Progress the hook with the total read bytes from the source. | |||||
| if _, herr := hr.hook.Read(b[:n]); herr != nil { | |||||
| if herr != io.EOF { | |||||
| return n, herr | |||||
| } | |||||
| } | |||||
| return n, err | |||||
| } | |||||
// newHook returns an io.Reader that wraps the source in a hookReader,
// reporting the data read from the source to the hook. If hook is nil,
// the source is returned unwrapped.
| func newHook(source, hook io.Reader) io.Reader { | |||||
| if hook == nil { | |||||
| return source | |||||
| } | |||||
| return &hookReader{source, hook} | |||||
| } | |||||
| @@ -0,0 +1,89 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| // A Chain will search for a provider which returns credentials | |||||
| // and cache that provider until Retrieve is called again. | |||||
| // | |||||
| // The Chain provides a way of chaining multiple providers together | |||||
| // which will pick the first available using priority order of the | |||||
| // Providers in the list. | |||||
| // | |||||
// If none of the Providers retrieve a valid credentials Value, Chain's
// Retrieve() will return the anonymous (no credentials) value.
| // | |||||
| // If a Provider is found which returns valid credentials Value ChainProvider | |||||
| // will cache that Provider for all calls to IsExpired(), until Retrieve is | |||||
| // called again after IsExpired() is true. | |||||
| // | |||||
| // creds := credentials.NewChainCredentials( | |||||
| // []credentials.Provider{ | |||||
| // &credentials.EnvAWSS3{}, | |||||
| // &credentials.EnvMinio{}, | |||||
| // }) | |||||
| // | |||||
| // // Usage of ChainCredentials. | |||||
| // mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") | |||||
| // if err != nil { | |||||
| // log.Fatalln(err) | |||||
| // } | |||||
| // | |||||
| type Chain struct { | |||||
| Providers []Provider | |||||
| curr Provider | |||||
| } | |||||
| // NewChainCredentials returns a pointer to a new Credentials object | |||||
| // wrapping a chain of providers. | |||||
| func NewChainCredentials(providers []Provider) *Credentials { | |||||
| return New(&Chain{ | |||||
| Providers: append([]Provider{}, providers...), | |||||
| }) | |||||
| } | |||||
// Retrieve returns the credentials value; it returns the anonymous (no
// credentials) value if no credentials provider returned any value.
| // | |||||
| // If a provider is found with credentials, it will be cached and any calls | |||||
| // to IsExpired() will return the expired state of the cached provider. | |||||
| func (c *Chain) Retrieve() (Value, error) { | |||||
| for _, p := range c.Providers { | |||||
| creds, _ := p.Retrieve() | |||||
| // Always prioritize non-anonymous providers, if any. | |||||
| if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { | |||||
| continue | |||||
| } | |||||
| c.curr = p | |||||
| return creds, nil | |||||
| } | |||||
// At this point we have exhausted all the providers and are left
// without any credentials; return anonymous.
| return Value{ | |||||
| SignerType: SignatureAnonymous, | |||||
| }, nil | |||||
| } | |||||
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
| func (c *Chain) IsExpired() bool { | |||||
| if c.curr != nil { | |||||
| return c.curr.IsExpired() | |||||
| } | |||||
| return true | |||||
| } | |||||
| @@ -0,0 +1,17 @@ | |||||
| { | |||||
| "version": "8", | |||||
| "hosts": { | |||||
| "play": { | |||||
| "url": "https://play.minio.io:9000", | |||||
| "accessKey": "Q3AM3UQ867SPQQA43P2F", | |||||
| "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", | |||||
| "api": "S3v2" | |||||
| }, | |||||
| "s3": { | |||||
| "url": "https://s3.amazonaws.com", | |||||
| "accessKey": "accessKey", | |||||
| "secretKey": "secret", | |||||
| "api": "S3v4" | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -0,0 +1,175 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "sync" | |||||
| "time" | |||||
| ) | |||||
| // A Value is the AWS credentials value for individual credential fields. | |||||
| type Value struct { | |||||
| // AWS Access key ID | |||||
| AccessKeyID string | |||||
| // AWS Secret Access Key | |||||
| SecretAccessKey string | |||||
| // AWS Session Token | |||||
| SessionToken string | |||||
| // Signature Type. | |||||
| SignerType SignatureType | |||||
| } | |||||
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own expired state, and what
// being expired means.
| type Provider interface { | |||||
// Retrieve returns the credentials Value if it was successfully retrieved.
// An error is returned if the value was not obtainable or is empty.
| Retrieve() (Value, error) | |||||
| // IsExpired returns if the credentials are no longer valid, and need | |||||
| // to be retrieved. | |||||
| IsExpired() bool | |||||
| } | |||||
// An Expiry provides shared expiration logic to be used by credentials
| // providers to implement expiry functionality. | |||||
| // | |||||
| // The best method to use this struct is as an anonymous field within the | |||||
| // provider's struct. | |||||
| // | |||||
| // Example: | |||||
| // type IAMCredentialProvider struct { | |||||
| // Expiry | |||||
| // ... | |||||
| // } | |||||
| type Expiry struct { | |||||
// The date/time at which the credentials expire.
| expiration time.Time | |||||
| // If set will be used by IsExpired to determine the current time. | |||||
| // Defaults to time.Now if CurrentTime is not set. | |||||
| CurrentTime func() time.Time | |||||
| } | |||||
| // SetExpiration sets the expiration IsExpired will check when called. | |||||
| // | |||||
| // If window is greater than 0 the expiration time will be reduced by the | |||||
| // window value. | |||||
| // | |||||
| // Using a window is helpful to trigger credentials to expire sooner than | |||||
| // the expiration time given to ensure no requests are made with expired | |||||
| // tokens. | |||||
| func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { | |||||
| e.expiration = expiration | |||||
| if window > 0 { | |||||
| e.expiration = e.expiration.Add(-window) | |||||
| } | |||||
| } | |||||
| // IsExpired returns if the credentials are expired. | |||||
| func (e *Expiry) IsExpired() bool { | |||||
| if e.CurrentTime == nil { | |||||
| e.CurrentTime = time.Now | |||||
| } | |||||
| return e.expiration.Before(e.CurrentTime()) | |||||
| } | |||||
| // Credentials - A container for synchronous safe retrieval of credentials Value. | |||||
| // Credentials will cache the credentials value until they expire. Once the value | |||||
| // expires the next Get will attempt to retrieve valid credentials. | |||||
| // | |||||
| // Credentials is safe to use across multiple goroutines and will manage the | |||||
| // synchronous state so the Providers do not need to implement their own | |||||
| // synchronization. | |||||
| // | |||||
| // The first Credentials.Get() will always call Provider.Retrieve() to get the | |||||
| // first instance of the credentials Value. All calls to Get() after that | |||||
| // will return the cached credentials Value until IsExpired() returns true. | |||||
| type Credentials struct { | |||||
| sync.Mutex | |||||
| creds Value | |||||
| forceRefresh bool | |||||
| provider Provider | |||||
| } | |||||
| // New returns a pointer to a new Credentials with the provider set. | |||||
| func New(provider Provider) *Credentials { | |||||
| return &Credentials{ | |||||
| provider: provider, | |||||
| forceRefresh: true, | |||||
| } | |||||
| } | |||||
| // Get returns the credentials value, or error if the credentials Value failed | |||||
| // to be retrieved. | |||||
| // | |||||
| // Will return the cached credentials Value if it has not expired. If the | |||||
| // credentials Value has expired the Provider's Retrieve() will be called | |||||
| // to refresh the credentials. | |||||
| // | |||||
| // If Credentials.Expire() was called the credentials Value will be force | |||||
| // expired, and the next call to Get() will cause them to be refreshed. | |||||
| func (c *Credentials) Get() (Value, error) { | |||||
| c.Lock() | |||||
| defer c.Unlock() | |||||
| if c.isExpired() { | |||||
| creds, err := c.provider.Retrieve() | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| c.creds = creds | |||||
| c.forceRefresh = false | |||||
| } | |||||
| return c.creds, nil | |||||
| } | |||||
| // Expire expires the credentials and forces them to be retrieved on the | |||||
| // next call to Get(). | |||||
| // | |||||
| // This will override the Provider's expired state, and force Credentials | |||||
| // to call the Provider's Retrieve(). | |||||
| func (c *Credentials) Expire() { | |||||
| c.Lock() | |||||
| defer c.Unlock() | |||||
| c.forceRefresh = true | |||||
| } | |||||
| // IsExpired returns if the credentials are no longer valid, and need | |||||
| // to be refreshed. | |||||
| // | |||||
| // If the Credentials were forced to be expired with Expire() this will | |||||
| // reflect that override. | |||||
| func (c *Credentials) IsExpired() bool { | |||||
| c.Lock() | |||||
| defer c.Unlock() | |||||
| return c.isExpired() | |||||
| } | |||||
| // isExpired helper method wrapping the definition of expired credentials. | |||||
| func (c *Credentials) isExpired() bool { | |||||
| return c.forceRefresh || c.provider.IsExpired() | |||||
| } | |||||
| @@ -0,0 +1,12 @@ | |||||
| [default] | |||||
| aws_access_key_id = accessKey | |||||
| aws_secret_access_key = secret | |||||
| aws_session_token = token | |||||
| [no_token] | |||||
| aws_access_key_id = accessKey | |||||
| aws_secret_access_key = secret | |||||
| [with_colon] | |||||
| aws_access_key_id: accessKey | |||||
| aws_secret_access_key: secret | |||||
| @@ -0,0 +1,62 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| // Package credentials provides credential retrieval and management | |||||
| // for S3 compatible object storage. | |||||
| // | |||||
| // By default the Credentials.Get() will cache the successful result of a | |||||
| // Provider's Retrieve() until Provider.IsExpired() returns true, at which | |||||
| // point Credentials will call the Provider's Retrieve() to get a new credential Value. | |||||
| // | |||||
| // The Provider is responsible for determining when credentials have expired. | |||||
| // It is also important to note that Credentials will always call Retrieve the | |||||
| // first time Credentials.Get() is called. | |||||
| // | |||||
| // Example of using the environment variable credentials. | |||||
| // | |||||
| // creds := NewEnvAWS() | |||||
| // // Retrieve the credentials value | |||||
| // credValue, err := creds.Get() | |||||
| // if err != nil { | |||||
| // // handle error | |||||
| // } | |||||
| // | |||||
| // Example of forcing credentials to expire and be refreshed on the next Get(). | |||||
| // This may be helpful to proactively expire credentials and refresh them sooner | |||||
| // than they would naturally expire on their own. | |||||
| // | |||||
| // creds := NewIAM("") | |||||
| // creds.Expire() | |||||
| // credsValue, err := creds.Get() | |||||
| // // New credentials will be retrieved instead of from cache. | |||||
| // | |||||
| // | |||||
| // Custom Provider | |||||
| // | |||||
| // Each Provider built into this package also provides a helper method to generate | |||||
| // a Credentials pointer set up with the provider. To use a custom Provider, just | |||||
| // create a type which satisfies the Provider interface and pass it to the | |||||
| // New function. | |||||
| // | |||||
| // type MyProvider struct{} | |||||
| // func (m *MyProvider) Retrieve() (Value, error) {...} | |||||
| // func (m *MyProvider) IsExpired() bool {...} | |||||
| // | |||||
| // creds := New(&MyProvider{}) | |||||
| // credValue, err := creds.Get() | |||||
| // | |||||
| package credentials | |||||
| @@ -0,0 +1,71 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import "os" | |||||
| // An EnvAWS retrieves credentials from the environment variables of the | |||||
| // running process. Environment credentials never expire. | |||||
| // | |||||
| // Environment variables used: | |||||
| // | |||||
| // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. | |||||
| // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. | |||||
| // * Secret Token: AWS_SESSION_TOKEN. | |||||
| type EnvAWS struct { | |||||
| retrieved bool | |||||
| } | |||||
| // NewEnvAWS returns a pointer to a new Credentials object | |||||
| // wrapping the environment variable provider. | |||||
| func NewEnvAWS() *Credentials { | |||||
| return New(&EnvAWS{}) | |||||
| } | |||||
| // Retrieve retrieves the keys from the environment. | |||||
| func (e *EnvAWS) Retrieve() (Value, error) { | |||||
| e.retrieved = false | |||||
| id := os.Getenv("AWS_ACCESS_KEY_ID") | |||||
| if id == "" { | |||||
| id = os.Getenv("AWS_ACCESS_KEY") | |||||
| } | |||||
| secret := os.Getenv("AWS_SECRET_ACCESS_KEY") | |||||
| if secret == "" { | |||||
| secret = os.Getenv("AWS_SECRET_KEY") | |||||
| } | |||||
| signerType := SignatureV4 | |||||
| if id == "" || secret == "" { | |||||
| signerType = SignatureAnonymous | |||||
| } | |||||
| e.retrieved = true | |||||
| return Value{ | |||||
| AccessKeyID: id, | |||||
| SecretAccessKey: secret, | |||||
| SessionToken: os.Getenv("AWS_SESSION_TOKEN"), | |||||
| SignerType: signerType, | |||||
| }, nil | |||||
| } | |||||
| // IsExpired returns true if the credentials have not yet been retrieved. | |||||
| func (e *EnvAWS) IsExpired() bool { | |||||
| return !e.retrieved | |||||
| } | |||||
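| // Usage sketch (editor's example, not upstream code): with the AWS variables | |||||
| // exported, Retrieve() picks them up and selects SignatureV4; with neither | |||||
| // access key variable set, the returned Value is anonymous instead. | |||||
| // | |||||
| //    os.Setenv("AWS_ACCESS_KEY_ID", "accessKey") | |||||
| //    os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") | |||||
| //    creds := NewEnvAWS() | |||||
| //    v, err := creds.Get() | |||||
| //    // on success: v.AccessKeyID == "accessKey", v.SignerType == SignatureV4 | |||||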
| @@ -0,0 +1,62 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import "os" | |||||
| // An EnvMinio retrieves credentials from the environment variables of the | |||||
| // running process. Environment credentials never expire. | |||||
| // | |||||
| // Environment variables used: | |||||
| // | |||||
| // * Access Key ID: MINIO_ACCESS_KEY. | |||||
| // * Secret Access Key: MINIO_SECRET_KEY. | |||||
| type EnvMinio struct { | |||||
| retrieved bool | |||||
| } | |||||
| // NewEnvMinio returns a pointer to a new Credentials object | |||||
| // wrapping the environment variable provider. | |||||
| func NewEnvMinio() *Credentials { | |||||
| return New(&EnvMinio{}) | |||||
| } | |||||
| // Retrieve retrieves the keys from the environment. | |||||
| func (e *EnvMinio) Retrieve() (Value, error) { | |||||
| e.retrieved = false | |||||
| id := os.Getenv("MINIO_ACCESS_KEY") | |||||
| secret := os.Getenv("MINIO_SECRET_KEY") | |||||
| signerType := SignatureV4 | |||||
| if id == "" || secret == "" { | |||||
| signerType = SignatureAnonymous | |||||
| } | |||||
| e.retrieved = true | |||||
| return Value{ | |||||
| AccessKeyID: id, | |||||
| SecretAccessKey: secret, | |||||
| SignerType: signerType, | |||||
| }, nil | |||||
| } | |||||
| // IsExpired returns true if the credentials have not yet been retrieved. | |||||
| func (e *EnvMinio) IsExpired() bool { | |||||
| return !e.retrieved | |||||
| } | |||||
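| // Editor's note with a small sketch: unlike EnvAWS there is no fallback | |||||
| // variable pair here, and if either MINIO_ACCESS_KEY or MINIO_SECRET_KEY is | |||||
| // unset the provider degrades to anonymous access rather than erroring. | |||||
| // | |||||
| //    creds := NewEnvMinio() | |||||
| //    v, _ := creds.Get() | |||||
| //    if v.SignerType == SignatureAnonymous { | |||||
| //        // neither variable was set; requests will be unsigned | |||||
| //    } | |||||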
| @@ -0,0 +1,120 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "os" | |||||
| "path/filepath" | |||||
| "github.com/go-ini/ini" | |||||
| homedir "github.com/mitchellh/go-homedir" | |||||
| ) | |||||
| // A FileAWSCredentials retrieves credentials from the current user's home | |||||
| // directory, and keeps track of whether those credentials have expired. | |||||
| // | |||||
| // Profile ini file example: $HOME/.aws/credentials | |||||
| type FileAWSCredentials struct { | |||||
| // Path to the shared credentials file. | |||||
| // | |||||
| // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the | |||||
| // env value is empty will default to current user's home directory. | |||||
| // Linux/OSX: "$HOME/.aws/credentials" | |||||
| // Windows: "%USERPROFILE%\.aws\credentials" | |||||
| filename string | |||||
| // AWS Profile to extract credentials from the shared credentials file. If empty | |||||
| // will default to environment variable "AWS_PROFILE" or "default" if | |||||
| // environment variable is also not set. | |||||
| profile string | |||||
| // retrieved states if the credentials have been successfully retrieved. | |||||
| retrieved bool | |||||
| } | |||||
| // NewFileAWSCredentials returns a pointer to a new Credentials object | |||||
| // wrapping the Profile file provider. | |||||
| func NewFileAWSCredentials(filename string, profile string) *Credentials { | |||||
| return New(&FileAWSCredentials{ | |||||
| filename: filename, | |||||
| profile: profile, | |||||
| }) | |||||
| } | |||||
| // Retrieve reads and extracts the shared credentials from the current | |||||
| // user's home directory. | |||||
| func (p *FileAWSCredentials) Retrieve() (Value, error) { | |||||
| if p.filename == "" { | |||||
| p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") | |||||
| if p.filename == "" { | |||||
| homeDir, err := homedir.Dir() | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| p.filename = filepath.Join(homeDir, ".aws", "credentials") | |||||
| } | |||||
| } | |||||
| if p.profile == "" { | |||||
| p.profile = os.Getenv("AWS_PROFILE") | |||||
| if p.profile == "" { | |||||
| p.profile = "default" | |||||
| } | |||||
| } | |||||
| p.retrieved = false | |||||
| iniProfile, err := loadProfile(p.filename, p.profile) | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| // Default to empty string if not found. | |||||
| id := iniProfile.Key("aws_access_key_id") | |||||
| // Default to empty string if not found. | |||||
| secret := iniProfile.Key("aws_secret_access_key") | |||||
| // Default to empty string if not found. | |||||
| token := iniProfile.Key("aws_session_token") | |||||
| p.retrieved = true | |||||
| return Value{ | |||||
| AccessKeyID: id.String(), | |||||
| SecretAccessKey: secret.String(), | |||||
| SessionToken: token.String(), | |||||
| SignerType: SignatureV4, | |||||
| }, nil | |||||
| } | |||||
| // IsExpired returns if the shared credentials have expired. | |||||
| func (p *FileAWSCredentials) IsExpired() bool { | |||||
| return !p.retrieved | |||||
| } | |||||
| // loadProfile loads the given profile from the file pointed to by the shared | |||||
| // credentials filename. The credentials retrieved from the profile are returned, | |||||
| // or an error if reading the file fails or the data is invalid. | |||||
| func loadProfile(filename, profile string) (*ini.Section, error) { | |||||
| config, err := ini.Load(filename) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| iniProfile, err := config.GetSection(profile) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return iniProfile, nil | |||||
| } | |||||
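| // Usage sketch (editor's example): reading a shared credentials file such as | |||||
| // the ini fixture included earlier in this change (the path below is | |||||
| // hypothetical). go-ini accepts both "key = value" and "key: value" styles, | |||||
| // which is why a [with_colon] profile also resolves. | |||||
| // | |||||
| //    creds := NewFileAWSCredentials("testdata/credentials.sample", "no_token") | |||||
| //    v, err := creds.Get() | |||||
| //    // on success: v.AccessKeyID == "accessKey", v.SessionToken == "" | |||||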
| @@ -0,0 +1,133 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "encoding/json" | |||||
| "io/ioutil" | |||||
| "os" | |||||
| "path/filepath" | |||||
| "runtime" | |||||
| homedir "github.com/mitchellh/go-homedir" | |||||
| ) | |||||
| // A FileMinioClient retrieves credentials from the current user's home | |||||
| // directory, and keeps track of whether those credentials have expired. | |||||
| // | |||||
| // Configuration file example: $HOME/.mc/config.json | |||||
| type FileMinioClient struct { | |||||
| // Path to the shared credentials file. | |||||
| // | |||||
| // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the | |||||
| // env value is empty will default to current user's home directory. | |||||
| // Linux/OSX: "$HOME/.mc/config.json" | |||||
| // Windows: "%USERALIAS%\mc\config.json" | |||||
| filename string | |||||
| // Minio Alias to extract credentials from the shared credentials file. If empty, | |||||
| // will default to environment variable "MINIO_ALIAS" or "s3" if the | |||||
| // environment variable is also not set. | |||||
| alias string | |||||
| // retrieved states if the credentials have been successfully retrieved. | |||||
| retrieved bool | |||||
| } | |||||
| // NewFileMinioClient returns a pointer to a new Credentials object | |||||
| // wrapping the Alias file provider. | |||||
| func NewFileMinioClient(filename string, alias string) *Credentials { | |||||
| return New(&FileMinioClient{ | |||||
| filename: filename, | |||||
| alias: alias, | |||||
| }) | |||||
| } | |||||
| // Retrieve reads and extracts the shared credentials from the current | |||||
| // user's home directory. | |||||
| func (p *FileMinioClient) Retrieve() (Value, error) { | |||||
| if p.filename == "" { | |||||
| if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { | |||||
| p.filename = value | |||||
| } else { | |||||
| homeDir, err := homedir.Dir() | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| p.filename = filepath.Join(homeDir, ".mc", "config.json") | |||||
| if runtime.GOOS == "windows" { | |||||
| p.filename = filepath.Join(homeDir, "mc", "config.json") | |||||
| } | |||||
| } | |||||
| } | |||||
| if p.alias == "" { | |||||
| p.alias = os.Getenv("MINIO_ALIAS") | |||||
| if p.alias == "" { | |||||
| p.alias = "s3" | |||||
| } | |||||
| } | |||||
| p.retrieved = false | |||||
| hostCfg, err := loadAlias(p.filename, p.alias) | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| p.retrieved = true | |||||
| return Value{ | |||||
| AccessKeyID: hostCfg.AccessKey, | |||||
| SecretAccessKey: hostCfg.SecretKey, | |||||
| SignerType: parseSignatureType(hostCfg.API), | |||||
| }, nil | |||||
| } | |||||
| // IsExpired returns if the shared credentials have expired. | |||||
| func (p *FileMinioClient) IsExpired() bool { | |||||
| return !p.retrieved | |||||
| } | |||||
| // hostConfig configuration of a host. | |||||
| type hostConfig struct { | |||||
| URL string `json:"url"` | |||||
| AccessKey string `json:"accessKey"` | |||||
| SecretKey string `json:"secretKey"` | |||||
| API string `json:"api"` | |||||
| } | |||||
| // config config version. | |||||
| type config struct { | |||||
| Version string `json:"version"` | |||||
| Hosts map[string]hostConfig `json:"hosts"` | |||||
| } | |||||
| // loadAlias loads the given alias from the file pointed to by the shared | |||||
| // credentials filename. The credentials retrieved from the alias are returned, | |||||
| // or an error if reading the file fails or its JSON is invalid. | |||||
| func loadAlias(filename, alias string) (hostConfig, error) { | |||||
| cfg := &config{} | |||||
| configBytes, err := ioutil.ReadFile(filename) | |||||
| if err != nil { | |||||
| return hostConfig{}, err | |||||
| } | |||||
| if err = json.Unmarshal(configBytes, cfg); err != nil { | |||||
| return hostConfig{}, err | |||||
| } | |||||
| return cfg.Hosts[alias], nil | |||||
| } | |||||
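| // Editor's sketch of a minimal config file matching the config/hostConfig | |||||
| // shapes above (alias name and values are hypothetical): | |||||
| // | |||||
| //    { | |||||
| //        "version": "10", | |||||
| //        "hosts": { | |||||
| //            "s3": { | |||||
| //                "url": "https://s3.amazonaws.com", | |||||
| //                "accessKey": "accessKey", | |||||
| //                "secretKey": "secret", | |||||
| //                "api": "S3v4" | |||||
| //            } | |||||
| //        } | |||||
| //    } | |||||
| // | |||||
| //    creds := NewFileMinioClient("", "") // defaults: ~/.mc/config.json, alias "s3" | |||||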
| @@ -0,0 +1,250 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "bufio" | |||||
| "encoding/json" | |||||
| "errors" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "os" | |||||
| "path" | |||||
| "time" | |||||
| ) | |||||
| // DefaultExpiryWindow - Default expiry window. | |||||
| // ExpiryWindow will allow the credentials to trigger refreshing | |||||
| // prior to the credentials actually expiring. This is beneficial | |||||
| // so race conditions with expiring credentials do not cause | |||||
| // request to fail unexpectedly due to ExpiredTokenException exceptions. | |||||
| const DefaultExpiryWindow = time.Second * 10 // 10 secs | |||||
| // An IAM retrieves credentials from the EC2 service, and keeps track | |||||
| // of whether those credentials have expired. | |||||
| type IAM struct { | |||||
| Expiry | |||||
| // Required http Client to use when connecting to IAM metadata service. | |||||
| Client *http.Client | |||||
| // Custom endpoint to fetch IAM role credentials. | |||||
| endpoint string | |||||
| } | |||||
| // IAM Roles for Amazon EC2 | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| const ( | |||||
| defaultIAMRoleEndpoint = "http://169.254.169.254" | |||||
| defaultECSRoleEndpoint = "http://169.254.170.2" | |||||
| defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials" | |||||
| ) | |||||
| // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html | |||||
| func getEndpoint(endpoint string) (string, bool) { | |||||
| if endpoint != "" { | |||||
| return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" | |||||
| } | |||||
| if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { | |||||
| return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true | |||||
| } | |||||
| return defaultIAMRoleEndpoint, false | |||||
| } | |||||
| // NewIAM returns a pointer to a new Credentials object wrapping the IAM. | |||||
| func NewIAM(endpoint string) *Credentials { | |||||
| p := &IAM{ | |||||
| Client: &http.Client{ | |||||
| Transport: http.DefaultTransport, | |||||
| }, | |||||
| endpoint: endpoint, | |||||
| } | |||||
| return New(p) | |||||
| } | |||||
| // Retrieve retrieves credentials from the EC2 service. | |||||
| // An error will be returned if the request fails, or if the desired | |||||
| // credentials cannot be extracted from the response. | |||||
| func (m *IAM) Retrieve() (Value, error) { | |||||
| endpoint, isEcsTask := getEndpoint(m.endpoint) | |||||
| var roleCreds ec2RoleCredRespBody | |||||
| var err error | |||||
| if isEcsTask { | |||||
| roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) | |||||
| } else { | |||||
| roleCreds, err = getCredentials(m.Client, endpoint) | |||||
| } | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| // Expiry window is set to 10 secs. | |||||
| m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) | |||||
| return Value{ | |||||
| AccessKeyID: roleCreds.AccessKeyID, | |||||
| SecretAccessKey: roleCreds.SecretAccessKey, | |||||
| SessionToken: roleCreds.Token, | |||||
| SignerType: SignatureV4, | |||||
| }, nil | |||||
| } | |||||
| // An ec2RoleCredRespBody provides the shape for unmarshaling credential | |||||
| // request responses. | |||||
| type ec2RoleCredRespBody struct { | |||||
| // Success State | |||||
| Expiration time.Time | |||||
| AccessKeyID string | |||||
| SecretAccessKey string | |||||
| Token string | |||||
| // Error state | |||||
| Code string | |||||
| Message string | |||||
| // Unused params. | |||||
| LastUpdated time.Time | |||||
| Type string | |||||
| } | |||||
| // Get the final IAM role URL where the request will | |||||
| // be sent to fetch the rolling access credentials. | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| func getIAMRoleURL(endpoint string) (*url.URL, error) { | |||||
| u, err := url.Parse(endpoint) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| u.Path = defaultIAMSecurityCredsPath | |||||
| return u, nil | |||||
| } | |||||
| // listRoleNames lists the credential role names associated | |||||
| // with the current EC2 service. An error is returned if the | |||||
| // request cannot be made or its response cannot be read. | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { | |||||
| req, err := http.NewRequest("GET", u.String(), nil) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| resp, err := client.Do(req) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return nil, errors.New(resp.Status) | |||||
| } | |||||
| credsList := []string{} | |||||
| s := bufio.NewScanner(resp.Body) | |||||
| for s.Scan() { | |||||
| credsList = append(credsList, s.Text()) | |||||
| } | |||||
| if err := s.Err(); err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return credsList, nil | |||||
| } | |||||
| func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { | |||||
| req, err := http.NewRequest("GET", endpoint, nil) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| resp, err := client.Do(req) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ec2RoleCredRespBody{}, errors.New(resp.Status) | |||||
| } | |||||
| respCreds := ec2RoleCredRespBody{} | |||||
| if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| return respCreds, nil | |||||
| } | |||||
| // getCredentials - obtains the credentials from the IAM role name associated with | |||||
| // the current EC2 service. | |||||
| // | |||||
| // If the credentials cannot be found, or there is an error | |||||
| // reading the response, an error will be returned. | |||||
| func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| u, err := getIAMRoleURL(endpoint) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| roleNames, err := listRoleNames(client, u) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| if len(roleNames) == 0 { | |||||
| return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") | |||||
| } | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| // - An instance profile can contain only one IAM role. This limit cannot be increased. | |||||
| roleName := roleNames[0] | |||||
| // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html | |||||
| // The following command retrieves the security credentials for an | |||||
| // IAM role named `s3access`. | |||||
| // | |||||
| // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access | |||||
| // | |||||
| u.Path = path.Join(u.Path, roleName) | |||||
| req, err := http.NewRequest("GET", u.String(), nil) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| resp, err := client.Do(req) | |||||
| if err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return ec2RoleCredRespBody{}, errors.New(resp.Status) | |||||
| } | |||||
| respCreds := ec2RoleCredRespBody{} | |||||
| if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { | |||||
| return ec2RoleCredRespBody{}, err | |||||
| } | |||||
| if respCreds.Code != "Success" { | |||||
| // If an error code was returned something failed requesting the role. | |||||
| return ec2RoleCredRespBody{}, errors.New(respCreds.Message) | |||||
| } | |||||
| return respCreds, nil | |||||
| } | |||||
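| // Usage sketch (editor's example): inside EC2 an empty endpoint resolves to | |||||
| // the instance metadata service, and inside an ECS task the | |||||
| // AWS_CONTAINER_CREDENTIALS_RELATIVE_URI variable switches Retrieve() to the | |||||
| // task credentials endpoint, so there is nothing to configure in either case. | |||||
| // | |||||
| //    creds := NewIAM("") | |||||
| //    v, err := creds.Get() // fails off EC2/ECS: metadata endpoint unreachable | |||||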
| @@ -0,0 +1,77 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import "strings" | |||||
| // SignatureType is the type of Authorization requested for a given HTTP request. | |||||
| type SignatureType int | |||||
| // Different types of supported signatures - default is SignatureV4 or SignatureDefault. | |||||
| const ( | |||||
| // SignatureDefault is always set to v4. | |||||
| SignatureDefault SignatureType = iota | |||||
| SignatureV4 | |||||
| SignatureV2 | |||||
| SignatureV4Streaming | |||||
| SignatureAnonymous // Anonymous signature signifies, no signature. | |||||
| ) | |||||
| // IsV2 - is signature SignatureV2? | |||||
| func (s SignatureType) IsV2() bool { | |||||
| return s == SignatureV2 | |||||
| } | |||||
| // IsV4 - is signature SignatureV4? | |||||
| func (s SignatureType) IsV4() bool { | |||||
| return s == SignatureV4 || s == SignatureDefault | |||||
| } | |||||
| // IsStreamingV4 - is signature SignatureV4Streaming? | |||||
| func (s SignatureType) IsStreamingV4() bool { | |||||
| return s == SignatureV4Streaming | |||||
| } | |||||
| // IsAnonymous - is signature empty? | |||||
| func (s SignatureType) IsAnonymous() bool { | |||||
| return s == SignatureAnonymous | |||||
| } | |||||
| // String returns a humanized version of the signature type; | |||||
| // the strings returned here are case insensitive. | |||||
| func (s SignatureType) String() string { | |||||
| if s.IsV2() { | |||||
| return "S3v2" | |||||
| } else if s.IsV4() { | |||||
| return "S3v4" | |||||
| } else if s.IsStreamingV4() { | |||||
| return "S3v4Streaming" | |||||
| } | |||||
| return "Anonymous" | |||||
| } | |||||
| func parseSignatureType(str string) SignatureType { | |||||
| if strings.EqualFold(str, "S3v4") { | |||||
| return SignatureV4 | |||||
| } else if strings.EqualFold(str, "S3v2") { | |||||
| return SignatureV2 | |||||
| } else if strings.EqualFold(str, "S3v4Streaming") { | |||||
| return SignatureV4Streaming | |||||
| } | |||||
| return SignatureAnonymous | |||||
| } | |||||
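| // Editor's sketch of the round trip between String() and parseSignatureType(): | |||||
| // parsing is case-insensitive and anything unrecognized parses as | |||||
| // SignatureAnonymous. | |||||
| // | |||||
| //    parseSignatureType("s3v4") == SignatureV4               // true | |||||
| //    parseSignatureType(SignatureV2.String()) == SignatureV2 // true | |||||
| //    parseSignatureType("unknown") == SignatureAnonymous     // true | |||||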
| @@ -0,0 +1,67 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| // A Static is a set of credentials which are set programmatically, | |||||
| // and will never expire. | |||||
| type Static struct { | |||||
| Value | |||||
| } | |||||
| // NewStaticV2 returns a pointer to a new Credentials object | |||||
| // wrapping a static credentials value provider, with the signature | |||||
| // set to v2. If access and secret are not specified, then | |||||
| // regardless of the signature type set, the returned Value | |||||
| // will be anonymous. | |||||
| func NewStaticV2(id, secret, token string) *Credentials { | |||||
| return NewStatic(id, secret, token, SignatureV2) | |||||
| } | |||||
| // NewStaticV4 is like NewStaticV2, but with the signature set to v4. | |||||
| func NewStaticV4(id, secret, token string) *Credentials { | |||||
| return NewStatic(id, secret, token, SignatureV4) | |||||
| } | |||||
| // NewStatic returns a pointer to a new Credentials object | |||||
| // wrapping a static credentials value provider. | |||||
| func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { | |||||
| return New(&Static{ | |||||
| Value: Value{ | |||||
| AccessKeyID: id, | |||||
| SecretAccessKey: secret, | |||||
| SessionToken: token, | |||||
| SignerType: signerType, | |||||
| }, | |||||
| }) | |||||
| } | |||||
| // Retrieve returns the static credentials. | |||||
| func (s *Static) Retrieve() (Value, error) { | |||||
| if s.AccessKeyID == "" || s.SecretAccessKey == "" { | |||||
| // Anonymous is not an error | |||||
| return Value{SignerType: SignatureAnonymous}, nil | |||||
| } | |||||
| return s.Value, nil | |||||
| } | |||||
| // IsExpired returns if the credentials are expired. | |||||
| // | |||||
| // For Static, the credentials never expire. | |||||
| func (s *Static) IsExpired() bool { | |||||
| return false | |||||
| } | |||||
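| // Usage sketch (editor's example): static credentials suit tests and servers | |||||
| // with long-lived keys. Passing empty keys is not an error, it just yields | |||||
| // anonymous access. | |||||
| // | |||||
| //    creds := NewStaticV4("accessKey", "secret", "") | |||||
| //    v, _ := creds.Get() // SignatureV4, never refreshed | |||||
| // | |||||
| //    anon, _ := NewStaticV4("", "", "").Get() | |||||
| //    // anon.SignerType == SignatureAnonymous | |||||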
| @@ -0,0 +1,173 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2019 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "encoding/xml" | |||||
| "errors" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "time" | |||||
| ) | |||||
| // AssumedRoleUser - The identifiers for the temporary security credentials that | |||||
| // the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser | |||||
| type AssumedRoleUser struct { | |||||
| Arn string | |||||
| AssumedRoleID string `xml:"AssumeRoleId"` | |||||
| } | |||||
| // AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request. | |||||
| type AssumeRoleWithClientGrantsResponse struct { | |||||
| XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` | |||||
| Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` | |||||
| ResponseMetadata struct { | |||||
| RequestID string `xml:"RequestId,omitempty"` | |||||
| } `xml:"ResponseMetadata,omitempty"` | |||||
| } | |||||
| // ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants | |||||
| // request, including temporary credentials that can be used to make Minio API requests. | |||||
| type ClientGrantsResult struct { | |||||
| AssumedRoleUser AssumedRoleUser `xml:",omitempty"` | |||||
| Audience string `xml:",omitempty"` | |||||
| Credentials struct { | |||||
| AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | |||||
| SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | |||||
| Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | |||||
| SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | |||||
| } `xml:",omitempty"` | |||||
| PackedPolicySize int `xml:",omitempty"` | |||||
| Provider string `xml:",omitempty"` | |||||
| SubjectFromClientGrantsToken string `xml:",omitempty"` | |||||
| } | |||||
| // ClientGrantsToken - client grants token with expiry. | |||||
| type ClientGrantsToken struct { | |||||
| token string | |||||
| expiry int | |||||
| } | |||||
| // Token - access token returned after authenticating client grants. | |||||
| func (c *ClientGrantsToken) Token() string { | |||||
| return c.token | |||||
| } | |||||
| // Expiry - expiry for the access token returned after authenticating | |||||
| // client grants. | |||||
| func (c *ClientGrantsToken) Expiry() string { | |||||
| return fmt.Sprintf("%d", c.expiry) | |||||
| } | |||||
| // An STSClientGrants retrieves credentials from the Minio service, and keeps | |||||
| // track of whether those credentials have expired. | |||||
| type STSClientGrants struct { | |||||
| Expiry | |||||
| // Required http Client to use when connecting to Minio STS service. | |||||
| Client *http.Client | |||||
| // Minio endpoint to fetch STS credentials. | |||||
| stsEndpoint string | |||||
| // getClientGrantsTokenExpiry is a function to retrieve tokens | |||||
| // from the IDP. This function should return two values: one is the | |||||
| // access token, which is a self-contained access token (JWT), | |||||
| // and the second return value is the expiry associated with | |||||
| // this token. This is a customer-provided function and | |||||
| // is mandatory. | |||||
| getClientGrantsTokenExpiry func() (*ClientGrantsToken, error) | |||||
| } | |||||
| // NewSTSClientGrants returns a pointer to a new | |||||
| // Credentials object wrapping the STSClientGrants. | |||||
| func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { | |||||
| if stsEndpoint == "" { | |||||
| return nil, errors.New("STS endpoint cannot be empty") | |||||
| } | |||||
| if getClientGrantsTokenExpiry == nil { | |||||
| return nil, errors.New("Client grants access token and expiry retrieval function should be defined") | |||||
| } | |||||
| return New(&STSClientGrants{ | |||||
| Client: &http.Client{ | |||||
| Transport: http.DefaultTransport, | |||||
| }, | |||||
| stsEndpoint: stsEndpoint, | |||||
| getClientGrantsTokenExpiry: getClientGrantsTokenExpiry, | |||||
| }), nil | |||||
| } | |||||
| func getClientGrantsCredentials(clnt *http.Client, endpoint string, | |||||
| getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { | |||||
| accessToken, err := getClientGrantsTokenExpiry() | |||||
| if err != nil { | |||||
| return AssumeRoleWithClientGrantsResponse{}, err | |||||
| } | |||||
| v := url.Values{} | |||||
| v.Set("Action", "AssumeRoleWithClientGrants") | |||||
| v.Set("Token", accessToken.Token()) | |||||
| v.Set("DurationSeconds", accessToken.Expiry()) | |||||
| v.Set("Version", "2011-06-15") | |||||
| u, err := url.Parse(endpoint) | |||||
| if err != nil { | |||||
| return AssumeRoleWithClientGrantsResponse{}, err | |||||
| } | |||||
| u.RawQuery = v.Encode() | |||||
| req, err := http.NewRequest("POST", u.String(), nil) | |||||
| if err != nil { | |||||
| return AssumeRoleWithClientGrantsResponse{}, err | |||||
| } | |||||
| resp, err := clnt.Do(req) | |||||
| if err != nil { | |||||
| return AssumeRoleWithClientGrantsResponse{}, err | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) | |||||
| } | |||||
| a := AssumeRoleWithClientGrantsResponse{} | |||||
| if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { | |||||
| return AssumeRoleWithClientGrantsResponse{}, err | |||||
| } | |||||
| return a, nil | |||||
| } | |||||
| // Retrieve retrieves credentials from the Minio service. | |||||
| // An error will be returned if the request fails. | |||||
| func (m *STSClientGrants) Retrieve() (Value, error) { | |||||
| a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry) | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| // Expiry window is set to 10 secs. | |||||
| m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) | |||||
| return Value{ | |||||
| AccessKeyID: a.Result.Credentials.AccessKey, | |||||
| SecretAccessKey: a.Result.Credentials.SecretKey, | |||||
| SessionToken: a.Result.Credentials.SessionToken, | |||||
| SignerType: SignatureV4, | |||||
| }, nil | |||||
| } | |||||
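| // Editor's note: ClientGrantsToken has unexported fields and this file ships | |||||
| // no constructor for it, so the sketch below only compiles inside this | |||||
| // package; fetchJWTFromIDP and the endpoint are hypothetical. | |||||
| // | |||||
| //    getToken := func() (*ClientGrantsToken, error) { | |||||
| //        jwt := fetchJWTFromIDP() // hypothetical IDP call returning a JWT string | |||||
| //        return &ClientGrantsToken{token: jwt, expiry: 3600}, nil | |||||
| //    } | |||||
| //    creds, err := NewSTSClientGrants("https://minio.example.com:9000", getToken) | |||||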
| @@ -0,0 +1,169 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2019 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package credentials | |||||
| import ( | |||||
| "encoding/xml" | |||||
| "errors" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "time" | |||||
| ) | |||||
| // AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request. | |||||
| type AssumeRoleWithWebIdentityResponse struct { | |||||
| XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` | |||||
| Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` | |||||
| ResponseMetadata struct { | |||||
| RequestID string `xml:"RequestId,omitempty"` | |||||
| } `xml:"ResponseMetadata,omitempty"` | |||||
| } | |||||
| // WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity | |||||
| // request, including temporary credentials that can be used to make Minio API requests. | |||||
| type WebIdentityResult struct { | |||||
| AssumedRoleUser AssumedRoleUser `xml:",omitempty"` | |||||
| Audience string `xml:",omitempty"` | |||||
| Credentials struct { | |||||
| AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` | |||||
| SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` | |||||
| Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` | |||||
| SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` | |||||
| } `xml:",omitempty"` | |||||
| PackedPolicySize int `xml:",omitempty"` | |||||
| Provider string `xml:",omitempty"` | |||||
| SubjectFromWebIdentityToken string `xml:",omitempty"` | |||||
| } | |||||
| // WebIdentityToken - web identity token with expiry. | |||||
| type WebIdentityToken struct { | |||||
| token string | |||||
| expiry int | |||||
| } | |||||
| // Token - access token returned after authenticating web identity. | |||||
| func (c *WebIdentityToken) Token() string { | |||||
| return c.token | |||||
| } | |||||
| // Expiry - expiry for the access token returned after authenticating | |||||
| // web identity. | |||||
| func (c *WebIdentityToken) Expiry() string { | |||||
| return fmt.Sprintf("%d", c.expiry) | |||||
| } | |||||
| // An STSWebIdentity retrieves credentials from the Minio service, and keeps | |||||
| // track of whether those credentials have expired. | |||||
| type STSWebIdentity struct { | |||||
| Expiry | |||||
| // Required http Client to use when connecting to Minio STS service. | |||||
| Client *http.Client | |||||
| // Minio endpoint to fetch STS credentials. | |||||
| stsEndpoint string | |||||
| // getWebIDTokenExpiry is a function which returns ID tokens | |||||
| // from the IDP. This function should return two values: one | |||||
| // is the ID token, which is a self-contained ID token (JWT), | |||||
| // and the second return value is the expiry associated with | |||||
| // this token. | |||||
| // This is a customer-provided function and is mandatory. | |||||
| getWebIDTokenExpiry func() (*WebIdentityToken, error) | |||||
| } | |||||
| // NewSTSWebIdentity returns a pointer to a new | |||||
| // Credentials object wrapping the STSWebIdentity. | |||||
| func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { | |||||
| if stsEndpoint == "" { | |||||
| return nil, errors.New("STS endpoint cannot be empty") | |||||
| } | |||||
| if getWebIDTokenExpiry == nil { | |||||
| return nil, errors.New("Web ID token and expiry retrieval function should be defined") | |||||
| } | |||||
| return New(&STSWebIdentity{ | |||||
| Client: &http.Client{ | |||||
| Transport: http.DefaultTransport, | |||||
| }, | |||||
| stsEndpoint: stsEndpoint, | |||||
| getWebIDTokenExpiry: getWebIDTokenExpiry, | |||||
| }), nil | |||||
| } | |||||
| func getWebIdentityCredentials(clnt *http.Client, endpoint string, | |||||
| getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { | |||||
| idToken, err := getWebIDTokenExpiry() | |||||
| if err != nil { | |||||
| return AssumeRoleWithWebIdentityResponse{}, err | |||||
| } | |||||
| v := url.Values{} | |||||
| v.Set("Action", "AssumeRoleWithWebIdentity") | |||||
| v.Set("WebIdentityToken", idToken.Token()) | |||||
| v.Set("DurationSeconds", idToken.Expiry()) | |||||
| v.Set("Version", "2011-06-15") | |||||
| u, err := url.Parse(endpoint) | |||||
| if err != nil { | |||||
| return AssumeRoleWithWebIdentityResponse{}, err | |||||
| } | |||||
| u.RawQuery = v.Encode() | |||||
| req, err := http.NewRequest("POST", u.String(), nil) | |||||
| if err != nil { | |||||
| return AssumeRoleWithWebIdentityResponse{}, err | |||||
| } | |||||
| resp, err := clnt.Do(req) | |||||
| if err != nil { | |||||
| return AssumeRoleWithWebIdentityResponse{}, err | |||||
| } | |||||
| defer resp.Body.Close() | |||||
| if resp.StatusCode != http.StatusOK { | |||||
| return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) | |||||
| } | |||||
| a := AssumeRoleWithWebIdentityResponse{} | |||||
| if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { | |||||
| return AssumeRoleWithWebIdentityResponse{}, err | |||||
| } | |||||
| return a, nil | |||||
| } | |||||
| // Retrieve retrieves credentials from the Minio service. | |||||
| // An error will be returned if the request fails. | |||||
| func (m *STSWebIdentity) Retrieve() (Value, error) { | |||||
| a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) | |||||
| if err != nil { | |||||
| return Value{}, err | |||||
| } | |||||
| // Expiry window is set to 10 secs. | |||||
| m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) | |||||
| return Value{ | |||||
| AccessKeyID: a.Result.Credentials.AccessKey, | |||||
| SecretAccessKey: a.Result.Credentials.SecretKey, | |||||
| SessionToken: a.Result.Credentials.SessionToken, | |||||
| SignerType: SignatureV4, | |||||
| }, nil | |||||
| } | |||||
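| // Editor's sketch (same caveat as the client-grants provider: the token type's | |||||
| // fields are unexported, so this wiring assumes in-package construction, and | |||||
| // fetchIDTokenFromIDP and the endpoint are hypothetical): | |||||
| // | |||||
| //    getIDToken := func() (*WebIdentityToken, error) { | |||||
| //        idToken := fetchIDTokenFromIDP() // hypothetical IDP call | |||||
| //        return &WebIdentityToken{token: idToken, expiry: 3600}, nil | |||||
| //    } | |||||
| //    creds, err := NewSTSWebIdentity("https://minio.example.com:9000", getIDToken) | |||||
| //    v, err := creds.Get() // re-runs the STS exchange within 10s of each expiry | |||||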
| @@ -0,0 +1,195 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2018 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package encrypt | |||||
| import ( | |||||
| "crypto/md5" | |||||
| "encoding/base64" | |||||
| "encoding/json" | |||||
| "errors" | |||||
| "net/http" | |||||
| "golang.org/x/crypto/argon2" | |||||
| ) | |||||
| const ( | |||||
| // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. | |||||
| sseGenericHeader = "X-Amz-Server-Side-Encryption" | |||||
| // sseKmsKeyID is the AWS SSE-KMS key id. | |||||
| sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" | |||||
| // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. | |||||
| sseEncryptionContext = sseGenericHeader + "-Encryption-Context" | |||||
| // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. | |||||
| sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" | |||||
| // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. | |||||
| sseCustomerKey = sseGenericHeader + "-Customer-Key" | |||||
| // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. | |||||
| sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" | |||||
| // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. | |||||
| sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" | |||||
| // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. | |||||
| sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" | |||||
| // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. | |||||
| sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" | |||||
| ) | |||||
| // PBKDF creates an SSE-C key from the provided password and salt. | |||||
| // PBKDF is a password-based key derivation function | |||||
| // which can be used to derive a high-entropy cryptographic | |||||
| // key from a low-entropy password and a salt. | |||||
| type PBKDF func(password, salt []byte) ServerSide | |||||
| // DefaultPBKDF is the default PBKDF. It uses Argon2id with the | |||||
| // recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). | |||||
| var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { | |||||
| sse := ssec{} | |||||
| copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) | |||||
| return sse | |||||
| } | |||||
| // Type is the server-side-encryption method. It represents one of | |||||
| // the following encryption methods: | |||||
| // - SSE-C: server-side-encryption with customer provided keys | |||||
| // - KMS: server-side-encryption with managed keys | |||||
| // - S3: server-side-encryption using S3 storage encryption | |||||
| type Type string | |||||
| const ( | |||||
| // SSEC represents server-side-encryption with customer provided keys | |||||
| SSEC Type = "SSE-C" | |||||
| // KMS represents server-side-encryption with managed keys | |||||
| KMS Type = "KMS" | |||||
| // S3 represents server-side-encryption using S3 storage encryption | |||||
| S3 Type = "S3" | |||||
| ) | |||||
| // ServerSide is a form of S3 server-side-encryption. | |||||
| type ServerSide interface { | |||||
| // Type returns the server-side-encryption method. | |||||
| Type() Type | |||||
| // Marshal adds encryption headers to the provided HTTP headers. | |||||
| // It marks an HTTP request as server-side-encryption request | |||||
| // and inserts the required data into the headers. | |||||
| Marshal(h http.Header) | |||||
| } | |||||
| // NewSSE returns a server-side-encryption using S3 storage encryption. | |||||
| // Using SSE-S3 the server will encrypt the object with server-managed keys. | |||||
| func NewSSE() ServerSide { return s3{} } | |||||
| // NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. | |||||
| func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { | |||||
| if context == nil { | |||||
| return kms{key: keyID, hasContext: false}, nil | |||||
| } | |||||
| serializedContext, err := json.Marshal(context) | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| return kms{key: keyID, context: serializedContext, hasContext: true}, nil | |||||
| } | |||||
| // NewSSEC returns a new server-side-encryption using SSE-C and the provided key. | |||||
| // The key must be 32 bytes long. | |||||
| func NewSSEC(key []byte) (ServerSide, error) { | |||||
| if len(key) != 32 { | |||||
| return nil, errors.New("encrypt: SSE-C key must be 256 bit long") | |||||
| } | |||||
| sse := ssec{} | |||||
| copy(sse[:], key) | |||||
| return sse, nil | |||||
| } | |||||
| // SSE transforms an SSE-C copy encryption into an SSE-C encryption. | |||||
| // It is the inverse of SSECopy(...). | |||||
| // | |||||
| // If the provided sse is not an SSE-C copy encryption, SSE returns | |||||
| // sse unmodified. | |||||
| func SSE(sse ServerSide) ServerSide { | |||||
| if sse == nil || sse.Type() != SSEC { | |||||
| return sse | |||||
| } | |||||
| if sse, ok := sse.(ssecCopy); ok { | |||||
| return ssec(sse) | |||||
| } | |||||
| return sse | |||||
| } | |||||
| // SSECopy transforms an SSE-C encryption into an SSE-C copy | |||||
| // encryption. This is required for SSE-C key rotation or an SSE-C | |||||
| // copy where the source and the destination should be encrypted. | |||||
| // | |||||
| // If the provided sse is not an SSE-C encryption, SSECopy returns | |||||
| // sse unmodified. | |||||
| func SSECopy(sse ServerSide) ServerSide { | |||||
| if sse == nil || sse.Type() != SSEC { | |||||
| return sse | |||||
| } | |||||
| if sse, ok := sse.(ssec); ok { | |||||
| return ssecCopy(sse) | |||||
| } | |||||
| return sse | |||||
| } | |||||
| type ssec [32]byte | |||||
| func (s ssec) Type() Type { return SSEC } | |||||
| func (s ssec) Marshal(h http.Header) { | |||||
| keyMD5 := md5.Sum(s[:]) | |||||
| h.Set(sseCustomerAlgorithm, "AES256") | |||||
| h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) | |||||
| h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) | |||||
| } | |||||
| type ssecCopy [32]byte | |||||
| func (s ssecCopy) Type() Type { return SSEC } | |||||
| func (s ssecCopy) Marshal(h http.Header) { | |||||
| keyMD5 := md5.Sum(s[:]) | |||||
| h.Set(sseCopyCustomerAlgorithm, "AES256") | |||||
| h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) | |||||
| h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) | |||||
| } | |||||
| type s3 struct{} | |||||
| func (s s3) Type() Type { return S3 } | |||||
| func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } | |||||
| type kms struct { | |||||
| key string | |||||
| context []byte | |||||
| hasContext bool | |||||
| } | |||||
| func (s kms) Type() Type { return KMS } | |||||
| func (s kms) Marshal(h http.Header) { | |||||
| h.Set(sseGenericHeader, "aws:kms") | |||||
| h.Set(sseKmsKeyID, s.key) | |||||
| if s.hasContext { | |||||
| h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) | |||||
| } | |||||
| } | |||||
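| // Usage sketch (editor's example): deriving an SSE-C key from a password and | |||||
| // marshaling it onto a request. Using the bucket/object path as the salt is | |||||
| // an assumption borrowed from minio-go's own examples, not a requirement. | |||||
| // | |||||
| //    sse := DefaultPBKDF([]byte("my-password"), []byte("my-bucket/my-object")) | |||||
| //    h := make(http.Header) | |||||
| //    sse.Marshal(h) | |||||
| //    // h now carries the AES256 algorithm, base64 key, and key-MD5 headers | |||||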
| @@ -0,0 +1,306 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package s3signer | |||||
| import ( | |||||
| "bytes" | |||||
| "encoding/hex" | |||||
| "fmt" | |||||
| "io" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| // Reference for constants used below - | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming | |||||
| const ( | |||||
| streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" | |||||
| streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" | |||||
| emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" | |||||
| payloadChunkSize = 64 * 1024 | |||||
| chunkSigConstLen = 17 // ";chunk-signature=" | |||||
| signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" | |||||
| crlfLen = 2 // CRLF | |||||
| ) | |||||
| // Request headers to be ignored while calculating seed signature for | |||||
| // a request. | |||||
| var ignoredStreamingHeaders = map[string]bool{ | |||||
| "Authorization": true, | |||||
| "User-Agent": true, | |||||
| "Content-Type": true, | |||||
| } | |||||
| // getSignedChunkLength - calculates the length of chunk metadata | |||||
| func getSignedChunkLength(chunkDataSize int64) int64 { | |||||
| return int64(len(fmt.Sprintf("%x", chunkDataSize))) + | |||||
| chunkSigConstLen + | |||||
| signatureStrLen + | |||||
| crlfLen + | |||||
| chunkDataSize + | |||||
| crlfLen | |||||
| } | |||||
| // getStreamLength - calculates the length of the overall stream (data + metadata) | |||||
| func getStreamLength(dataLen, chunkSize int64) int64 { | |||||
| if dataLen <= 0 { | |||||
| return 0 | |||||
| } | |||||
| chunksCount := int64(dataLen / chunkSize) | |||||
| remainingBytes := int64(dataLen % chunkSize) | |||||
| streamLen := int64(0) | |||||
| streamLen += chunksCount * getSignedChunkLength(chunkSize) | |||||
| if remainingBytes > 0 { | |||||
| streamLen += getSignedChunkLength(remainingBytes) | |||||
| } | |||||
| streamLen += getSignedChunkLength(0) | |||||
| return streamLen | |||||
| } | |||||
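| // Worked example (editor's note): for dataLen = 100000 and the 64 KiB | |||||
| // payloadChunkSize, the stream is one full chunk, one 34464-byte remainder | |||||
| // chunk, and the terminating zero-byte chunk: | |||||
| // | |||||
| //    getSignedChunkLength(65536) = 5 + 17 + 64 + 2 + 65536 + 2 = 65626 | |||||
| //    getSignedChunkLength(34464) = 4 + 17 + 64 + 2 + 34464 + 2 = 34553 | |||||
| //    getSignedChunkLength(0)     = 1 + 17 + 64 + 2 +     0 + 2 =    86 | |||||
| //    getStreamLength(100000, 65536)                            = 100265 | |||||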
| // buildChunkStringToSign - returns the string to sign given chunk data | |||||
| // and previous signature. | |||||
| func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { | |||||
| stringToSignParts := []string{ | |||||
| streamingPayloadHdr, | |||||
| t.Format(iso8601DateFormat), | |||||
| getScope(region, t), | |||||
| previousSig, | |||||
| emptySHA256, | |||||
| hex.EncodeToString(sum256(chunkData)), | |||||
| } | |||||
| return strings.Join(stringToSignParts, "\n") | |||||
| } | |||||
| // prepareStreamingRequest - prepares a request with appropriate | |||||
| // headers before computing the seed signature. | |||||
| func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { | |||||
| // Set x-amz-content-sha256 header. | |||||
| req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) | |||||
| if sessionToken != "" { | |||||
| req.Header.Set("X-Amz-Security-Token", sessionToken) | |||||
| } | |||||
| req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) | |||||
| // Set content length with streaming signature for each chunk included. | |||||
| req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) | |||||
| req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) | |||||
| } | |||||
| // buildChunkHeader - returns the chunk header, | |||||
| // e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + "\r\n"; | |||||
| // the chunk data and its trailing "\r\n" are appended by the caller. | |||||
| func buildChunkHeader(chunkLen int64, signature string) []byte { | |||||
| return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") | |||||
| } | |||||
| // buildChunkSignature - returns chunk signature for a given chunk and previous signature. | |||||
| func buildChunkSignature(chunkData []byte, reqTime time.Time, region, | |||||
| previousSignature, secretAccessKey string) string { | |||||
| chunkStringToSign := buildChunkStringToSign(reqTime, region, | |||||
| previousSignature, chunkData) | |||||
| signingKey := getSigningKey(secretAccessKey, region, reqTime) | |||||
| return getSignature(signingKey, chunkStringToSign) | |||||
| } | |||||
| // setSeedSignature - computes and stores the seed signature for a given request. | |||||
| func (s *StreamingReader) setSeedSignature(req *http.Request) { | |||||
| // Get canonical request | |||||
| canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) | |||||
| // Get string to sign from canonical request. | |||||
| stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) | |||||
| signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) | |||||
| // Calculate signature. | |||||
| s.seedSignature = getSignature(signingKey, stringToSign) | |||||
| } | |||||
| // StreamingReader implements the chunked upload signature scheme as a | |||||
| // reader on top of req.Body's io.ReadCloser, emitting a repeating | |||||
| // "chunk header; chunk data; CRLF" sequence. | |||||
| type StreamingReader struct { | |||||
| accessKeyID string | |||||
| secretAccessKey string | |||||
| sessionToken string | |||||
| region string | |||||
| prevSignature string | |||||
| seedSignature string | |||||
| contentLen int64 // Content-Length from req header | |||||
| baseReadCloser io.ReadCloser // underlying io.ReadCloser | |||||
| bytesRead int64 // bytes read from underlying io.Reader | |||||
| buf bytes.Buffer // holds signed chunk | |||||
| chunkBuf []byte // holds raw data read from req Body | |||||
| chunkBufLen int // no. of bytes read so far into chunkBuf | |||||
| done bool // done reading the underlying reader to EOF | |||||
| reqTime time.Time | |||||
| chunkNum int | |||||
| totalChunks int | |||||
| lastChunkSize int | |||||
| } | |||||
| // signChunk - signs a chunk of chunkLen bytes read from s.baseReadCloser. | |||||
| func (s *StreamingReader) signChunk(chunkLen int) { | |||||
| // Compute chunk signature for next header | |||||
| signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, | |||||
| s.region, s.prevSignature, s.secretAccessKey) | |||||
| // For next chunk signature computation | |||||
| s.prevSignature = signature | |||||
| // Write chunk header into streaming buffer | |||||
| chunkHdr := buildChunkHeader(int64(chunkLen), signature) | |||||
| s.buf.Write(chunkHdr) | |||||
| // Write chunk data into streaming buffer | |||||
| s.buf.Write(s.chunkBuf[:chunkLen]) | |||||
| // Write the chunk trailer. | |||||
| s.buf.Write([]byte("\r\n")) | |||||
| // Reset chunkBufLen for next chunk read. | |||||
| s.chunkBufLen = 0 | |||||
| s.chunkNum++ | |||||
| } | |||||
| // setStreamingAuthHeader - builds and sets authorization header value | |||||
| // for streaming signature. | |||||
| func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { | |||||
| credential := GetCredential(s.accessKeyID, s.region, s.reqTime) | |||||
| authParts := []string{ | |||||
| signV4Algorithm + " Credential=" + credential, | |||||
| "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), | |||||
| "Signature=" + s.seedSignature, | |||||
| } | |||||
| // Set authorization header. | |||||
| auth := strings.Join(authParts, ",") | |||||
| req.Header.Set("Authorization", auth) | |||||
| } | |||||
| // StreamingSignV4 - provides chunked upload Signature V4 support by | |||||
| // wrapping req.Body in a StreamingReader that implements io.Reader. | |||||
| func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, | |||||
| region string, dataLen int64, reqTime time.Time) *http.Request { | |||||
| // Set headers needed for streaming signature. | |||||
| prepareStreamingRequest(req, sessionToken, dataLen, reqTime) | |||||
| if req.Body == nil { | |||||
| req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) | |||||
| } | |||||
| stReader := &StreamingReader{ | |||||
| baseReadCloser: req.Body, | |||||
| accessKeyID: accessKeyID, | |||||
| secretAccessKey: secretAccessKey, | |||||
| sessionToken: sessionToken, | |||||
| region: region, | |||||
| reqTime: reqTime, | |||||
| chunkBuf: make([]byte, payloadChunkSize), | |||||
| contentLen: dataLen, | |||||
| chunkNum: 1, | |||||
| totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, | |||||
| lastChunkSize: int(dataLen % payloadChunkSize), | |||||
| } | |||||
| // Add the request headers required for chunk upload signing. | |||||
| // Compute the seed signature. | |||||
| stReader.setSeedSignature(req) | |||||
| // Set the authorization header with the seed signature. | |||||
| stReader.setStreamingAuthHeader(req) | |||||
| // Set seed signature as prevSignature for subsequent | |||||
| // streaming signing process. | |||||
| stReader.prevSignature = stReader.seedSignature | |||||
| req.Body = stReader | |||||
| return req | |||||
| } | |||||
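| // A minimal usage sketch (hypothetical endpoint, credentials and payload; | |||||
| // only StreamingSignV4 itself comes from this file): | |||||
| // | |||||
| //	body := bytes.NewReader(payload) | |||||
| //	req, _ := http.NewRequest("PUT", "https://example-endpoint/bucket/object", body) | |||||
| //	req = StreamingSignV4(req, accessKey, secretKey, "", "us-east-1", | |||||
| //		int64(len(payload)), time.Now().UTC()) | |||||
| //	resp, err := http.DefaultClient.Do(req) | |||||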
| // Read - implements io.Reader, signing chunks lazily as the request | |||||
| // body is consumed. | |||||
| func (s *StreamingReader) Read(buf []byte) (int, error) { | |||||
| switch { | |||||
| // After the last chunk is read from underlying reader, we | |||||
| // never re-fill s.buf. | |||||
| case s.done: | |||||
| // s.buf is (re-)filled with the next signed chunk when it holds | |||||
| // fewer bytes than asked for. | |||||
| case s.buf.Len() < len(buf): | |||||
| s.chunkBufLen = 0 | |||||
| for { | |||||
| n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) | |||||
| // Usually we validate `err` first, but in this case | |||||
| // we are validating n > 0 for the following reasons. | |||||
| // | |||||
| // 1. n > 0, err is one of io.EOF, nil (near end of stream) | |||||
| // A Reader returning a non-zero number of bytes at the end | |||||
| // of the input stream may return either err == EOF or err == nil | |||||
| // | |||||
| // 2. n == 0, err is io.EOF (actual end of stream) | |||||
| // | |||||
| // Callers should always process the n > 0 bytes returned | |||||
| // before considering the error err. | |||||
| if n1 > 0 { | |||||
| s.chunkBufLen += n1 | |||||
| s.bytesRead += int64(n1) | |||||
| if s.chunkBufLen == payloadChunkSize || | |||||
| (s.chunkNum == s.totalChunks-1 && | |||||
| s.chunkBufLen == s.lastChunkSize) { | |||||
| // Sign the chunk and write it to s.buf. | |||||
| s.signChunk(s.chunkBufLen) | |||||
| break | |||||
| } | |||||
| } | |||||
| if err != nil { | |||||
| if err == io.EOF { | |||||
| // No more data left in baseReader - last chunk. | |||||
| // Done reading the last chunk from baseReader. | |||||
| s.done = true | |||||
| // Fail if the bytes read from baseReadCloser differ from | |||||
| // the content length provided. | |||||
| if s.bytesRead != s.contentLen { | |||||
| return 0, io.ErrUnexpectedEOF | |||||
| } | |||||
| // Sign the chunk and write it to s.buf. | |||||
| s.signChunk(0) | |||||
| break | |||||
| } | |||||
| return 0, err | |||||
| } | |||||
| } | |||||
| } | |||||
| return s.buf.Read(buf) | |||||
| } | |||||
| // Close - this method makes underlying io.ReadCloser's Close method available. | |||||
| func (s *StreamingReader) Close() error { | |||||
| return s.baseReadCloser.Close() | |||||
| } | |||||
| @@ -0,0 +1,316 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package s3signer | |||||
| import ( | |||||
| "bytes" | |||||
| "crypto/hmac" | |||||
| "crypto/sha1" | |||||
| "encoding/base64" | |||||
| "fmt" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "sort" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // Signature and API related constants. | |||||
| const ( | |||||
| signV2Algorithm = "AWS" | |||||
| ) | |||||
| // Encode input URL path to URL encoded path. | |||||
| func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { | |||||
| if virtualHost { | |||||
| reqHost := getHostAddr(req) | |||||
| dotPos := strings.Index(reqHost, ".") | |||||
| if dotPos > -1 { | |||||
| bucketName := reqHost[:dotPos] | |||||
| path = "/" + bucketName | |||||
| path += req.URL.Path | |||||
| path = s3utils.EncodePath(path) | |||||
| return | |||||
| } | |||||
| } | |||||
| path = s3utils.EncodePath(req.URL.Path) | |||||
| return | |||||
| } | |||||
| // PreSignV2 - presigns the request in the following style. | |||||
| // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. | |||||
| func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { | |||||
| // Presign is not needed for anonymous credentials. | |||||
| if accessKeyID == "" || secretAccessKey == "" { | |||||
| return &req | |||||
| } | |||||
| d := time.Now().UTC() | |||||
| // Find epoch expires when the request will expire. | |||||
| epochExpires := d.Unix() + expires | |||||
| // Add expires header if not present. | |||||
| if expiresStr := req.Header.Get("Expires"); expiresStr == "" { | |||||
| req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) | |||||
| } | |||||
| // Get presigned string to sign. | |||||
| stringToSign := preStringToSignV2(req, virtualHost) | |||||
| hm := hmac.New(sha1.New, []byte(secretAccessKey)) | |||||
| hm.Write([]byte(stringToSign)) | |||||
| // Calculate signature. | |||||
| signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) | |||||
| query := req.URL.Query() | |||||
| // Handle Google Cloud Storage specially. | |||||
| if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { | |||||
| query.Set("GoogleAccessId", accessKeyID) | |||||
| } else { | |||||
| query.Set("AWSAccessKeyId", accessKeyID) | |||||
| } | |||||
| // Fill in Expires for presigned query. | |||||
| query.Set("Expires", strconv.FormatInt(epochExpires, 10)) | |||||
| // Encode query and save. | |||||
| req.URL.RawQuery = s3utils.QueryEncode(query) | |||||
| // Save signature finally. | |||||
| req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) | |||||
| // Return. | |||||
| return &req | |||||
| } | |||||
| // PostPresignSignatureV2 - presigned signature for PostPolicy | |||||
| // request. | |||||
| func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { | |||||
| hm := hmac.New(sha1.New, []byte(secretAccessKey)) | |||||
| hm.Write([]byte(policyBase64)) | |||||
| signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) | |||||
| return signature | |||||
| } | |||||
| // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; | |||||
| // Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); | |||||
| // | |||||
| // StringToSign = HTTP-Verb + "\n" + | |||||
| // Content-Md5 + "\n" + | |||||
| // Content-Type + "\n" + | |||||
| // Date + "\n" + | |||||
| // CanonicalizedProtocolHeaders + | |||||
| // CanonicalizedResource; | |||||
| // | |||||
| // CanonicalizedResource = [ "/" + Bucket ] + | |||||
| // <HTTP-Request-URI, from the protocol name up to the query string> + | |||||
| // [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; | |||||
| // | |||||
| // CanonicalizedProtocolHeaders = <described below> | |||||
| // SignV2 signs the request before Do() (AWS Signature Version 2). | |||||
| func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { | |||||
| // Signature calculation is not needed for anonymous credentials. | |||||
| if accessKeyID == "" || secretAccessKey == "" { | |||||
| return &req | |||||
| } | |||||
| // Initial time. | |||||
| d := time.Now().UTC() | |||||
| // Add date if not present. | |||||
| if date := req.Header.Get("Date"); date == "" { | |||||
| req.Header.Set("Date", d.Format(http.TimeFormat)) | |||||
| } | |||||
| // Calculate HMAC for secretAccessKey. | |||||
| stringToSign := stringToSignV2(req, virtualHost) | |||||
| hm := hmac.New(sha1.New, []byte(secretAccessKey)) | |||||
| hm.Write([]byte(stringToSign)) | |||||
| // Prepare auth header. | |||||
| authHeader := new(bytes.Buffer) | |||||
| authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) | |||||
| encoder := base64.NewEncoder(base64.StdEncoding, authHeader) | |||||
| encoder.Write(hm.Sum(nil)) | |||||
| encoder.Close() | |||||
| // Set Authorization header. | |||||
| req.Header.Set("Authorization", authHeader.String()) | |||||
| return &req | |||||
| } | |||||
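| // The resulting header has the shape (placeholder values, not computed | |||||
| // from real credentials): | |||||
| // | |||||
| //	Authorization: AWS <AccessKeyId>:<base64(HMAC-SHA1(secret, stringToSign))> | |||||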
| // From the Amazon docs: | |||||
| // | |||||
| // StringToSign = HTTP-Verb + "\n" + | |||||
| // Content-Md5 + "\n" + | |||||
| // Content-Type + "\n" + | |||||
| // Expires + "\n" + | |||||
| // CanonicalizedProtocolHeaders + | |||||
| // CanonicalizedResource; | |||||
| func preStringToSignV2(req http.Request, virtualHost bool) string { | |||||
| buf := new(bytes.Buffer) | |||||
| // Write standard headers. | |||||
| writePreSignV2Headers(buf, req) | |||||
| // Write canonicalized protocol headers if any. | |||||
| writeCanonicalizedHeaders(buf, req) | |||||
| // Write canonicalized Query resources if any. | |||||
| writeCanonicalizedResource(buf, req, virtualHost) | |||||
| return buf.String() | |||||
| } | |||||
| // writePreSignV2Headers - write preSign v2 required headers. | |||||
| func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { | |||||
| buf.WriteString(req.Method + "\n") | |||||
| buf.WriteString(req.Header.Get("Content-Md5") + "\n") | |||||
| buf.WriteString(req.Header.Get("Content-Type") + "\n") | |||||
| buf.WriteString(req.Header.Get("Expires") + "\n") | |||||
| } | |||||
| // From the Amazon docs: | |||||
| // | |||||
| // StringToSign = HTTP-Verb + "\n" + | |||||
| // Content-Md5 + "\n" + | |||||
| // Content-Type + "\n" + | |||||
| // Date + "\n" + | |||||
| // CanonicalizedProtocolHeaders + | |||||
| // CanonicalizedResource; | |||||
| func stringToSignV2(req http.Request, virtualHost bool) string { | |||||
| buf := new(bytes.Buffer) | |||||
| // Write standard headers. | |||||
| writeSignV2Headers(buf, req) | |||||
| // Write canonicalized protocol headers if any. | |||||
| writeCanonicalizedHeaders(buf, req) | |||||
| // Write canonicalized Query resources if any. | |||||
| writeCanonicalizedResource(buf, req, virtualHost) | |||||
| return buf.String() | |||||
| } | |||||
| // writeSignV2Headers - write signV2 required headers. | |||||
| func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { | |||||
| buf.WriteString(req.Method + "\n") | |||||
| buf.WriteString(req.Header.Get("Content-Md5") + "\n") | |||||
| buf.WriteString(req.Header.Get("Content-Type") + "\n") | |||||
| buf.WriteString(req.Header.Get("Date") + "\n") | |||||
| } | |||||
| // writeCanonicalizedHeaders - write canonicalized headers. | |||||
| func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { | |||||
| var protoHeaders []string | |||||
| vals := make(map[string][]string) | |||||
| for k, vv := range req.Header { | |||||
| // All the AMZ headers should be lowercase | |||||
| lk := strings.ToLower(k) | |||||
| if strings.HasPrefix(lk, "x-amz") { | |||||
| protoHeaders = append(protoHeaders, lk) | |||||
| vals[lk] = vv | |||||
| } | |||||
| } | |||||
| sort.Strings(protoHeaders) | |||||
| for _, k := range protoHeaders { | |||||
| buf.WriteString(k) | |||||
| buf.WriteByte(':') | |||||
| for idx, v := range vals[k] { | |||||
| if idx > 0 { | |||||
| buf.WriteByte(',') | |||||
| } | |||||
| if strings.Contains(v, "\n") { | |||||
| // TODO: "Unfold" long headers that | |||||
| // span multiple lines (as allowed by | |||||
| // RFC 2616, section 4.2) by replacing | |||||
| // the folding white-space (including | |||||
| // new-line) by a single space. | |||||
| buf.WriteString(v) | |||||
| } else { | |||||
| buf.WriteString(v) | |||||
| } | |||||
| } | |||||
| buf.WriteByte('\n') | |||||
| } | |||||
| } | |||||
| // The AWS S3 Signature V2 calculation rule is given here: | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign | |||||
| // Whitelisted resources that may appear in the query string for | |||||
| // signature V2 calculation. The list must be kept alphabetically sorted. | |||||
| var resourceList = []string{ | |||||
| "acl", | |||||
| "delete", | |||||
| "lifecycle", | |||||
| "location", | |||||
| "logging", | |||||
| "notification", | |||||
| "partNumber", | |||||
| "policy", | |||||
| "requestPayment", | |||||
| "response-cache-control", | |||||
| "response-content-disposition", | |||||
| "response-content-encoding", | |||||
| "response-content-language", | |||||
| "response-content-type", | |||||
| "response-expires", | |||||
| "torrent", | |||||
| "uploadId", | |||||
| "uploads", | |||||
| "versionId", | |||||
| "versioning", | |||||
| "versions", | |||||
| "website", | |||||
| } | |||||
| // From the Amazon docs: | |||||
| // | |||||
| // CanonicalizedResource = [ "/" + Bucket ] + | |||||
| // <HTTP-Request-URI, from the protocol name up to the query string> + | |||||
| // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; | |||||
| func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { | |||||
| // Save request URL. | |||||
| requestURL := req.URL | |||||
| // Get encoded URL path. | |||||
| buf.WriteString(encodeURL2Path(&req, virtualHost)) | |||||
| if requestURL.RawQuery != "" { | |||||
| var n int | |||||
| vals, _ := url.ParseQuery(requestURL.RawQuery) | |||||
| // Verify if any sub-resource queries are present; if yes, | |||||
| // canonicalize them. | |||||
| for _, resource := range resourceList { | |||||
| if vv, ok := vals[resource]; ok && len(vv) > 0 { | |||||
| n++ | |||||
| // First element | |||||
| switch n { | |||||
| case 1: | |||||
| buf.WriteByte('?') | |||||
| // The rest | |||||
| default: | |||||
| buf.WriteByte('&') | |||||
| } | |||||
| buf.WriteString(resource) | |||||
| // Request parameters | |||||
| if len(vv[0]) > 0 { | |||||
| buf.WriteByte('=') | |||||
| buf.WriteString(vv[0]) | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
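| // Illustrative example (assumed bucket and query): for a virtual-host | |||||
| // request to "mybucket.s3.amazonaws.com" with RawQuery "uploads&foo=bar", | |||||
| // the canonical resource is "/mybucket/?uploads" - "foo" is dropped | |||||
| // because it is not in resourceList. | |||||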
| @@ -0,0 +1,315 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package s3signer | |||||
| import ( | |||||
| "bytes" | |||||
| "encoding/hex" | |||||
| "net/http" | |||||
| "sort" | |||||
| "strconv" | |||||
| "strings" | |||||
| "time" | |||||
| "github.com/minio/minio-go/pkg/s3utils" | |||||
| ) | |||||
| // Signature and API related constants. | |||||
| const ( | |||||
| signV4Algorithm = "AWS4-HMAC-SHA256" | |||||
| iso8601DateFormat = "20060102T150405Z" | |||||
| yyyymmdd = "20060102" | |||||
| ) | |||||
| /// | |||||
| /// Excerpts from @lsegal - | |||||
| /// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. | |||||
| /// | |||||
| /// User-Agent: | |||||
| /// | |||||
| /// This is ignored from signing because signing this causes | |||||
| /// problems with generating pre-signed URLs (that are executed | |||||
| /// by other agents) or when customers pass requests through | |||||
| /// proxies, which may modify the user-agent. | |||||
| /// | |||||
| /// Content-Length: | |||||
| /// | |||||
| /// This is ignored from signing because generating a pre-signed | |||||
| /// URL should not provide a content-length constraint, | |||||
| /// specifically when vending a S3 pre-signed PUT URL. The | |||||
| /// corollary to this is that when sending regular requests | |||||
| /// (non-pre-signed), the signature contains a checksum of the | |||||
| /// body, which implicitly validates the payload length (since | |||||
| /// changing the number of bytes would change the checksum) | |||||
| /// and therefore this header is not valuable in the signature. | |||||
| /// | |||||
| /// Content-Type: | |||||
| /// | |||||
| /// Signing this header causes quite a number of problems in | |||||
| /// browser environments, where browsers like to modify and | |||||
| /// normalize the content-type header in different ways. There is | |||||
| /// more information on this in https://goo.gl/2E9gyy. Avoiding | |||||
| /// this field simplifies logic and reduces the possibility of | |||||
| /// future bugs. | |||||
| /// | |||||
| /// Authorization: | |||||
| /// | |||||
| /// Is skipped for obvious reasons | |||||
| /// | |||||
| var v4IgnoredHeaders = map[string]bool{ | |||||
| "Authorization": true, | |||||
| "Content-Type": true, | |||||
| "Content-Length": true, | |||||
| "User-Agent": true, | |||||
| } | |||||
| // getSigningKey returns the HMAC-derived key used to calculate the final signature. | |||||
| func getSigningKey(secret, loc string, t time.Time) []byte { | |||||
| date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) | |||||
| location := sumHMAC(date, []byte(loc)) | |||||
| service := sumHMAC(location, []byte("s3")) | |||||
| signingKey := sumHMAC(service, []byte("aws4_request")) | |||||
| return signingKey | |||||
| } | |||||
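| // The derivation chain mirrors the AWS SigV4 documentation, e.g. for an | |||||
| // assumed date and region: | |||||
| // | |||||
| //	DateKey    = HMAC-SHA256("AWS4" + secret, "20130524") | |||||
| //	RegionKey  = HMAC-SHA256(DateKey, "us-east-1") | |||||
| //	ServiceKey = HMAC-SHA256(RegionKey, "s3") | |||||
| //	SigningKey = HMAC-SHA256(ServiceKey, "aws4_request") | |||||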
| // getSignature returns the final signature in hexadecimal form. | |||||
| func getSignature(signingKey []byte, stringToSign string) string { | |||||
| return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) | |||||
| } | |||||
| // getScope generates a scope string from a date, an AWS region, | |||||
| // and a service. | |||||
| func getScope(location string, t time.Time) string { | |||||
| scope := strings.Join([]string{ | |||||
| t.Format(yyyymmdd), | |||||
| location, | |||||
| "s3", | |||||
| "aws4_request", | |||||
| }, "/") | |||||
| return scope | |||||
| } | |||||
| // GetCredential generates a credential string. | |||||
| func GetCredential(accessKeyID, location string, t time.Time) string { | |||||
| scope := getScope(location, t) | |||||
| return accessKeyID + "/" + scope | |||||
| } | |||||
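| // Example shape (using the well-known AWS documentation access key): | |||||
| // | |||||
| //	AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request | |||||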
| // getHashedPayload gets the hexadecimal value of the SHA256 hash of | |||||
| // the request payload. | |||||
| func getHashedPayload(req http.Request) string { | |||||
| hashedPayload := req.Header.Get("X-Amz-Content-Sha256") | |||||
| if hashedPayload == "" { | |||||
| // Presign does not have a payload, use S3 recommended value. | |||||
| hashedPayload = unsignedPayload | |||||
| } | |||||
| return hashedPayload | |||||
| } | |||||
| // getCanonicalHeaders generates the list of request headers used in | |||||
| // the signature. | |||||
| func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { | |||||
| var headers []string | |||||
| vals := make(map[string][]string) | |||||
| for k, vv := range req.Header { | |||||
| if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { | |||||
| continue // ignored header | |||||
| } | |||||
| headers = append(headers, strings.ToLower(k)) | |||||
| vals[strings.ToLower(k)] = vv | |||||
| } | |||||
| headers = append(headers, "host") | |||||
| sort.Strings(headers) | |||||
| var buf bytes.Buffer | |||||
| // Save all the headers in canonical form <header>:<value> newline | |||||
| // separated for each header. | |||||
| for _, k := range headers { | |||||
| buf.WriteString(k) | |||||
| buf.WriteByte(':') | |||||
| switch { | |||||
| case k == "host": | |||||
| buf.WriteString(getHostAddr(&req)) | |||||
| fallthrough | |||||
| default: | |||||
| for idx, v := range vals[k] { | |||||
| if idx > 0 { | |||||
| buf.WriteByte(',') | |||||
| } | |||||
| buf.WriteString(v) | |||||
| } | |||||
| buf.WriteByte('\n') | |||||
| } | |||||
| } | |||||
| return buf.String() | |||||
| } | |||||
| // getSignedHeaders generates all signed request headers, | |||||
| // i.e. a lexically sorted, semicolon-separated list of lowercase | |||||
| // request header names. | |||||
| func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { | |||||
| var headers []string | |||||
| for k := range req.Header { | |||||
| if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { | |||||
| continue // ignored header | |||||
| } | |||||
| headers = append(headers, strings.ToLower(k)) | |||||
| } | |||||
| headers = append(headers, "host") | |||||
| sort.Strings(headers) | |||||
| return strings.Join(headers, ";") | |||||
| } | |||||
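| // For a typical request carrying only the headers this library sets, | |||||
| // the result looks like (illustrative): | |||||
| // | |||||
| //	host;x-amz-content-sha256;x-amz-date | |||||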
| // getCanonicalRequest generates a canonical request in the following style. | |||||
| // | |||||
| // canonicalRequest = | |||||
| // <HTTPMethod>\n | |||||
| // <CanonicalURI>\n | |||||
| // <CanonicalQueryString>\n | |||||
| // <CanonicalHeaders>\n | |||||
| // <SignedHeaders>\n | |||||
| // <HashedPayload> | |||||
| func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string { | |||||
| req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) | |||||
| canonicalRequest := strings.Join([]string{ | |||||
| req.Method, | |||||
| s3utils.EncodePath(req.URL.Path), | |||||
| req.URL.RawQuery, | |||||
| getCanonicalHeaders(req, ignoredHeaders), | |||||
| getSignedHeaders(req, ignoredHeaders), | |||||
| getHashedPayload(req), | |||||
| }, "\n") | |||||
| return canonicalRequest | |||||
| } | |||||
| // getStringToSignV4 returns the string to sign for the given time, location and canonical request. | |||||
| func getStringToSignV4(t time.Time, location, canonicalRequest string) string { | |||||
| stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" | |||||
| stringToSign = stringToSign + getScope(location, t) + "\n" | |||||
| stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) | |||||
| return stringToSign | |||||
| } | |||||
| // PreSignV4 presigns the request, in accordance with | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. | |||||
| func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { | |||||
| // Presign is not needed for anonymous credentials. | |||||
| if accessKeyID == "" || secretAccessKey == "" { | |||||
| return &req | |||||
| } | |||||
| // Initial time. | |||||
| t := time.Now().UTC() | |||||
| // Get credential string. | |||||
| credential := GetCredential(accessKeyID, location, t) | |||||
| // Get all signed headers. | |||||
| signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) | |||||
| // Set URL query. | |||||
| query := req.URL.Query() | |||||
| query.Set("X-Amz-Algorithm", signV4Algorithm) | |||||
| query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) | |||||
| query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) | |||||
| query.Set("X-Amz-SignedHeaders", signedHeaders) | |||||
| query.Set("X-Amz-Credential", credential) | |||||
| // Set session token if available. | |||||
| if sessionToken != "" { | |||||
| query.Set("X-Amz-Security-Token", sessionToken) | |||||
| } | |||||
| req.URL.RawQuery = query.Encode() | |||||
| // Get canonical request. | |||||
| canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) | |||||
| // Get string to sign from canonical request. | |||||
| stringToSign := getStringToSignV4(t, location, canonicalRequest) | |||||
| // Get hmac signing key. | |||||
| signingKey := getSigningKey(secretAccessKey, location, t) | |||||
| // Calculate signature. | |||||
| signature := getSignature(signingKey, stringToSign) | |||||
| // Add signature header to RawQuery. | |||||
| req.URL.RawQuery += "&X-Amz-Signature=" + signature | |||||
| return &req | |||||
| } | |||||
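| // The presigned URL query then carries (placeholder values): | |||||
| // | |||||
| //	X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=<access>%2F<scope> | |||||
| //	&X-Amz-Date=<iso8601>&X-Amz-Expires=<seconds> | |||||
| //	&X-Amz-SignedHeaders=host&X-Amz-Signature=<hex> | |||||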
| // PostPresignSignatureV4 - presigned signature for PostPolicy | |||||
| // requests. | |||||
| func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { | |||||
| // Get signing key. | |||||
| signingkey := getSigningKey(secretAccessKey, location, t) | |||||
| // Calculate signature. | |||||
| signature := getSignature(signingkey, policyBase64) | |||||
| return signature | |||||
| } | |||||
| // SignV4 signs the request before Do(), in accordance with | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. | |||||
| func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { | |||||
| // Signature calculation is not needed for anonymous credentials. | |||||
| if accessKeyID == "" || secretAccessKey == "" { | |||||
| return &req | |||||
| } | |||||
| // Initial time. | |||||
| t := time.Now().UTC() | |||||
| // Set x-amz-date. | |||||
| req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) | |||||
| // Set session token if available. | |||||
| if sessionToken != "" { | |||||
| req.Header.Set("X-Amz-Security-Token", sessionToken) | |||||
| } | |||||
| // Get canonical request. | |||||
| canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) | |||||
| // Get string to sign from canonical request. | |||||
| stringToSign := getStringToSignV4(t, location, canonicalRequest) | |||||
| // Get hmac signing key. | |||||
| signingKey := getSigningKey(secretAccessKey, location, t) | |||||
| // Get credential string. | |||||
| credential := GetCredential(accessKeyID, location, t) | |||||
| // Get all signed headers. | |||||
| signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) | |||||
| // Calculate signature. | |||||
| signature := getSignature(signingKey, stringToSign) | |||||
| // If regular request, construct the final authorization header. | |||||
| parts := []string{ | |||||
| signV4Algorithm + " Credential=" + credential, | |||||
| "SignedHeaders=" + signedHeaders, | |||||
| "Signature=" + signature, | |||||
| } | |||||
| // Set authorization header. | |||||
| auth := strings.Join(parts, ", ") | |||||
| req.Header.Set("Authorization", auth) | |||||
| return &req | |||||
| } | |||||
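| // The resulting header has the shape (placeholder values): | |||||
| // | |||||
| //	Authorization: AWS4-HMAC-SHA256 Credential=<access>/<scope>, | |||||
| //	SignedHeaders=host;x-amz-date, Signature=<64-hex-chars> | |||||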
| @@ -0,0 +1,49 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package s3signer | |||||
| import ( | |||||
| "crypto/hmac" | |||||
| "crypto/sha256" | |||||
| "net/http" | |||||
| ) | |||||
| // unsignedPayload - value to be set in the X-Amz-Content-Sha256 header | |||||
| // when the request payload is not signed, e.g. for presigned requests. | |||||
| const unsignedPayload = "UNSIGNED-PAYLOAD" | |||||
| // sum256 calculates the SHA256 sum of an input byte slice. | |||||
| func sum256(data []byte) []byte { | |||||
| hash := sha256.New() | |||||
| hash.Write(data) | |||||
| return hash.Sum(nil) | |||||
| } | |||||
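| // Sanity check (a known SHA256 test vector): sum256(nil) hex-encodes to | |||||
| // "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", | |||||
| // the emptySHA256 constant used by the streaming signer. | |||||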
| // sumHMAC calculates the HMAC-SHA256 of data keyed with key. | |||||
| func sumHMAC(key []byte, data []byte) []byte { | |||||
| hash := hmac.New(sha256.New, key) | |||||
| hash.Write(data) | |||||
| return hash.Sum(nil) | |||||
| } | |||||
| // getHostAddr returns the Host header if available, otherwise the host from the URL. | |||||
| func getHostAddr(req *http.Request) string { | |||||
| if req.Host != "" { | |||||
| return req.Host | |||||
| } | |||||
| return req.URL.Host | |||||
| } | |||||
| @@ -0,0 +1,331 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package s3utils | |||||
| import ( | |||||
| "bytes" | |||||
| "encoding/hex" | |||||
| "errors" | |||||
| "net" | |||||
| "net/url" | |||||
| "regexp" | |||||
| "sort" | |||||
| "strings" | |||||
| "unicode/utf8" | |||||
| ) | |||||
| // sentinelURL is the default (zero) url.URL value, which is invalid. | |||||
| var sentinelURL = url.URL{} | |||||
| // IsValidDomain validates if input string is a valid domain name. | |||||
| func IsValidDomain(host string) bool { | |||||
| // See RFC 1035, RFC 3696. | |||||
| host = strings.TrimSpace(host) | |||||
| if len(host) == 0 || len(host) > 255 { | |||||
| return false | |||||
| } | |||||
| // host cannot start or end with "-" | |||||
| if host[len(host)-1:] == "-" || host[:1] == "-" { | |||||
| return false | |||||
| } | |||||
| // host cannot start or end with "_" | |||||
| if host[len(host)-1:] == "_" || host[:1] == "_" { | |||||
| return false | |||||
| } | |||||
| // host cannot start or end with a "." | |||||
| if host[len(host)-1:] == "." || host[:1] == "." { | |||||
| return false | |||||
| } | |||||
| // All non alphanumeric characters are invalid. | |||||
| if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") { | |||||
| return false | |||||
| } | |||||
| // No need to regexp match, since the list above is non-exhaustive. | |||||
| // We accept the host here and let it fail later. | |||||
| return true | |||||
| } | |||||
| // IsValidIP parses input string for ip address validity. | |||||
| func IsValidIP(ip string) bool { | |||||
| return net.ParseIP(ip) != nil | |||||
| } | |||||
| // IsVirtualHostSupported - verifies if bucketName can be part of | |||||
| // virtual host. Currently only Amazon S3 and Google Cloud Storage | |||||
| // would support this. | |||||
| func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool { | |||||
| if endpointURL == sentinelURL { | |||||
| return false | |||||
| } | |||||
| // bucketName can be valid but '.' in the hostname will fail SSL | |||||
| // certificate validation. So do not use host-style for such buckets. | |||||
| if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") { | |||||
| return false | |||||
| } | |||||
| // Return true for all other cases | |||||
| return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) | |||||
| } | |||||
| // Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region | |||||
| // amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style. | |||||
| var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`) | |||||
| // amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack. | |||||
| var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`) | |||||
| // amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style. | |||||
| var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`) | |||||
| // amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host. | |||||
| var amazonS3ChinaHost = regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`) | |||||
| // GetRegionFromURL - returns a region from url host. | |||||
| func GetRegionFromURL(endpointURL url.URL) string { | |||||
| if endpointURL == sentinelURL { | |||||
| return "" | |||||
| } | |||||
| if endpointURL.Host == "s3-external-1.amazonaws.com" { | |||||
| return "" | |||||
| } | |||||
| if IsAmazonGovCloudEndpoint(endpointURL) { | |||||
| return "us-gov-west-1" | |||||
| } | |||||
| parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) | |||||
| if len(parts) > 1 { | |||||
| return parts[1] | |||||
| } | |||||
| parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) | |||||
| if len(parts) > 1 { | |||||
| return parts[1] | |||||
| } | |||||
| parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) | |||||
| if len(parts) > 1 { | |||||
| return parts[1] | |||||
| } | |||||
| parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) | |||||
| if len(parts) > 1 { | |||||
| return parts[1] | |||||
| } | |||||
| return "" | |||||
| } | |||||
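| // Illustrative matches for the patterns above: | |||||
| // | |||||
| //	s3-us-west-2.amazonaws.com              -> "us-west-2" | |||||
| //	s3.dualstack.eu-central-1.amazonaws.com -> "eu-central-1" | |||||
| //	s3.cn-north-1.amazonaws.com.cn          -> "cn-north-1" | |||||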
| // IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. | |||||
| func IsAmazonEndpoint(endpointURL url.URL) bool { | |||||
| if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { | |||||
| return true | |||||
| } | |||||
| return GetRegionFromURL(endpointURL) != "" | |||||
| } | |||||
| // IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. | |||||
| func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { | |||||
| if endpointURL == sentinelURL { | |||||
| return false | |||||
| } | |||||
| return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || | |||||
| IsAmazonFIPSGovCloudEndpoint(endpointURL)) | |||||
| } | |||||
| // IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. | |||||
| // See https://aws.amazon.com/compliance/fips. | |||||
| func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { | |||||
| if endpointURL == sentinelURL { | |||||
| return false | |||||
| } | |||||
| return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || | |||||
| endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" | |||||
| } | |||||
| // IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. | |||||
| // See https://aws.amazon.com/compliance/fips. | |||||
| func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { | |||||
| if endpointURL == sentinelURL { | |||||
| return false | |||||
| } | |||||
| switch endpointURL.Host { | |||||
| case "s3-fips.us-east-2.amazonaws.com": | |||||
| case "s3-fips.dualstack.us-west-1.amazonaws.com": | |||||
| case "s3-fips.dualstack.us-west-2.amazonaws.com": | |||||
| case "s3-fips.dualstack.us-east-2.amazonaws.com": | |||||
| case "s3-fips.dualstack.us-east-1.amazonaws.com": | |||||
| case "s3-fips.us-west-1.amazonaws.com": | |||||
| case "s3-fips.us-west-2.amazonaws.com": | |||||
| case "s3-fips.us-east-1.amazonaws.com": | |||||
| default: | |||||
| return false | |||||
| } | |||||
| return true | |||||
| } | |||||
| // IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. | |||||
| // See https://aws.amazon.com/compliance/fips. | |||||
| func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { | |||||
| return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) | |||||
| } | |||||
| // IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. | |||||
| func IsGoogleEndpoint(endpointURL url.URL) bool { | |||||
| if endpointURL == sentinelURL { | |||||
| return false | |||||
| } | |||||
| return endpointURL.Host == "storage.googleapis.com" | |||||
| } | |||||
| // Expects ASCII-encoded strings - from the output of EncodePath. | |||||
| func percentEncodeSlash(s string) string { | |||||
| return strings.Replace(s, "/", "%2F", -1) | |||||
| } | |||||
| // QueryEncode - encodes query values in their URL encoded form. In | |||||
| // addition to the percent encoding performed by EncodePath() used | |||||
| // here, it also percent encodes '/' (forward slash). | |||||
| func QueryEncode(v url.Values) string { | |||||
| if v == nil { | |||||
| return "" | |||||
| } | |||||
| var buf bytes.Buffer | |||||
| keys := make([]string, 0, len(v)) | |||||
| for k := range v { | |||||
| keys = append(keys, k) | |||||
| } | |||||
| sort.Strings(keys) | |||||
| for _, k := range keys { | |||||
| vs := v[k] | |||||
| prefix := percentEncodeSlash(EncodePath(k)) + "=" | |||||
| for _, v := range vs { | |||||
| if buf.Len() > 0 { | |||||
| buf.WriteByte('&') | |||||
| } | |||||
| buf.WriteString(prefix) | |||||
| buf.WriteString(percentEncodeSlash(EncodePath(v))) | |||||
| } | |||||
| } | |||||
| return buf.String() | |||||
| } | |||||
| // If the object name matches this reserved pattern, no encoding is needed. | |||||
| var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") | |||||
| // EncodePath percent-encodes a path from its UTF-8 byte representation. | |||||
| // | |||||
| // This is necessary since the regular url.Parse() and url.Values.Encode() | |||||
| // helpers do not handle every UTF-8 character; non-English characters | |||||
| // cannot be round-tripped reliably with them. | |||||
| // | |||||
| // This function is a direct replacement for that technique and supports | |||||
| // pretty much every UTF-8 character. | |||||
| func EncodePath(pathName string) string { | |||||
| if reservedObjectNames.MatchString(pathName) { | |||||
| return pathName | |||||
| } | |||||
| var encodedPathname string | |||||
| for _, s := range pathName { | |||||
| if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) | |||||
| encodedPathname = encodedPathname + string(s) | |||||
| continue | |||||
| } | |||||
| switch s { | |||||
| case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) | |||||
| encodedPathname = encodedPathname + string(s) | |||||
| continue | |||||
| default: | |||||
| n := utf8.RuneLen(s) | |||||
| if n < 0 { | |||||
| // if utf8 cannot convert, return the string as-is | |||||
| return pathName | |||||
| } | |||||
| u := make([]byte, n) | |||||
| utf8.EncodeRune(u, s) | |||||
| for _, r := range u { | |||||
| hex := hex.EncodeToString([]byte{r}) | |||||
| encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) | |||||
| } | |||||
| } | |||||
| } | |||||
| return encodedPathname | |||||
| } | |||||
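| // Example (illustrative): EncodePath("my photo.jpg") returns | |||||
| // "my%20photo.jpg" - the space (0x20) is percent-encoded while the | |||||
| // unreserved characters pass through untouched. | |||||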
| // We support '.' in bucket names, but we fall back to path-style | |||||
| // requests for such buckets. | |||||
| var ( | |||||
| validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) | |||||
| validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) | |||||
| ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) | |||||
| ) | |||||
| // Common checker for both stricter and basic validation. | |||||
| func checkBucketNameCommon(bucketName string, strict bool) (err error) { | |||||
| if strings.TrimSpace(bucketName) == "" { | |||||
| return errors.New("Bucket name cannot be empty") | |||||
| } | |||||
| if len(bucketName) < 3 { | |||||
| return errors.New("Bucket name cannot be smaller than 3 characters") | |||||
| } | |||||
| if len(bucketName) > 63 { | |||||
| return errors.New("Bucket name cannot be greater than 63 characters") | |||||
| } | |||||
| if ipAddress.MatchString(bucketName) { | |||||
| return errors.New("Bucket name cannot be an ip address") | |||||
| } | |||||
| if strings.Contains(bucketName, "..") { | |||||
| return errors.New("Bucket name contains invalid characters") | |||||
| } | |||||
| if strict { | |||||
| if !validBucketNameStrict.MatchString(bucketName) { | |||||
| err = errors.New("Bucket name contains invalid characters") | |||||
| } | |||||
| return err | |||||
| } | |||||
| if !validBucketName.MatchString(bucketName) { | |||||
| err = errors.New("Bucket name contains invalid characters") | |||||
| } | |||||
| return err | |||||
| } | |||||
| // CheckValidBucketName - checks if we have a valid input bucket name. | |||||
| func CheckValidBucketName(bucketName string) (err error) { | |||||
| return checkBucketNameCommon(bucketName, false) | |||||
| } | |||||
| // CheckValidBucketNameStrict - checks if we have a valid input bucket name. | |||||
| // This is a stricter version. | |||||
| // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html | |||||
| func CheckValidBucketNameStrict(bucketName string) (err error) { | |||||
| return checkBucketNameCommon(bucketName, true) | |||||
| } | |||||
| // CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. | |||||
| // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html | |||||
| func CheckValidObjectNamePrefix(objectName string) error { | |||||
| if len(objectName) > 1024 { | |||||
| return errors.New("Object name cannot be greater than 1024 characters") | |||||
| } | |||||
| if !utf8.ValidString(objectName) { | |||||
| return errors.New("Object name with non UTF-8 strings are not supported") | |||||
| } | |||||
| return nil | |||||
| } | |||||
| // CheckValidObjectName - checks if we have a valid input object name. | |||||
| // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html | |||||
| func CheckValidObjectName(objectName string) error { | |||||
| if strings.TrimSpace(objectName) == "" { | |||||
| return errors.New("Object name cannot be empty") | |||||
| } | |||||
| return CheckValidObjectNamePrefix(objectName) | |||||
| } | |||||
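| // Usage sketch contrasting the two bucket checkers (assumed names): | |||||
| // | |||||
| //	CheckValidBucketName("My-Bucket")       // nil: uppercase allowed | |||||
| //	CheckValidBucketNameStrict("My-Bucket") // error: strict mode is lowercase-only | |||||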
| @@ -0,0 +1,197 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package set | |||||
| import ( | |||||
| "encoding/json" | |||||
| "fmt" | |||||
| "sort" | |||||
| ) | |||||
| // StringSet - uses map as set of strings. | |||||
| type StringSet map[string]struct{} | |||||
| // ToSlice - returns StringSet as string slice. | |||||
| func (set StringSet) ToSlice() []string { | |||||
| keys := make([]string, 0, len(set)) | |||||
| for k := range set { | |||||
| keys = append(keys, k) | |||||
| } | |||||
| sort.Strings(keys) | |||||
| return keys | |||||
| } | |||||
| // IsEmpty - returns whether the set is empty or not. | |||||
| func (set StringSet) IsEmpty() bool { | |||||
| return len(set) == 0 | |||||
| } | |||||
| // Add - adds string to the set. | |||||
| func (set StringSet) Add(s string) { | |||||
| set[s] = struct{}{} | |||||
| } | |||||
| // Remove - removes string in the set. It does nothing if string does not exist in the set. | |||||
| func (set StringSet) Remove(s string) { | |||||
| delete(set, s) | |||||
| } | |||||
| // Contains - checks if string is in the set. | |||||
| func (set StringSet) Contains(s string) bool { | |||||
| _, ok := set[s] | |||||
| return ok | |||||
| } | |||||
| // FuncMatch - returns a new set containing every value that passes the | |||||
| // match function. 'matchFn' accepts an element of the set as its first | |||||
| // argument and 'matchString' as its second; it may apply any logic to | |||||
| // compare the two and should return true to include the element in the | |||||
| // output set, else the element is ignored. | |||||
| func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k := range set { | |||||
| if matchFn(k, matchString) { | |||||
| nset.Add(k) | |||||
| } | |||||
| } | |||||
| return nset | |||||
| } | |||||
| // ApplyFunc - returns a new set containing each value processed by | |||||
| // 'applyFn'. 'applyFn' accepts an element of the set as its argument | |||||
| // and returns the processed string. | |||||
| func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k := range set { | |||||
| nset.Add(applyFn(k)) | |||||
| } | |||||
| return nset | |||||
| } | |||||
| // Equals - checks whether given set is equal to current set or not. | |||||
| func (set StringSet) Equals(sset StringSet) bool { | |||||
| // If length of set is not equal to length of given set, the | |||||
| // set is not equal to given set. | |||||
| if len(set) != len(sset) { | |||||
| return false | |||||
| } | |||||
| // As both sets are equal in length, check that each element is present in the given set. | |||||
| for k := range set { | |||||
| if _, ok := sset[k]; !ok { | |||||
| return false | |||||
| } | |||||
| } | |||||
| return true | |||||
| } | |||||
| // Intersection - returns the intersection with given set as new set. | |||||
| func (set StringSet) Intersection(sset StringSet) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k := range set { | |||||
| if _, ok := sset[k]; ok { | |||||
| nset.Add(k) | |||||
| } | |||||
| } | |||||
| return nset | |||||
| } | |||||
| // Difference - returns the difference with given set as new set. | |||||
| func (set StringSet) Difference(sset StringSet) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k := range set { | |||||
| if _, ok := sset[k]; !ok { | |||||
| nset.Add(k) | |||||
| } | |||||
| } | |||||
| return nset | |||||
| } | |||||
| // Union - returns the union with given set as new set. | |||||
| func (set StringSet) Union(sset StringSet) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k := range set { | |||||
| nset.Add(k) | |||||
| } | |||||
| for k := range sset { | |||||
| nset.Add(k) | |||||
| } | |||||
| return nset | |||||
| } | |||||
| // MarshalJSON - converts to JSON data. | |||||
| func (set StringSet) MarshalJSON() ([]byte, error) { | |||||
| return json.Marshal(set.ToSlice()) | |||||
| } | |||||
| // UnmarshalJSON - parses JSON data and creates new set with it. | |||||
| // If 'data' contains a JSON string array, the set contains each string. | |||||
| // If 'data' contains a JSON string, the set contains that string as its one element. | |||||
| // If 'data' contains any other JSON type, a JSON parse error is returned. | |||||
| func (set *StringSet) UnmarshalJSON(data []byte) error { | |||||
| sl := []string{} | |||||
| var err error | |||||
| if err = json.Unmarshal(data, &sl); err == nil { | |||||
| *set = make(StringSet) | |||||
| for _, s := range sl { | |||||
| set.Add(s) | |||||
| } | |||||
| } else { | |||||
| var s string | |||||
| if err = json.Unmarshal(data, &s); err == nil { | |||||
| *set = make(StringSet) | |||||
| set.Add(s) | |||||
| } | |||||
| } | |||||
| return err | |||||
| } | |||||
| // String - returns printable string of the set. | |||||
| func (set StringSet) String() string { | |||||
| return fmt.Sprintf("%s", set.ToSlice()) | |||||
| } | |||||
| // NewStringSet - creates new string set. | |||||
| func NewStringSet() StringSet { | |||||
| return make(StringSet) | |||||
| } | |||||
| // CreateStringSet - creates new string set with given string values. | |||||
| func CreateStringSet(sl ...string) StringSet { | |||||
| set := make(StringSet) | |||||
| for _, k := range sl { | |||||
| set.Add(k) | |||||
| } | |||||
| return set | |||||
| } | |||||
| // CopyStringSet - returns copy of given set. | |||||
| func CopyStringSet(set StringSet) StringSet { | |||||
| nset := NewStringSet() | |||||
| for k, v := range set { | |||||
| nset[k] = v | |||||
| } | |||||
| return nset | |||||
| } | |||||
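| // Usage sketch (illustrative values): | |||||
| // | |||||
| //	a := CreateStringSet("x", "y") | |||||
| //	b := CreateStringSet("y", "z") | |||||
| //	a.Union(b).ToSlice()        // ["x" "y" "z"] | |||||
| //	a.Intersection(b).ToSlice() // ["y"] | |||||
| //	a.Difference(b).ToSlice()   // ["x"] | |||||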
| @@ -0,0 +1,270 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "encoding/base64" | |||||
| "fmt" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| // expirationDateFormat date format for expiration key in json policy. | |||||
| const expirationDateFormat = "2006-01-02T15:04:05.999Z" | |||||
| // policyCondition explanation: | |||||
| // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html | |||||
| // | |||||
| // Example: | |||||
| // | |||||
| // policyCondition { | |||||
| // matchType: "eq", | |||||
| // condition: "$Content-Type", | |||||
| // value: "image/png", | |||||
| // } | |||||
| // | |||||
| type policyCondition struct { | |||||
| matchType string | |||||
| condition string | |||||
| value string | |||||
| } | |||||
| // PostPolicy - Provides strict static type conversion and validation | |||||
| // for Amazon S3's POST policy JSON string. | |||||
| type PostPolicy struct { | |||||
| // Expiration date and time of the POST policy. | |||||
| expiration time.Time | |||||
| // Collection of different policy conditions. | |||||
| conditions []policyCondition | |||||
| // ContentLengthRange minimum and maximum allowable size for the | |||||
| // uploaded content. | |||||
| contentLengthRange struct { | |||||
| min int64 | |||||
| max int64 | |||||
| } | |||||
| // Post form data. | |||||
| formData map[string]string | |||||
| } | |||||
| // NewPostPolicy - Instantiate new post policy. | |||||
| func NewPostPolicy() *PostPolicy { | |||||
| p := &PostPolicy{} | |||||
| p.conditions = make([]policyCondition, 0) | |||||
| p.formData = make(map[string]string) | |||||
| return p | |||||
| } | |||||
| // SetExpires - Sets expiration time for the new policy. | |||||
| func (p *PostPolicy) SetExpires(t time.Time) error { | |||||
| if t.IsZero() { | |||||
| return ErrInvalidArgument("No expiry time set.") | |||||
| } | |||||
| p.expiration = t | |||||
| return nil | |||||
| } | |||||
| // SetKey - Sets an object name for the policy based upload. | |||||
| func (p *PostPolicy) SetKey(key string) error { | |||||
| if strings.TrimSpace(key) == "" { | |||||
| return ErrInvalidArgument("Object name is empty.") | |||||
| } | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$key", | |||||
| value: key, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData["key"] = key | |||||
| return nil | |||||
| } | |||||
| // SetKeyStartsWith - Sets an object name prefix that a policy based | |||||
| // upload must start with. | |||||
| func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { | |||||
| if strings.TrimSpace(keyStartsWith) == "" { | |||||
| return ErrInvalidArgument("Object prefix is empty.") | |||||
| } | |||||
| policyCond := policyCondition{ | |||||
| matchType: "starts-with", | |||||
| condition: "$key", | |||||
| value: keyStartsWith, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData["key"] = keyStartsWith | |||||
| return nil | |||||
| } | |||||
// SetBucket - Sets the bucket to which objects will be uploaded.
func (p *PostPolicy) SetBucket(bucketName string) error {
if strings.TrimSpace(bucketName) == "" {
| return ErrInvalidArgument("Bucket name is empty.") | |||||
| } | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$bucket", | |||||
| value: bucketName, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData["bucket"] = bucketName | |||||
| return nil | |||||
| } | |||||
// SetContentType - Sets the content-type of the object for this
// policy-based upload.
func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" {
| return ErrInvalidArgument("No content type specified.") | |||||
| } | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$Content-Type", | |||||
| value: contentType, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData["Content-Type"] = contentType | |||||
| return nil | |||||
| } | |||||
// SetContentLengthRange - Sets the minimum and maximum allowable
// size for the uploaded content.
| func (p *PostPolicy) SetContentLengthRange(min, max int64) error { | |||||
| if min > max { | |||||
| return ErrInvalidArgument("Minimum limit is larger than maximum limit.") | |||||
| } | |||||
| if min < 0 { | |||||
| return ErrInvalidArgument("Minimum limit cannot be negative.") | |||||
| } | |||||
| if max < 0 { | |||||
| return ErrInvalidArgument("Maximum limit cannot be negative.") | |||||
| } | |||||
| p.contentLengthRange.min = min | |||||
| p.contentLengthRange.max = max | |||||
| return nil | |||||
| } | |||||
// SetSuccessStatusAction - Sets the success_action_status value, the
// HTTP status code returned on a successful policy-based upload.
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
if strings.TrimSpace(status) == "" {
| return ErrInvalidArgument("Status is empty") | |||||
| } | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: "$success_action_status", | |||||
| value: status, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData["success_action_status"] = status | |||||
| return nil | |||||
| } | |||||
// SetUserMetadata - Sets a user metadata key/value pair; the value is
// stored under the x-amz-meta-<key> header and can be retrieved
// through a HEAD request or an event.
func (p *PostPolicy) SetUserMetadata(key string, value string) error {
if strings.TrimSpace(key) == "" {
return ErrInvalidArgument("Key is empty")
}
if strings.TrimSpace(value) == "" {
| return ErrInvalidArgument("Value is empty") | |||||
| } | |||||
| headerName := fmt.Sprintf("x-amz-meta-%s", key) | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: fmt.Sprintf("$%s", headerName), | |||||
| value: value, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData[headerName] = value | |||||
| return nil | |||||
| } | |||||
// SetUserData - Sets a key/value pair stored under the x-amz-<key>
// form field; it can be retrieved through a HEAD request or an event.
| func (p *PostPolicy) SetUserData(key string, value string) error { | |||||
| if key == "" { | |||||
| return ErrInvalidArgument("Key is empty") | |||||
| } | |||||
| if value == "" { | |||||
| return ErrInvalidArgument("Value is empty") | |||||
| } | |||||
| headerName := fmt.Sprintf("x-amz-%s", key) | |||||
| policyCond := policyCondition{ | |||||
| matchType: "eq", | |||||
| condition: fmt.Sprintf("$%s", headerName), | |||||
| value: value, | |||||
| } | |||||
| if err := p.addNewPolicy(policyCond); err != nil { | |||||
| return err | |||||
| } | |||||
| p.formData[headerName] = value | |||||
| return nil | |||||
| } | |||||
// addNewPolicy - internal helper that validates and appends a new policy condition.
| func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { | |||||
| if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { | |||||
| return ErrInvalidArgument("Policy fields are empty.") | |||||
| } | |||||
| p.conditions = append(p.conditions, policyCond) | |||||
| return nil | |||||
| } | |||||
// String - implements the Stringer interface, returning the policy as a JSON-formatted string.
| func (p PostPolicy) String() string { | |||||
| return string(p.marshalJSON()) | |||||
| } | |||||
// marshalJSON - Returns the policy marshalled as JSON bytes.
| func (p PostPolicy) marshalJSON() []byte { | |||||
| expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` | |||||
| var conditionsStr string | |||||
| conditions := []string{} | |||||
| for _, po := range p.conditions { | |||||
| conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) | |||||
| } | |||||
| if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { | |||||
| conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", | |||||
| p.contentLengthRange.min, p.contentLengthRange.max)) | |||||
| } | |||||
| if len(conditions) > 0 { | |||||
| conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" | |||||
| } | |||||
| retStr := "{" | |||||
| retStr = retStr + expirationStr + "," | |||||
| retStr = retStr + conditionsStr | |||||
| retStr = retStr + "}" | |||||
| return []byte(retStr) | |||||
| } | |||||
// base64 - Returns the base64 encoding of the policy's marshalled JSON.
| func (p PostPolicy) base64() string { | |||||
| return base64.StdEncoding.EncodeToString(p.marshalJSON()) | |||||
| } | |||||
| @@ -0,0 +1,69 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import "time" | |||||
// newRetryTimerContinous creates a timer with exponentially increasing delays that continues indefinitely until doneCh is closed.
| func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { | |||||
| attemptCh := make(chan int) | |||||
| // normalize jitter to the range [0, 1.0] | |||||
| if jitter < NoJitter { | |||||
| jitter = NoJitter | |||||
| } | |||||
| if jitter > MaxJitter { | |||||
| jitter = MaxJitter | |||||
| } | |||||
| // computes the exponential backoff duration according to | |||||
| // https://www.awsarchitectureblog.com/2015/03/backoff.html | |||||
| exponentialBackoffWait := func(attempt int) time.Duration { | |||||
| // 1<<uint(attempt) below could overflow, so limit the value of attempt | |||||
| maxAttempt := 30 | |||||
| if attempt > maxAttempt { | |||||
| attempt = maxAttempt | |||||
| } | |||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
| sleep := unit * time.Duration(1<<uint(attempt)) | |||||
| if sleep > cap { | |||||
| sleep = cap | |||||
| } | |||||
| if jitter != NoJitter { | |||||
| sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) | |||||
| } | |||||
| return sleep | |||||
| } | |||||
| go func() { | |||||
| defer close(attemptCh) | |||||
| var nextBackoff int | |||||
| for { | |||||
| select { | |||||
// Attempts start from 0 and grow without bound.
| case attemptCh <- nextBackoff: | |||||
| nextBackoff++ | |||||
| case <-doneCh: | |||||
| // Stop the routine. | |||||
| return | |||||
| } | |||||
| time.Sleep(exponentialBackoffWait(nextBackoff)) | |||||
| } | |||||
| }() | |||||
| return attemptCh | |||||
| } | |||||
| @@ -0,0 +1,153 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
| import ( | |||||
| "net" | |||||
| "net/http" | |||||
| "net/url" | |||||
| "strings" | |||||
| "time" | |||||
| ) | |||||
| // MaxRetry is the maximum number of retries before stopping. | |||||
| var MaxRetry = 10 | |||||
| // MaxJitter will randomize over the full exponential backoff time | |||||
| const MaxJitter = 1.0 | |||||
| // NoJitter disables the use of jitter for randomizing the exponential backoff time | |||||
| const NoJitter = 0.0 | |||||
// DefaultRetryUnit - default base unit for the exponential backoff;
// defaults to 1 second.
| const DefaultRetryUnit = time.Second | |||||
// DefaultRetryCap - No retry attempt ever waits longer than this
// maximum time duration.
| const DefaultRetryCap = time.Second * 30 | |||||
| // newRetryTimer creates a timer with exponentially increasing | |||||
| // delays until the maximum retry attempts are reached. | |||||
| func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { | |||||
| attemptCh := make(chan int) | |||||
| // computes the exponential backoff duration according to | |||||
| // https://www.awsarchitectureblog.com/2015/03/backoff.html | |||||
| exponentialBackoffWait := func(attempt int) time.Duration { | |||||
| // normalize jitter to the range [0, 1.0] | |||||
| if jitter < NoJitter { | |||||
| jitter = NoJitter | |||||
| } | |||||
| if jitter > MaxJitter { | |||||
| jitter = MaxJitter | |||||
| } | |||||
// sleep = random_between(0, min(cap, base * 2 ** attempt))
| sleep := unit * time.Duration(1<<uint(attempt)) | |||||
| if sleep > cap { | |||||
| sleep = cap | |||||
| } | |||||
| if jitter != NoJitter { | |||||
| sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) | |||||
| } | |||||
| return sleep | |||||
| } | |||||
| go func() { | |||||
| defer close(attemptCh) | |||||
| for i := 0; i < maxRetry; i++ { | |||||
| select { | |||||
| // Attempts start from 1. | |||||
| case attemptCh <- i + 1: | |||||
| case <-doneCh: | |||||
| // Stop the routine. | |||||
| return | |||||
| } | |||||
| time.Sleep(exponentialBackoffWait(i)) | |||||
| } | |||||
| }() | |||||
| return attemptCh | |||||
| } | |||||
// isHTTPReqErrorRetryable - reports whether an HTTP request error is
// retryable, such as an i/o timeout or a broken connection.
| func isHTTPReqErrorRetryable(err error) bool { | |||||
| if err == nil { | |||||
| return false | |||||
| } | |||||
| switch e := err.(type) { | |||||
| case *url.Error: | |||||
| switch e.Err.(type) { | |||||
| case *net.DNSError, *net.OpError, net.UnknownNetworkError: | |||||
| return true | |||||
| } | |||||
| if strings.Contains(err.Error(), "Connection closed by foreign host") { | |||||
| return true | |||||
| } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { | |||||
| // If error is - tlsHandshakeTimeoutError, retry. | |||||
| return true | |||||
| } else if strings.Contains(err.Error(), "i/o timeout") { | |||||
| // If error is - tcp timeoutError, retry. | |||||
| return true | |||||
| } else if strings.Contains(err.Error(), "connection timed out") { | |||||
| // If err is a net.Dial timeout, retry. | |||||
| return true | |||||
| } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { | |||||
| // If error is transport connection broken, retry. | |||||
| return true | |||||
| } | |||||
| } | |||||
| return false | |||||
| } | |||||
| // List of AWS S3 error codes which are retryable. | |||||
| var retryableS3Codes = map[string]struct{}{ | |||||
| "RequestError": {}, | |||||
| "RequestTimeout": {}, | |||||
| "Throttling": {}, | |||||
| "ThrottlingException": {}, | |||||
| "RequestLimitExceeded": {}, | |||||
| "RequestThrottled": {}, | |||||
| "InternalError": {}, | |||||
| "ExpiredToken": {}, | |||||
| "ExpiredTokenException": {}, | |||||
| "SlowDown": {}, | |||||
| // Add more AWS S3 codes here. | |||||
| } | |||||
// isS3CodeRetryable - reports whether the given S3 error code is retryable.
| func isS3CodeRetryable(s3Code string) (ok bool) { | |||||
| _, ok = retryableS3Codes[s3Code] | |||||
| return ok | |||||
| } | |||||
| // List of HTTP status codes which are retryable. | |||||
| var retryableHTTPStatusCodes = map[int]struct{}{ | |||||
| 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet | |||||
| http.StatusInternalServerError: {}, | |||||
| http.StatusBadGateway: {}, | |||||
| http.StatusServiceUnavailable: {}, | |||||
| // Add more HTTP status codes here. | |||||
| } | |||||
// isHTTPStatusRetryable - reports whether the given HTTP status code is retryable.
| func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { | |||||
| _, ok = retryableHTTPStatusCodes[httpStatusCode] | |||||
| return ok | |||||
| } | |||||
| @@ -0,0 +1,52 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
// awsS3EndpointMap - maps an AWS region to its Amazon S3 endpoint (dualstack where available).
| var awsS3EndpointMap = map[string]string{ | |||||
| "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", | |||||
| "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", | |||||
| "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", | |||||
| "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", | |||||
| "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", | |||||
| "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", | |||||
| "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", | |||||
| "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", | |||||
| "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", | |||||
| "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", | |||||
| "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", | |||||
| "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", | |||||
| "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", | |||||
| "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", | |||||
| "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", | |||||
| "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", | |||||
| "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", | |||||
| "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", | |||||
| "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", | |||||
| "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn", | |||||
| } | |||||
// getS3Endpoint returns the Amazon S3 endpoint for the given bucket location (region).
| func getS3Endpoint(bucketLocation string) (s3Endpoint string) { | |||||
| s3Endpoint, ok := awsS3EndpointMap[bucketLocation] | |||||
| if !ok { | |||||
| // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. | |||||
| s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" | |||||
| } | |||||
| return s3Endpoint | |||||
| } | |||||
| @@ -0,0 +1,61 @@ | |||||
| /* | |||||
| * Minio Go Library for Amazon S3 Compatible Cloud Storage | |||||
| * Copyright 2015-2017 Minio, Inc. | |||||
| * | |||||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| * you may not use this file except in compliance with the License. | |||||
| * You may obtain a copy of the License at | |||||
| * | |||||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||||
| * | |||||
| * Unless required by applicable law or agreed to in writing, software | |||||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| * See the License for the specific language governing permissions and | |||||
| * limitations under the License. | |||||
| */ | |||||
| package minio | |||||
// Non-exhaustive list of AWS S3 standard error responses -
| // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html | |||||
| var s3ErrorResponseMap = map[string]string{ | |||||
| "AccessDenied": "Access Denied.", | |||||
| "BadDigest": "The Content-Md5 you specified did not match what we received.", | |||||
| "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", | |||||
| "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", | |||||
| "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", | |||||
| "InternalError": "We encountered an internal error, please try again.", | |||||
| "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", | |||||
| "InvalidBucketName": "The specified bucket is not valid.", | |||||
| "InvalidDigest": "The Content-Md5 you specified is not valid.", | |||||
| "InvalidRange": "The requested range is not satisfiable", | |||||
| "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", | |||||
| "MissingContentLength": "You must provide the Content-Length HTTP header.", | |||||
| "MissingContentMD5": "Missing required header for this request: Content-Md5.", | |||||
| "MissingRequestBodyError": "Request body is empty.", | |||||
| "NoSuchBucket": "The specified bucket does not exist.", | |||||
| "NoSuchBucketPolicy": "The bucket policy does not exist", | |||||
| "NoSuchKey": "The specified key does not exist.", | |||||
| "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", | |||||
| "NotImplemented": "A header you provided implies functionality that is not implemented", | |||||
| "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", | |||||
| "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", | |||||
| "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", | |||||
| "MethodNotAllowed": "The specified method is not allowed against this resource.", | |||||
| "InvalidPart": "One or more of the specified parts could not be found.", | |||||
| "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", | |||||
| "InvalidObjectState": "The operation is not valid for the current state of the object.", | |||||
| "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", | |||||
| "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", | |||||
| "BucketNotEmpty": "The bucket you tried to delete is not empty", | |||||
| "AllAccessDisabled": "All access to this bucket has been disabled.", | |||||
| "MalformedPolicy": "Policy has invalid resource.", | |||||
| "MissingFields": "Missing fields in request.", | |||||
| "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".", | |||||
| "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", | |||||
| "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", | |||||
| "InvalidDuration": "Duration provided in the request is invalid.", | |||||
| "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", | |||||
| // Add new API errors here. | |||||
| } | |||||