| @@ -0,0 +1,156 @@ | |||
| package minio_ext | |||
| import ( | |||
| "encoding/xml" | |||
| "fmt" | |||
| "net/http" | |||
| ) | |||
// ErrorResponse mirrors the XML error document returned by
// S3-compatible servers (<Error>...</Error>) and also implements the
// error interface (see Error), so API failures can be returned directly
// as Go errors.
type ErrorResponse struct {
	XMLName    xml.Name `xml:"Error" json:"-"`
	Code       string   // Machine-readable error code, e.g. "NoSuchKey".
	Message    string   // Human-readable description of the failure.
	BucketName string   // Bucket the failed request targeted, if any.
	Key        string   // Object key the failed request targeted, if any.
	RequestID  string   `xml:"RequestId"` // Server-assigned request ID.
	HostID     string   `xml:"HostId"`    // Server-assigned host ID.
	// Region where the bucket is located. This header is returned
	// only in HEAD bucket and ListObjects response.
	Region string
	// Underlying HTTP status code for the returned error
	StatusCode int `xml:"-" json:"-"`
}
// Error returns the server-provided message, satisfying the built-in
// error interface so an ErrorResponse value can be used as an error.
func (e ErrorResponse) Error() string {
	return e.Message
}
const (
	// reportIssue is appended to internal-error messages to point
	// users at the upstream issue tracker.
	reportIssue = "Please report this issue at https://github.com/minio/minio/issues."
)
| // httpRespToErrorResponse returns a new encoded ErrorResponse | |||
| // structure as error. | |||
| func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { | |||
| if resp == nil { | |||
| msg := "Response is empty. " + reportIssue | |||
| return ErrInvalidArgument(msg) | |||
| } | |||
| errResp := ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| } | |||
| err := xmlDecoder(resp.Body, &errResp) | |||
| // Xml decoding failed with no body, fall back to HTTP headers. | |||
| if err != nil { | |||
| switch resp.StatusCode { | |||
| case http.StatusNotFound: | |||
| if objectName == "" { | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: "NoSuchBucket", | |||
| Message: "The specified bucket does not exist.", | |||
| BucketName: bucketName, | |||
| } | |||
| } else { | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: "NoSuchKey", | |||
| Message: "The specified key does not exist.", | |||
| BucketName: bucketName, | |||
| Key: objectName, | |||
| } | |||
| } | |||
| case http.StatusForbidden: | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: "AccessDenied", | |||
| Message: "Access Denied.", | |||
| BucketName: bucketName, | |||
| Key: objectName, | |||
| } | |||
| case http.StatusConflict: | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: "Conflict", | |||
| Message: "Bucket not empty.", | |||
| BucketName: bucketName, | |||
| } | |||
| case http.StatusPreconditionFailed: | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: "PreconditionFailed", | |||
| Message: s3ErrorResponseMap["PreconditionFailed"], | |||
| BucketName: bucketName, | |||
| Key: objectName, | |||
| } | |||
| default: | |||
| errResp = ErrorResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Code: resp.Status, | |||
| Message: resp.Status, | |||
| BucketName: bucketName, | |||
| } | |||
| } | |||
| } | |||
| // Save hostID, requestID and region information | |||
| // from headers if not available through error XML. | |||
| if errResp.RequestID == "" { | |||
| errResp.RequestID = resp.Header.Get("x-amz-request-id") | |||
| } | |||
| if errResp.HostID == "" { | |||
| errResp.HostID = resp.Header.Get("x-amz-id-2") | |||
| } | |||
| if errResp.Region == "" { | |||
| errResp.Region = resp.Header.Get("x-amz-bucket-region") | |||
| } | |||
| if errResp.Code == "InvalidRegion" && errResp.Region != "" { | |||
| errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) | |||
| } | |||
| return errResp | |||
| } | |||
| func ToErrorResponse(err error) ErrorResponse { | |||
| switch err := err.(type) { | |||
| case ErrorResponse: | |||
| return err | |||
| default: | |||
| return ErrorResponse{} | |||
| } | |||
| } | |||
| // ErrInvalidArgument - Invalid argument response. | |||
| func ErrInvalidArgument(message string) error { | |||
| return ErrorResponse{ | |||
| Code: "InvalidArgument", | |||
| Message: message, | |||
| RequestID: "minio", | |||
| } | |||
| } | |||
| // ErrEntityTooLarge - Input size is larger than supported maximum. | |||
| func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { | |||
| msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) | |||
| return ErrorResponse{ | |||
| StatusCode: http.StatusBadRequest, | |||
| Code: "EntityTooLarge", | |||
| Message: msg, | |||
| BucketName: bucketName, | |||
| Key: objectName, | |||
| } | |||
| } | |||
| // ErrEntityTooSmall - Input size is smaller than supported minimum. | |||
| func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { | |||
| msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) | |||
| return ErrorResponse{ | |||
| StatusCode: http.StatusBadRequest, | |||
| Code: "EntityTooSmall", | |||
| Message: msg, | |||
| BucketName: bucketName, | |||
| Key: objectName, | |||
| } | |||
| } | |||
| @@ -0,0 +1,62 @@ | |||
| /* | |||
| * MinIO Go Library for Amazon S3 Compatible Cloud Storage | |||
| * Copyright 2015-2017 MinIO, Inc. | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| package minio_ext | |||
/// Multipart upload defaults.
const (
	// absMinPartSize - absolute minimum part size (5 MiB) below which
	// a part in a multipart upload may not be uploaded.
	absMinPartSize = 1024 * 1024 * 5

	// minPartSize - minimum part size 128MiB per object after which
	// putObject behaves internally as multipart.
	minPartSize = 1024 * 1024 * 128

	// maxPartsCount - maximum number of parts for a single multipart session.
	maxPartsCount = 10000

	// maxPartSize - maximum part size 5GiB for a single multipart upload
	// operation.
	maxPartSize = 1024 * 1024 * 1024 * 5

	// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
	// operation.
	maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5

	// maxMultipartPutObjectSize - maximum size 5TiB of object for
	// Multipart operation.
	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5

	// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
	// we don't want to sign the request payload
	unsignedPayload = "UNSIGNED-PAYLOAD"

	// totalWorkers - number of parallel workers used for multipart operation.
	totalWorkers = 4
)

// Signature related constants.
const (
	signV4Algorithm   = "AWS4-HMAC-SHA256"
	iso8601DateFormat = "20060102T150405Z"
)

// amzStorageClass - storage class header constant.
const amzStorageClass = "X-Amz-Storage-Class"

// amzWebsiteRedirectLocation - website redirect location header constant.
const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
| @@ -0,0 +1,85 @@ | |||
| package minio_ext | |||
| import ( | |||
| "net/http" | |||
| "net/url" | |||
| "time" | |||
| ) | |||
// StringMap represents map with custom UnmarshalXML
// NOTE(review): the custom UnmarshalXML referenced above is not defined
// in this file — confirm it exists elsewhere in the package.
type StringMap map[string]string
// CommonPrefix container for prefix response. Returned by list
// operations when a delimiter groups keys under a shared prefix.
type CommonPrefix struct {
	Prefix string
}
// ObjectInfo container for object metadata. Populated from list and
// stat responses; Err carries a per-object error when listing streams
// results.
type ObjectInfo struct {
	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
	// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
	// each parts concatenated into one string.
	ETag string `json:"etag"`

	Key          string    `json:"name"`         // Name of the object
	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
	Size         int64     `json:"size"`         // Size in bytes of the object.
	ContentType  string    `json:"contentType"`  // A standard MIME type describing the format of the object data.
	Expires      time.Time `json:"expires"`      // The date and time at which the object is no longer able to be cached.

	// Collection of additional metadata on the object.
	// eg: x-amz-meta-*, content-encoding etc.
	Metadata http.Header `json:"metadata" xml:"-"`

	// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
	UserMetadata StringMap `json:"userMetadata"`

	// Owner name.
	Owner struct {
		DisplayName string `json:"name"`
		ID          string `json:"id"`
	} `json:"owner"`

	// The class of storage used to store the object.
	StorageClass string `json:"storageClass"`

	// Error encountered while producing this entry, if any.
	Err error `json:"-"`
}
// ListBucketResult container for listObjects response. Field names
// match the S3 ListObjects (v1) XML response document.
type ListBucketResult struct {
	// A response can contain CommonPrefixes only if you have
	// specified a delimiter.
	CommonPrefixes []CommonPrefix
	// Metadata about each object returned.
	Contents  []ObjectInfo
	Delimiter string

	// Encoding type used to encode object keys in the response.
	EncodingType string

	// A flag that indicates whether or not ListObjects returned all of the results
	// that satisfied the search criteria.
	IsTruncated bool
	Marker      string
	MaxKeys     int64
	Name        string

	// When response is truncated (the IsTruncated element value in
	// the response is true), you can use the key name in this field
	// as marker in the subsequent request to get next set of objects.
	// Object storage lists objects in alphabetical order Note: This
	// element is returned only if you have delimiter request
	// parameter specified. If response does not include the NextMaker
	// and it is truncated, you can use the value of the last Key in
	// the response as the marker in the subsequent request to get the
	// next set of object keys.
	NextMarker string
	Prefix     string
}
var (
	// Hex encoded string of nil sha256sum bytes (sha256 of the empty
	// input).
	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// Sentinel URL is the default url value which is invalid; used to
	// detect an unset endpoint by comparison.
	sentinelURL = url.URL{}
)
| @@ -0,0 +1,82 @@ | |||
| // +build go1.7 go1.8 | |||
| /* | |||
| * MinIO Go Library for Amazon S3 Compatible Cloud Storage | |||
| * Copyright 2017-2018 MinIO, Inc. | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| package minio_ext | |||
| import ( | |||
| "crypto/tls" | |||
| "crypto/x509" | |||
| "net" | |||
| "net/http" | |||
| "time" | |||
| "golang.org/x/net/http2" | |||
| ) | |||
| // DefaultTransport - this default transport is similar to | |||
| // http.DefaultTransport but with additional param DisableCompression | |||
| // is set to true to avoid decompressing content with 'gzip' encoding. | |||
| var DefaultTransport = func(secure bool) (http.RoundTripper, error) { | |||
| tr := &http.Transport{ | |||
| Proxy: http.ProxyFromEnvironment, | |||
| DialContext: (&net.Dialer{ | |||
| Timeout: 30 * time.Second, | |||
| KeepAlive: 30 * time.Second, | |||
| }).DialContext, | |||
| MaxIdleConns: 1024, | |||
| MaxIdleConnsPerHost: 1024, | |||
| IdleConnTimeout: 90 * time.Second, | |||
| TLSHandshakeTimeout: 10 * time.Second, | |||
| ExpectContinueTimeout: 1 * time.Second, | |||
| // Set this value so that the underlying transport round-tripper | |||
| // doesn't try to auto decode the body of objects with | |||
| // content-encoding set to `gzip`. | |||
| // | |||
| // Refer: | |||
| // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 | |||
| DisableCompression: true, | |||
| } | |||
| if secure { | |||
| rootCAs, _ := x509.SystemCertPool() | |||
| if rootCAs == nil { | |||
| // In some systems (like Windows) system cert pool is | |||
| // not supported or no certificates are present on the | |||
| // system - so we create a new cert pool. | |||
| rootCAs = x509.NewCertPool() | |||
| } | |||
| // Keep TLS config. | |||
| tlsConfig := &tls.Config{ | |||
| RootCAs: rootCAs, | |||
| // Can't use SSLv3 because of POODLE and BEAST | |||
| // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher | |||
| // Can't use TLSv1.1 because of RC4 cipher usage | |||
| MinVersion: tls.VersionTLS12, | |||
| } | |||
| tr.TLSClientConfig = tlsConfig | |||
| // Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2. | |||
| // See https://github.com/golang/go/issues/14275 | |||
| if err := http2.ConfigureTransport(tr); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return tr, nil | |||
| } | |||
| @@ -0,0 +1,185 @@ | |||
| package minio_ext | |||
| import ( | |||
| "encoding/hex" | |||
| "encoding/xml" | |||
| "io" | |||
| "io/ioutil" | |||
| "net" | |||
| "net/http" | |||
| "net/url" | |||
| "regexp" | |||
| "strings" | |||
| "crypto/sha256" | |||
| "github.com/minio/minio-go/v6/pkg/s3utils" | |||
| ) | |||
// regCred matches the credential string in an Authorization header:
// Credential=<access-key-id>/...
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")

// regSign matches the hex signature string in an Authorization header:
// Signature=<256-bit hex signature>.
// (Fixed: the character class previously read `[[0-9a-f]`, whose stray
// `[` made the class also match a literal '[' character; the comment
// above was also a copy-paste of regCred's.)
var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
| // xmlDecoder provide decoded value in xml. | |||
| func xmlDecoder(body io.Reader, v interface{}) error { | |||
| d := xml.NewDecoder(body) | |||
| return d.Decode(v) | |||
| } | |||
| // Redact out signature value from authorization string. | |||
| func redactSignature(origAuth string) string { | |||
| if !strings.HasPrefix(origAuth, signV4Algorithm) { | |||
| // Set a temporary redacted auth | |||
| return "AWS **REDACTED**:**REDACTED**" | |||
| } | |||
| /// Signature V4 authorization header. | |||
| // Strip out accessKeyID from: | |||
| // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request | |||
| newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") | |||
| // Strip out 256-bit signature from: Signature=<256-bit signature> | |||
| return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") | |||
| } | |||
| // closeResponse close non nil response with any response Body. | |||
| // convenient wrapper to drain any remaining data on response body. | |||
| // | |||
| // Subsequently this allows golang http RoundTripper | |||
| // to re-use the same connection for future requests. | |||
| func closeResponse(resp *http.Response) { | |||
| // Callers should close resp.Body when done reading from it. | |||
| // If resp.Body is not closed, the Client's underlying RoundTripper | |||
| // (typically Transport) may not be able to re-use a persistent TCP | |||
| // connection to the server for a subsequent "keep-alive" request. | |||
| if resp != nil && resp.Body != nil { | |||
| // Drain any remaining Body and then close the connection. | |||
| // Without this closing connection would disallow re-using | |||
| // the same connection for future uses. | |||
| // - http://stackoverflow.com/a/17961593/4465767 | |||
| io.Copy(ioutil.Discard, resp.Body) | |||
| resp.Body.Close() | |||
| } | |||
| } | |||
| // Verify if input endpoint URL is valid. | |||
| func isValidEndpointURL(endpointURL url.URL) error { | |||
| if endpointURL == sentinelURL { | |||
| return ErrInvalidArgument("Endpoint url cannot be empty.") | |||
| } | |||
| if endpointURL.Path != "/" && endpointURL.Path != "" { | |||
| return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") | |||
| } | |||
| if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { | |||
| if !s3utils.IsAmazonEndpoint(endpointURL) { | |||
| return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") | |||
| } | |||
| } | |||
| if strings.Contains(endpointURL.Host, ".googleapis.com") { | |||
| if !s3utils.IsGoogleEndpoint(endpointURL) { | |||
| return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // getEndpointURL - construct a new endpoint. | |||
| func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { | |||
| if strings.Contains(endpoint, ":") { | |||
| host, _, err := net.SplitHostPort(endpoint) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { | |||
| msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." | |||
| return nil, ErrInvalidArgument(msg) | |||
| } | |||
| } else { | |||
| if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { | |||
| msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." | |||
| return nil, ErrInvalidArgument(msg) | |||
| } | |||
| } | |||
| // If secure is false, use 'http' scheme. | |||
| scheme := "https" | |||
| if !secure { | |||
| scheme = "http" | |||
| } | |||
| // Construct a secured endpoint URL. | |||
| endpointURLStr := scheme + "://" + endpoint | |||
| endpointURL, err := url.Parse(endpointURLStr) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // Validate incoming endpoint URL. | |||
| if err := isValidEndpointURL(*endpointURL); err != nil { | |||
| return nil, err | |||
| } | |||
| return endpointURL, nil | |||
| } | |||
// supportedHeaders lists (lowercase) standard headers that callers may
// set directly on uploads; consulted case-insensitively by
// isStandardHeader.
var supportedHeaders = []string{
	"content-type",
	"cache-control",
	"content-encoding",
	"content-disposition",
	"content-language",
	"x-amz-website-redirect-location",
	"expires",
	// Add more supported headers here.
}
| // isStorageClassHeader returns true if the header is a supported storage class header | |||
| func isStorageClassHeader(headerKey string) bool { | |||
| return strings.EqualFold(amzStorageClass, headerKey) | |||
| } | |||
| // isStandardHeader returns true if header is a supported header and not a custom header | |||
| func isStandardHeader(headerKey string) bool { | |||
| key := strings.ToLower(headerKey) | |||
| for _, header := range supportedHeaders { | |||
| if strings.ToLower(header) == key { | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
// sseHeaders is the list of server side encryption headers, consulted
// case-insensitively by isSSEHeader.
var sseHeaders = []string{
	"x-amz-server-side-encryption",
	"x-amz-server-side-encryption-aws-kms-key-id",
	"x-amz-server-side-encryption-context",
	"x-amz-server-side-encryption-customer-algorithm",
	"x-amz-server-side-encryption-customer-key",
	"x-amz-server-side-encryption-customer-key-MD5",
}
| // isSSEHeader returns true if header is a server side encryption header. | |||
| func isSSEHeader(headerKey string) bool { | |||
| key := strings.ToLower(headerKey) | |||
| for _, h := range sseHeaders { | |||
| if strings.ToLower(h) == key { | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
| // isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. | |||
| func isAmzHeader(headerKey string) bool { | |||
| key := strings.ToLower(headerKey) | |||
| return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) | |||
| } | |||
// sum256Hex returns the hex-encoded sha256 digest of data.
func sum256Hex(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}
| @@ -0,0 +1,158 @@ | |||
| package storage | |||
| import ( | |||
| "encoding/json" | |||
| "encoding/xml" | |||
| "path" | |||
| "sort" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/modules/minio_ext" | |||
| "code.gitea.io/gitea/modules/setting" | |||
| miniov6 "github.com/minio/minio-go/v6" | |||
| ) | |||
const (
	// PresignedUploadPartUrlExpireTime is the expiry value passed to
	// GenUploadPartSignedUrl for presigned multipart-part URLs.
	// NOTE(review): int64(time.Hour * 24 * 7) is a Duration converted
	// to nanoseconds (604,800,000,000,000), not seconds — confirm the
	// unit GenUploadPartSignedUrl expects before relying on a 7-day
	// expiry.
	PresignedUploadPartUrlExpireTime = int64(time.Hour * 24 * 7)
)
// ComplPart is one client-reported finished part of a multipart upload:
// the part number and the ETag the storage backend returned for it.
type ComplPart struct {
	PartNumber int    `json:"partNumber"`
	ETag       string `json:"eTag"`
}
// CompleteParts is the JSON payload the client sends to finish a
// multipart upload; unmarshalled by CompleteMultiPartUpload.
type CompleteParts struct {
	Data []ComplPart `json:"completedParts"`
}
// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
type completedParts []miniov6.CompletePart

// Len, Swap and Less implement sort.Interface, ordering by PartNumber.
func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// completeMultipartUpload container for completing multipart upload.
// Marshals to the S3 CompleteMultipartUpload XML request body.
type completeMultipartUpload struct {
	XMLName xml.Name               `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
	Parts   []miniov6.CompletePart `xml:"Part"`
}
| var ( | |||
| adminClient * minio_ext.Client = nil | |||
| coreClient *miniov6.Core = nil | |||
| ) | |||
| var mutex *sync.Mutex | |||
| func init(){ | |||
| mutex = new(sync.Mutex) | |||
| } | |||
| func getClients()(*minio_ext.Client, *miniov6.Core, error){ | |||
| var client * minio_ext.Client | |||
| var core *miniov6.Core | |||
| mutex.Lock() | |||
| defer mutex.Unlock() | |||
| if nil != adminClient && nil != coreClient { | |||
| client = adminClient | |||
| core = coreClient | |||
| return client, core, nil | |||
| } | |||
| var err error | |||
| minio := setting.Attachment.Minio | |||
| if nil == adminClient { | |||
| adminClient, err = minio_ext.New( | |||
| minio.Endpoint, | |||
| minio.AccessKeyID, | |||
| minio.SecretAccessKey, | |||
| minio.UseSSL, | |||
| ) | |||
| if nil != err{ | |||
| return nil, nil, err | |||
| } | |||
| } | |||
| client = adminClient | |||
| if nil == coreClient { | |||
| coreClient, err = miniov6.NewCore( | |||
| minio.Endpoint, | |||
| minio.AccessKeyID, | |||
| minio.SecretAccessKey, | |||
| minio.UseSSL, | |||
| ) | |||
| if nil != err{ | |||
| return nil, nil, err | |||
| } | |||
| } | |||
| core = coreClient | |||
| return client, core, nil | |||
| } | |||
| func GenMultiPartSignedUrl(bucketName string, objectName string, uploadId string, partNumber int, partSize int64) (string, error) { | |||
| minioClient, _, err := getClients() | |||
| if err != nil { | |||
| log.Error("getClients failed:", err.Error()) | |||
| return "", err | |||
| } | |||
| return minioClient.GenUploadPartSignedUrl(uploadId, bucketName, objectName, partNumber, partSize, PresignedUploadPartUrlExpireTime) | |||
| } | |||
| func NewMultiPartUpload(uuid string) (string, error){ | |||
| _, core, err := getClients() | |||
| if err != nil { | |||
| log.Error("getClients failed:", err.Error()) | |||
| return "", err | |||
| } | |||
| minio := setting.Attachment.Minio | |||
| bucketName := minio.Bucket | |||
| objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") | |||
| return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{}) | |||
| } | |||
| func CompleteMultiPartUpload(uuid string, uploadID string, complParts string) (string, error){ | |||
| _, core, err := getClients() | |||
| if err != nil { | |||
| log.Error("getClients failed:", err.Error()) | |||
| return "", err | |||
| } | |||
| minio := setting.Attachment.Minio | |||
| bucketName := minio.Bucket | |||
| objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/") | |||
| //complParts:{"completedParts":[{"partNumber":2,"eTag":'"684929e7fe8b996d495e7b152d34ae37"'}]} | |||
| var parts CompleteParts | |||
| err = json.Unmarshal([]byte(complParts), &parts) | |||
| if err != nil { | |||
| log.Error("json.Unmarshal(%s) failed:(%s)", complParts, err.Error()) | |||
| return "", err | |||
| } | |||
| // Complete multipart upload. | |||
| var complMultipartUpload completeMultipartUpload | |||
| for _,part := range parts.Data { | |||
| complMultipartUpload.Parts =append(complMultipartUpload.Parts, miniov6.CompletePart{ | |||
| PartNumber:part.PartNumber, | |||
| ETag:part.ETag, | |||
| }) | |||
| } | |||
| // Sort all completed parts. | |||
| sort.Sort(completedParts(complMultipartUpload.Parts)) | |||
| return core.CompleteMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload.Parts) | |||
| } | |||
| @@ -276,3 +276,64 @@ func AddAttachment(ctx *context.Context) { | |||
| "result_code": "0", | |||
| }) | |||
| } | |||
| func NewMultipart(ctx *context.Context) { | |||
| if !setting.Attachment.Enabled { | |||
| ctx.Error(404, "attachment is not enabled") | |||
| return | |||
| } | |||
| err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ",")) | |||
| if err != nil { | |||
| ctx.Error(400, err.Error()) | |||
| return | |||
| } | |||
| if setting.Attachment.StoreType == storage.MinioStorageType { | |||
| uuid := gouuid.NewV4().String() | |||
| url, err := storage.NewMultiPartUpload(uuid) | |||
| if err != nil { | |||
| ctx.ServerError("NewMultipart", err) | |||
| return | |||
| } | |||
| ctx.JSON(200, map[string]string{ | |||
| "uuid": uuid, | |||
| "url": url, | |||
| }) | |||
| } else { | |||
| ctx.Error(404, "storage type is not enabled") | |||
| return | |||
| } | |||
| } | |||
| func CompleteMultipart(ctx *context.Context) { | |||
| uuid := ctx.Query("uuid") | |||
| uploadID := ctx.Query("uploadID") | |||
| completedParts := ctx.Query("completedParts") | |||
| _, err := storage.CompleteMultiPartUpload(uuid, uploadID, completedParts) | |||
| if err != nil { | |||
| ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err)) | |||
| return | |||
| } | |||
| _, err = models.InsertAttachment(&models.Attachment{ | |||
| UUID: uuid, | |||
| UploaderID: ctx.User.ID, | |||
| IsPrivate: true, | |||
| Name: ctx.Query("file_name"), | |||
| Size: ctx.QueryInt64("size"), | |||
| DatasetID: ctx.QueryInt64("dataset_id"), | |||
| }) | |||
| if err != nil { | |||
| ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err)) | |||
| return | |||
| } | |||
| ctx.JSON(200, map[string]string{ | |||
| "result_code": "0", | |||
| }) | |||
| } | |||
| @@ -521,6 +521,8 @@ func RegisterRoutes(m *macaron.Macaron) { | |||
| m.Get("/get_pre_url", repo.GetPresignedPutObjectURL) | |||
| m.Post("/add", repo.AddAttachment) | |||
| m.Post("/private", repo.UpdatePublicAttachment) | |||
| m.Get("/new_multipart", repo.NewMultipart) | |||
| m.Post("/complete_multipart", repo.CompleteMultipart) | |||
| }, reqSignIn) | |||
| m.Group("/:username", func() { | |||