@@ -95,6 +95,6 @@ func UpdateFileChunk(fileChunk *FileChunk) error {
func updateFileChunk(e Engine, fileChunk *FileChunk) error {
    var sess *xorm.Session
    sess = e.Where("uuid = ?", fileChunk.UUID)
    _, err := sess.Cols("is_uploaded", "completed_parts").Update(fileChunk)
    _, err := sess.Cols("is_uploaded").Update(fileChunk)
    return err
}
@@ -16,6 +16,7 @@ package obs
import (
    "errors"
    "fmt"
    "github.com/unknwon/com"
    "io"
    "net/http"
    "os"
@@ -788,3 +789,54 @@ func (obsClient ObsClient) GetBucketRequestPaymentWithSignedUrl(signedUrl string
    }
    return
}
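// CreateUploadPartSignedUrl builds a presigned PUT URL for uploading a single
// part of a multipart upload, identified by uploadId and partNumber.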
func (obsClient ObsClient) CreateUploadPartSignedUrl(bucketName, objectKey, uploadId string, partNumber int, partSize int64) (string, error) {
    requestURL := ""
    input := &UploadPartInput{}
    input.Bucket = bucketName
    input.Key = objectKey
    input.PartNumber = partNumber
    input.UploadId = uploadId
    //input.ContentMD5 = _input.ContentMD5
    //input.SourceFile = _input.SourceFile
    //input.Offset = _input.Offset
    input.PartSize = partSize
    //input.SseHeader = _input.SseHeader
    //input.Body = _input.Body
    params, headers, _, err := input.trans(obsClient.conf.signature == SignatureObs)
    if err != nil {
        return requestURL, err
    }
    if params == nil {
        params = make(map[string]string)
    }
    if headers == nil {
        headers = make(map[string][]string)
    }
    var extensions []extensionOptions
    for _, extension := range extensions {
        if extensionHeader, ok := extension.(extensionHeaders); ok {
            _err := extensionHeader(headers, obsClient.conf.signature == SignatureObs)
            if _err != nil {
                doLog(LEVEL_WARN, fmt.Sprintf("set header with error: %v", _err))
            }
        } else {
            doLog(LEVEL_WARN, "Unsupported extensionOptions")
        }
    }
    // the signed request's Content-Length must equal the size of the part body
    headers["Content-Length"] = []string{com.ToStr(partSize, 10)}
    requestURL, err = obsClient.doAuth(HTTP_PUT, bucketName, objectKey, params, headers, "")
    if err != nil {
        return requestURL, err
    }
    return requestURL, nil
}
@@ -100,20 +100,54 @@ func CompleteObsMultiPartUpload(uuid string, uploadID string) error {
    return nil
}
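// ObsUploadPart uploads one part of a multipart upload to OBS and returns the
// ETag the server reports for that part.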
func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, partReader io.Reader) error {
func ObsUploadPart(uuid string, uploadId string, partNumber int, partSize int64, body io.Reader) (string, error) {
    input := &obs.UploadPartInput{}
    input.PartNumber = partNumber
    input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
    input.UploadId = uploadId
    input.Bucket = setting.Bucket
    input.PartSize = partSize
    input.Body = partReader
    _, err := ObsCli.UploadPart(input)
    input.Body = body
    output, err := ObsCli.UploadPart(input)
    if err != nil {
        log.Error("UploadPart failed:", err.Error())
        return err
        return "", err
    }
    return nil
    return output.ETag, nil
}
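// ObsGenMultiPartSignedUrl builds a presigned URL that lets the client PUT one
// part of a multipart upload directly to OBS.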
func ObsGenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
    /*
        input := &obs.CreateSignedUrlInput{}
        input.Bucket = setting.Bucket
        input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
        input.Expires = int(PresignedUploadPartUrlExpireTime)
        input.Method = obs.HTTP_PUT
        input.QueryParams = map[string]string{
            "Bucket":     input.Bucket,
            "Key":        input.Key,
            "PartNumber": com.ToStr(partNumber, 10),
            "UploadId":   uploadId,
            "PartSize":   com.ToStr(partSize, 10),
        }
        input.Headers = map[string]string{}
    */
    key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
    url, err := ObsCli.CreateUploadPartSignedUrl(setting.Bucket, key, uploadId, partNumber, partSize)
    if err != nil {
        log.Error("CreateUploadPartSignedUrl failed:", err.Error())
        return "", err
    }
    log.Info(url)
    return url, nil
}
@@ -5,6 +5,15 @@
package repo
import (
    contexExt "context"
    "encoding/json"
    "errors"
    "fmt"
    "mime/multipart"
    "net/http"
    "strconv"
    "strings"
    "code.gitea.io/gitea/models"
    "code.gitea.io/gitea/modules/context"
    "code.gitea.io/gitea/modules/log"
@@ -13,13 +22,6 @@ import (
    "code.gitea.io/gitea/modules/storage"
    "code.gitea.io/gitea/modules/upload"
    "code.gitea.io/gitea/modules/worker"
    contexExt "context"
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strconv"
    "strings"
    gouuid "github.com/satori/go.uuid"
)
@@ -38,6 +40,15 @@ type CloudBrainDataset struct {
    CreateTime string `json:"created_at"`
}
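// UploadForm describes the multipart form posted by the web uploader for a
// single chunk; the form tags mirror the FormData field names sent by the front end.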
type UploadForm struct {
    UploadID   string         `form:"uploadId"`
    UuID       string         `form:"uuid"`
    PartSize   int64          `form:"size"`
    Offset     int64          `form:"offset"`
    PartNumber int            `form:"chunkNumber"`
    PartFile   multipart.File `form:"file"`
}
func RenderAttachmentSettings(ctx *context.Context) {
    renderAttachmentSettings(ctx)
}
@@ -538,15 +549,31 @@ func GetMultipartUploadUrl(ctx *context.Context) {
    partNumber := ctx.QueryInt("chunkNumber")
    size := ctx.QueryInt64("size")
    if size > minio_ext.MinPartSize {
        ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
    typeCloudBrain := ctx.QueryInt("type")
    err := checkTypeCloudBrain(typeCloudBrain)
    if err != nil {
        ctx.ServerError("checkTypeCloudBrain failed", err)
        return
    }
    url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
        return
    url := ""
    if typeCloudBrain == models.TypeCloudBrainOne {
        if size > minio_ext.MinPartSize {
            ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
            return
        }
        url, err = storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
            return
        }
    } else {
        url, err = storage.ObsGenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
        if err != nil {
            ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
            return
        }
    }
    ctx.JSON(200, map[string]string{
@@ -555,26 +582,34 @@ func GetMultipartUploadUrl(ctx *context.Context) {
}
func UploadPart(ctx *context.Context) {
    uuid := ctx.Query("uuid")
    uploadID := ctx.Query("uploadID")
    partNumber := ctx.QueryInt("chunkNumber")
    size := ctx.QueryInt64("size")
    if size > minio_ext.MinPartSize {
        ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
    err := ctx.Req.ParseMultipartForm(100 * 1024 * 1024)
    if err != nil {
        ctx.Error(http.StatusBadRequest, fmt.Sprintf("ParseMultipartForm failed: %v", err))
        return
    }
    url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
    file, fileHeader, err := ctx.Req.FormFile("file")
    if err != nil {
        ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
        ctx.Error(http.StatusBadRequest, fmt.Sprintf("FormFile failed: %v", err))
        return
    }
    log.Info(fileHeader.Filename)
    // read the chunk metadata from the multipart form fields sent by the
    // uploader rather than passing placeholder values
    formUUID := ctx.Req.Form.Get("uuid")
    formUploadID := ctx.Req.Form.Get("uploadId")
    formPartNumber, _ := strconv.Atoi(ctx.Req.Form.Get("chunkNumber"))
    formPartSize, _ := strconv.ParseInt(ctx.Req.Form.Get("size"), 10, 64)
    etag, err := storage.ObsUploadPart(formUUID, formUploadID, formPartNumber, formPartSize, file)
    if err != nil {
        ctx.Error(500, fmt.Sprintf("ObsUploadPart failed: %v", err))
        return
    }
    ctx.JSON(200, map[string]string{
        "url":  url,
        "etag": etag,
    })
}
@@ -21,7 +21,7 @@ import qs from 'qs';
import createDropzone from '../features/dropzone.js';
const {_AppSubUrl, _StaticUrlPrefix, csrf} = window.config;
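// storage type sent with each chunk request; the backend uses it to choose
// between minio and OBS presigned uploads (see GetMultipartUploadUrl)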
const cloud_brain_type = 0;
const cloud_brain_type = 1;
export default {
  data() {
@@ -129,9 +129,9 @@ export default {
    finishUpload(file) {
      this.emitDropzoneSuccess(file);
      setTimeout(() => {
        window.location.reload();
      }, 1000);
      // setTimeout(() => {
      //   window.location.reload();
      // }, 1000);
    },
    computeMD5(file) {
@@ -326,6 +326,7 @@ export default {
            uploadID: file.uploadID,
            size: partSize,
            chunkNumber: currentChunk + 1,
            type: cloud_brain_type,
            _csrf: csrf
          }
        });
@@ -348,30 +349,49 @@ export default {
        })
      );
    }
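    // Post the chunk to the backend as multipart/form-data; the UploadPart
    // handler relays it to OBS and responds with the part's ETag.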
    async function uploadPart(currentChunk, partSize, e) {
      console.log(e);
      let params = new FormData();
      params.append("uuid", file.uuid);
      params.append("uploadId", file.uploadID);
      params.append("size", partSize);
      params.append("chunkNumber", currentChunk + 1);
      // assuming e is the FileReader onload event (as in uploadMinio), wrap the
      // raw chunk bytes in a Blob before appending them as the "file" field
      params.append("file", new Blob([e.target.result]));
      params.append("_csrf", csrf);
      return await axios.post('/attachments/upload_part',
        params,
        {headers: {'Content-Type': 'multipart/form-data'}}
      );
    }
    async function uploadChunk(e) {
      try {
        if (!checkSuccessChunks()) {
          const start = currentChunk * chunkSize;
          const partSize =
            start + chunkSize >= file.size ? file.size - start : chunkSize;
          await uploadPart(currentChunk, partSize, e);
          // get the presigned upload url for this chunk
          await getUploadChunkUrl(currentChunk, partSize);
          if (urls[currentChunk] != '') {
            // upload the chunk to minio
            await uploadMinio(urls[currentChunk], e);
            if (etags[currentChunk] != '') {
              // update the database with the chunk upload result
              //await updateChunk(currentChunk);
            } else {
              console.log("upload to minio: uploadChunk etags[currentChunk] == ''"); // TODO
            }
          } else {
            console.log("uploadChunk urls[currentChunk] == ''"); // TODO
          }
          // await getUploadChunkUrl(currentChunk, partSize);
          // if (urls[currentChunk] != '') {
          //   // upload the chunk to minio
          //   await uploadMinio(urls[currentChunk], e);
          //   if (etags[currentChunk] != '') {
          //     // update the database with the chunk upload result
          //     //await updateChunk(currentChunk);
          //   } else {
          //     console.log("upload to minio: uploadChunk etags[currentChunk] == ''"); // TODO
          //   }
          // } else {
          //   console.log("uploadChunk urls[currentChunk] == ''"); // TODO
          // }
        }
      } catch (error) {
        this.emitDropzoneFailed(file);
        console.log(error);
        this.emitDropzoneFailed(file);
      }
    }