
New API for model file attachments.

Signed-off-by: zouap <zouap@pcl.ac.cn>
zouap · 3 years ago · commit ea6fe35df8 · tags/v1.22.11.2^2
5 changed files with 609 additions and 330 deletions
  1. models/file_chunk.go (+73 -0)
  2. models/models.go (+1 -0)
  3. routers/repo/attachment.go (+105 -330)
  4. routers/repo/attachment_model.go (+423 -0)
  5. routers/routes/routes.go (+7 -0)

models/file_chunk.go (+73 -0)

@@ -14,6 +14,21 @@ const (
)

type FileChunk struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
Md5 string `xorm:"INDEX"`
IsUploaded int `xorm:"DEFAULT 0"` // not uploaded: 0, uploaded: 1
UploadID string `xorm:"UNIQUE"` //minio upload id
TotalChunks int
Size int64
UserID int64 `xorm:"INDEX"`
Type int `xorm:"INDEX DEFAULT 0"`
CompletedParts []string `xorm:"DEFAULT ''"` // chunkNumber+etag eg: ,1-asqwewqe21312312.2-123hjkas
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

type ModelFileChunk struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
Md5 string `xorm:"INDEX"`
@@ -50,6 +65,21 @@ func GetFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*Fi
return getFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
}


func GetModelFileChunkByMD5AndUser(md5 string, userID int64, typeCloudBrain int) (*ModelFileChunk, error) {
return getModelFileChunkByMD5AndUser(x, md5, userID, typeCloudBrain)
}

func getModelFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*ModelFileChunk, error) {
fileChunk := new(ModelFileChunk)

if has, err := e.Where("md5 = ? and user_id = ? and type = ?", md5, userID, typeCloudBrain).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{md5, ""}
}
return fileChunk, nil
}

func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64, typeCloudBrain int) (*FileChunk, error) {
fileChunk := new(FileChunk)

@@ -77,6 +107,21 @@ func getFileChunkByUUID(e Engine, uuid string) (*FileChunk, error) {
return fileChunk, nil
}


func GetModelFileChunkByUUID(uuid string) (*ModelFileChunk, error) {
return getModelFileChunkByUUID(x, uuid)
}

func getModelFileChunkByUUID(e Engine, uuid string) (*ModelFileChunk, error) {
fileChunk := new(ModelFileChunk)

if has, err := e.Where("uuid = ?", uuid).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{"", uuid}
}
return fileChunk, nil
}

// InsertFileChunk insert a record into file_chunk.
func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) {
if _, err := x.Insert(fileChunk); err != nil {
@@ -86,6 +131,14 @@ func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) {
return fileChunk, nil
}


// InsertModelFileChunk insert a record into model_file_chunk.
func InsertModelFileChunk(fileChunk *ModelFileChunk) (_ *ModelFileChunk, err error) {
if _, err := x.Insert(fileChunk); err != nil {
return nil, err
}
return fileChunk, nil
}

func DeleteFileChunkById(uuid string) (*FileChunk, error) {
return deleteFileChunkById(x, uuid)
}
@@ -107,6 +160,17 @@ func deleteFileChunkById(e Engine, uuid string) (*FileChunk, error) {
}
}


func UpdateModelFileChunk(fileChunk *ModelFileChunk) error {
return updateModelFileChunk(x, fileChunk)
}

func updateModelFileChunk(e Engine, fileChunk *ModelFileChunk) error {
var sess *xorm.Session
sess = e.Where("uuid = ?", fileChunk.UUID)
_, err := sess.Cols("is_uploaded").Update(fileChunk)
return err
}

// UpdateFileChunk updates the given file_chunk in database
func UpdateFileChunk(fileChunk *FileChunk) error {
return updateFileChunk(x, fileChunk)
@@ -128,3 +192,12 @@ func deleteFileChunk(e Engine, fileChunk *FileChunk) error {
_, err := e.ID(fileChunk.ID).Delete(fileChunk)
return err
}

func DeleteModelFileChunk(fileChunk *ModelFileChunk) error {
return deleteModelFileChunk(x, fileChunk)
}

func deleteModelFileChunk(e Engine, fileChunk *ModelFileChunk) error {
_, err := e.ID(fileChunk.ID).Delete(fileChunk)
return err
}
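
Taken together, these helpers give ModelFileChunk the same CRUD surface FileChunk already has. A minimal sketch of how a caller might drive them (the wrapper function below is hypothetical, not part of this commit):

// markModelChunkUploaded is a hypothetical caller of the helpers added above:
// fetch the chunk record by uuid, flip the flag, and persist it. Note that
// updateModelFileChunk only writes back the is_uploaded column.
func markModelChunkUploaded(uuid string) error {
	chunk, err := models.GetModelFileChunkByUUID(uuid)
	if err != nil {
		return err // ErrFileChunkNotExist when no record matches
	}
	chunk.IsUploaded = models.FileUploaded
	return models.UpdateModelFileChunk(chunk)
}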

models/models.go (+1 -0)

@@ -136,6 +136,7 @@ func init() {
new(ImageTopic),
new(ImageTopicRelation),
new(FileChunk),
new(ModelFileChunk),
new(BlockChain),
new(RecommendOrg),
new(AiModelManage),

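Registering the new bean in this slice is what creates its table: at startup the tables list is fed to xorm, whose Sync2 creates any missing tables and columns. A minimal sketch of the mechanism, assuming xorm's standard API (the function below is illustrative, not part of this commit):

// ensureModelFileChunkTable shows what the registration above feeds into.
// Sync2 creates the model_file_chunk table from the ModelFileChunk bean if it
// does not exist yet, and adds any missing columns if it does.
func ensureModelFileChunkTable(x *xorm.Engine) error {
	return x.Sync2(new(ModelFileChunk))
}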

routers/repo/attachment.go (+105 -330)

@@ -312,8 +312,7 @@ func GetAttachment(ctx *context.Context) {
url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
log.Info("return url=" + url)
} else {
objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(attach.UUID[0:1], attach.UUID[1:2], attach.UUID, attach.Name)), "/")
url, err = storage.ObsGetPreSignedUrl(objectName, attach.Name)
url, err = storage.ObsGetPreSignedUrl(attach.UUID, attach.Name)
if err != nil {
ctx.ServerError("ObsGetPreSignedUrl", err)
return
@@ -417,7 +416,7 @@ func AddAttachment(ctx *context.Context) {
uuid := ctx.Query("uuid")
has := false
if typeCloudBrain == models.TypeCloudBrainOne {
has, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(uuid))
has, err = storage.Attachments.HasObject(models.AttachmentRelativePath(uuid))
if err != nil {
ctx.ServerError("HasObject", err)
return
@@ -530,82 +529,10 @@ func UpdateAttachmentDecompressState(ctx *context.Context) {
})
}


func getCloudOneMinioPrefix(scene string, fileChunk *models.FileChunk) string {
if scene == Attachment_model {
if fileChunk.ObjectName != "" {
return fileChunk.ObjectName
}
}
return setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID)
}

func getCloudTwoOBSPrefix(scene string, fileChunk *models.FileChunk, fileName string) string {
if scene == Attachment_model {
if fileChunk.ObjectName != "" {
return fileChunk.ObjectName
}
}
return setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName
}

func specialDeal(modeluuid, uuid, fileName string, ctx *context.Context, fileChunk *models.FileChunk, typeCloudBrain int) {
copyModelFile(typeCloudBrain, fileChunk, fileName, fileName, modeluuid)
UpdateModelSize(modeluuid)
_, err := models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: true,
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: 0,
Description: modeluuid,
Type: typeCloudBrain,
})
if err != nil {
log.Info("add attachment error.")
}
}

func copyModelFile(typeCloudBrain int, fileChunk *models.FileChunk, name, fileName, modeluuid string) bool {
srcObjectName := fileChunk.ObjectName
var isExist bool
//copy
destObjectName := getObjectName(fileName, modeluuid)
if typeCloudBrain == models.TypeCloudBrainOne {
bucketName := setting.Attachment.Minio.Bucket
log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
isExist = true
} else {
if srcObjectName == "" {
srcObjectName = getMinioInitObjectName("", fileChunk.UUID, "", "")
}
log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
}
} else {
bucketName := setting.Bucket
log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
if storage.ObsGetFilesSize(bucketName, []string{destObjectName}) > 0 {
isExist = true
} else {
if srcObjectName == "" {
srcObjectName = getOBSInitObjectName("", fileChunk.UUID, "", name)
}
log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
}
}
return isExist
}

func GetSuccessChunks(ctx *context.Context) {
fileMD5 := ctx.Query("md5")
typeCloudBrain := ctx.QueryInt("type")
fileName := ctx.Query("file_name")
scene := ctx.Query("scene")
modeluuid := ctx.Query("modeluuid")
log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
var chunks string

err := checkTypeCloudBrain(typeCloudBrain)
@@ -631,12 +558,7 @@ func GetSuccessChunks(ctx *context.Context) {


isExist := false
if typeCloudBrain == models.TypeCloudBrainOne {
isExist, err = storage.Attachments.HasObject(getCloudOneMinioPrefix(scene, fileChunk))
if isExist {
log.Info("The file is exist in minio. has uploaded.path=" + getCloudOneMinioPrefix(scene, fileChunk))
} else {
log.Info("The file is not exist in minio..")
}
isExist, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID))
if err != nil {
ctx.ServerError("HasObject failed", err)
return
@@ -647,12 +569,7 @@ func GetSuccessChunks(ctx *context.Context) {
if oldAttachment != nil {
oldFileName = oldAttachment.Name
}
isExist, err = storage.ObsHasObject(getCloudTwoOBSPrefix(scene, fileChunk, oldFileName))
if isExist {
log.Info("The file is exist in obs. has uploaded. path=" + getCloudTwoOBSPrefix(scene, fileChunk, oldFileName))
} else {
log.Info("The file is not exist in obs.")
}
isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + oldFileName)
if err != nil {
ctx.ServerError("ObsHasObject failed", err)
return
@@ -668,14 +585,36 @@ func GetSuccessChunks(ctx *context.Context) {
}
}
} else {
models.DeleteFileChunk(fileChunk)
ctx.JSON(200, map[string]string{
"uuid": "",
"uploaded": "0",
"uploadID": "",
"chunks": "",
})
return
if fileChunk.IsUploaded == models.FileUploaded {
log.Info("the file has been recorded but not uploaded")
fileChunk.IsUploaded = models.FileNotUploaded
if err = models.UpdateFileChunk(fileChunk); err != nil {
log.Error("UpdateFileChunk failed:", err.Error())
}
}

if typeCloudBrain == models.TypeCloudBrainOne {
chunks, err = storage.GetPartInfos(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), fileChunk.UploadID)
if err != nil {
log.Error("GetPartInfos failed:%v", err.Error())
}
} else {
chunks, err = storage.GetObsPartInfos(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), fileChunk.UploadID)
if err != nil {
log.Error("GetObsPartInfos failed:%v", err.Error())
}
}

if err != nil {
models.DeleteFileChunk(fileChunk)
ctx.JSON(200, map[string]string{
"uuid": "",
"uploaded": "0",
"uploadID": "",
"chunks": "",
})
return
}
}


var attachID int64
@@ -692,10 +631,6 @@ func GetSuccessChunks(ctx *context.Context) {
}


if attach == nil {
if fileChunk.IsUploaded == 1 && scene == Attachment_model {
log.Info("model upload special deal.")
specialDeal(modeluuid, fileChunk.UUID, fileName, ctx, fileChunk, typeCloudBrain)
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
@@ -708,141 +643,24 @@ func GetSuccessChunks(ctx *context.Context) {
})
return
}
if scene == Attachment_model {
//use the description field to store model info
dbmodeluuid := attach.Description
modelname := ""
if dbmodeluuid != modeluuid {
log.Info("The file has uploaded.fileChunk.ObjectName=" + fileChunk.ObjectName + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
isExist := copyModelFile(typeCloudBrain, fileChunk, attach.Name, fileName, modeluuid)
// srcObjectName := fileChunk.ObjectName
// var isExist bool
// //copy
// destObjectName := getObjectName(fileName, modeluuid)
// if typeCloudBrain == models.TypeCloudBrainOne {
// bucketName := setting.Attachment.Minio.Bucket
// log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
// isExist = true
// } else {
// if srcObjectName == "" {
// srcObjectName = getMinioInitObjectName("", fileChunk.UUID, "", "")
// }
// storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
// }
// } else {
// bucketName := setting.Bucket
// log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// if storage.ObsGetFilesSize(bucketName, []string{destObjectName}) > 0 {
// isExist = true
// } else {
// if srcObjectName == "" {
// srcObjectName = getOBSInitObjectName("", fileChunk.UUID, "", attach.Name)
// }
// log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
// storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
// }
// }
if dbmodeluuid != "" {
model, err := models.QueryModelById(dbmodeluuid)
if err == nil && model != nil {
modelname = model.Name
}
}
if isExist {
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": modeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
} else {
UpdateModelSize(modeluuid)
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"fileName": attach.Name,
})
}
return
} else {
model, err := models.QueryModelById(dbmodeluuid)
if err == nil {
modelname = model.Name
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": dbmodeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
return
}
} else {
dataset, err := models.GetDatasetByID(attach.DatasetID)
if err != nil {
ctx.ServerError("GetDatasetByID", err)
return
}

ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"datasetID": strconv.Itoa(int(attach.DatasetID)),
"fileName": attach.Name,
"datasetName": dataset.Title,
})
}
}

func getObjectName(filename string, modeluuid string) string {
return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
}


func getMinioInitObjectName(scene string, uuid, modeluuid string, filename string) string {
if scene == Attachment_model {
return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
} else {
return strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
}
}

func getChunkMinioExistObjectName(scene string, fileChunk *models.FileChunk, filename string) string {
if scene == Attachment_model {
return fileChunk.ObjectName
} else {
return strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/")
}
}

func getOBSInitObjectName(scene string, uuid, modeluuid string, filename string) string {
if scene == Attachment_model {
return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
} else {
return strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, filename)), "/")
}
}

func getChunkOBSExistObjectName(scene string, fileChunk *models.FileChunk, filename string) string {
if scene == Attachment_model {
return fileChunk.ObjectName
} else {
return strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, filename)), "/")
}
}

dataset, err := models.GetDatasetByID(attach.DatasetID)
if err != nil {
ctx.ServerError("GetDatasetByID", err)
return
}

ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"datasetID": strconv.Itoa(int(attach.DatasetID)),
"fileName": attach.Name,
"datasetName": dataset.Title,
})
}


func NewMultipart(ctx *context.Context) {
@@ -865,8 +683,6 @@ func NewMultipart(ctx *context.Context) {
}


fileName := ctx.Query("file_name")
scene := ctx.Query("scene")
modeluuid := ctx.Query("modeluuid")


if setting.Attachment.StoreType == storage.MinioStorageType {
totalChunkCounts := ctx.QueryInt("totalChunkCounts")
@@ -882,19 +698,15 @@ func NewMultipart(ctx *context.Context) {
}


uuid := gouuid.NewV4().String()

var uploadID string
var objectName string
if typeCloudBrain == models.TypeCloudBrainOne {
objectName = getMinioInitObjectName(scene, uuid, modeluuid, fileName)
uploadID, err = storage.NewMultiPartUpload(objectName)
uploadID, err = storage.NewMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"))
if err != nil {
ctx.ServerError("NewMultipart", err)
return
}
} else {
objectName = getOBSInitObjectName(scene, uuid, modeluuid, fileName)
uploadID, err = storage.NewObsMultiPartUpload(objectName)
uploadID, err = storage.NewObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"))
if err != nil {
ctx.ServerError("NewObsMultiPartUpload", err)
return
@@ -905,7 +717,6 @@ func NewMultipart(ctx *context.Context) {
UUID: uuid,
UserID: ctx.User.ID,
UploadID: uploadID,
ObjectName: objectName,
Md5: ctx.Query("md5"),
Size: fileSize,
TotalChunks: totalChunkCounts,
@@ -939,8 +750,8 @@ func PutOBSProxyUpload(ctx *context.Context) {
ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody)) ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
return return
} }
objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
err := storage.ObsMultiPartUpload(objectName, uploadID, partNumber, fileName, RequestBody.ReadCloser())
err := storage.ObsMultiPartUpload(uuid, uploadID, partNumber, fileName, RequestBody.ReadCloser())
if err != nil { if err != nil {
log.Info("upload error.") log.Info("upload error.")
} }
@@ -980,22 +791,14 @@ func GetMultipartUploadUrl(ctx *context.Context) {
partNumber := ctx.QueryInt("chunkNumber") partNumber := ctx.QueryInt("chunkNumber")
size := ctx.QueryInt64("size") size := ctx.QueryInt64("size")
fileName := ctx.Query("file_name") fileName := ctx.Query("file_name")
scene := ctx.Query("scene")
typeCloudBrain := ctx.QueryInt("type") typeCloudBrain := ctx.QueryInt("type")
err := checkTypeCloudBrain(typeCloudBrain) err := checkTypeCloudBrain(typeCloudBrain)
if err != nil { if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err) ctx.ServerError("checkTypeCloudBrain failed", err)
return return
} }
fileChunk, err := models.GetFileChunkByUUID(uuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetFileChunkByUUID", err)
}
return
}

url := "" url := ""
if typeCloudBrain == models.TypeCloudBrainOne { if typeCloudBrain == models.TypeCloudBrainOne {
if size > minio_ext.MinPartSize { if size > minio_ext.MinPartSize {
@@ -1003,7 +806,7 @@ func GetMultipartUploadUrl(ctx *context.Context) {
return return
} }


url, err = storage.GenMultiPartSignedUrl(getChunkMinioExistObjectName(scene, fileChunk, fileName), uploadID, partNumber, size)
url, err = storage.GenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"), uploadID, partNumber, size)
if err != nil {
ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
return
@@ -1013,7 +816,7 @@ func GetMultipartUploadUrl(ctx *context.Context) {
url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
log.Info("return url=" + url)
} else {
url, err = storage.ObsGenMultiPartSignedUrl(getChunkOBSExistObjectName(scene, fileChunk, fileName), uploadID, partNumber)
url, err = storage.ObsGenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"), uploadID, partNumber)
if err != nil {
ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
return
@@ -1021,7 +824,6 @@ func GetMultipartUploadUrl(ctx *context.Context) {
log.Info("url=" + url)
}
}

ctx.JSON(200, map[string]string{
"url": url,
})
@@ -1032,12 +834,8 @@ func CompleteMultipart(ctx *context.Context) {
uploadID := ctx.Query("uploadID")
typeCloudBrain := ctx.QueryInt("type")
fileName := ctx.Query("file_name")
scene := ctx.Query("scene")
modeluuid := ctx.Query("modeluuid")


log.Warn("uuid:" + uuid) log.Warn("uuid:" + uuid)
log.Warn("modeluuid:" + modeluuid)
log.Warn("scene:" + scene)
log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain)) log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))


err := checkTypeCloudBrain(typeCloudBrain)
@@ -1057,13 +855,13 @@ func CompleteMultipart(ctx *context.Context) {
}


if typeCloudBrain == models.TypeCloudBrainOne {
_, err = storage.CompleteMultiPartUpload(getChunkMinioExistObjectName(scene, fileChunk, fileName), uploadID, fileChunk.TotalChunks)
_, err = storage.CompleteMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}
} else {
err = storage.CompleteObsMultiPartUpload(getChunkOBSExistObjectName(scene, fileChunk, fileName), uploadID, fileChunk.TotalChunks)
err = storage.CompleteObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
return
@@ -1077,81 +875,58 @@ func CompleteMultipart(ctx *context.Context) {
ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err)) ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
return return
} }
if scene == Attachment_model {
//update the model size info
UpdateModelSize(modeluuid)
_, err := models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: true,
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: 0,
Description: modeluuid,
Type: typeCloudBrain,
})
if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
ctx.JSON(200, map[string]string{
"result_code": "0",
})

} else {
dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
attachment, err := models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: dataset.IsPrivate(),
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: ctx.QueryInt64("dataset_id"),
Description: ctx.Query("description"),
Type: typeCloudBrain,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
attachment.UpdateDatasetUpdateUnix()
repository, _ := models.GetRepositoryByID(dataset.RepoID)
notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
if attachment.DatasetID != 0 {
if isCanDecompress(attachment.Name) {
if typeCloudBrain == models.TypeCloudBrainOne {
err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
} else {
updateAttachmentDecompressStateIng(attachment)
}
}
if typeCloudBrain == models.TypeCloudBrainTwo {
attachjson, _ := json.Marshal(attachment)
err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
if err != nil {
log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attachment.UUID, err.Error())
} else {
updateAttachmentDecompressStateIng(attachment)
}
}
} else {
var labelMap map[string]string
labelMap = make(map[string]string)
labelMap["UUID"] = uuid
labelMap["Type"] = fmt.Sprint(attachment.Type)
labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
labelMap["AttachName"] = attachment.Name
attachjson, _ := json.Marshal(labelMap)
labelmsg.SendAddAttachToLabelSys(string(attachjson))
}
}
}

dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
attachment, err := models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: dataset.IsPrivate(),
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: ctx.QueryInt64("dataset_id"),
Description: ctx.Query("description"),
Type: typeCloudBrain,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
attachment.UpdateDatasetUpdateUnix()
repository, _ := models.GetRepositoryByID(dataset.RepoID)
notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
if attachment.DatasetID != 0 {
if isCanDecompress(attachment.Name) {
if typeCloudBrain == models.TypeCloudBrainOne {
err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
} else {
updateAttachmentDecompressStateIng(attachment)
}
}
if typeCloudBrain == models.TypeCloudBrainTwo {
attachjson, _ := json.Marshal(attachment)
err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
if err != nil {
log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attachment.UUID, err.Error())
} else {
updateAttachmentDecompressStateIng(attachment)
}
}
} else {
var labelMap map[string]string
labelMap = make(map[string]string)
labelMap["UUID"] = uuid
labelMap["Type"] = fmt.Sprint(attachment.Type)
labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
labelMap["AttachName"] = attachment.Name
attachjson, _ := json.Marshal(labelMap)
labelmsg.SendAddAttachToLabelSys(string(attachjson))
}
}

ctx.JSON(200, map[string]string{
"result_code": "0",
})
@@ -1238,7 +1013,7 @@ func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
}


for _, attch := range attachs {
has, err := storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID))
has, err := storage.Attachments.HasObject(models.AttachmentRelativePath(attch.UUID))
if err != nil || !has {
continue
}

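The net effect of this file's changes is that attachment.go no longer knows about the model scene: dataset attachments keep the uuid-sharded key layout, while model files move to the modeluuid-sharded layout handled in attachment_model.go below. A sketch of the two layouts (the helper names are hypothetical; the path formulas are the ones used in the handlers):

// Dataset attachments: <minio/obs base path>/<u>/<u>/<uuid>[/<file>]
func datasetObjectKey(basePath, uuid string) string {
	return strings.TrimPrefix(path.Join(basePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
}

// Model files: <Model_prefix>/<m>/<m>/<modeluuid>/<filename>
func modelObjectKey(modeluuid, filename string) string {
	return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
}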

routers/repo/attachment_model.go (+423 -0)

@@ -0,0 +1,423 @@
package repo

import (
"fmt"
"path"
"strconv"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/minio_ext"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/upload"
gouuid "github.com/satori/go.uuid"
)

func GetModelChunks(ctx *context.Context) {
fileMD5 := ctx.Query("md5")
typeCloudBrain := ctx.QueryInt("type")
fileName := ctx.Query("file_name")
scene := ctx.Query("scene")
modeluuid := ctx.Query("modeluuid")
log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
var chunks string

err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}

fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.JSON(200, map[string]string{
"uuid": "",
"uploaded": "0",
"uploadID": "",
"chunks": "",
})
} else {
ctx.ServerError("GetFileChunkByMD5", err)
}
return
}

isExist := false
if typeCloudBrain == models.TypeCloudBrainOne {
isExist, err = storage.Attachments.HasObject(fileChunk.ObjectName)
if isExist {
log.Info("The file is exist in minio. has uploaded.path=" + fileChunk.ObjectName)
} else {
log.Info("The file is not exist in minio..")
}
if err != nil {
ctx.ServerError("HasObject failed", err)
return
}
} else {
isExist, err = storage.ObsHasObject(fileChunk.ObjectName)
if isExist {
log.Info("The file is exist in obs. has uploaded. path=" + fileChunk.ObjectName)
} else {
log.Info("The file is not exist in obs.")
}
if err != nil {
ctx.ServerError("ObsHasObject failed", err)
return
}
}

if isExist {
if fileChunk.IsUploaded == models.FileNotUploaded {
log.Info("the file has been uploaded but not recorded")
fileChunk.IsUploaded = models.FileUploaded
if err = models.UpdateModelFileChunk(fileChunk); err != nil {
log.Error("UpdateFileChunk failed:", err.Error())
}
}
} else {
if fileChunk.IsUploaded == models.FileUploaded {
log.Info("the file has been recorded but not uploaded")
fileChunk.IsUploaded = models.FileNotUploaded
if err = models.UpdateModelFileChunk(fileChunk); err != nil {
log.Error("UpdateFileChunk failed:", err.Error())
}
}

if typeCloudBrain == models.TypeCloudBrainOne {
chunks, err = storage.GetPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
if err != nil {
log.Error("GetPartInfos failed:%v", err.Error())
}
} else {
chunks, err = storage.GetObsPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
if err != nil {
log.Error("GetObsPartInfos failed:%v", err.Error())
}
}
if err != nil {
models.DeleteModelFileChunk(fileChunk)
ctx.JSON(200, map[string]string{
"uuid": "",
"uploaded": "0",
"uploadID": "",
"chunks": "",
})
return
}
}

var attachID int64
attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
if err != nil {
if models.IsErrAttachmentNotExist(err) {
attachID = 0
} else {
ctx.ServerError("GetAttachmentByUUID", err)
return
}
} else {
attachID = attach.ID
}

if attach == nil {
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": "0",
"datasetID": "0",
"fileName": "",
"datasetName": "",
})
return
}

//use the description field to store model info
dbmodeluuid := attach.Description
modelname := ""
if dbmodeluuid != modeluuid {
log.Info("The file has uploaded.fileChunk.ObjectName=" + fileChunk.ObjectName + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))
isExist := copyModelAttachmentFile(typeCloudBrain, fileChunk, fileName, modeluuid)
if dbmodeluuid != "" {
model, err := models.QueryModelById(dbmodeluuid)
if err == nil && model != nil {
modelname = model.Name
}
}
if isExist {
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": modeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
} else {
UpdateModelSize(modeluuid)
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"fileName": attach.Name,
})
}
return
} else {
model, err := models.QueryModelById(dbmodeluuid)
if err == nil {
modelname = model.Name
}
ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
"modeluuid": dbmodeluuid,
"fileName": attach.Name,
"modelName": modelname,
})
return
}

}

func copyModelAttachmentFile(typeCloudBrain int, fileChunk *models.ModelFileChunk, fileName, modeluuid string) bool {
srcObjectName := fileChunk.ObjectName
var isExist bool
//copy
destObjectName := getObjectName(fileName, modeluuid)
if typeCloudBrain == models.TypeCloudBrainOne {
bucketName := setting.Attachment.Minio.Bucket
log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
isExist = true
} else {

log.Info("minio copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
}
} else {
bucketName := setting.Bucket
log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
if storage.ObsGetFilesSize(bucketName, []string{destObjectName}) > 0 {
isExist = true
} else {
log.Info("obs copy..srcObjectName=" + srcObjectName + " bucketName=" + bucketName)
storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
}
}
return isExist
}

func getObjectName(filename string, modeluuid string) string {
return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
}

func NewModelMultipart(ctx *context.Context) {
if !setting.Attachment.Enabled {
ctx.Error(404, "attachment is not enabled")
return
}
fileName := ctx.Query("file_name")
modeluuid := ctx.Query("modeluuid")

err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
if err != nil {
ctx.Error(400, err.Error())
return
}

typeCloudBrain := ctx.QueryInt("type")
err = checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}

if setting.Attachment.StoreType == storage.MinioStorageType {
totalChunkCounts := ctx.QueryInt("totalChunkCounts")
if totalChunkCounts > minio_ext.MaxPartsCount {
ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
return
}

fileSize := ctx.QueryInt64("size")
if fileSize > minio_ext.MaxMultipartPutObjectSize {
ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
return
}

uuid := gouuid.NewV4().String()
var uploadID string
if typeCloudBrain == models.TypeCloudBrainOne {
uploadID, err = storage.NewMultiPartUpload(strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/"))
if err != nil {
ctx.ServerError("NewMultipart", err)
return
}
} else {
uploadID, err = storage.NewObsMultiPartUpload(strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/"))
if err != nil {
ctx.ServerError("NewObsMultiPartUpload", err)
return
}
}

_, err = models.InsertModelFileChunk(&models.ModelFileChunk{
UUID: uuid,
UserID: ctx.User.ID,
UploadID: uploadID,
Md5: ctx.Query("md5"),
Size: fileSize,
TotalChunks: totalChunkCounts,
Type: typeCloudBrain,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
return
}

ctx.JSON(200, map[string]string{
"uuid": uuid,
"uploadID": uploadID,
})
} else {
ctx.Error(404, "storage type is not enabled")
return
}
}

func GetModelMultipartUploadUrl(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
partNumber := ctx.QueryInt("chunkNumber")
size := ctx.QueryInt64("size")
fileName := ctx.Query("file_name")
typeCloudBrain := ctx.QueryInt("type")
err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}
fileChunk, err := models.GetModelFileChunkByUUID(uuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetFileChunkByUUID", err)
}
return
}
url := ""
if typeCloudBrain == models.TypeCloudBrainOne {
if size > minio_ext.MinPartSize {
ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
return
}

url, err = storage.GenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber, size)
if err != nil {
ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
return
}
} else {
if setting.PROXYURL != "" {
url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
log.Info("return url=" + url)
} else {
url, err = storage.ObsGenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber)
if err != nil {
ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
return
}
log.Info("url=" + url)
}
}

ctx.JSON(200, map[string]string{
"url": url,
})
}

func CompleteModelMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
typeCloudBrain := ctx.QueryInt("type")
fileName := ctx.Query("file_name")
modeluuid := ctx.Query("modeluuid")
log.Warn("uuid:" + uuid)
log.Warn("modeluuid:" + modeluuid)
log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))

err := checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
}
fileChunk, err := models.GetModelFileChunkByUUID(uuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetFileChunkByUUID", err)
}
return
}

if typeCloudBrain == models.TypeCloudBrainOne {
_, err = storage.CompleteMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}
} else {
err = storage.CompleteObsMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
return
}
}

fileChunk.IsUploaded = models.FileUploaded

err = models.UpdateModelFileChunk(fileChunk)
if err != nil {
ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
return
}
//update the model size info
UpdateModelSize(modeluuid)

_, err = models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: true,
Name: fileName,
Size: ctx.QueryInt64("size"),
DatasetID: 0,
Description: modeluuid,
Type: typeCloudBrain,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
ctx.JSON(200, map[string]string{
"result_code": "0",
})

}
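
GetModelChunks above reconciles two sources of truth, object storage and the model_file_chunk record, before answering the client. The decision it implements boils down to this sketch (an assumed helper, not part of the commit; names follow the handler):

// reconcileChunkState mirrors the checks in GetModelChunks: object storage
// wins over the database record whenever the two disagree.
func reconcileChunkState(existsInStorage bool, chunk *models.ModelFileChunk) {
	if existsInStorage && chunk.IsUploaded == models.FileNotUploaded {
		// uploaded but never recorded: mark the record uploaded
		chunk.IsUploaded = models.FileUploaded
		models.UpdateModelFileChunk(chunk)
	} else if !existsInStorage && chunk.IsUploaded == models.FileUploaded {
		// recorded but the object is gone: reset so the client re-uploads
		chunk.IsUploaded = models.FileNotUploaded
		models.UpdateModelFileChunk(chunk)
	}
}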

routers/routes/routes.go (+7 -0)

@@ -729,6 +729,13 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/complete_multipart", repo.CompleteMultipart) m.Post("/complete_multipart", repo.CompleteMultipart)
}) })


m.Group("/attachments/model", func() {
m.Get("/get_chunks", repo.GetModelChunks)
m.Get("/new_multipart", repo.NewModelMultipart)
m.Get("/get_multipart_url", repo.GetModelMultipartUploadUrl)
m.Post("/complete_multipart", repo.CompleteModelMultipart)
})

m.Group("/attachments", func() { m.Group("/attachments", func() {
m.Get("/public/query", repo.QueryAllPublicDataset) m.Get("/public/query", repo.QueryAllPublicDataset)
m.Get("/private/:username", repo.QueryPrivateDataset) m.Get("/private/:username", repo.QueryPrivateDataset)

