
attachment_model.go

package repo

import (
	"fmt"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"

	gouuid "github.com/satori/go.uuid"
)
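
// GetModelChunks reports the upload state of a model file identified by its
// MD5, the current user and the model UUID, so the client can decide whether
// to skip, resume or restart the multipart upload.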
func GetModelChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	scene := ctx.Query("scene")
	modeluuid := ctx.Query("modeluuid")
	log.Info("scene=" + scene + " typeCloudBrain=" + fmt.Sprint(typeCloudBrain))

	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetModelFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain, modeluuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// No chunk record yet: tell the client to start a fresh upload.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetModelFileChunkByMD5AndUser", err)
		}
		return
	}

	// Check whether the object already exists in the backing store.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(fileChunk.ObjectName)
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
		if isExist {
			log.Info("The file already exists in minio and has been uploaded. path=" + fileChunk.ObjectName)
		} else {
			log.Info("The file does not exist in minio.")
		}
	} else {
		isExist, err = storage.ObsHasObject(fileChunk.ObjectName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
		if isExist {
			log.Info("The file already exists in obs and has been uploaded. path=" + fileChunk.ObjectName)
		} else {
			log.Info("The file does not exist in obs.")
		}
	}

	if isExist {
		// The object is already in storage; make sure the record agrees.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateModelFileChunk(fileChunk); err != nil {
				log.Error("UpdateModelFileChunk failed: %v", err.Error())
			}
		}

		modelname := ""
		model, err := models.QueryModelById(modeluuid)
		if err == nil && model != nil {
			modelname = model.Name
		}

		ctx.JSON(200, map[string]string{
			"uuid":      fileChunk.UUID,
			"uploaded":  strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":  fileChunk.UploadID,
			"chunks":    chunks,
			"attachID":  "0",
			"modeluuid": modeluuid,
			"fileName":  fileName,
			"modelName": modelname,
		})
	} else {
		// The object is missing from storage; make sure the record agrees,
		// then return the parts that have already been uploaded so the
		// client can resume the multipart upload.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateModelFileChunk(fileChunk); err != nil {
				log.Error("UpdateModelFileChunk failed: %v", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed: %v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(fileChunk.ObjectName, fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed: %v", err.Error())
			}
		}

		if err != nil {
			// The upload can not be resumed; drop the stale record so the client starts over.
			models.DeleteModelFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.JSON(200, map[string]string{
				"uuid":        fileChunk.UUID,
				"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
				"uploadID":    fileChunk.UploadID,
				"chunks":      chunks,
				"attachID":    "0",
				"datasetID":   "0",
				"fileName":    "",
				"datasetName": "",
			})
		}
	}
}
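
// getObjectName builds the storage object name for a model file:
// Model_prefix/<uuid[0]>/<uuid[1]>/<uuid>/<filename>, with any leading "/" trimmed.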
func getObjectName(filename string, modeluuid string) string {
	return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
}
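
// NewModelMultipart validates the requested upload (file type, chunk count,
// total size), starts a multipart upload in minio or OBS depending on the
// cloudbrain type, and records a ModelFileChunk row for it.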
func NewModelMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	fileName := ctx.Query("file_name")
	modeluuid := ctx.Query("modeluuid")

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk count (%d) is too large", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size (%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		objectName := getObjectName(fileName, modeluuid)

		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(objectName)
			if err != nil {
				ctx.ServerError("NewMultiPartUpload", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(objectName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		// Record the upload so that it can be resumed and completed later.
		_, err = models.InsertModelFileChunk(&models.ModelFileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			ObjectName:  objectName,
			ModelUUID:   modeluuid,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertModelFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
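
// GetModelMultipartUploadUrl returns a pre-signed URL that the client can use
// to upload a single chunk of the multipart upload identified by uuid and uploadID.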
func GetModelMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	typeCloudBrain := ctx.QueryInt("type")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetModelFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetModelFileChunkByUUID", err)
		}
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// A single part may not exceed the minio minimum part size.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size (%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		url, err = storage.ObsGenMultiPartSignedUrl(fileChunk.ObjectName, uploadID, partNumber)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
			return
		}
		log.Info("url=" + url)
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
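
// CompleteModelMultipart finishes the multipart upload in minio or OBS, marks
// the chunk record as uploaded and refreshes the stored model size.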
func CompleteModelMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	modeluuid := ctx.Query("modeluuid")
	log.Warn("uuid:" + uuid)
	log.Warn("modeluuid:" + modeluuid)
	log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetModelFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetModelFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(fileChunk.ObjectName, uploadID, fileChunk.TotalChunks)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateModelFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateModelFileChunk: %v", err))
		return
	}

	// Update the model size information.
	UpdateModelSize(modeluuid)

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}