
attachment.go 34 kB

// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/auth"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"

	gouuid "github.com/satori/go.uuid"
)

const (
	// result of decompress
	DecompressSuccess = "0"
	DecompressFailed  = "1"

	tplAttachmentUpload base.TplName = "repo/attachment/upload"
	tplAttachmentEdit   base.TplName = "repo/attachment/edit"
)
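// CloudBrainDataset is the dataset description returned to the cloudbrain service.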
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
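// UploadForm carries the parameters of a single chunk upload request.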
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
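// RenderAttachmentSettings exposes the attachment settings to the render context.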
func RenderAttachmentSettings(ctx *context.Context) {
	renderAttachmentSettings(ctx)
}

func renderAttachmentSettings(ctx *context.Context) {
	ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
	ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
	ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
	ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
	ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
}
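// UploadAttachmentUI renders the dataset attachment upload page.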
func UploadAttachmentUI(ctx *context.Context) {
	ctx.Data["datasetId"] = ctx.Query("datasetId")
	ctx.Data["PageIsDataset"] = true
	ctx.HTML(200, tplAttachmentUpload)
}
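// EditAttachmentUI renders the edit page of an existing attachment.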
func EditAttachmentUI(ctx *context.Context) {
	id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
	ctx.Data["PageIsDataset"] = true
	attachment, _ := models.GetAttachmentByID(id)
	if attachment == nil {
		ctx.Error(404, "The attachment does not exist.")
		return
	}
	ctx.Data["Attachment"] = attachment
	ctx.HTML(200, tplAttachmentEdit)
}
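// EditAttachment updates the description of an attachment.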
func EditAttachment(ctx *context.Context, form auth.EditAttachmentForm) {
	err := models.UpdateAttachmentDescription(&models.Attachment{
		ID:          form.ID,
		Description: form.Description,
	})
	if err != nil {
		ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.edit_attachment_fail")))
		return
	}
	ctx.JSON(http.StatusOK, models.BaseOKMessage)
}
// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}

	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
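// UpdatePublicAttachment toggles the is_private flag of an attachment.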
func UpdatePublicAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(404, err.Error())
		return
	}

	attach.IsPrivate = isPrivate
	models.UpdateAttachment(attach)
}
// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	// issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}

	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))

	DeleteAllUnzipFile(attach, "")

	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}

	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
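// DownloadUserIsOrgOrCollaboration reports whether the current user may download the
// attachment because they belong to the owning organization or collaborate on the linked repository.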
func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		log.Info("query dataset error")
	} else {
		repo, err := models.GetRepositoryByID(dataset.RepoID)
		if err != nil {
			log.Info("query repo error.")
		} else {
			repo.GetOwner()
			if ctx.User != nil {
				if repo.Owner.IsOrganization() {
					if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
						log.Info("org user may visit the attach.")
						return true
					}
				}
				isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
				if isCollaborator {
					log.Info("Collaborator user may visit the attach.")
					return true
				}
			}
		}
	}
	return false
}
// GetAttachment serves attachments
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}

	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}

	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}

	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}

	if repository == nil { // If not linked
		//if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate { //We block if not the uploader
		//log.Info("ctx.IsSigned =" + fmt.Sprintf("%v", ctx.IsSigned))
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { // We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { // If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}

	if dataSet != nil {
		if !ctx.IsSigned {
			ctx.SetCookie("redirect_to", setting.AppSubURL+ctx.Req.URL.RequestURI(), 0, setting.AppSubURL)
			ctx.Redirect(setting.AppSubURL + "/user/login")
			return
		} else {
			isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
				return
			}
			if !isPermit {
				ctx.Error(http.StatusNotFound)
				return
			}
		}
	}

	// If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(attach.UUID[0:1], attach.UUID[1:2], attach.UUID, attach.Name)), "/")
				url, err = storage.ObsGetPreSignedUrl(objectName, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()

			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()

		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}

		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
	if err := attach.IncreaseDownloadCount(); err != nil {
		return err
	}

	if dataSet != nil {
		if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
			return err
		}
	}

	return nil
}
// Get a presigned url for put object
func GetPresignedPutObjectURL(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	if setting.Attachment.StoreType == storage.MinioStorageType {
		uuid := gouuid.NewV4().String()
		url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("PresignedPutURL", err)
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid": uuid,
			"url":  url,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
// AddAttachment response for add attachment record
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	uuid := ctx.Query("uuid")
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}

	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}

	datasetId := ctx.QueryInt64("dataset_id")
	dataset, err := models.GetDatasetByID(datasetId)
	if err != nil {
		ctx.Error(404, "dataset does not exist.")
		return
	}

	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  dataset.IsPrivate(),
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}

	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			// todo: decompress type_two
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
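// isCanDecompress reports whether the attachment is an archive that the decompress worker can handle.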
func isCanDecompress(name string) bool {
	if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
		return true
	}
	return false
}
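// UpdateAttachmentDecompressState records the result reported by the decompress worker
// and notifies the label system about the attachment.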
func UpdateAttachmentDecompressState(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	result := ctx.Query("result")

	attach, err := models.GetAttachmentByUUID(uuid)
	if err != nil {
		log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
		return
	}

	if result == DecompressSuccess {
		attach.DecompressState = models.DecompressStateDone
	} else if result == DecompressFailed {
		attach.DecompressState = models.DecompressStateFailed
	} else {
		log.Error("result is error: %s", result)
		return
	}

	err = models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
		return
	}

	log.Info("start to send msg to labelsystem")
	dataset, _ := models.GetDatasetByID(attach.DatasetID)

	labelMap := make(map[string]string)
	labelMap["UUID"] = uuid
	labelMap["Type"] = fmt.Sprint(attach.Type)
	labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
	labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
	labelMap["AttachName"] = attach.Name
	attachjson, _ := json.Marshal(labelMap)
	labelmsg.SendAddAttachToLabelSys(string(attachjson))
	log.Info("end to send msg to labelsystem")

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
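// getCloudOneMinioPrefix returns the MinIO object prefix for a file chunk, depending on
// whether the chunk belongs to a model upload or to a dataset attachment.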
func getCloudOneMinioPrefix(scene string, fileChunk *models.FileChunk) string {
	if scene == Attachment_model {
		return fileChunk.ObjectName
	} else {
		return setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID)
	}
}

func getCloudTwoOBSPrefix(scene string, fileChunk *models.FileChunk, fileName string) string {
	if scene == Attachment_model {
		return fileChunk.ObjectName
	} else {
		return setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + fileName
	}
}
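// GetSuccessChunks checks whether a file with the given MD5 has already been uploaded by the
// current user and returns the recorded upload state, upload ID and part information so that
// the client can resume a chunked upload.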
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	scene := ctx.Query("scene")
	modeluuid := ctx.Query("modeluuid")
	log.Info("scene=" + scene)
	var chunks string

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}

	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(getCloudOneMinioPrefix(scene, fileChunk))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		oldFileName := fileName
		oldAttachment, _ := models.GetAttachmentByUUID(fileChunk.UUID)
		if oldAttachment != nil {
			oldFileName = oldAttachment.Name
		}
		isExist, err = storage.ObsHasObject(getCloudTwoOBSPrefix(scene, fileChunk, oldFileName))
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}

	if isExist {
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}
	} else {
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed: %s", err.Error())
			}
		}

		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(getChunkMinioExistObjectName(scene, fileChunk, fileName), fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(getChunkOBSExistObjectName(scene, fileChunk, fileName), fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}

		if err != nil {
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}

	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}

	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      chunks,
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}

	if scene == Attachment_model {
		// the model information is stored in the attachment description
		dbmodeluuid := attach.Description
		modelname := ""
		if dbmodeluuid != modeluuid {
			model, err := models.QueryModelById(dbmodeluuid)
			if err == nil {
				modelname = model.Name
			}
			// copy the uploaded object to the new model's path
			srcObjectName := fileChunk.ObjectName
			destObjectName := getObjectName(fileName, modeluuid)
			var isExist bool
			if typeCloudBrain == models.TypeCloudBrainOne {
				bucketName := setting.Attachment.Minio.Bucket
				if storage.MinioGetFilesSize(bucketName, []string{destObjectName}) > 0 {
					isExist = true
				} else {
					storage.MinioCopyAFile(bucketName, srcObjectName, bucketName, destObjectName)
				}
			} else {
				bucketName := setting.Bucket
				if storage.ObsGetFilesSize(bucketName, []string{destObjectName}) > 0 {
					isExist = true
				} else {
					storage.ObsCopyFile(bucketName, srcObjectName, bucketName, destObjectName)
				}
			}

			if isExist {
				ctx.JSON(200, map[string]string{
					"uuid":      fileChunk.UUID,
					"uploaded":  strconv.Itoa(fileChunk.IsUploaded),
					"uploadID":  fileChunk.UploadID,
					"chunks":    chunks,
					"attachID":  strconv.Itoa(int(attachID)),
					"modeluuid": modeluuid,
					"fileName":  attach.Name,
					"modelName": modelname,
				})
			} else {
				UpdateModelSize(modeluuid)
				ctx.JSON(200, map[string]string{
					"uuid":     fileChunk.UUID,
					"uploaded": strconv.Itoa(fileChunk.IsUploaded),
					"uploadID": fileChunk.UploadID,
					"chunks":   chunks,
					"attachID": strconv.Itoa(int(attachID)),
					"fileName": attach.Name,
				})
			}
			return
		} else {
			model, err := models.QueryModelById(dbmodeluuid)
			if err == nil {
				modelname = model.Name
			}
			ctx.JSON(200, map[string]string{
				"uuid":      fileChunk.UUID,
				"uploaded":  strconv.Itoa(fileChunk.IsUploaded),
				"uploadID":  fileChunk.UploadID,
				"chunks":    chunks,
				"attachID":  strconv.Itoa(int(attachID)),
				"modeluuid": dbmodeluuid,
				"fileName":  attach.Name,
				"modelName": modelname,
			})
			return
		}
	} else {
		dataset, err := models.GetDatasetByID(attach.DatasetID)
		if err != nil {
			ctx.ServerError("GetDatasetByID", err)
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      chunks,
			"attachID":    strconv.Itoa(int(attachID)),
			"datasetID":   strconv.Itoa(int(attach.DatasetID)),
			"fileName":    attach.Name,
			"datasetName": dataset.Title,
		})
	}
}
func getObjectName(filename string, modeluuid string) string {
	return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
}

func getMinioInitObjectName(scene string, uuid, modeluuid string, filename string) string {
	if scene == Attachment_model {
		return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
	} else {
		return strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")
	}
}

func getChunkMinioExistObjectName(scene string, fileChunk *models.FileChunk, filename string) string {
	if scene == Attachment_model {
		return fileChunk.ObjectName
	} else {
		return strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/")
	}
}

func getOBSInitObjectName(scene string, uuid, modeluuid string, filename string) string {
	if scene == Attachment_model {
		return strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, filename)), "/")
	} else {
		return strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, filename)), "/")
	}
}

func getChunkOBSExistObjectName(scene string, fileChunk *models.FileChunk, filename string) string {
	if scene == Attachment_model {
		return fileChunk.ObjectName
	} else {
		return strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, filename)), "/")
	}
}
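// NewMultipart initiates a multipart upload for an attachment or a model file and records
// it as a FileChunk so that the upload can be resumed later.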
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}

	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileName := ctx.Query("file_name")
	scene := ctx.Query("scene")
	modeluuid := ctx.Query("modeluuid")

	if setting.Attachment.StoreType == storage.MinioStorageType {
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}

		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}

		uuid := gouuid.NewV4().String()
		var uploadID string
		var objectName string
		if typeCloudBrain == models.TypeCloudBrainOne {
			objectName = getMinioInitObjectName(scene, uuid, modeluuid, fileName)
			uploadID, err = storage.NewMultiPartUpload(objectName)
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			objectName = getOBSInitObjectName(scene, uuid, modeluuid, fileName)
			uploadID, err = storage.NewObsMultiPartUpload(objectName)
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}

		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			ObjectName:  objectName,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}

		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
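// PutOBSProxyUpload proxies the upload of one part of a multipart upload to OBS.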
func PutOBSProxyUpload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadId")
	partNumber := ctx.QueryInt("partNumber")
	fileName := ctx.Query("file_name")

	RequestBody := ctx.Req.Body()
	if RequestBody == nil {
		ctx.Error(500, "request body is nil")
		return
	}

	objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")

	err := storage.ObsMultiPartUpload(objectName, uploadID, partNumber, fileName, RequestBody.ReadCloser())
	if err != nil {
		log.Info("upload error: %v", err)
	}
}
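// GetOBSProxyDownload streams an object from OBS back to the client through the server.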
func GetOBSProxyDownload(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	fileName := ctx.Query("file_name")

	objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
	body, err := storage.ObsDownloadAFile(setting.Bucket, objectName)
	if err != nil {
		log.Info("download error: %v", err)
	} else {
		defer body.Close()

		ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
		ctx.Resp.Header().Set("Content-Type", "application/octet-stream")

		p := make([]byte, 1024)
		var readErr error
		var readCount int
		// read the object content in chunks and stream it to the response
		for {
			readCount, readErr = body.Read(p)
			if readCount > 0 {
				ctx.Resp.Write(p[:readCount])
			}
			if readErr != nil {
				break
			}
		}
	}
}
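// GetMultipartUploadUrl returns a presigned URL (or a proxy URL for OBS) that the client
// can use to upload one chunk of a multipart upload.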
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	scene := ctx.Query("scene")

	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}

		url, err = storage.GenMultiPartSignedUrl(getChunkMinioExistObjectName(scene, fileChunk, fileName), uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(getChunkOBSExistObjectName(scene, fileChunk, fileName), uploadID, partNumber)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}

	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
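// CompleteMultipart finishes a multipart upload, records the resulting attachment and, for
// dataset archives, kicks off the decompress task or notifies the label system.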
func CompleteMultipart(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	scene := ctx.Query("scene")
	modeluuid := ctx.Query("modeluuid")

	log.Warn("uuid:" + uuid)
	log.Warn("modeluuid:" + modeluuid)
	log.Warn("scene:" + scene)
	log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))

	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}

	fileChunk, err := models.GetFileChunkByUUID(uuid)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetFileChunkByUUID", err)
		}
		return
	}

	if typeCloudBrain == models.TypeCloudBrainOne {
		_, err = storage.CompleteMultiPartUpload(getChunkMinioExistObjectName(scene, fileChunk, fileName), uploadID, fileChunk.TotalChunks)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
			return
		}
	} else {
		err = storage.CompleteObsMultiPartUpload(getChunkOBSExistObjectName(scene, fileChunk, fileName), uploadID, fileChunk.TotalChunks)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
			return
		}
	}

	fileChunk.IsUploaded = models.FileUploaded

	err = models.UpdateFileChunk(fileChunk)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
		return
	}

	if scene == Attachment_model {
		// update the model size information
		UpdateModelSize(modeluuid)
		_, err := models.InsertAttachment(&models.Attachment{
			UUID:        uuid,
			UploaderID:  ctx.User.ID,
			IsPrivate:   true,
			Name:        fileName,
			Size:        ctx.QueryInt64("size"),
			DatasetID:   0,
			Description: modeluuid,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"result_code": "0",
		})
		return
	} else {
		dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
		log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
		attachment, err := models.InsertAttachment(&models.Attachment{
			UUID:        uuid,
			UploaderID:  ctx.User.ID,
			IsPrivate:   dataset.IsPrivate(),
			Name:        fileName,
			Size:        ctx.QueryInt64("size"),
			DatasetID:   ctx.QueryInt64("dataset_id"),
			Description: ctx.Query("description"),
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
			return
		}
		attachment.UpdateDatasetUpdateUnix()
		repository, _ := models.GetRepositoryByID(dataset.RepoID)
		notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)

		if attachment.DatasetID != 0 {
			if isCanDecompress(attachment.Name) {
				if typeCloudBrain == models.TypeCloudBrainOne {
					err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
					if err != nil {
						log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
					} else {
						updateAttachmentDecompressStateIng(attachment)
					}
				}
				if typeCloudBrain == models.TypeCloudBrainTwo {
					attachjson, _ := json.Marshal(attachment)
					err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
					if err != nil {
						log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attachment.UUID, err.Error())
					} else {
						updateAttachmentDecompressStateIng(attachment)
					}
				}
			} else {
				labelMap := make(map[string]string)
				labelMap["UUID"] = uuid
				labelMap["Type"] = fmt.Sprint(attachment.Type)
				labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
				labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
				labelMap["AttachName"] = attachment.Name
				attachjson, _ := json.Marshal(labelMap)
				labelmsg.SendAddAttachToLabelSys(string(attachjson))
			}
		}
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
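// HandleUnDecompressAttachment re-submits decompress tasks for attachments whose
// decompression has not finished yet.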
func HandleUnDecompressAttachment() {
	attachs, err := models.GetUnDecompressAttachments()
	if err != nil {
		log.Error("GetUnDecompressAttachments failed: %s", err.Error())
		return
	}

	for _, attach := range attachs {
		if attach.Type == models.TypeCloudBrainOne {
			err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
			if err != nil {
				log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
			} else {
				updateAttachmentDecompressStateIng(attach)
			}
		} else if attach.Type == models.TypeCloudBrainTwo {
			attachjson, _ := json.Marshal(attach)
			err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
			if err != nil {
				log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
			} else {
				updateAttachmentDecompressStateIng(attach)
			}
		}
	}
}

func updateAttachmentDecompressStateIng(attach *models.Attachment) {
	attach.DecompressState = models.DecompressStateIng
	err := models.UpdateAttachment(attach)
	if err != nil {
		log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
	}
}
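// QueryAllPublicDataset lists every public attachment as a cloudbrain dataset.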
func QueryAllPublicDataset(ctx *context.Context) {
	attachs, err := models.GetAllPublicAttachments()
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	queryDatasets(ctx, attachs)
}

func QueryPrivateDataset(ctx *context.Context) {
	username := ctx.Params(":username")
	attachs, err := models.GetPrivateAttachments(username)
	if err != nil {
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	for _, attach := range attachs {
		attach.Name = username
	}

	queryDatasets(ctx, attachs)
}
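// queryDatasets serializes the given attachments as CloudBrainDataset entries and writes
// them to the response.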
func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
	var datasets []CloudBrainDataset
	if len(attachs) == 0 {
		log.Info("dataset is null")
		ctx.JSON(200, map[string]string{
			"result_code": "0",
			"error_msg":   "",
			"data":        "",
		})
		return
	}

	for _, attch := range attachs {
		has, err := storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID))
		if err != nil || !has {
			continue
		}

		datasets = append(datasets, CloudBrainDataset{
			strconv.FormatInt(attch.ID, 10),
			attch.Attachment.Name,
			setting.Attachment.Minio.RealPath +
				setting.Attachment.Minio.Bucket + "/" +
				setting.Attachment.Minio.BasePath +
				models.AttachmentRelativePath(attch.UUID) +
				attch.UUID,
			attch.Name,
			attch.CreatedUnix.Format("2006-01-02 03:04:05 PM"),
		})
	}

	data, err := json.Marshal(datasets)
	if err != nil {
		log.Error("json.Marshal failed: %s", err.Error())
		ctx.JSON(200, map[string]string{
			"result_code": "-1",
			"error_msg":   err.Error(),
			"data":        "",
		})
		return
	}

	ctx.JSON(200, map[string]string{
		"result_code": "0",
		"error_msg":   "",
		"data":        string(data),
	})
}
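// checkTypeCloudBrain validates that the requested cloudbrain type is one of the supported types.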
func checkTypeCloudBrain(typeCloudBrain int) error {
	if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
		log.Error("type error: %d", typeCloudBrain)
		return errors.New("type error")
	}
	return nil
}