You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

attachment.go 30 kB

4 years ago
4 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
3 years ago
3 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
3 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061
  1. // Copyright 2017 The Gitea Authors. All rights reserved.
  2. // Use of this source code is governed by a MIT-style
  3. // license that can be found in the LICENSE file.
  4. package repo
import (
	contexExt "context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path"
	"strconv"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/auth"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/context"
	"code.gitea.io/gitea/modules/labelmsg"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/minio_ext"
	"code.gitea.io/gitea/modules/notification"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
	"code.gitea.io/gitea/modules/upload"
	"code.gitea.io/gitea/modules/worker"
	repo_service "code.gitea.io/gitea/services/repository"

	gouuid "github.com/satori/go.uuid"
)
const (
	// Result codes the decompress worker reports back through
	// UpdateAttachmentDecompressState's "result" query parameter.
	DecompressSuccess = "0"
	DecompressFailed  = "1"

	// Templates for the attachment upload and edit pages.
	tplAttachmentUpload base.TplName = "repo/attachment/upload"
	tplAttachmentEdit   base.TplName = "repo/attachment/edit"
)
// CloudBrainDataset is the JSON shape of a dataset entry exchanged with the
// CloudBrain service. Note that the JSON names differ from the Go field
// names (e.g. Path serializes as "place", UserName as "provider").
type CloudBrainDataset struct {
	UUID       string `json:"id"`
	Name       string `json:"name"`
	Path       string `json:"place"`
	UserName   string `json:"provider"`
	CreateTime string `json:"created_at"`
}
// UploadForm binds the parameters of a chunked (multipart) upload request;
// the form tags give the field names sent by the upload client.
type UploadForm struct {
	UploadID   string         `form:"uploadId"`
	UuID       string         `form:"uuid"`
	PartSize   int64          `form:"size"`
	Offset     int64          `form:"offset"`
	PartNumber int            `form:"chunkNumber"`
	PartFile   multipart.File `form:"file"`
}
  52. func RenderAttachmentSettings(ctx *context.Context) {
  53. renderAttachmentSettings(ctx)
  54. }
  55. func renderAttachmentSettings(ctx *context.Context) {
  56. ctx.Data["IsAttachmentEnabled"] = setting.Attachment.Enabled
  57. ctx.Data["AttachmentStoreType"] = setting.Attachment.StoreType
  58. ctx.Data["AttachmentAllowedTypes"] = setting.Attachment.AllowedTypes
  59. ctx.Data["AttachmentMaxSize"] = setting.Attachment.MaxSize
  60. ctx.Data["AttachmentMaxFiles"] = setting.Attachment.MaxFiles
  61. }
  62. func UploadAttachmentUI(ctx *context.Context) {
  63. ctx.Data["datasetId"] = ctx.Query("datasetId")
  64. ctx.Data["PageIsDataset"] = true
  65. ctx.HTML(200, tplAttachmentUpload)
  66. }
  67. func EditAttachmentUI(ctx *context.Context) {
  68. id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
  69. ctx.Data["PageIsDataset"] = true
  70. attachment, _ := models.GetAttachmentByID(id)
  71. if attachment == nil {
  72. ctx.Error(404, "The attachment does not exits.")
  73. }
  74. ctx.Data["Attachment"] = attachment
  75. ctx.HTML(200, tplAttachmentEdit)
  76. }
  77. func EditAttachment(ctx *context.Context, form auth.EditAttachmentForm) {
  78. err := models.UpdateAttachmentDescription(&models.Attachment{
  79. ID: form.ID,
  80. Description: form.Description,
  81. })
  82. if err != nil {
  83. ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.edit_attachment_fail")))
  84. }
  85. ctx.JSON(http.StatusOK, models.BaseOKMessage)
  86. }
// UploadAttachment response for uploading issue's attachment
func UploadAttachment(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	file, header, err := ctx.Req.FormFile("file")
	if err != nil {
		ctx.Error(500, fmt.Sprintf("FormFile: %v", err))
		return
	}
	defer file.Close()

	// Sniff up to the first 1 KiB for content-type validation. The read
	// error is deliberately ignored; a short read just shrinks buf.
	// NOTE(review): when n == 0 (empty file or immediate read error) buf
	// keeps its full 1024 zero bytes -- confirm the verifier tolerates that.
	buf := make([]byte, 1024)
	n, _ := file.Read(buf)
	if n > 0 {
		buf = buf[:n]
	}
	err = upload.VerifyAllowedContentType(buf, strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}

	// Parse error ignored: a missing/invalid dataset_id yields 0.
	datasetID, _ := strconv.ParseInt(ctx.Req.FormValue("dataset_id"), 10, 64)
	// The sniffed prefix and the remaining stream are both handed to
	// NewAttachment; presumably it concatenates them so the whole file is
	// persisted -- confirm in models.NewAttachment.
	attach, err := models.NewAttachment(&models.Attachment{
		IsPrivate:  true,
		UploaderID: ctx.User.ID,
		Name:       header.Filename,
		DatasetID:  datasetID,
	}, buf, file)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("NewAttachment: %v", err))
		return
	}

	log.Trace("New attachment uploaded: %s", attach.UUID)
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  125. func UpdatePublicAttachment(ctx *context.Context) {
  126. file := ctx.Query("file")
  127. isPrivate, _ := strconv.ParseBool(ctx.Query("is_private"))
  128. attach, err := models.GetAttachmentByUUID(file)
  129. if err != nil {
  130. ctx.Error(404, err.Error())
  131. return
  132. }
  133. attach.IsPrivate = isPrivate
  134. models.UpdateAttachment(attach)
  135. }
// DeleteAttachment response for deleting issue's attachment
func DeleteAttachment(ctx *context.Context) {
	file := ctx.Query("file")
	attach, err := models.GetAttachmentByUUID(file)
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	//issue 214: mod del-dataset permission
	if !models.CanDelAttachment(ctx.IsSigned, ctx.User, attach) {
		ctx.Error(403)
		return
	}
	err = models.DeleteAttachment(attach, true)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
		return
	}
	// Best-effort cleanup after the record is gone: decrement the repo's
	// dataset counter asynchronously and notify the label system
	// (marshal error deliberately ignored).
	go repo_service.DecreaseRepoDatasetNum(attach.DatasetID)
	attachjson, _ := json.Marshal(attach)
	labelmsg.SendDeleteAttachToLabelSys(string(attachjson))
	// Remove any files produced by decompression plus the chunk-upload record.
	DeleteAllUnzipFile(attach, "")
	_, err = models.DeleteFileChunkById(attach.UUID)
	if err != nil {
		ctx.Error(500, fmt.Sprintf("DeleteFileChunkById: %v", err))
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid": attach.UUID,
	})
}
  167. func DownloadUserIsOrgOrCollaboration(ctx *context.Context, attach *models.Attachment) bool {
  168. dataset, err := models.GetDatasetByID(attach.DatasetID)
  169. if err != nil {
  170. log.Info("query dataset error")
  171. } else {
  172. repo, err := models.GetRepositoryByID(dataset.RepoID)
  173. if err != nil {
  174. log.Info("query repo error.")
  175. } else {
  176. repo.GetOwner()
  177. if ctx.User != nil {
  178. if repo.Owner.IsOrganization() {
  179. if repo.Owner.IsUserPartOfOrg(ctx.User.ID) {
  180. log.Info("org user may visit the attach.")
  181. return true
  182. }
  183. }
  184. isCollaborator, _ := repo.IsCollaborator(ctx.User.ID)
  185. if isCollaborator {
  186. log.Info("Collaborator user may visit the attach.")
  187. return true
  188. }
  189. }
  190. }
  191. }
  192. return false
  193. }
// GetAttachment serve attachements
//
// Resolves the attachment by the ":uuid" URL parameter, checks access via
// the linked repository and/or dataset, then either redirects to a
// presigned storage URL or streams the file directly.
func GetAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	attach, err := models.GetAttachmentByUUID(ctx.Params(":uuid"))
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			ctx.Error(404)
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
		}
		return
	}
	// Work out what the attachment belongs to: a repository (issue/release)
	// and/or a dataset. A dataset link overrides the unit type.
	repository, unitType, err := attach.LinkedRepository()
	if err != nil {
		ctx.ServerError("LinkedRepository", err)
		return
	}
	dataSet, err := attach.LinkedDataSet()
	if err != nil {
		ctx.ServerError("LinkedDataSet", err)
		return
	}
	if repository == nil && dataSet != nil {
		repository, _ = models.GetRepositoryByID(dataSet.RepoID)
		unitType = models.UnitTypeDatasets
	}
	if repository == nil { //If not linked
		// Unlinked attachments: only the uploader, org members and
		// collaborators may fetch a private attachment.
		if !(ctx.IsSigned && attach.UploaderID == ctx.User.ID) && attach.IsPrivate && !DownloadUserIsOrgOrCollaboration(ctx, attach) { //We block if not the uploader
			ctx.Error(http.StatusNotFound)
			return
		}
	} else { //If we have the repository we check access
		perm, errPermission := models.GetUserRepoPermission(repository, ctx.User)
		if errPermission != nil {
			ctx.Error(http.StatusInternalServerError, "GetUserRepoPermission", errPermission.Error())
			return
		}
		if !perm.CanRead(unitType) {
			ctx.Error(http.StatusNotFound)
			return
		}
	}
	// Dataset attachments additionally require a signed-in user with
	// dataset-level permission; anonymous users are sent to login.
	if dataSet != nil {
		if !ctx.IsSigned {
			ctx.SetCookie("redirect_to", setting.AppSubURL+ctx.Req.URL.RequestURI(), 0, setting.AppSubURL)
			ctx.Redirect(setting.AppSubURL + "/user/login")
			return
		} else {
			isPermit, err := models.GetUserDataSetPermission(dataSet, ctx.User)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, "GetUserDataSetPermission", err.Error())
				return
			}
			if !isPermit {
				ctx.Error(http.StatusNotFound)
				return
			}
		}
	}
	//If we have matched and access to release or issue
	if setting.Attachment.StoreType == storage.MinioStorageType {
		// Build a download URL: minio presigned URL for CloudBrainOne,
		// otherwise either the OBS proxy endpoint or an OBS presigned URL.
		url := ""
		if typeCloudBrain == models.TypeCloudBrainOne {
			url, err = storage.Attachments.PresignedGetURL(setting.Attachment.Minio.BasePath+attach.RelativePath(), attach.Name)
			if err != nil {
				ctx.ServerError("PresignedGetURL", err)
				return
			}
		} else {
			if setting.PROXYURL != "" {
				url = setting.PROXYURL + "/obs_proxy_download?uuid=" + attach.UUID + "&file_name=" + attach.Name
				log.Info("return url=" + url)
			} else {
				objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(attach.UUID[0:1], attach.UUID[1:2], attach.UUID, attach.Name)), "/")
				url, err = storage.ObsGetPreSignedUrl(objectName, attach.Name)
				if err != nil {
					ctx.ServerError("ObsGetPreSignedUrl", err)
					return
				}
			}
		}
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		// Dataset downloads redirect to the URL; other attachments are
		// streamed through this server.
		if dataSet != nil {
			http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
		} else {
			fr, err := storage.Attachments.Open(attach.RelativePath())
			if err != nil {
				ctx.ServerError("Open", err)
				return
			}
			defer fr.Close()
			if err = ServeData(ctx, attach.Name, fr); err != nil {
				ctx.ServerError("ServeData", err)
				return
			}
		}
	} else {
		// Local (non-minio) storage: always stream the file directly.
		fr, err := storage.Attachments.Open(attach.RelativePath())
		if err != nil {
			ctx.ServerError("Open", err)
			return
		}
		defer fr.Close()
		if err = increaseDownloadCount(attach, dataSet); err != nil {
			ctx.ServerError("Update", err)
			return
		}
		if err = ServeData(ctx, attach.Name, fr); err != nil {
			ctx.ServerError("ServeData", err)
			return
		}
	}
}
  317. func increaseDownloadCount(attach *models.Attachment, dataSet *models.Dataset) error {
  318. if err := attach.IncreaseDownloadCount(); err != nil {
  319. return err
  320. }
  321. if dataSet != nil {
  322. if err := models.IncreaseDownloadCount(dataSet.ID); err != nil {
  323. return err
  324. }
  325. }
  326. return nil
  327. }
  328. // Get a presigned url for put object
  329. func GetPresignedPutObjectURL(ctx *context.Context) {
  330. if !setting.Attachment.Enabled {
  331. ctx.Error(404, "attachment is not enabled")
  332. return
  333. }
  334. err := upload.VerifyFileType(ctx.Params("file_type"), strings.Split(setting.Attachment.AllowedTypes, ","))
  335. if err != nil {
  336. ctx.Error(400, err.Error())
  337. return
  338. }
  339. if setting.Attachment.StoreType == storage.MinioStorageType {
  340. uuid := gouuid.NewV4().String()
  341. url, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
  342. if err != nil {
  343. ctx.ServerError("PresignedPutURL", err)
  344. return
  345. }
  346. ctx.JSON(200, map[string]string{
  347. "uuid": uuid,
  348. "url": url,
  349. })
  350. } else {
  351. ctx.Error(404, "storage type is not enabled")
  352. return
  353. }
  354. }
// AddAttachment response for add attachment record
//
// Creates the database record for an object that the client already
// uploaded directly to storage, then (for archives on CloudBrainOne)
// queues a decompress task.
func AddAttachment(ctx *context.Context) {
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	uuid := ctx.Query("uuid")
	// Verify the object really exists in the backing store before
	// inserting the record.
	has := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		has, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(uuid))
		if err != nil {
			ctx.ServerError("HasObject", err)
			return
		}
	} else {
		has, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(uuid) + "/" + fileName)
		if err != nil {
			ctx.ServerError("ObsHasObject", err)
			return
		}
	}
	if !has {
		ctx.Error(404, "attachment has not been uploaded")
		return
	}
	datasetId := ctx.QueryInt64("dataset_id")
	dataset, err := models.GetDatasetByID(datasetId)
	if err != nil {
		ctx.Error(404, "dataset does not exist.")
		return
	}
	// The attachment inherits the dataset's privacy flag.
	attachment, err := models.InsertAttachment(&models.Attachment{
		UUID:       uuid,
		UploaderID: ctx.User.ID,
		IsPrivate:  dataset.IsPrivate(),
		Name:       fileName,
		Size:       ctx.QueryInt64("size"),
		DatasetID:  ctx.QueryInt64("dataset_id"),
		Type:       typeCloudBrain,
	})
	if err != nil {
		ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
		return
	}
	// Queue decompression for recognized archive names. Failures here are
	// logged only -- the attachment record itself already exists.
	if attachment.DatasetID != 0 {
		if isCanDecompress(attachment.Name) {
			if typeCloudBrain == models.TypeCloudBrainOne {
				err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
				if err != nil {
					log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
				} else {
					attachment.DecompressState = models.DecompressStateIng
					err = models.UpdateAttachment(attachment)
					if err != nil {
						log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
					}
				}
			}
			//todo:decompress type_two
		}
	}
	ctx.JSON(200, map[string]string{
		"result_code": "0",
	})
}
  423. func isCanDecompress(name string) bool {
  424. if strings.HasSuffix(name, ".zip") || strings.HasSuffix(name, ".tar.gz") || strings.HasSuffix(name, ".tgz") {
  425. return true
  426. }
  427. return false
  428. }
  429. func UpdateAttachmentDecompressState(ctx *context.Context) {
  430. uuid := ctx.Query("uuid")
  431. result := ctx.Query("result")
  432. attach, err := models.GetAttachmentByUUID(uuid)
  433. if err != nil {
  434. log.Error("GetAttachmentByUUID(%s) failed:%s", uuid, err.Error())
  435. return
  436. }
  437. if result == DecompressSuccess {
  438. attach.DecompressState = models.DecompressStateDone
  439. } else if result == DecompressFailed {
  440. attach.DecompressState = models.DecompressStateFailed
  441. } else {
  442. log.Error("result is error:", result)
  443. return
  444. }
  445. err = models.UpdateAttachment(attach)
  446. if err != nil {
  447. log.Error("UpdateAttachment(%s) failed:%s", uuid, err.Error())
  448. return
  449. }
  450. log.Info("start to send msg to labelsystem ")
  451. dataset, _ := models.GetDatasetByID(attach.DatasetID)
  452. var labelMap map[string]string
  453. labelMap = make(map[string]string)
  454. labelMap["UUID"] = uuid
  455. labelMap["Type"] = fmt.Sprint(attach.Type)
  456. labelMap["UploaderID"] = fmt.Sprint(attach.UploaderID)
  457. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  458. labelMap["AttachName"] = attach.Name
  459. attachjson, _ := json.Marshal(labelMap)
  460. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  461. log.Info("end to send msg to labelsystem ")
  462. ctx.JSON(200, map[string]string{
  463. "result_code": "0",
  464. })
  465. }
// GetSuccessChunks reports the state of a resumable upload identified by the
// file's MD5 and the current user: whether an upload record exists, whether
// the object is already in storage, which multipart chunks have arrived,
// and the attachment/dataset metadata if the record was completed.
func GetSuccessChunks(ctx *context.Context) {
	fileMD5 := ctx.Query("md5")
	typeCloudBrain := ctx.QueryInt("type")
	fileName := ctx.Query("file_name")
	var chunks string
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID, typeCloudBrain)
	if err != nil {
		if models.IsErrFileChunkNotExist(err) {
			// No record: the client should start a brand-new upload.
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
		} else {
			ctx.ServerError("GetFileChunkByMD5", err)
		}
		return
	}
	// Reconcile the DB record against what is actually in object storage.
	isExist := false
	if typeCloudBrain == models.TypeCloudBrainOne {
		isExist, err = storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(fileChunk.UUID))
		if err != nil {
			ctx.ServerError("HasObject failed", err)
			return
		}
	} else {
		// OBS objects live under the original attachment's name, which may
		// differ from the name sent with this attempt.
		oldFileName := fileName
		oldAttachment, _ := models.GetAttachmentByUUID(fileChunk.UUID)
		if oldAttachment != nil {
			oldFileName = oldAttachment.Name
		}
		isExist, err = storage.ObsHasObject(setting.BasePath + models.AttachmentRelativePath(fileChunk.UUID) + "/" + oldFileName)
		if err != nil {
			ctx.ServerError("ObsHasObject failed", err)
			return
		}
	}
	if isExist {
		// Object present but record says "not uploaded": fix the record.
		if fileChunk.IsUploaded == models.FileNotUploaded {
			log.Info("the file has been uploaded but not recorded")
			fileChunk.IsUploaded = models.FileUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
	} else {
		// Object missing but record says "uploaded": fix the record.
		if fileChunk.IsUploaded == models.FileUploaded {
			log.Info("the file has been recorded but not uploaded")
			fileChunk.IsUploaded = models.FileNotUploaded
			if err = models.UpdateFileChunk(fileChunk); err != nil {
				log.Error("UpdateFileChunk failed:", err.Error())
			}
		}
		// Ask the store which multipart chunks already made it.
		if typeCloudBrain == models.TypeCloudBrainOne {
			chunks, err = storage.GetPartInfos(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), fileChunk.UploadID)
			if err != nil {
				log.Error("GetPartInfos failed:%v", err.Error())
			}
		} else {
			chunks, err = storage.GetObsPartInfos(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), fileChunk.UploadID)
			if err != nil {
				log.Error("GetObsPartInfos failed:%v", err.Error())
			}
		}
		if err != nil {
			// The stored session is unusable; drop it so the client
			// restarts from scratch.
			models.DeleteFileChunk(fileChunk)
			ctx.JSON(200, map[string]string{
				"uuid":     "",
				"uploaded": "0",
				"uploadID": "",
				"chunks":   "",
			})
			return
		}
	}
	// Attach metadata when the completed attachment record exists.
	var attachID int64
	attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
	if err != nil {
		if models.IsErrAttachmentNotExist(err) {
			attachID = 0
		} else {
			ctx.ServerError("GetAttachmentByUUID", err)
			return
		}
	} else {
		attachID = attach.ID
	}
	if attach == nil {
		ctx.JSON(200, map[string]string{
			"uuid":        fileChunk.UUID,
			"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
			"uploadID":    fileChunk.UploadID,
			"chunks":      string(chunks),
			"attachID":    "0",
			"datasetID":   "0",
			"fileName":    "",
			"datasetName": "",
		})
		return
	}
	dataset, err := models.GetDatasetByID(attach.DatasetID)
	if err != nil {
		ctx.ServerError("GetDatasetByID", err)
		return
	}
	ctx.JSON(200, map[string]string{
		"uuid":        fileChunk.UUID,
		"uploaded":    strconv.Itoa(fileChunk.IsUploaded),
		"uploadID":    fileChunk.UploadID,
		"chunks":      string(chunks),
		"attachID":    strconv.Itoa(int(attachID)),
		"datasetID":   strconv.Itoa(int(attach.DatasetID)),
		"fileName":    attach.Name,
		"datasetName": dataset.Title,
	})
}
// NewMultipart opens a new multipart upload session: it creates the upload
// in the backing store (minio or OBS depending on "type") and records a
// FileChunk row, returning the generated uuid and uploadID as JSON.
func NewMultipart(ctx *context.Context) {
	if !setting.Attachment.Enabled {
		ctx.Error(404, "attachment is not enabled")
		return
	}
	err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
	if err != nil {
		ctx.Error(400, err.Error())
		return
	}
	typeCloudBrain := ctx.QueryInt("type")
	err = checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	fileName := ctx.Query("file_name")
	if setting.Attachment.StoreType == storage.MinioStorageType {
		// NOTE(review): both limit checks use minio_ext constants even for
		// the OBS branch below -- confirm OBS shares the same limits.
		totalChunkCounts := ctx.QueryInt("totalChunkCounts")
		if totalChunkCounts > minio_ext.MaxPartsCount {
			ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
			return
		}
		fileSize := ctx.QueryInt64("size")
		if fileSize > minio_ext.MaxMultipartPutObjectSize {
			ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
			return
		}
		uuid := gouuid.NewV4().String()
		var uploadID string
		if typeCloudBrain == models.TypeCloudBrainOne {
			uploadID, err = storage.NewMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"))
			if err != nil {
				ctx.ServerError("NewMultipart", err)
				return
			}
		} else {
			uploadID, err = storage.NewObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"))
			if err != nil {
				ctx.ServerError("NewObsMultiPartUpload", err)
				return
			}
		}
		// Record the session so GetSuccessChunks can resume it later.
		_, err = models.InsertFileChunk(&models.FileChunk{
			UUID:        uuid,
			UserID:      ctx.User.ID,
			UploadID:    uploadID,
			Md5:         ctx.Query("md5"),
			Size:        fileSize,
			TotalChunks: totalChunkCounts,
			Type:        typeCloudBrain,
		})
		if err != nil {
			ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
			return
		}
		ctx.JSON(200, map[string]string{
			"uuid":     uuid,
			"uploadID": uploadID,
		})
	} else {
		ctx.Error(404, "storage type is not enabled")
		return
	}
}
  653. func PutOBSProxyUpload(ctx *context.Context) {
  654. uuid := ctx.Query("uuid")
  655. uploadID := ctx.Query("uploadId")
  656. partNumber := ctx.QueryInt("partNumber")
  657. fileName := ctx.Query("file_name")
  658. RequestBody := ctx.Req.Body()
  659. if RequestBody == nil {
  660. ctx.Error(500, fmt.Sprintf("FormFile: %v", RequestBody))
  661. return
  662. }
  663. objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
  664. err := storage.ObsMultiPartUpload(objectName, uploadID, partNumber, fileName, RequestBody.ReadCloser())
  665. if err != nil {
  666. log.Info("upload error.")
  667. }
  668. }
  669. func GetOBSProxyDownload(ctx *context.Context) {
  670. uuid := ctx.Query("uuid")
  671. fileName := ctx.Query("file_name")
  672. objectName := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
  673. body, err := storage.ObsDownloadAFile(setting.Bucket, objectName)
  674. if err != nil {
  675. log.Info("upload error.")
  676. } else {
  677. defer body.Close()
  678. ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+fileName)
  679. ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
  680. p := make([]byte, 1024)
  681. var readErr error
  682. var readCount int
  683. // 读取对象内容
  684. for {
  685. readCount, readErr = body.Read(p)
  686. if readCount > 0 {
  687. ctx.Resp.Write(p[:readCount])
  688. //fmt.Printf("%s", p[:readCount])
  689. }
  690. if readErr != nil {
  691. break
  692. }
  693. }
  694. }
  695. }
// GetMultipartUploadUrl returns the URL the client should PUT the next
// chunk to: a minio presigned URL for CloudBrainOne, otherwise either the
// OBS proxy endpoint or an OBS presigned URL.
func GetMultipartUploadUrl(ctx *context.Context) {
	uuid := ctx.Query("uuid")
	uploadID := ctx.Query("uploadID")
	partNumber := ctx.QueryInt("chunkNumber")
	size := ctx.QueryInt64("size")
	fileName := ctx.Query("file_name")
	typeCloudBrain := ctx.QueryInt("type")
	err := checkTypeCloudBrain(typeCloudBrain)
	if err != nil {
		ctx.ServerError("checkTypeCloudBrain failed", err)
		return
	}
	url := ""
	if typeCloudBrain == models.TypeCloudBrainOne {
		// NOTE(review): this rejects chunks larger than MinPartSize while
		// the message says "too big" -- the constant's name suggests a
		// lower bound; confirm the intended limit.
		if size > minio_ext.MinPartSize {
			ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
			return
		}
		url, err = storage.GenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/"), uploadID, partNumber, size)
		if err != nil {
			ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
			return
		}
	} else {
		if setting.PROXYURL != "" {
			url = setting.PROXYURL + "/obs_proxy_multipart?uuid=" + uuid + "&uploadId=" + uploadID + "&partNumber=" + fmt.Sprint(partNumber) + "&file_name=" + fileName
			log.Info("return url=" + url)
		} else {
			url, err = storage.ObsGenMultiPartSignedUrl(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/"), uploadID, partNumber)
			if err != nil {
				ctx.Error(500, fmt.Sprintf("ObsGenMultiPartSignedUrl failed: %v", err))
				return
			}
			log.Info("url=" + url)
		}
	}
	ctx.JSON(200, map[string]string{
		"url": url,
	})
}
  736. func CompleteMultipart(ctx *context.Context) {
  737. uuid := ctx.Query("uuid")
  738. uploadID := ctx.Query("uploadID")
  739. typeCloudBrain := ctx.QueryInt("type")
  740. fileName := ctx.Query("file_name")
  741. log.Warn("uuid:" + uuid)
  742. log.Warn("typeCloudBrain:" + strconv.Itoa(typeCloudBrain))
  743. err := checkTypeCloudBrain(typeCloudBrain)
  744. if err != nil {
  745. ctx.ServerError("checkTypeCloudBrain failed", err)
  746. return
  747. }
  748. fileChunk, err := models.GetFileChunkByUUID(uuid)
  749. if err != nil {
  750. if models.IsErrFileChunkNotExist(err) {
  751. ctx.Error(404)
  752. } else {
  753. ctx.ServerError("GetFileChunkByUUID", err)
  754. }
  755. return
  756. }
  757. if typeCloudBrain == models.TypeCloudBrainOne {
  758. _, err = storage.CompleteMultiPartUpload(strings.TrimPrefix(path.Join(setting.Attachment.Minio.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID)), "/"), uploadID, fileChunk.TotalChunks)
  759. if err != nil {
  760. ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
  761. return
  762. }
  763. } else {
  764. err = storage.CompleteObsMultiPartUpload(strings.TrimPrefix(path.Join(setting.BasePath, path.Join(fileChunk.UUID[0:1], fileChunk.UUID[1:2], fileChunk.UUID, fileName)), "/"), uploadID, fileChunk.TotalChunks)
  765. if err != nil {
  766. ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
  767. return
  768. }
  769. }
  770. fileChunk.IsUploaded = models.FileUploaded
  771. err = models.UpdateFileChunk(fileChunk)
  772. if err != nil {
  773. ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
  774. return
  775. }
  776. dataset, _ := models.GetDatasetByID(ctx.QueryInt64("dataset_id"))
  777. log.Warn("insert attachment to datasetId:" + strconv.FormatInt(dataset.ID, 10))
  778. attachment, err := models.InsertAttachment(&models.Attachment{
  779. UUID: uuid,
  780. UploaderID: ctx.User.ID,
  781. IsPrivate: dataset.IsPrivate(),
  782. Name: fileName,
  783. Size: ctx.QueryInt64("size"),
  784. DatasetID: ctx.QueryInt64("dataset_id"),
  785. Description: ctx.Query("description"),
  786. Type: typeCloudBrain,
  787. })
  788. if err != nil {
  789. ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
  790. return
  791. }
  792. attachment.UpdateDatasetUpdateUnix()
  793. go repo_service.IncreaseRepoDatasetNum(dataset.ID)
  794. repository, _ := models.GetRepositoryByID(dataset.RepoID)
  795. notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
  796. if attachment.DatasetID != 0 {
  797. if isCanDecompress(attachment.Name) {
  798. if typeCloudBrain == models.TypeCloudBrainOne {
  799. err = worker.SendDecompressTask(contexExt.Background(), uuid, attachment.Name)
  800. if err != nil {
  801. log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
  802. } else {
  803. updateAttachmentDecompressStateIng(attachment)
  804. }
  805. }
  806. if typeCloudBrain == models.TypeCloudBrainTwo {
  807. attachjson, _ := json.Marshal(attachment)
  808. err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  809. if err != nil {
  810. log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attachment.UUID, err.Error())
  811. } else {
  812. updateAttachmentDecompressStateIng(attachment)
  813. }
  814. }
  815. } else {
  816. var labelMap map[string]string
  817. labelMap = make(map[string]string)
  818. labelMap["UUID"] = uuid
  819. labelMap["Type"] = fmt.Sprint(attachment.Type)
  820. labelMap["UploaderID"] = fmt.Sprint(attachment.UploaderID)
  821. labelMap["RepoID"] = fmt.Sprint(dataset.RepoID)
  822. labelMap["AttachName"] = attachment.Name
  823. attachjson, _ := json.Marshal(labelMap)
  824. labelmsg.SendAddAttachToLabelSys(string(attachjson))
  825. }
  826. }
  827. ctx.JSON(200, map[string]string{
  828. "result_code": "0",
  829. })
  830. }
  831. func HandleUnDecompressAttachment() {
  832. attachs, err := models.GetUnDecompressAttachments()
  833. if err != nil {
  834. log.Error("GetUnDecompressAttachments failed:", err.Error())
  835. return
  836. }
  837. for _, attach := range attachs {
  838. if attach.Type == models.TypeCloudBrainOne {
  839. err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
  840. if err != nil {
  841. log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
  842. } else {
  843. updateAttachmentDecompressStateIng(attach)
  844. }
  845. } else if attach.Type == models.TypeCloudBrainTwo {
  846. attachjson, _ := json.Marshal(attach)
  847. err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
  848. if err != nil {
  849. log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
  850. } else {
  851. updateAttachmentDecompressStateIng(attach)
  852. }
  853. }
  854. }
  855. return
  856. }
  857. func updateAttachmentDecompressStateIng(attach *models.Attachment) {
  858. attach.DecompressState = models.DecompressStateIng
  859. err := models.UpdateAttachment(attach)
  860. if err != nil {
  861. log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
  862. }
  863. }
  864. func QueryAllPublicDataset(ctx *context.Context) {
  865. attachs, err := models.GetAllPublicAttachments()
  866. if err != nil {
  867. ctx.JSON(200, map[string]string{
  868. "result_code": "-1",
  869. "error_msg": err.Error(),
  870. "data": "",
  871. })
  872. return
  873. }
  874. queryDatasets(ctx, attachs)
  875. }
  876. func QueryPrivateDataset(ctx *context.Context) {
  877. username := ctx.Params(":username")
  878. attachs, err := models.GetPrivateAttachments(username)
  879. if err != nil {
  880. ctx.JSON(200, map[string]string{
  881. "result_code": "-1",
  882. "error_msg": err.Error(),
  883. "data": "",
  884. })
  885. return
  886. }
  887. for _, attach := range attachs {
  888. attach.Name = username
  889. }
  890. queryDatasets(ctx, attachs)
  891. }
  892. func queryDatasets(ctx *context.Context, attachs []*models.AttachmentUsername) {
  893. var datasets []CloudBrainDataset
  894. if len(attachs) == 0 {
  895. log.Info("dataset is null")
  896. ctx.JSON(200, map[string]string{
  897. "result_code": "0",
  898. "error_msg": "",
  899. "data": "",
  900. })
  901. return
  902. }
  903. for _, attch := range attachs {
  904. has, err := storage.Attachments.HasObject(setting.Attachment.Minio.BasePath + models.AttachmentRelativePath(attch.UUID))
  905. if err != nil || !has {
  906. continue
  907. }
  908. datasets = append(datasets, CloudBrainDataset{strconv.FormatInt(attch.ID, 10),
  909. attch.Attachment.Name,
  910. setting.Attachment.Minio.RealPath +
  911. setting.Attachment.Minio.Bucket + "/" +
  912. setting.Attachment.Minio.BasePath +
  913. models.AttachmentRelativePath(attch.UUID) +
  914. attch.UUID,
  915. attch.Name,
  916. attch.CreatedUnix.Format("2006-01-02 03:04:05 PM")})
  917. }
  918. data, err := json.Marshal(datasets)
  919. if err != nil {
  920. log.Error("json.Marshal failed:", err.Error())
  921. ctx.JSON(200, map[string]string{
  922. "result_code": "-1",
  923. "error_msg": err.Error(),
  924. "data": "",
  925. })
  926. return
  927. }
  928. ctx.JSON(200, map[string]string{
  929. "result_code": "0",
  930. "error_msg": "",
  931. "data": string(data),
  932. })
  933. return
  934. }
  935. func checkTypeCloudBrain(typeCloudBrain int) error {
  936. if typeCloudBrain != models.TypeCloudBrainOne && typeCloudBrain != models.TypeCloudBrainTwo {
  937. log.Error("type error:", typeCloudBrain)
  938. return errors.New("type error")
  939. }
  940. return nil
  941. }