
Merge branch 'develop' into dev-cloudmain

tags/v1.21.12.1
palytoxin, 5 years ago
commit 4dabc04128
100 changed files with 13266 additions and 239 deletions
  1. +1 -0 .bra.toml
  2. +1 -0 .eslintrc
  3. +0 -1 .tool-versions
  4. +2 -0 custom/conf/app.ini.sample
  5. +1 -0 go.mod
  6. +15 -0 go.sum
  7. +0 -1 main.go
  8. +13 -13 models/attachment.go
  9. +16 -0 models/error.go
  10. +94 -0 models/file_chunk.go
  11. +0 -2 models/migrations/migrations.go
  12. +0 -35 models/migrations/v140.go
  13. +0 -26 models/migrations/v141.go
  14. +1 -0 models/models.go
  15. +7 -0 modules/decompression/decompression.go
  16. +156 -0 modules/minio_ext/api-error-response.go
  17. +1051 -0 modules/minio_ext/api.go
  18. +62 -0 modules/minio_ext/constants.go
  19. +85 -0 modules/minio_ext/object.go
  20. +82 -0 modules/minio_ext/transport.go
  21. +185 -0 modules/minio_ext/util.go
  22. +3 -3 modules/setting/setting.go
  23. +159 -0 modules/storage/minio_ext.go
  24. +3 -3 modules/timer/timer.go
  25. +3 -3 modules/worker/task.go
  26. +8 -13 modules/worker/worker.go
  27. +9 -0 options/locale/locale_en-US.ini
  28. +9 -0 options/locale/locale_zh-CN.ini
  29. +213 -106 package-lock.json
  30. +6 -1 package.json
  31. +6 -0 routers/home.go
  32. +2 -0 routers/init.go
  33. +231 -8 routers/repo/attachment.go
  34. +2 -15 routers/repo/dataset.go
  35. +6 -0 routers/routes/routes.go
  36. +5 -1 templates/base/head_navbar.tmpl
  37. +2 -1 templates/repo/datasets/dataset.tmpl
  38. +37 -7 templates/repo/datasets/index.tmpl
  39. +315 -0 vendor/cloud.google.com/go/iam/iam.go
  40. +108 -0 vendor/cloud.google.com/go/internal/optional/optional.go
  41. +19 -0 vendor/cloud.google.com/go/internal/version/update_version.sh
  42. +71 -0 vendor/cloud.google.com/go/internal/version/version.go
  43. +46 -0 vendor/cloud.google.com/go/pubsub/README.md
  44. +9 -0 vendor/cloud.google.com/go/pubsub/apiv1/README.md
  45. +103 -0 vendor/cloud.google.com/go/pubsub/apiv1/doc.go
  46. +36 -0 vendor/cloud.google.com/go/pubsub/apiv1/iam.go
  47. +95 -0 vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go
  48. +417 -0 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
  49. +635 -0 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
  50. +72 -0 vendor/cloud.google.com/go/pubsub/debug.go
  51. +140 -0 vendor/cloud.google.com/go/pubsub/doc.go
  52. +122 -0 vendor/cloud.google.com/go/pubsub/flow_controller.go
  53. +79 -0 vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go
  54. +527 -0 vendor/cloud.google.com/go/pubsub/iterator.go
  55. +100 -0 vendor/cloud.google.com/go/pubsub/message.go
  56. +25 -0 vendor/cloud.google.com/go/pubsub/nodebug.go
  57. +108 -0 vendor/cloud.google.com/go/pubsub/pubsub.go
  58. +192 -0 vendor/cloud.google.com/go/pubsub/pullstream.go
  59. +100 -0 vendor/cloud.google.com/go/pubsub/service.go
  60. +160 -0 vendor/cloud.google.com/go/pubsub/snapshot.go
  61. +741 -0 vendor/cloud.google.com/go/pubsub/subscription.go
  62. +550 -0 vendor/cloud.google.com/go/pubsub/topic.go
  63. +217 -0 vendor/cloud.google.com/go/pubsub/trace.go
  64. +1 -0 vendor/github.com/RichardKnop/logging/.gitignore
  65. +17 -0 vendor/github.com/RichardKnop/logging/.travis.yml
  66. +354 -0 vendor/github.com/RichardKnop/logging/LICENSE
  67. +34 -0 vendor/github.com/RichardKnop/logging/Makefile
  68. +58 -0 vendor/github.com/RichardKnop/logging/README.md
  69. +40 -0 vendor/github.com/RichardKnop/logging/coloured_formatter.go
  70. +20 -0 vendor/github.com/RichardKnop/logging/default_formatter.go
  71. +30 -0 vendor/github.com/RichardKnop/logging/formatter_interface.go
  72. +7 -0 vendor/github.com/RichardKnop/logging/go.mod
  73. +6 -0 vendor/github.com/RichardKnop/logging/go.sum
  74. +9 -0 vendor/github.com/RichardKnop/logging/gometalinter.json
  75. +17 -0 vendor/github.com/RichardKnop/logging/interface.go
  76. +134 -0 vendor/github.com/RichardKnop/logging/logger.go
  77. +354 -0 vendor/github.com/RichardKnop/machinery/LICENSE
  78. +393 -0 vendor/github.com/RichardKnop/machinery/v1/backends/amqp/amqp.go
  79. +512 -0 vendor/github.com/RichardKnop/machinery/v1/backends/dynamodb/dynamodb.go
  80. +210 -0 vendor/github.com/RichardKnop/machinery/v1/backends/eager/eager.go
  81. +28 -0 vendor/github.com/RichardKnop/machinery/v1/backends/iface/interfaces.go
  82. +292 -0 vendor/github.com/RichardKnop/machinery/v1/backends/memcache/memcache.go
  83. +358 -0 vendor/github.com/RichardKnop/machinery/v1/backends/mongo/mongodb.go
  84. +150 -0 vendor/github.com/RichardKnop/machinery/v1/backends/null/null.go
  85. +338 -0 vendor/github.com/RichardKnop/machinery/v1/backends/redis/redis.go
  86. +256 -0 vendor/github.com/RichardKnop/machinery/v1/backends/result/async_result.go
  87. +424 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/amqp/amqp.go
  88. +73 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/eager/eager.go
  89. +25 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/errs/errors.go
  90. +196 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/gcppubsub/gcp_pubsub.go
  91. +27 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/iface/interfaces.go
  92. +418 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/redis/redis.go
  93. +361 -0 vendor/github.com/RichardKnop/machinery/v1/brokers/sqs/sqs.go
  94. +129 -0 vendor/github.com/RichardKnop/machinery/v1/common/amqp.go
  95. +25 -0 vendor/github.com/RichardKnop/machinery/v1/common/backend.go
  96. +121 -0 vendor/github.com/RichardKnop/machinery/v1/common/broker.go
  97. +84 -0 vendor/github.com/RichardKnop/machinery/v1/common/redis.go
  98. +161 -0 vendor/github.com/RichardKnop/machinery/v1/config/config.go
  99. +58 -0 vendor/github.com/RichardKnop/machinery/v1/config/env.go
  100. +83 -0 vendor/github.com/RichardKnop/machinery/v1/config/file.go

+1 -0 .bra.toml

@@ -13,6 +13,7 @@ watch_dirs = [
"$WORKDIR/options",
"$WORKDIR/public",
"$WORKDIR/custom",
"$WORKDIR/web_src",
] # Directories to watch
watch_exts = [".go", ".ini", ".less"] # Extensions to watch
env_files = [] # Load env vars from files


+1 -0 .eslintrc

@@ -3,6 +3,7 @@ root: true
extends:
- eslint-config-airbnb-base
- eslint:recommended
- plugin:vue/recommended

ignorePatterns:
- /web_src/js/vendor


+0 -1 .tool-versions

@@ -1 +0,0 @@
nodejs 12.14.0

+2 -0 custom/conf/app.ini.sample

@@ -753,6 +753,8 @@ MINIO_LOCATION = us-east-1
MINIO_BASE_PATH = attachments/
; Minio enabled ssl only available when STORE_TYPE is `minio`
MINIO_USE_SSL = false
; real Minio storage path
MINIO_REAL_PATH = /mnt/test/minio/data/

[time]
; Specifies the format for fully outputted dates. Defaults to RFC1123


+1 -0 go.mod

@@ -76,6 +76,7 @@ require (
github.com/mgechev/revive v1.0.2
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912
github.com/minio/minio-go v6.0.14+incompatible
github.com/minio/minio-go/v6 v6.0.57
github.com/mitchellh/go-homedir v1.1.0
github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5


+15 -0 go.sum

@@ -434,6 +434,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
@@ -453,6 +455,8 @@ github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCW
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -512,15 +516,23 @@ github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MF
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912 h1:hJde9rA24hlTcAYSwJoXpDUyGtfKQ/jsofw+WaDqGrI=
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912/go.mod h1:8iwZnFn2CDDNZ0r6UXhF4xawGvzaqzCRa1n3/lO3W2w=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
github.com/minio/minio-go/v6 v6.0.57 h1:ixPkbKkyD7IhnluRgQpGSpHdpvNVaW6OD5R9IAO/9Tw=
github.com/minio/minio-go/v6 v6.0.57/go.mod h1:5+R/nM9Pwrh0vqF+HbYYDQ84wdUFPyXHkrdT4AIkifM=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c h1:3wkDRdxK92dF+c1ke2dtj7ZzemFWBHB9plnJOtlwdFA=
github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM=
@@ -658,6 +670,7 @@ github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d/go.mod h1:vq0
github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
@@ -783,6 +796,7 @@ golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -879,6 +893,7 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=


+0 -1 main.go

@@ -22,7 +22,6 @@ import (
_ "code.gitea.io/gitea/modules/markup/markdown"
_ "code.gitea.io/gitea/modules/markup/orgmode"
_ "code.gitea.io/gitea/modules/timer"
_ "code.gitea.io/gitea/modules/worker"

"github.com/urfave/cli"
)


+13 -13 models/attachment.go

@@ -28,19 +28,19 @@ const (

// Attachment represents an attachment of issue/comment/release.
type Attachment struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
IssueID int64 `xorm:"INDEX"`
DatasetID int64 `xorm:"INDEX DEFAULT 0"`
ReleaseID int64 `xorm:"INDEX"`
UploaderID int64 `xorm:"INDEX DEFAULT 0"` // Notice: will be zero before this column added
CommentID int64
Name string
DownloadCount int64 `xorm:"DEFAULT 0"`
Size int64 `xorm:"DEFAULT 0"`
IsPrivate bool `xorm:"DEFAULT false"`
DecompressState int32 `xorm:"DEFAULT 0"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
IssueID int64 `xorm:"INDEX"`
DatasetID int64 `xorm:"INDEX DEFAULT 0"`
ReleaseID int64 `xorm:"INDEX"`
UploaderID int64 `xorm:"INDEX DEFAULT 0"` // Notice: will be zero before this column added
CommentID int64
Name string
DownloadCount int64 `xorm:"DEFAULT 0"`
Size int64 `xorm:"DEFAULT 0"`
IsPrivate bool `xorm:"DEFAULT false"`
DecompressState int32 `xorm:"DEFAULT 0"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}

func (a *Attachment) AfterUpdate() {


+16 -0 models/error.go

@@ -1971,3 +1971,19 @@ func IsErrOAuthApplicationNotFound(err error) bool {
func (err ErrOAuthApplicationNotFound) Error() string {
return fmt.Sprintf("OAuth application not found [ID: %d]", err.ID)
}

// ErrFileChunkNotExist represents a "FileChunkNotExist" kind of error.
type ErrFileChunkNotExist struct {
Md5 string
Uuid string
}

func (err ErrFileChunkNotExist) Error() string {
return fmt.Sprintf("fileChunk does not exist [md5: %s, uuid: %s]", err.Md5, err.Uuid)
}

// IsErrFileChunkNotExist checks if an error is an ErrFileChunkNotExist.
func IsErrFileChunkNotExist(err error) bool {
_, ok := err.(ErrFileChunkNotExist)
return ok
}

+94 -0 models/file_chunk.go

@@ -0,0 +1,94 @@
package models

import (
"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)

const (
FileNotUploaded int = iota
FileUploaded
)

type FileChunk struct {
ID int64 `xorm:"pk autoincr"`
UUID string `xorm:"uuid UNIQUE"`
Md5 string `xorm:"INDEX"`
IsUploaded int `xorm:"DEFAULT 0"` // not uploaded: 0, uploaded: 1
UploadID string `xorm:"UNIQUE"` // minio upload id
TotalChunks int
Size int64
UserID int64 `xorm:"INDEX"`
CompletedParts []string `xorm:"DEFAULT """` // chunkNumber-etag pairs, eg: 1-asqwewqe21312312,2-123hjkas
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

// GetFileChunkByMD5 returns fileChunk by given md5
func GetFileChunkByMD5(md5 string) (*FileChunk, error) {
return getFileChunkByMD5(x, md5)
}

func getFileChunkByMD5(e Engine, md5 string) (*FileChunk, error) {
fileChunk := new(FileChunk)

if has, err := e.Where("md5 = ?", md5).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{md5, ""}
}
return fileChunk, nil
}

// GetFileChunkByMD5AndUser returns fileChunk by given md5 and user id
func GetFileChunkByMD5AndUser(md5 string, userID int64) (*FileChunk, error) {
return getFileChunkByMD5AndUser(x, md5, userID)
}

func getFileChunkByMD5AndUser(e Engine, md5 string, userID int64) (*FileChunk, error) {
fileChunk := new(FileChunk)

if has, err := e.Where("md5 = ? and user_id = ?", md5, userID).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{md5, ""}
}
return fileChunk, nil
}

// GetFileChunkByUUID returns fileChunk by given uuid
func GetFileChunkByUUID(uuid string) (*FileChunk, error) {
return getFileChunkByUUID(x, uuid)
}

func getFileChunkByUUID(e Engine, uuid string) (*FileChunk, error) {
fileChunk := new(FileChunk)

if has, err := e.Where("uuid = ?", uuid).Get(fileChunk); err != nil {
return nil, err
} else if !has {
return nil, ErrFileChunkNotExist{"", uuid}
}
return fileChunk, nil
}

// InsertFileChunk inserts a record into file_chunk.
func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) {
if _, err := x.Insert(fileChunk); err != nil {
return nil, err
}

return fileChunk, nil
}

// UpdateFileChunk updates the given fileChunk in database
func UpdateFileChunk(fileChunk *FileChunk) error {
return updateFileChunk(x, fileChunk)
}

func updateFileChunk(e Engine, fileChunk *FileChunk) error {
var sess *xorm.Session
sess = e.Where("uuid = ?", fileChunk.UUID)
_, err := sess.Cols("is_uploaded", "completed_parts").Update(fileChunk)
return err
}
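The FileChunk record is what makes uploads resumable and deduplicatable: before starting a transfer, the uploader can look the file up by MD5 and either resume the recorded multipart session or open a new one. A minimal sketch of that decision, assuming a hypothetical getOrCreateChunk helper on top of these functions and the storage.NewMultiPartUpload helper added later in this commit:

func getOrCreateChunk(md5 string, userID int64, uuid string) (*models.FileChunk, error) {
	chunk, err := models.GetFileChunkByMD5AndUser(md5, userID)
	if err == nil {
		return chunk, nil // a session already exists for this file; resume it
	}
	if !models.IsErrFileChunkNotExist(err) {
		return nil, err // a real database error, not a missing record
	}
	uploadID, err := storage.NewMultiPartUpload(uuid) // open a fresh multipart session
	if err != nil {
		return nil, err
	}
	return models.InsertFileChunk(&models.FileChunk{
		UUID:     uuid,
		Md5:      md5,
		UserID:   userID,
		UploadID: uploadID,
	})
}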

+0 -2 models/migrations/migrations.go

@@ -212,8 +212,6 @@ var migrations = []Migration{
NewMigration("Add ResolveDoerID to Comment table", addResolveDoerIDCommentColumn),
// v139 -> v140
NewMigration("prepend refs/heads/ to issue refs", prependRefsHeadsToIssueRefs),
NewMigration("add dataset migration", addDatasetTable),
NewMigration("add cloudbrain migration", addCloudBrainTable),
}

// GetCurrentDBVersion returns the current db version


+0 -35 models/migrations/v140.go

@@ -1,35 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package migrations

import (
"fmt"

"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)

func addDatasetTable(x *xorm.Engine) error {
type Dataset struct {
ID int64 `xorm:"pk autoincr"`
Title string `xorm:"INDEX NOT NULL"`
Status int32 `xorm:"INDEX"`
Category string
Description string `xorm:"TEXT"`
DownloadTimes int64
License string
Task string
ReleaseID int64 `xorm:"INDEX"`
UserID int64 `xorm:"INDEX"`
RepoID int64 `xorm:"INDEX"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

if err := x.Sync2(new(Dataset)); err != nil {
return fmt.Errorf("Sync2: %v", err)
}
return nil
}

+0 -26 models/migrations/v141.go

@@ -1,26 +0,0 @@
package migrations

import (
"fmt"

"code.gitea.io/gitea/modules/timeutil"
"xorm.io/xorm"
)

func addCloudBrainTable(x *xorm.Engine) error {
type Cloudbrain struct {
ID int64 `xorm:"pk autoincr"`
JobID string `xorm:"INDEX NOT NULL"`
JobName string
Status string `xorm:"INDEX"`
UserID int64 `xorm:"INDEX"`
RepoID int64 `xorm:"INDEX"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}

if err := x.Sync2(new(Cloudbrain)); err != nil {
return fmt.Errorf("Sync2: %v", err)
}
return nil
}

+1 -0 models/models.go

@@ -127,6 +127,7 @@ func init() {
new(EmailHash),
new(Dataset),
new(Cloudbrain),
new(FileChunk),
)

gonicNames := []string{"SSL", "UID"}


+7 -0 modules/decompression/decompression.go

@@ -0,0 +1,7 @@
package decompression

import "code.gitea.io/gitea/modules/worker"

func NewContext() {
worker.NewTaskCenter()
}

+156 -0 modules/minio_ext/api-error-response.go

@@ -0,0 +1,156 @@
package minio_ext

import (
"encoding/xml"
"fmt"
"net/http"
)

type ErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
BucketName string
Key string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`

// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
Region string

// Underlying HTTP status code for the returned error
StatusCode int `xml:"-" json:"-"`
}

// Error - Returns HTTP error string
func (e ErrorResponse) Error() string {
return e.Message
}

const (
reportIssue = "Please report this issue at https://github.com/minio/minio/issues."
)

// httpRespToErrorResponse returns a new encoded ErrorResponse
// structure as error.
func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
if resp == nil {
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}

errResp := ErrorResponse{
StatusCode: resp.StatusCode,
}

err := xmlDecoder(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
if err != nil {
switch resp.StatusCode {
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
}
} else {
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
}
}
case http.StatusForbidden:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
}
case http.StatusConflict:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
}
case http.StatusPreconditionFailed:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: "PreconditionFailed",
Message: s3ErrorResponseMap["PreconditionFailed"],
BucketName: bucketName,
Key: objectName,
}
default:
errResp = ErrorResponse{
StatusCode: resp.StatusCode,
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
}
}
}

// Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {
errResp.RequestID = resp.Header.Get("x-amz-request-id")
}
if errResp.HostID == "" {
errResp.HostID = resp.Header.Get("x-amz-id-2")
}
if errResp.Region == "" {
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
}

return errResp
}

// ToErrorResponse - Returns parsed ErrorResponse from the given error.
func ToErrorResponse(err error) ErrorResponse {
switch err := err.(type) {
case ErrorResponse:
return err
default:
return ErrorResponse{}
}
}

// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
Code: "InvalidArgument",
Message: message,
RequestID: "minio",
}
}

// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
return ErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}

// ErrEntityTooSmall - Input size is smaller than supported minimum.
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
return ErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "EntityTooSmall",
Message: msg,
BucketName: bucketName,
Key: objectName,
}
}
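Because every failure path above resolves to an ErrorResponse value, callers can branch on the S3 error code instead of string-matching messages. A small sketch (the helper name is illustrative, not part of this commit):

func isMissingObject(err error) bool {
	resp := minio_ext.ToErrorResponse(err)
	// "NoSuchKey" and "NoSuchBucket" are the codes synthesized above for 404s.
	return resp.Code == "NoSuchKey" || resp.Code == "NoSuchBucket"
}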

+1051 -0 modules/minio_ext/api.go
File diff suppressed because it is too large


+62 -0 modules/minio_ext/constants.go

@@ -0,0 +1,62 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package minio_ext

/// Multipart upload defaults.

// absMinPartSize - absolute minimum part size (5 MiB) below which
// a part in a multipart upload may not be uploaded.
const absMinPartSize = 1024 * 1024 * 5

// MinPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const MinPartSize = 1024 * 1024 * 64

// MaxPartsCount - maximum number of parts for a single multipart session.
const MaxPartsCount = 10000

// maxPartSize - maximum part size 5GiB for a single multipart upload
// operation.
const maxPartSize = 1024 * 1024 * 1024 * 5

// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
// operation.
const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5

// MaxMultipartPutObjectSize - maximum size 5TiB of object for
// multipart operation.
const MaxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5

// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"

// Total number of parallel workers used for multipart operation.
const totalWorkers = 4

// Signature related constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
)

// Storage class header constant.
const amzStorageClass = "X-Amz-Storage-Class"

// Website redirect location header constant
const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
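The exported MinPartSize (64 MiB) and MaxPartsCount (10000) bound any chunking scheme built on this package. A sketch of how a caller might pick a part size for a file of a given total size under those constraints (choosePartSize is illustrative, not part of this commit):

// choosePartSize returns a part size of at least MinPartSize that keeps
// the number of parts for totalSize bytes within MaxPartsCount.
func choosePartSize(totalSize int64) int64 {
	partSize := int64(MinPartSize)
	for totalSize/partSize >= MaxPartsCount {
		partSize *= 2
	}
	return partSize
}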

+85 -0 modules/minio_ext/object.go

@@ -0,0 +1,85 @@
package minio_ext

import (
"net/http"
"net/url"
"time"
)

// StringMap represents map with custom UnmarshalXML
type StringMap map[string]string

// CommonPrefix container for prefix response.
type CommonPrefix struct {
Prefix string
}

// ObjectInfo container for object metadata.
type ObjectInfo struct {
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
// each parts concatenated into one string.
ETag string `json:"etag"`

Key string `json:"name"` // Name of the object
LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.

// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
Metadata http.Header `json:"metadata" xml:"-"`

// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
UserMetadata StringMap `json:"userMetadata"`

// Owner name.
Owner struct {
DisplayName string `json:"name"`
ID string `json:"id"`
} `json:"owner"`

// The class of storage used to store the object.
StorageClass string `json:"storageClass"`

// Error
Err error `json:"-"`
}

// ListBucketResult container for listObjects response.
type ListBucketResult struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
CommonPrefixes []CommonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string

// Encoding type used to encode object keys in the response.
EncodingType string

// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Marker string
MaxKeys int64
Name string

// When the response is truncated (the IsTruncated element value in
// the response is true), you can use the key name in this field
// as the marker in the subsequent request to get the next set of objects.
// Object storage lists objects in alphabetical order. Note: this
// element is returned only if you have the delimiter request
// parameter specified. If the response does not include NextMarker
// and it is truncated, you can use the value of the last Key in
// the response as the marker in the subsequent request to get the
// next set of object keys.
NextMarker string
Prefix string
}

var (
// Hex encoded string of nil sha256sum bytes.
emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

// Sentinel URL is the default url value which is invalid.
sentinelURL = url.URL{}
)
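ListBucketResult is consumed with marker-based pagination: keep issuing list requests until IsTruncated is false, advancing the marker via NextMarker or, as the comment above notes, the last returned Key when NextMarker is absent. A sketch, where listPage is a hypothetical stand-in for the single-page list call implemented in the (suppressed) api.go:

func listAll(bucket, prefix string) ([]ObjectInfo, error) {
	var all []ObjectInfo
	marker := ""
	for {
		res, err := listPage(bucket, prefix, marker) // hypothetical one-page request
		if err != nil {
			return nil, err
		}
		all = append(all, res.Contents...)
		if !res.IsTruncated {
			return all, nil
		}
		if res.NextMarker != "" {
			marker = res.NextMarker
		} else if n := len(res.Contents); n > 0 {
			marker = res.Contents[n-1].Key // fall back to the last key
		}
	}
}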

+82 -0 modules/minio_ext/transport.go

@@ -0,0 +1,82 @@
// +build go1.7 go1.8

/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package minio_ext

import (
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"time"

"golang.org/x/net/http2"
)

// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with additional param DisableCompression
// is set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport = func(secure bool) (http.RoundTripper, error) {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}

if secure {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
// In some systems (like Windows) system cert pool is
// not supported or no certificates are present on the
// system - so we create a new cert pool.
rootCAs = x509.NewCertPool()
}

// Keep TLS config.
tlsConfig := &tls.Config{
RootCAs: rootCAs,
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
MinVersion: tls.VersionTLS12,
}
tr.TLSClientConfig = tlsConfig

// Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2.
// See https://github.com/golang/go/issues/14275
if err := http2.ConfigureTransport(tr); err != nil {
return nil, err
}
}
return tr, nil
}
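DefaultTransport is a drop-in replacement for http.DefaultTransport wherever this package issues requests; the secure flag decides whether a TLS 1.2+ config backed by the system root pool is attached. A usage sketch (newHTTPClient is illustrative):

func newHTTPClient(secure bool) (*http.Client, error) {
	tr, err := minio_ext.DefaultTransport(secure)
	if err != nil {
		return nil, err
	}
	// Compression stays disabled, so object bodies arrive byte-for-byte.
	return &http.Client{Transport: tr}, nil
}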

+185 -0 modules/minio_ext/util.go

@@ -0,0 +1,185 @@
package minio_ext

import (
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"

"github.com/minio/minio-go/v6/pkg/s3utils"
)

// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")

// regSign matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")

// xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
return d.Decode(v)
}

// Redact out signature value from authorization string.
func redactSignature(origAuth string) string {
if !strings.HasPrefix(origAuth, signV4Algorithm) {
// Set a temporary redacted auth
return "AWS **REDACTED**:**REDACTED**"
}

/// Signature V4 authorization header.

// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")

// Strip out 256-bit signature from: Signature=<256-bit signature>
return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}

// closeResponse closes a non-nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
// Subsequently this allows golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}

// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL url.URL) error {
if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
if strings.Contains(endpointURL.Host, ".googleapis.com") {
if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
return nil
}

// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint)
if err != nil {
return nil, err
}
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
}
// If secure is false, use 'http' scheme.
scheme := "https"
if !secure {
scheme = "http"
}

// Construct a secured endpoint URL.
endpointURLStr := scheme + "://" + endpoint
endpointURL, err := url.Parse(endpointURLStr)
if err != nil {
return nil, err
}

// Validate incoming endpoint URL.
if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}

var supportedHeaders = []string{
"content-type",
"cache-control",
"content-encoding",
"content-disposition",
"content-language",
"x-amz-website-redirect-location",
"expires",
// Add more supported headers here.
}

// isStorageClassHeader returns true if the header is a supported storage class header
func isStorageClassHeader(headerKey string) bool {
return strings.EqualFold(amzStorageClass, headerKey)
}

// isStandardHeader returns true if header is a supported header and not a custom header
func isStandardHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
for _, header := range supportedHeaders {
if strings.ToLower(header) == key {
return true
}
}
return false
}

// sseHeaders is list of server side encryption headers
var sseHeaders = []string{
"x-amz-server-side-encryption",
"x-amz-server-side-encryption-aws-kms-key-id",
"x-amz-server-side-encryption-context",
"x-amz-server-side-encryption-customer-algorithm",
"x-amz-server-side-encryption-customer-key",
"x-amz-server-side-encryption-customer-key-MD5",
}

// isSSEHeader returns true if header is a server side encryption header.
func isSSEHeader(headerKey string) bool {
key := strings.ToLower(headerKey)
for _, h := range sseHeaders {
if strings.ToLower(h) == key {
return true
}
}
return false
}

// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
func isAmzHeader(headerKey string) bool {
key := strings.ToLower(headerKey)

return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
}

// sum256 calculate sha256sum for an input byte array, returns hex encoded.
func sum256Hex(data []byte) string {
hash := sha256.New()
hash.Write(data)
return hex.EncodeToString(hash.Sum(nil))
}

+3 -3 modules/setting/setting.go

@@ -422,9 +422,9 @@ var (
UILocation = time.Local

//Machinery config
Broker string
DefaultQueue string
ResultBackend string
Broker string
DefaultQueue string
ResultBackend string
)

// DateLang transforms standard language locale name to corresponding value in datetime plugin.


+159 -0 modules/storage/minio_ext.go

@@ -0,0 +1,159 @@
package storage

import (
"encoding/xml"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/minio_ext"
"code.gitea.io/gitea/modules/setting"

miniov6 "github.com/minio/minio-go/v6"
)

const (
PresignedUploadPartUrlExpireTime = time.Hour * 24 * 7
)

type ComplPart struct {
PartNumber int `json:"partNumber"`
ETag string `json:"eTag"`
}

type CompleteParts struct {
Data []ComplPart `json:"completedParts"`
}

// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
type completedParts []miniov6.CompletePart

func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
Parts []miniov6.CompletePart `xml:"Part"`
}

var (
adminClient *minio_ext.Client = nil
coreClient *miniov6.Core = nil
)

var mutex *sync.Mutex

func init() {
mutex = new(sync.Mutex)
}

func getClients() (*minio_ext.Client, *miniov6.Core, error) {
var client *minio_ext.Client
var core *miniov6.Core
mutex.Lock()

defer mutex.Unlock()

if nil != adminClient && nil != coreClient {
client = adminClient
core = coreClient
return client, core, nil
}

var err error
minio := setting.Attachment.Minio
if nil == adminClient {
adminClient, err = minio_ext.New(
minio.Endpoint,
minio.AccessKeyID,
minio.SecretAccessKey,
minio.UseSSL,
)
if nil != err {
return nil, nil, err
}
}

client = adminClient

if nil == coreClient {
coreClient, err = miniov6.NewCore(
minio.Endpoint,
minio.AccessKeyID,
minio.SecretAccessKey,
minio.UseSSL,
)
if nil != err {
return nil, nil, err
}
}

core = coreClient

return client, core, nil
}

func GenMultiPartSignedUrl(uuid string, uploadId string, partNumber int, partSize int64) (string, error) {
minioClient, _, err := getClients()
if err != nil {
log.Error("getClients failed:", err.Error())
return "", err
}

minio := setting.Attachment.Minio
bucketName := minio.Bucket
objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")

return minioClient.GenUploadPartSignedUrl(uploadId, bucketName, objectName, partNumber, partSize, PresignedUploadPartUrlExpireTime, setting.Attachment.Minio.Location)

}

func NewMultiPartUpload(uuid string) (string, error) {
_, core, err := getClients()
if err != nil {
log.Error("getClients failed:", err.Error())
return "", err
}

minio := setting.Attachment.Minio
bucketName := minio.Bucket
objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")

return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{})
}

func CompleteMultiPartUpload(uuid string, uploadID string, complParts []string) (string, error) {
_, core, err := getClients()
if err != nil {
log.Error("getClients failed:", err.Error())
return "", err
}

minio := setting.Attachment.Minio
bucketName := minio.Bucket
objectName := strings.TrimPrefix(path.Join(minio.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid)), "/")

var complMultipartUpload completeMultipartUpload
for _, part := range complParts {
partNumber, err := strconv.Atoi(strings.Split(part, "-")[0])
if err != nil {
log.Error(err.Error())
return "", err
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, miniov6.CompletePart{
PartNumber: partNumber,
ETag: strings.Split(part, "-")[1],
})
}

// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))

return core.CompleteMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload.Parts)
}
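Together these three helpers implement the server side of the chunked upload: NewMultiPartUpload opens a session once per file, GenMultiPartSignedUrl hands the browser a presigned PUT URL for each chunk, and CompleteMultiPartUpload stitches the parts back together from the "partNumber-etag" strings accumulated in FileChunk.CompletedParts. A condensed sketch of the sequence; in practice each step is a separate HTTP round trip driven by the client, and putChunk is a hypothetical stand-in for the browser PUT:

func uploadSketch(uuid string, partSizes []int64) (string, error) {
	uploadID, err := NewMultiPartUpload(uuid)
	if err != nil {
		return "", err
	}
	var parts []string
	for i, size := range partSizes {
		url, err := GenMultiPartSignedUrl(uuid, uploadID, i+1, size)
		if err != nil {
			return "", err
		}
		etag := putChunk(url) // hypothetical: client PUTs the chunk, reports the ETag
		parts = append(parts, fmt.Sprintf("%d-%s", i+1, etag))
	}
	return CompleteMultiPartUpload(uuid, uploadID, parts)
}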


+3 -3 modules/timer/timer.go

@@ -7,15 +7,15 @@ import (
)

const (
DecompressTimer = time.Minute * 10
DecompressTimer = time.Minute * 10
)

func init() {
ticker := time.NewTicker(DecompressTimer)
go func() {
for {
<- ticker.C
<-ticker.C
repo.HandleUnDecompressAttachment()
}
} ()
}()
}

+3 -3 modules/worker/task.go

@@ -10,10 +10,10 @@ import (

// Task names
const (
DecompressTaskName = "Decompress"
DecompressTaskName = "Decompress"
)

func SendDecompressTask(ctx context.Context, uuid string) error{
func SendDecompressTask(ctx context.Context, uuid string) error {
args := []tasks.Arg{{Name: "uuid", Type: "string", Value: uuid}}
task, err := tasks.NewSignature(DecompressTaskName, args)
if err != nil {
@@ -22,7 +22,7 @@ func SendDecompressTask(ctx context.Context, uuid string) error{
}

task.RetryCount = 3
_,err = AsyncTaskCenter.SendTaskWithContext(ctx, task)
_, err = AsyncTaskCenter.SendTaskWithContext(ctx, task)
if err != nil {
log.Error("SendTaskWithContext failed:", err.Error())
return err
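SendDecompressTask is the producer half: it serializes the UUID as a machinery task argument and pushes it onto the configured queue with up to three retries. A hedged sketch of a call site, for example right after an archive attachment is saved (queueDecompress and its placement are illustrative):

func queueDecompress(ctx context.Context, uuid string) {
	// An enqueue failure is logged, not fatal: the attachment is already
	// stored, and modules/timer retries undecompressed attachments every
	// ten minutes via HandleUnDecompressAttachment.
	if err := worker.SendDecompressTask(ctx, uuid); err != nil {
		log.Error("SendDecompressTask(%s) failed: %v", uuid, err)
	}
}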


+8 -13 modules/worker/worker.go

@@ -1,6 +1,7 @@
package worker

import (
"code.gitea.io/gitea/modules/setting"
"github.com/RichardKnop/machinery/v1"
mchConf "github.com/RichardKnop/machinery/v1/config"
)
@@ -9,21 +10,15 @@ var (
AsyncTaskCenter *machinery.Server
)

func init() {
tc, err := NewTaskCenter()
func NewTaskCenter() {
cnf := &mchConf.Config{
Broker: setting.Broker,
DefaultQueue: setting.DefaultQueue,
ResultBackend: setting.ResultBackend,
}
tc, err := machinery.NewServer(cnf)
if err != nil {
panic(err)
}
AsyncTaskCenter = tc
}

func NewTaskCenter() (*machinery.Server, error) {
cnf := &mchConf.Config{
Broker: "redis://localhost:6379",
DefaultQueue: "DecompressTasksQueue",
ResultBackend: "redis://localhost:6379",
}
// Create server instance
return machinery.NewServer(cnf)
}
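After this change the machinery server is built from the new setting.Broker, setting.DefaultQueue and setting.ResultBackend values instead of hard-coded Redis defaults, and initialization moves from package init to an explicit NewTaskCenter call, so importers such as modules/decompression control when it runs. The producer side is in task.go above; a consumer process would register a handler and launch a worker. A sketch against machinery's v1 API, assuming NewTaskCenter has already run and HandleDecompress is a hypothetical handler not part of this commit:

func runDecompressWorker() error {
	handler := func(uuid string) error {
		return HandleDecompress(uuid) // hypothetical: do the actual unpacking
	}
	if err := AsyncTaskCenter.RegisterTask(DecompressTaskName, handler); err != nil {
		return err
	}
	// One concurrent consumer is enough for sequential decompression.
	return AsyncTaskCenter.NewWorker("decompress_worker", 1).Launch()
}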


+9 -0 options/locale/locale_en-US.ini

@@ -59,6 +59,7 @@ manage_org = Manage Organizations
admin_panel = Site Administration
account_settings = Account Settings
settings = Settings
your_dashboard = Dashboard
your_profile = Profile
your_starred = Starred
your_settings = Settings
@@ -2426,6 +2427,14 @@ default_message = Drop files or click here to upload.
invalid_input_type = You can not upload files of this type.
file_too_big = File size ({{filesize}} MB) exceeds the maximum size of ({{maxFilesize}} MB).
remove_file = Remove file
file_status = Upload status:
file_init_status = Drop files or click here to upload.
waitting_uploading = Please wait for the first file transfer to complete
md5_computing = MD5 computing
loading_file = Loading
uploading = Uploading
upload_complete = Upload complete
enable_minio_support = Enable minio support to use the dataset service

[notification]
notifications = Notifications


+9 -0 options/locale/locale_zh-CN.ini

@@ -59,6 +59,7 @@ manage_org=管理我的组织
admin_panel=管理后台
account_settings=帐户设置
settings=帐户设置
your_dashboard=个人概览
your_profile=个人信息
your_starred=已点赞
your_settings=设置
@@ -2428,6 +2429,14 @@ default_message=拖动文件或者点击此处上传。
invalid_input_type=您不能上传该类型的文件
file_too_big=文件体积({{filesize}} MB)超过了最大允许体积({{maxFilesize}} MB)
remove_file=移除文件
file_status=文件处理状态:
file_init_status=等待上传
waitting_uploading=请等待文件传输完成
md5_computing=计算MD5
loading_file=加载文件
uploading=正在上传
upload_complete=上传完成
enable_minio_support=启用minio支持以使用数据集服务

[notification]
notifications=通知


+213 -106 package-lock.json

@@ -1336,8 +1336,8 @@
},
"acorn-jsx": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz",
"integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==",
"resolved": "https://registry.npm.taobao.org/acorn-jsx/download/acorn-jsx-5.2.0.tgz?cache=0&sync_timestamp=1589684116279&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Facorn-jsx%2Fdownload%2Facorn-jsx-5.2.0.tgz",
"integrity": "sha1-TGYGkXPW/daO2FI5/CViJhgrLr4=",
"dev": true
},
"aggregate-error": {
@@ -1919,6 +1919,14 @@
"integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==",
"optional": true
},
"axios": {
"version": "0.19.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz",
"integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==",
"requires": {
"follow-redirects": "1.5.10"
}
},
"babel-loader": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.1.0.tgz",
@@ -3657,8 +3665,8 @@
},
"doctrine": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
"resolved": "https://registry.npm.taobao.org/doctrine/download/doctrine-3.0.0.tgz",
"integrity": "sha1-rd6+rXKmV023g2OdyHoSF3OXOWE=",
"dev": true,
"requires": {
"esutils": "^2.0.2"
@@ -3739,9 +3747,9 @@
}
},
"dropzone": {
"version": "5.7.0",
"resolved": "https://registry.npmjs.org/dropzone/-/dropzone-5.7.0.tgz",
"integrity": "sha512-kOltiZXH5cO/72I22JjE+w6BoT6uaVLfWdFMsi1PMKFkU6BZWpqRwjnsRm0o6ANGTBuZar5Piu7m/CbKqRPiYg=="
"version": "5.7.2",
"resolved": "https://registry.npm.taobao.org/dropzone/download/dropzone-5.7.2.tgz?cache=0&sync_timestamp=1596009792692&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fdropzone%2Fdownload%2Fdropzone-5.7.2.tgz",
"integrity": "sha1-kb7hVy3aUV1AkB2jBLx53d8wm0w="
},
"duplexer2": {
"version": "0.0.2",
@@ -4014,8 +4022,8 @@
},
"eslint": {
"version": "6.8.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz",
"integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==",
"resolved": "https://registry.npm.taobao.org/eslint/download/eslint-6.8.0.tgz",
"integrity": "sha1-YiYtZylzn5J1cjgkMC+yJ8jJP/s=",
"dev": true,
"requires": {
"@babel/code-frame": "^7.0.0",
@@ -4059,8 +4067,8 @@
"dependencies": {
"ansi-escapes": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz",
"integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==",
"resolved": "https://registry.npm.taobao.org/ansi-escapes/download/ansi-escapes-4.3.1.tgz",
"integrity": "sha1-pcR8xDGB8fOP/XB2g3cA05VSKmE=",
"dev": true,
"requires": {
"type-fest": "^0.11.0"
@@ -4068,22 +4076,22 @@
"dependencies": {
"type-fest": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz",
"integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==",
"resolved": "https://registry.npm.taobao.org/type-fest/download/type-fest-0.11.0.tgz",
"integrity": "sha1-l6vwhyMQ/tiKXEZrJWgVdhReM/E=",
"dev": true
}
}
},
"ansi-regex": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
"integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
"resolved": "https://registry.npm.taobao.org/ansi-regex/download/ansi-regex-5.0.0.tgz",
"integrity": "sha1-OIU59VF5vzkznIGvMKZU1p+Hy3U=",
"dev": true
},
"ansi-styles": {
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz",
"integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==",
"resolved": "https://registry.npm.taobao.org/ansi-styles/download/ansi-styles-4.2.1.tgz",
"integrity": "sha1-kK51xCTQCNJiTFvynq0xd+v881k=",
"dev": true,
"requires": {
"@types/color-name": "^1.1.1",
@@ -4092,17 +4100,23 @@
},
"cli-cursor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
"integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
"resolved": "https://registry.npm.taobao.org/cli-cursor/download/cli-cursor-3.1.0.tgz",
"integrity": "sha1-JkMFp65JDR0Dvwybp8kl0XU68wc=",
"dev": true,
"requires": {
"restore-cursor": "^3.1.0"
}
},
"cli-width": {
"version": "3.0.0",
"resolved": "https://registry.npm.taobao.org/cli-width/download/cli-width-3.0.0.tgz",
"integrity": "sha1-ovSEN6LKqaIkNueUvwceyeYc7fY=",
"dev": true
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"resolved": "https://registry.npm.taobao.org/color-convert/download/color-convert-2.0.1.tgz",
"integrity": "sha1-ctOmjVmMm9s68q0ehPIdiWq9TeM=",
"dev": true,
"requires": {
"color-name": "~1.1.4"
@@ -4110,20 +4124,20 @@
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"resolved": "https://registry.npm.taobao.org/color-name/download/color-name-1.1.4.tgz",
"integrity": "sha1-wqCah6y95pVD3m9j+jmVyCbFNqI=",
"dev": true
},
"emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"resolved": "https://registry.npm.taobao.org/emoji-regex/download/emoji-regex-8.0.0.tgz",
"integrity": "sha1-6Bj9ac5cz8tARZT4QpY79TFkzDc=",
"dev": true
},
"eslint-scope": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz",
"integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==",
"version": "5.1.0",
"resolved": "https://registry.npm.taobao.org/eslint-scope/download/eslint-scope-5.1.0.tgz?cache=0&sync_timestamp=1591269986906&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Feslint-scope%2Fdownload%2Feslint-scope-5.1.0.tgz",
"integrity": "sha1-0Plx3+WcaeDK2mhLI9Sdv4JgDOU=",
"dev": true,
"requires": {
"esrecurse": "^4.1.0",
@@ -4132,8 +4146,8 @@
},
"figures": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
"integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
"resolved": "https://registry.npm.taobao.org/figures/download/figures-3.2.0.tgz",
"integrity": "sha1-YlwYvSk8YE3EqN2y/r8MiDQXRq8=",
"dev": true,
"requires": {
"escape-string-regexp": "^1.0.5"
@@ -4141,8 +4155,8 @@
},
"globals": {
"version": "12.4.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz",
"integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==",
"resolved": "https://registry.npm.taobao.org/globals/download/globals-12.4.0.tgz",
"integrity": "sha1-oYgTV2pBsAokqX5/gVkYwuGZJfg=",
"dev": true,
"requires": {
"type-fest": "^0.8.1"
@@ -4150,14 +4164,14 @@
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"resolved": "https://registry.npm.taobao.org/has-flag/download/has-flag-4.0.0.tgz",
"integrity": "sha1-lEdx/ZyByBJlxNaUGGDaBrtZR5s=",
"dev": true
},
"import-fresh": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz",
"integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==",
"resolved": "https://registry.npm.taobao.org/import-fresh/download/import-fresh-3.2.1.tgz?cache=0&sync_timestamp=1589682760620&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fimport-fresh%2Fdownload%2Fimport-fresh-3.2.1.tgz",
"integrity": "sha1-Yz/2GFBueTr1rJG/SLcmd+FcvmY=",
"dev": true,
"requires": {
"parent-module": "^1.0.0",
@@ -4165,40 +4179,46 @@
}
},
"inquirer": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.1.0.tgz",
"integrity": "sha512-5fJMWEmikSYu0nv/flMc475MhGbB7TSPd/2IpFV4I4rMklboCH2rQjYY5kKiYGHqUF9gvaambupcJFFG9dvReg==",
"version": "7.3.3",
"resolved": "https://registry.npm.taobao.org/inquirer/download/inquirer-7.3.3.tgz",
"integrity": "sha1-BNF2sq8Er8FXqD/XwQDpjuCq0AM=",
"dev": true,
"requires": {
"ansi-escapes": "^4.2.1",
"chalk": "^3.0.0",
"chalk": "^4.1.0",
"cli-cursor": "^3.1.0",
"cli-width": "^2.0.0",
"cli-width": "^3.0.0",
"external-editor": "^3.0.3",
"figures": "^3.0.0",
"lodash": "^4.17.15",
"lodash": "^4.17.19",
"mute-stream": "0.0.8",
"run-async": "^2.4.0",
"rxjs": "^6.5.3",
"rxjs": "^6.6.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0",
"through": "^2.3.6"
},
"dependencies": {
"chalk": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
"integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
"version": "4.1.0",
"resolved": "https://registry.npm.taobao.org/chalk/download/chalk-4.1.0.tgz",
"integrity": "sha1-ThSHCmGNni7dl92DRf2dncMVZGo=",
"dev": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
}
},
"lodash": {
"version": "4.17.20",
"resolved": "https://registry.npm.taobao.org/lodash/download/lodash-4.17.20.tgz?cache=0&sync_timestamp=1597335994883&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Flodash%2Fdownload%2Flodash-4.17.20.tgz",
"integrity": "sha1-tEqbYpe8tpjxxRo1RaKzs2jVnFI=",
"dev": true
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"resolved": "https://registry.npm.taobao.org/strip-ansi/download/strip-ansi-6.0.0.tgz?cache=0&sync_timestamp=1589682795383&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fstrip-ansi%2Fdownload%2Fstrip-ansi-6.0.0.tgz",
"integrity": "sha1-CxVx3XZpzNTz4G4U7x7tJiJa5TI=",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
@@ -4208,26 +4228,26 @@
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"resolved": "https://registry.npm.taobao.org/is-fullwidth-code-point/download/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha1-8Rb4Bk/pCz94RKOJl8C3UFEmnx0=",
"dev": true
},
"mimic-fn": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
"integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
"resolved": "https://registry.npm.taobao.org/mimic-fn/download/mimic-fn-2.1.0.tgz?cache=0&sync_timestamp=1596095644798&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fmimic-fn%2Fdownload%2Fmimic-fn-2.1.0.tgz",
"integrity": "sha1-ftLCzMyvhNP/y3pptXcR/CCDQBs=",
"dev": true
},
"mute-stream": {
"version": "0.0.8",
"resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
"integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
"resolved": "https://registry.npm.taobao.org/mute-stream/download/mute-stream-0.0.8.tgz",
"integrity": "sha1-FjDEKyJR/4HiooPelqVJfqkuXg0=",
"dev": true
},
"onetime": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz",
"integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==",
"version": "5.1.2",
"resolved": "https://registry.npm.taobao.org/onetime/download/onetime-5.1.2.tgz?cache=0&sync_timestamp=1597005345612&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fonetime%2Fdownload%2Fonetime-5.1.2.tgz",
"integrity": "sha1-0Oluu1awdHbfHdnEgG5SN5hcpF4=",
"dev": true,
"requires": {
"mimic-fn": "^2.1.0"
@@ -4235,30 +4255,39 @@
},
"resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"resolved": "https://registry.npm.taobao.org/resolve-from/download/resolve-from-4.0.0.tgz",
"integrity": "sha1-SrzYUq0y3Xuqv+m0DgCjbbXzkuY=",
"dev": true
},
"restore-cursor": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
"integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
"resolved": "https://registry.npm.taobao.org/restore-cursor/download/restore-cursor-3.1.0.tgz",
"integrity": "sha1-OfZ8VLOnpYzqUjbZXPADQjljH34=",
"dev": true,
"requires": {
"onetime": "^5.1.0",
"signal-exit": "^3.0.2"
}
},
"rxjs": {
"version": "6.6.2",
"resolved": "https://registry.npm.taobao.org/rxjs/download/rxjs-6.6.2.tgz",
"integrity": "sha1-gJanrAPyzE/lhg725XKBDZ4BwNI=",
"dev": true,
"requires": {
"tslib": "^1.9.0"
}
},
"semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"resolved": "https://registry.npm.taobao.org/semver/download/semver-6.3.0.tgz?cache=0&sync_timestamp=1589682805026&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fsemver%2Fdownload%2Fsemver-6.3.0.tgz",
"integrity": "sha1-7gpkyK9ejO6mdoexM3YeG+y9HT0=",
"dev": true
},
"string-width": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz",
"integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==",
"resolved": "https://registry.npm.taobao.org/string-width/download/string-width-4.2.0.tgz",
"integrity": "sha1-lSGCxGzHssMT0VluYjmSvRY7crU=",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
@@ -4268,8 +4297,8 @@
"dependencies": {
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"resolved": "https://registry.npm.taobao.org/strip-ansi/download/strip-ansi-6.0.0.tgz?cache=0&sync_timestamp=1589682795383&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fstrip-ansi%2Fdownload%2Fstrip-ansi-6.0.0.tgz",
"integrity": "sha1-CxVx3XZpzNTz4G4U7x7tJiJa5TI=",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
@@ -4279,8 +4308,8 @@
},
"strip-ansi": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
"integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
"resolved": "https://registry.npm.taobao.org/strip-ansi/download/strip-ansi-5.2.0.tgz?cache=0&sync_timestamp=1589682795383&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fstrip-ansi%2Fdownload%2Fstrip-ansi-5.2.0.tgz",
"integrity": "sha1-jJpTb+tq/JYr36WxBKUJHBrZwK4=",
"dev": true,
"requires": {
"ansi-regex": "^4.1.0"
@@ -4288,22 +4317,22 @@
"dependencies": {
"ansi-regex": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
"integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
"resolved": "https://registry.npm.taobao.org/ansi-regex/download/ansi-regex-4.1.0.tgz",
"integrity": "sha1-i5+PCM8ay4Q3Vqg5yox+MWjFGZc=",
"dev": true
}
}
},
"strip-json-comments": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.0.tgz",
"integrity": "sha512-e6/d0eBu7gHtdCqFt0xJr642LdToM5/cN4Qb9DbHjVx1CP5RyeM+zH7pbecEmDv/lBqb0QH+6Uqq75rxFPkM0w==",
"version": "3.1.1",
"resolved": "https://registry.npm.taobao.org/strip-json-comments/download/strip-json-comments-3.1.1.tgz?cache=0&sync_timestamp=1594567532500&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fstrip-json-comments%2Fdownload%2Fstrip-json-comments-3.1.1.tgz",
"integrity": "sha1-MfEoGzgyYwQ0gxwxDAHMzajL4AY=",
"dev": true
},
"supports-color": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz",
"integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==",
"resolved": "https://registry.npm.taobao.org/supports-color/download/supports-color-7.1.0.tgz",
"integrity": "sha1-aOMlkd9z4lrRxLSRCKLsUHliv9E=",
"dev": true,
"requires": {
"has-flag": "^4.0.0"
@@ -4495,6 +4524,17 @@
}
}
},
"eslint-plugin-vue": {
"version": "6.2.2",
"resolved": "https://registry.npm.taobao.org/eslint-plugin-vue/download/eslint-plugin-vue-6.2.2.tgz?cache=0&sync_timestamp=1597198168566&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Feslint-plugin-vue%2Fdownload%2Feslint-plugin-vue-6.2.2.tgz",
"integrity": "sha1-J/7NmjokeJsPER7N1UCp5WGY4P4=",
"dev": true,
"requires": {
"natural-compare": "^1.4.0",
"semver": "^5.6.0",
"vue-eslint-parser": "^7.0.0"
}
},
"eslint-scope": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz",
@@ -4506,23 +4546,23 @@
},
"eslint-utils": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz",
"integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==",
"resolved": "https://registry.npm.taobao.org/eslint-utils/download/eslint-utils-1.4.3.tgz",
"integrity": "sha1-dP7HxU0Hdrb2fgJRBAtYBlZOmB8=",
"dev": true,
"requires": {
"eslint-visitor-keys": "^1.1.0"
}
},
"eslint-visitor-keys": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz",
"integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==",
"version": "1.3.0",
"resolved": "https://registry.npm.taobao.org/eslint-visitor-keys/download/eslint-visitor-keys-1.3.0.tgz?cache=0&sync_timestamp=1597435068105&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Feslint-visitor-keys%2Fdownload%2Feslint-visitor-keys-1.3.0.tgz",
"integrity": "sha1-MOvR73wv3/AcOk8VEESvJfqwUj4=",
"dev": true
},
"espree": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz",
"integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==",
"resolved": "https://registry.npm.taobao.org/espree/download/espree-6.2.1.tgz?cache=0&sync_timestamp=1595034145062&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fespree%2Fdownload%2Fespree-6.2.1.tgz",
"integrity": "sha1-d/xy4f10SiBSwg84pbV1gy6Cc0o=",
"dev": true,
"requires": {
"acorn": "^7.1.1",
@@ -4531,9 +4571,9 @@
},
"dependencies": {
"acorn": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-7.2.0.tgz",
"integrity": "sha512-apwXVmYVpQ34m/i71vrApRrRKCWQnZZF1+npOD0WV5xZFfwWOmKGQ2RWlfdy9vWITsenisM8M0Qeq8agcFHNiQ==",
"version": "7.4.0",
"resolved": "https://registry.npm.taobao.org/acorn/download/acorn-7.4.0.tgz?cache=0&sync_timestamp=1597235774928&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Facorn%2Fdownload%2Facorn-7.4.0.tgz",
"integrity": "sha1-4a1IbmxUUBY0xsOXxcEh2qODYHw=",
"dev": true
}
}
@@ -4545,17 +4585,17 @@
},
"esquery": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz",
"integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==",
"resolved": "https://registry.npm.taobao.org/esquery/download/esquery-1.3.1.tgz",
"integrity": "sha1-t4tYKKqOIU4p+3TE1bdS4cAz2lc=",
"dev": true,
"requires": {
"estraverse": "^5.1.0"
},
"dependencies": {
"estraverse": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.1.0.tgz",
"integrity": "sha512-FyohXK+R0vE+y1nHLoBM7ZTyqRpqAlhdZHCWIWEviFLiGB8b04H6bQs8G+XTthacvT8VuwvteiP7RJSxMs8UEw==",
"version": "5.2.0",
"resolved": "https://registry.npm.taobao.org/estraverse/download/estraverse-5.2.0.tgz?cache=0&sync_timestamp=1596642998635&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Festraverse%2Fdownload%2Festraverse-5.2.0.tgz",
"integrity": "sha1-MH30JUfmzHMk088DwVXVzbjFOIA=",
"dev": true
}
}
@@ -5172,6 +5212,29 @@
"readable-stream": "^2.3.6"
}
},
"follow-redirects": {
"version": "1.5.10",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
"integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
"requires": {
"debug": "=3.1.0"
},
"dependencies": {
"debug": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
"integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
"requires": {
"ms": "2.0.0"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
}
}
},
"fomantic-ui": {
"version": "2.8.4",
"resolved": "https://registry.npmjs.org/fomantic-ui/-/fomantic-ui-2.8.4.tgz",
@@ -6878,8 +6941,8 @@
},
"ignore": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
"integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
"resolved": "https://registry.npm.taobao.org/ignore/download/ignore-4.0.6.tgz",
"integrity": "sha1-dQ49tYYgh7RzfrrIIH/9HvJ7Jfw=",
"dev": true
},
"image-size": {
@@ -9332,8 +9395,8 @@
},
"optionator": {
"version": "0.8.3",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
"integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==",
"resolved": "https://registry.npm.taobao.org/optionator/download/optionator-0.8.3.tgz",
"integrity": "sha1-hPodA2/p08fiHZmIS2ARZ+yPtJU=",
"dev": true,
"requires": {
"deep-is": "~0.1.3",
@@ -10835,8 +10898,8 @@
},
"progress": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
"integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
"resolved": "https://registry.npm.taobao.org/progress/download/progress-2.0.3.tgz",
"integrity": "sha1-foz42PW48jnBvGi+tOt4Vn1XLvg=",
"dev": true
},
"promise": {
@@ -10944,9 +11007,9 @@
"integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc="
},
"qs": {
"version": "6.5.2",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
"integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
"version": "6.9.4",
"resolved": "https://registry.npm.taobao.org/qs/download/qs-6.9.4.tgz",
"integrity": "sha1-kJCykNH5FyjTwi5UhDykSupatoc="
},
"query-string": {
"version": "4.3.4",
@@ -11346,8 +11409,8 @@
},
"regexpp": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz",
"integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==",
"resolved": "https://registry.npm.taobao.org/regexpp/download/regexpp-2.0.1.tgz",
"integrity": "sha1-jRnTHPYySCtYkEn4KB+T28uk0H8=",
"dev": true
},
"regexpu-core": {
@@ -11578,6 +11641,14 @@
"tough-cookie": "~2.5.0",
"tunnel-agent": "^0.6.0",
"uuid": "^3.3.2"
},
"dependencies": {
"qs": {
"version": "6.5.2",
"resolved": "https://registry.npm.taobao.org/qs/download/qs-6.5.2.tgz",
"integrity": "sha1-yzroBuh0BERYTvFUzo7pjUA/PjY=",
"optional": true
}
}
},
"require-dir": {
@@ -12187,6 +12258,11 @@
"resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
"integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM="
},
"spark-md5": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/spark-md5/-/spark-md5-3.0.1.tgz",
"integrity": "sha512-0tF3AGSD1ppQeuffsLDIOWlKUd3lS92tFxcsrh5Pe3ZphhnoK+oXIBTzOAThZCiuINZLvpiLH/1VS1/ANEJVig=="
},
"sparkles": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/sparkles/-/sparkles-1.0.1.tgz",
@@ -13639,8 +13715,8 @@
},
"type-fest": {
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
"integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
"resolved": "https://registry.npm.taobao.org/type-fest/download/type-fest-0.8.1.tgz",
"integrity": "sha1-CeJJ696FHTseSNJ8EFREZn8XuD0=",
"dev": true
},
"typedarray": {
@@ -14273,6 +14349,32 @@
"v-tooltip": "^2.0.0-rc.32"
}
},
"vue-eslint-parser": {
"version": "7.1.0",
"resolved": "https://registry.npm.taobao.org/vue-eslint-parser/download/vue-eslint-parser-7.1.0.tgz?cache=0&sync_timestamp=1589684321779&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fvue-eslint-parser%2Fdownload%2Fvue-eslint-parser-7.1.0.tgz",
"integrity": "sha1-nNvMgj5lawh1B6GRFzK4Z6wQHoM=",
"dev": true,
"requires": {
"debug": "^4.1.1",
"eslint-scope": "^5.0.0",
"eslint-visitor-keys": "^1.1.0",
"espree": "^6.2.1",
"esquery": "^1.0.1",
"lodash": "^4.17.15"
},
"dependencies": {
"eslint-scope": {
"version": "5.1.0",
"resolved": "https://registry.npm.taobao.org/eslint-scope/download/eslint-scope-5.1.0.tgz?cache=0&sync_timestamp=1591269986906&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Feslint-scope%2Fdownload%2Feslint-scope-5.1.0.tgz",
"integrity": "sha1-0Plx3+WcaeDK2mhLI9Sdv4JgDOU=",
"dev": true,
"requires": {
"esrecurse": "^4.1.0",
"estraverse": "^4.1.1"
}
}
}
},
"vue-hot-reload-api": {
"version": "2.3.4",
"resolved": "https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz",
@@ -14295,6 +14397,11 @@
"resolved": "https://registry.npmjs.org/vue-resize/-/vue-resize-0.4.5.tgz",
"integrity": "sha512-bhP7MlgJQ8TIkZJXAfDf78uJO+mEI3CaLABLjv0WNzr4CcGRGPIAItyWYnP6LsPA4Oq0WE+suidNs6dgpO4RHg=="
},
"vue-router": {
"version": "3.3.4",
"resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.3.4.tgz",
"integrity": "sha512-SdKRBeoXUjaZ9R/8AyxsdTqkOfMcI5tWxPZOUX5Ie1BTL5rPSZ0O++pbiZCeYeythiZIdLEfkDiQPKIaWk5hDg=="
},
"vue-style-loader": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.2.tgz",
@@ -14843,8 +14950,8 @@
},
"word-wrap": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"resolved": "https://registry.npm.taobao.org/word-wrap/download/word-wrap-1.2.3.tgz?cache=0&sync_timestamp=1589683603678&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fword-wrap%2Fdownload%2Fword-wrap-1.2.3.tgz",
"integrity": "sha1-YQY29rH3A4kb00dxzLF/uTtHB5w=",
"dev": true
},
"wordwrap": {


+ 6
- 1
package.json View File

@@ -11,13 +11,14 @@
"@babel/preset-env": "7.9.6",
"@babel/runtime": "7.9.6",
"@primer/octicons": "9.6.0",
"axios": "0.19.2",
"babel-loader": "8.1.0",
"clipboard": "2.0.6",
"core-js": "3.6.5",
"css-loader": "3.5.3",
"cssnano": "4.1.10",
"domino": "2.1.5",
"dropzone": "5.7.0",
"dropzone": "5.7.2",
"fast-glob": "3.2.2",
"file-loader": "6.0.0",
"fomantic-ui": "2.8.4",
@@ -34,6 +35,8 @@
"postcss-loader": "3.0.0",
"postcss-preset-env": "6.7.0",
"postcss-safe-parser": "4.0.2",
"qs": "6.9.4",
"spark-md5": "3.0.1",
"svg-sprite-loader": "5.0.0",
"svgo": "1.3.2",
"svgo-loader": "2.2.1",
@@ -43,6 +46,7 @@
"vue-bar-graph": "1.2.0",
"vue-calendar-heatmap": "0.8.4",
"vue-loader": "15.9.2",
"vue-router": "3.3.4",
"vue-template-compiler": "2.6.11",
"webpack": "4.43.0",
"webpack-cli": "3.3.11",
@@ -53,6 +57,7 @@
"eslint": "6.8.0",
"eslint-config-airbnb-base": "14.1.0",
"eslint-plugin-import": "2.20.2",
"eslint-plugin-vue": "6.2.2",
"stylelint": "13.3.3",
"stylelint-config-standard": "20.0.0",
"updates": "10.2.11"


+ 6
- 0
routers/home.go View File

@@ -37,6 +37,12 @@ const (

// Home renders the home page
func Home(ctx *context.Context) {
ctx.Data["PageIsHome"] = true
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled
ctx.HTML(200, tplHome)
}

func Dashboard(ctx *context.Context) {
if ctx.IsSigned {
if !ctx.User.IsActive && setting.Service.RegisterEmailConfirm {
ctx.Data["Title"] = ctx.Tr("auth.active_your_account")


+ 2
- 0
routers/init.go View File

@@ -15,6 +15,7 @@ import (
"code.gitea.io/gitea/modules/auth/sso"
"code.gitea.io/gitea/modules/cache"
"code.gitea.io/gitea/modules/cron"
"code.gitea.io/gitea/modules/decompression"
"code.gitea.io/gitea/modules/eventsource"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/highlight"
@@ -61,6 +62,7 @@ func NewServices() {
mailer.NewContext()
_ = cache.NewContext()
notification.NewContext()
decompression.NewContext()
}

// In case of problems connecting to DB, retry connection. Eg, PGSQL in Docker Container on Synology


+ 231
- 8
routers/repo/attachment.go View File

@@ -5,25 +5,33 @@
package repo

import (
contexExt "context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/minio_ext"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/upload"
"code.gitea.io/gitea/modules/worker"
contexExt "context"
"fmt"
gouuid "github.com/satori/go.uuid"

"net/http"
"strconv"
"strings"

gouuid "github.com/satori/go.uuid"
)

const (
// result of decompression
DecompressSuccess = "0"
DecompressFailed = "1"
)

func RenderAttachmentSettings(ctx *context.Context) {
@@ -107,7 +115,7 @@ func DeleteAttachment(ctx *context.Context) {
ctx.Error(403)
return
}
err = models.DeleteAttachment(attach, true)
err = models.DeleteAttachment(attach, false)
if err != nil {
ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err))
return
@@ -327,14 +335,229 @@ func UpdateAttachmentDecompressState(ctx *context.Context) {
})
}

func GetSuccessChunks(ctx *context.Context) {
fileMD5 := ctx.Query("md5")

fileChunk, err := models.GetFileChunkByMD5AndUser(fileMD5, ctx.User.ID)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.JSON(200, map[string]string{
"uuid": "",
"uploaded": "0",
"uploadID": "",
"chunks": "",
})
} else {
ctx.ServerError("GetFileChunkByMD5", err)
}
return
}

chunks, err := json.Marshal(fileChunk.CompletedParts)
if err != nil {
ctx.ServerError("json.Marshal failed", err)
return
}

var attachID int64
attach, err := models.GetAttachmentByUUID(fileChunk.UUID)
if err != nil {
if models.IsErrAttachmentNotExist(err) {
attachID = 0
} else {
ctx.ServerError("GetAttachmentByUUID", err)
return
}
} else {
attachID = attach.ID
}

ctx.JSON(200, map[string]string{
"uuid": fileChunk.UUID,
"uploaded": strconv.Itoa(fileChunk.IsUploaded),
"uploadID": fileChunk.UploadID,
"chunks": string(chunks),
"attachID": strconv.Itoa(int(attachID)),
})

}

func NewMultipart(ctx *context.Context) {
if !setting.Attachment.Enabled {
ctx.Error(404, "attachment is not enabled")
return
}

err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
if err != nil {
ctx.Error(400, err.Error())
return
}

if setting.Attachment.StoreType == storage.MinioStorageType {
totalChunkCounts := ctx.QueryInt("totalChunkCounts")
if totalChunkCounts > minio_ext.MaxPartsCount {
ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
return
}

fileSize := ctx.QueryInt64("size")
if fileSize > minio_ext.MaxMultipartPutObjectSize {
ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
return
}

uuid := gouuid.NewV4().String()
uploadID, err := storage.NewMultiPartUpload(uuid)
if err != nil {
ctx.ServerError("NewMultipart", err)
return
}

_, err = models.InsertFileChunk(&models.FileChunk{
UUID: uuid,
UserID: ctx.User.ID,
UploadID: uploadID,
Md5: ctx.Query("md5"),
Size: fileSize,
TotalChunks: totalChunkCounts,
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
return
}

ctx.JSON(200, map[string]string{
"uuid": uuid,
"uploadID": uploadID,
})
} else {
ctx.Error(404, "storage type is not enabled")
return
}
}

func GetMultipartUploadUrl(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
partNumber := ctx.QueryInt("chunkNumber")
size := ctx.QueryInt64("size")

if size > minio_ext.MinPartSize {
ctx.Error(400, fmt.Sprintf("chunk size(%d) is too big", size))
return
}

url, err := storage.GenMultiPartSignedUrl(uuid, uploadID, partNumber, size)
if err != nil {
ctx.Error(500, fmt.Sprintf("GenMultiPartSignedUrl failed: %v", err))
return
}

ctx.JSON(200, map[string]string{
"url": url,
})
}

func CompleteMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")

fileChunk, err := models.GetFileChunkByUUID(uuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetFileChunkByUUID", err)
}
return
}

_, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.CompletedParts)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}

fileChunk.IsUploaded = models.FileUploaded

err = models.UpdateFileChunk(fileChunk)
if err != nil {
ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
return
}

attachment, err := models.InsertAttachment(&models.Attachment{
UUID: uuid,
UploaderID: ctx.User.ID,
IsPrivate: true,
Name: ctx.Query("file_name"),
Size: ctx.QueryInt64("size"),
DatasetID: ctx.QueryInt64("dataset_id"),
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}

if attachment.DatasetID != 0 {
if strings.HasSuffix(attachment.Name, ".zip") {
err = worker.SendDecompressTask(contexExt.Background(), uuid)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", uuid, err.Error())
} else {
attachment.DecompressState = models.DecompressStateIng
err = models.UpdateAttachment(attachment)
if err != nil {
log.Error("UpdateAttachment state(%s) failed:%s", uuid, err.Error())
}
}
}
}

ctx.JSON(200, map[string]string{
"result_code": "0",
})
}

func UpdateMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
partNumber := ctx.QueryInt("chunkNumber")
etag := ctx.Query("etag")

fileChunk, err := models.GetFileChunkByUUID(uuid)
if err != nil {
if models.IsErrFileChunkNotExist(err) {
ctx.Error(404)
} else {
ctx.ServerError("GetFileChunkByUUID", err)
}
return
}

fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))

err = models.UpdateFileChunk(fileChunk)
if err != nil {
ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
return
}

ctx.JSON(200, map[string]string{
"result_code": "0",
})
}
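
Taken together, these handlers define a resumable, chunked upload protocol against MinIO: `GetSuccessChunks` looks up a previous attempt by file MD5 so a client can resume, `NewMultipart` opens the multipart upload, `GetMultipartUploadUrl` presigns a PUT URL per part, `UpdateMultipart` records each finished part as `"partNumber-etag"`, and `CompleteMultipart` stitches the parts into the final object, creates the attachment, and queues decompression for `.zip` files. A minimal client sketch of the happy path (resume handling omitted; the base URL, package name, and cookie/CSRF handling are assumptions — the endpoints are the ones registered under `/attachments` in routes.go further down):

```go
package attachclient

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// Assumed deployment; real requests also need the session cookie and CSRF token.
const base = "http://localhost:3000/attachments"

// getJSON fetches u and decodes the small JSON objects these handlers return.
func getJSON(u string, out interface{}) error {
	resp, err := http.Get(u)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(out)
}

// upload pushes pre-sliced chunks through the five-endpoint protocol.
func upload(md5sum, name string, size int64, chunks [][]byte) error {
	// new_multipart: the server inserts a FileChunk row, opens a MinIO
	// multipart upload, and hands back its uuid and uploadID.
	var start struct {
		UUID     string `json:"uuid"`
		UploadID string `json:"uploadID"`
	}
	q := url.Values{
		"md5":              {md5sum},
		"fileType":         {"application/zip"}, // must pass VerifyFileType
		"size":             {fmt.Sprint(size)},
		"totalChunkCounts": {fmt.Sprint(len(chunks))},
	}
	if err := getJSON(base+"/new_multipart?"+q.Encode(), &start); err != nil {
		return err
	}

	for i, chunk := range chunks {
		part := i + 1 // part numbers are 1-based

		// get_multipart_url: a presigned PUT URL for this part.
		var signed struct {
			URL string `json:"url"`
		}
		pq := url.Values{
			"uuid":        {start.UUID},
			"uploadID":    {start.UploadID},
			"chunkNumber": {fmt.Sprint(part)},
			"size":        {fmt.Sprint(len(chunk))},
		}
		if err := getJSON(base+"/get_multipart_url?"+pq.Encode(), &signed); err != nil {
			return err
		}

		// The bytes go straight to MinIO, bypassing the Gitea server.
		req, err := http.NewRequest(http.MethodPut, signed.URL, bytes.NewReader(chunk))
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		etag := resp.Header.Get("ETag")
		resp.Body.Close()

		// update_chunk: record the part as "partNumber-etag" server-side.
		ack, err := http.PostForm(base+"/update_chunk", url.Values{
			"uuid": {start.UUID}, "chunkNumber": {fmt.Sprint(part)}, "etag": {etag},
		})
		if err != nil {
			return err
		}
		ack.Body.Close()
	}

	// complete_multipart: assemble the parts and create the Attachment.
	done, err := http.PostForm(base+"/complete_multipart", url.Values{
		"uuid": {start.UUID}, "uploadID": {start.UploadID},
		"file_name": {name}, "size": {fmt.Sprint(size)},
	})
	if err != nil {
		return err
	}
	return done.Body.Close()
}
```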

func HandleUnDecompressAttachment() {
attachs,err := models.GetUnDecompressAttachments()
attachs, err := models.GetUnDecompressAttachments()
if err != nil {
log.Error("GetUnDecompressAttachments failed:", err.Error())
return
}

for _,attach := range attachs {
for _, attach := range attachs {
err = worker.SendDecompressTask(contexExt.Background(), attach.UUID)
if err != nil {
log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())


+ 2
- 15
routers/repo/dataset.go View File

@@ -1,18 +1,15 @@
package repo

import (
"net/url"
"sort"

"code.gitea.io/gitea/modules/storage"
"code.gitea.io/gitea/modules/setting"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/auth"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"

gouuid "github.com/satori/go.uuid"
)

const (
@@ -82,18 +79,8 @@ func DatasetIndex(ctx *context.Context) {
ctx.Data["dataset"] = dataset
ctx.Data["Attachments"] = attachments
ctx.Data["IsOwner"] = true
uuid := gouuid.NewV4().String()
tmpUrl, err := storage.Attachments.PresignedPutURL(models.AttachmentRelativePath(uuid))
if err != nil {
ctx.ServerError("PresignedPutURL", err)
}
preUrl, err := url.QueryUnescape(tmpUrl)
if err != nil {
ctx.ServerError("QueryUnescape", err)
}
ctx.Data["StoreType"] = setting.Attachment.StoreType

ctx.Data["uuid"] = uuid
ctx.Data["url"] = preUrl
renderAttachmentSettings(ctx)

ctx.HTML(200, tplIndex)


+ 6
- 0
routers/routes/routes.go View File

@@ -285,6 +285,7 @@ func RegisterRoutes(m *macaron.Macaron) {
return ""
})
m.Get("/", routers.Home)
m.Get("/dashboard", routers.Dashboard)
m.Group("/explore", func() {
m.Get("", func(ctx *context.Context) {
ctx.Redirect(setting.AppSubURL + "/explore/repos")
@@ -521,6 +522,11 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/get_pre_url", repo.GetPresignedPutObjectURL)
m.Post("/add", repo.AddAttachment)
m.Post("/private", repo.UpdatePublicAttachment)
m.Get("/get_chunks", repo.GetSuccessChunks)
m.Get("/new_multipart", repo.NewMultipart)
m.Get("/get_multipart_url", repo.GetMultipartUploadUrl)
m.Post("/complete_multipart", repo.CompleteMultipart)
m.Post("/update_chunk", repo.UpdateMultipart)
}, reqSignIn)

m.Group("/attachments", func() {


+ 5
- 1
templates/base/head_navbar.tmpl View File

@@ -9,7 +9,7 @@
</div>

{{if .IsSigned}}
<a class="item {{if .PageIsDashboard}}active{{end}}" href="/">{{.i18n.Tr "dashboard"}}</a>
<a class="item {{if .PageIsDashboard}}active{{end}}" href="/dashboard">{{.i18n.Tr "dashboard"}}</a>
<div class="ui dropdown item">
{{.i18n.Tr "custom.head.community"}}
<i class="dropdown icon"></i>
@@ -128,6 +128,10 @@
</div>

<div class="divider"></div>
<a class="item" href="{{AppSubUrl}}/dashboard">
{{svg "octicon-info" 16}}
{{.i18n.Tr "your_dashboard"}}<!-- Your dashboard -->
</a>
<a class="item" href="{{AppSubUrl}}/{{.SignedUser.Name}}">
{{svg "octicon-person" 16}}
{{.i18n.Tr "your_profile"}}<!-- Your profile -->


+ 2
- 1
templates/repo/datasets/dataset.tmpl View File

@@ -2,7 +2,8 @@
<div class="field required dataset-files">
<label>{{.i18n.Tr "dataset.file"}}</label>
<div class="files"></div>
<div class="ui dropzone" id="dataset" data-upload-url="{{.url}}" data-uuid="{{.uuid}}" data-add-url="{{AppSubUrl}}/attachments/add" data-accepts="{{.AttachmentAllowedTypes}}" data-remove-url="{{AppSubUrl}}/attachments/delete" data-csrf="{{.CsrfToken}}" dataset-id={{.dataset.ID}} data-max-file="100" data-dataset-id="{{.dataset.ID}}" data-max-size="{{.AttachmentMaxSize}}" data-default-message="{{.i18n.Tr "dropzone.default_message"}}" data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}" data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}" data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}">
<div class="ui dropzone" id="dataset" data-upload-url="{{AppSubUrl}}/attachments" data-accepts="{{.AttachmentAllowedTypes}}" data-remove-url="{{AppSubUrl}}/attachments/delete" data-csrf="{{.CsrfToken}}" dataset-id={{.dataset.ID}} data-max-file="100" data-dataset-id="{{.dataset.ID}}" data-max-size="{{.AttachmentMaxSize}}" data-default-message="{{.i18n.Tr "dropzone.default_message"}}" data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}" data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}" data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}">

</div>
</div>
</div>

+ 37
- 7
templates/repo/datasets/index.tmpl View File

@@ -3,15 +3,13 @@
{{template "repo/header" .}}
<form class="ui container" action="{{.Link}}" method="post">
<input name="id" value="{{.dataset.ID}}" type="hidden" />
<div id="datasetId" datasetId="{{.dataset.ID}}">
{{.CsrfTokenHtml}}
{{template "base/alert" .}}
<div class="ui stackable grid {{if .Error}}hide{{end}}" id="dataset-content">
<div class="row">
<div class="column sixteen {{if .Permission.CanWrite $.UnitTypeDatasets}}twelve{{end}} wide">
<h2>{{.dataset.Title}}</h2>
<div id="dataset-desc">
<span class="no-description text-italic">{{.dataset.Description}}</span>
</div>
</div>
{{if .Permission.CanWrite $.UnitTypeDatasets}}
<div class="column four wide right aligned">
@@ -21,6 +19,11 @@
</div>
{{end}}
</div>
<div class="row">
<div class="column sixteen wide">
<span class="no-description text-italic">{{.dataset.Description}}</span>
</div>
</div>
</div>

<div class="ui grid form segment success {{if not .Error}}hide{{end}}" id="dataset-content-edit">
@@ -65,11 +68,38 @@
</div>
<div class="dataset ui middle very relaxed page">
<div class="column">
{{if .Permission.CanWrite $.UnitTypeDatasets}}
<form class="ui form" action="{{.Link}}" method="post">
{{template "repo/datasets/dataset" .}}
</form>
{{if .Permission.CanWrite $.UnitTypeDatasets}}
<div style='display:none;'
id="minioUploader-params"
data-uuid="{{.uuid}}"
data-add-url="{{AppSubUrl}}/attachments/add"
data-accepts="{{.AttachmentAllowedTypes}}"
data-remove-url="{{AppSubUrl}}/attachments/delete"
data-csrf="{{.CsrfToken}}"
dataset-id={{.dataset.ID}}
data-max-file="100"
data-dataset-id="{{.dataset.ID}}"
data-max-size="{{.AttachmentMaxSize}}"
data-default-message="{{.i18n.Tr "dropzone.default_message"}}"
data-invalid-input-type="{{.i18n.Tr "dropzone.invalid_input_type"}}"
data-file-too-big="{{.i18n.Tr "dropzone.file_too_big"}}"
data-remove-file="{{.i18n.Tr "dropzone.remove_file"}}"

data-file-status='{{.i18n.Tr "dropzone.file_status"}}'
data-file-init-status='{{.i18n.Tr "dropzone.file_init_status"}}'
data-waitting-uploading='{{.i18n.Tr "dropzone.waitting_uploading"}}'
data-md5-computing='{{.i18n.Tr "dropzone.md5_computing"}}'
data-loading-file='{{.i18n.Tr "dropzone.loading_file"}}'
data-upload-complete='{{.i18n.Tr "dropzone.upload_complete"}}'
data-uploading='{{.i18n.Tr "dropzone.uploading"}}'
>
</div>
{{if eq .StoreType "minio"}}
<div id="minioUploader"></div>
{{else}}
<div style="margin: 2em 0;"> {{.i18n.Tr "dropzone.enable_minio_support"}} </div>
{{end}}
{{end}}
</div>
</div>
</div>


+ 315
- 0
vendor/cloud.google.com/go/iam/iam.go View File

@@ -0,0 +1,315 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package iam supports the resource-specific operations of Google Cloud
// IAM (Identity and Access Management) for the Google Cloud Libraries.
// See https://cloud.google.com/iam for more about IAM.
//
// Users of the Google Cloud Libraries will typically not use this package
// directly. Instead they will begin with some resource that supports IAM, like
// a pubsub topic, and call its IAM method to get a Handle for that resource.
package iam

import (
"context"
"fmt"
"time"

gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
Get(ctx context.Context, resource string) (*pb.Policy, error)
Set(ctx context.Context, resource string, p *pb.Policy) error
Test(ctx context.Context, resource string, perms []string) ([]string, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
c pb.IAMPolicyClient
}

var withRetry = gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60 * time.Second,
Multiplier: 1.3,
})
})

func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
var proto *pb.Policy
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)

err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
return err
}, withRetry)
if err != nil {
return nil, err
}
return proto, nil
}

func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)

return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
}, withRetry)
}

func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
var res *pb.TestIamPermissionsResponse
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)

err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
return err
}, withRetry)
if err != nil {
return nil, err
}
return res.Permissions, nil
}

// A Handle provides IAM operations for a resource.
type Handle struct {
c client
resource string
}

// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}

// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// gRPC service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: c}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
return &Handle{
c: c,
resource: resource,
}
}

// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
proto, err := h.c.Get(ctx, h.resource)
if err != nil {
return nil, err
}
return &Policy{InternalProto: proto}, nil
}

// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
return h.c.Test(ctx, h.resource, permissions)
}

// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
Owner RoleName = "roles/owner"
Editor RoleName = "roles/editor"
Viewer RoleName = "roles/viewer"
)

const (
// AllUsers is a special member that denotes all users, even unauthenticated ones.
AllUsers = "allUsers"

// AllAuthenticatedUsers is a special member that denotes all authenticated users.
AllAuthenticatedUsers = "allAuthenticatedUsers"
)

// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
// TODO(jba): when type aliases are available, put Policy into an internal package
// and provide an exported alias here.

// This field is exported for use by the Google Cloud Libraries only.
// It may become unexported in a future release.
InternalProto *pb.Policy
}

// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
b := p.binding(r)
if b == nil {
return nil
}
return b.Members
}

// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
return memberIndex(member, p.binding(r)) >= 0
}

// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
b := p.binding(r)
if b == nil {
if p.InternalProto == nil {
p.InternalProto = &pb.Policy{}
}
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
Role: string(r),
Members: []string{member},
})
return
}
if memberIndex(member, b) < 0 {
b.Members = append(b.Members, member)
return
}
}

// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
bi := p.bindingIndex(r)
if bi < 0 {
return
}
bindings := p.InternalProto.Bindings
b := bindings[bi]
mi := memberIndex(member, b)
if mi < 0 {
return
}
// Order doesn't matter for bindings or members, so to remove, move the last item
// into the removed spot and shrink the slice.
if len(b.Members) == 1 {
// Remove binding.
last := len(bindings) - 1
bindings[bi] = bindings[last]
bindings[last] = nil
p.InternalProto.Bindings = bindings[:last]
return
}
// Remove member.
// TODO(jba): worry about multiple copies of m?
last := len(b.Members) - 1
b.Members[mi] = b.Members[last]
b.Members[last] = ""
b.Members = b.Members[:last]
}

// Roles returns the names of all the roles that appear in the Policy.
func (p *Policy) Roles() []RoleName {
if p.InternalProto == nil {
return nil
}
var rns []RoleName
for _, b := range p.InternalProto.Bindings {
rns = append(rns, RoleName(b.Role))
}
return rns
}

// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
i := p.bindingIndex(r)
if i < 0 {
return nil
}
return p.InternalProto.Bindings[i]
}

func (p *Policy) bindingIndex(r RoleName) int {
if p.InternalProto == nil {
return -1
}
for i, b := range p.InternalProto.Bindings {
if b.Role == string(r) {
return i
}
}
return -1
}

// memberIndex returns the index of m in b's Members, or -1 if not found.
func memberIndex(m string, b *pb.Binding) int {
if b == nil {
return -1
}
for i, mm := range b.Members {
if mm == m {
return i
}
}
return -1
}

// insertMetadata inserts metadata into the given context
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
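
SetPolicy's contract — it only succeeds if the policy has not changed since the preceding Policy call — makes Handle a read-modify-write API. A small hypothetical caller (not part of the vendored file; the pubsub clients later in this diff expose such Handles via TopicIAM/SubscriptionIAM):

```go
package example

import (
	"context"

	"cloud.google.com/go/iam"
)

// grantViewer gives member roles/viewer via the read-modify-write cycle the
// Handle expects; if a concurrent writer changed the policy between Policy
// and SetPolicy, SetPolicy fails and the whole cycle can simply be retried.
func grantViewer(ctx context.Context, h *iam.Handle, member string) error {
	policy, err := h.Policy(ctx) // read
	if err != nil {
		return err
	}
	if !policy.HasRole(member, iam.Viewer) {
		policy.Add(member, iam.Viewer) // modify in memory
	}
	return h.SetPolicy(ctx, policy) // write back
}
```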

+ 108
- 0
vendor/cloud.google.com/go/internal/optional/optional.go View File

@@ -0,0 +1,108 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional

import (
"fmt"
"strings"
"time"
)

type (
// Bool is either a bool or nil.
Bool interface{}

// String is either a string or nil.
String interface{}

// Int is either an int or nil.
Int interface{}

// Uint is either a uint or nil.
Uint interface{}

// Float64 is either a float64 or nil.
Float64 interface{}

// Duration is either a time.Duration or nil.
Duration interface{}
)

// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
x, ok := v.(bool)
if !ok {
doPanic("Bool", v)
}
return x
}

// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
x, ok := v.(string)
if !ok {
doPanic("String", v)
}
return x
}

// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
x, ok := v.(int)
if !ok {
doPanic("Int", v)
}
return x
}

// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
x, ok := v.(uint)
if !ok {
doPanic("Uint", v)
}
return x
}

// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
x, ok := v.(float64)
if !ok {
doPanic("Float64", v)
}
return x
}

// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}

func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
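
Because the package is internal, only the Google Cloud libraries themselves import it; the pattern it enables is an update struct whose nil fields mean "leave unchanged". A sketch with hypothetical type and field names:

```go
package example

import (
	"time"

	"cloud.google.com/go/internal/optional"
)

// bucketAttrs stands in for some API object's current settings.
type bucketAttrs struct {
	versioning bool
	ttl        time.Duration
}

// attrsToUpdate leaves every nil field untouched; a non-nil field carries
// the new value as an optional.* and is unwrapped with the To* helpers.
type attrsToUpdate struct {
	VersioningEnabled optional.Bool     // nil means "don't change"
	DefaultTTL        optional.Duration // nil means "don't change"
}

func apply(a *bucketAttrs, u attrsToUpdate) {
	if u.VersioningEnabled != nil {
		a.versioning = optional.ToBool(u.VersioningEnabled) // panics on wrong type
	}
	if u.DefaultTTL != nil {
		a.ttl = optional.ToDuration(u.DefaultTTL)
	}
}
```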

+ 19
- 0
vendor/cloud.google.com/go/internal/version/update_version.sh View File

@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
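# Invoked via "go generate" from version.go below; go generate exports
# $GOFILE (the file containing the //go:generate directive), which the
# sed above rewrites in place.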


+ 71
- 0
vendor/cloud.google.com/go/internal/version/version.go View File

@@ -0,0 +1,71 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate ./update_version.sh

// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version

import (
"runtime"
"strings"
"unicode"
)

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20190802"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return ""
}

func notSemverRune(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
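
goVer is pure string surgery, so a few hand-traced cases pin its behavior down. A hypothetical table-driven test, not part of the vendored file:

```go
package version

import "testing"

// TestGoVer hand-checks the normalization rules implemented above.
func TestGoVer(t *testing.T) {
	cases := map[string]string{
		"go1.14":                  "1.14.0",       // padded to major.minor.patch
		"go1.13.8":                "1.13.8",       // already three parts
		"go1.14beta1":             "1.14.0-beta1", // prerelease split and reattached
		"devel +a1b2c3 Thu Jan 1": "a1b2c3",       // devel builds keep the hash
		"gccgo":                   "",             // unrecognized toolchains
	}
	for in, want := range cases {
		if got := goVer(in); got != want {
			t.Errorf("goVer(%q) = %q, want %q", in, got, want)
		}
	}
}
```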

+ 46
- 0
vendor/cloud.google.com/go/pubsub/README.md View File

@@ -0,0 +1,46 @@
## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)

- [About Cloud Pubsub](https://cloud.google.com/pubsub/)
- [API documentation](https://cloud.google.com/pubsub/docs)
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)

### Example Usage

First create a `pubsub.Client` to use throughout your application:

[snip]:# (pubsub-1)
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
log.Fatal(err)
}
```

Then use the client to publish and subscribe:

[snip]:# (pubsub-2)
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
res := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil {
log.Fatal(err)
}

// Use a callback to receive messages via subscription1.
sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
fmt.Println(m.Data)
m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil {
log.Println(err)
}
```

+ 9
- 0
vendor/cloud.google.com/go/pubsub/apiv1/README.md View File

@@ -0,0 +1,9 @@
Auto-generated pubsub v1 clients
=================================

This package includes auto-generated clients for the pubsub v1 API.

Use the handwritten client (in the parent directory,
cloud.google.com/go/pubsub) in preference to this.

This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.

+ 103
- 0
vendor/cloud.google.com/go/pubsub/apiv1/doc.go View File

@@ -0,0 +1,103 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

// Package pubsub is an auto-generated package for the
// Google Cloud Pub/Sub API.
//
// Provides reliable, many-to-many, asynchronous messaging between
// applications.
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit godoc.org/cloud.google.com/go.
//
// Use the client at cloud.google.com/go/pubsub in preference to this.
package pubsub // import "cloud.google.com/go/pubsub/apiv1"

import (
"context"
"runtime"
"strings"
"unicode"

"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/pubsub",
}
}

// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
func versionGo() string {
const develPrefix = "devel +"

s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}

notSemverRune := func(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}

if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}

const versionClient = "20190819"

+ 36
- 0
vendor/cloud.google.com/go/pubsub/apiv1/iam.go View File

@@ -0,0 +1,36 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"cloud.google.com/go/iam"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), subscription.Name)
}

func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), topic.Name)
}

func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), subscription.Name)
}

func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
return iam.InternalNewHandle(c.Connection(), topic.Name)
}

+ 95
- 0
vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go View File

@@ -0,0 +1,95 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

// PublisherProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func PublisherProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}

// PublisherTopicPath returns the path for the topic resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/topics/%s", project, topic)
// instead.
func PublisherTopicPath(project, topic string) string {
return "" +
"projects/" +
project +
"/topics/" +
topic +
""
}

// SubscriberProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func SubscriberProjectPath(project string) string {
return "" +
"projects/" +
project +
""
}

// SubscriberSnapshotPath returns the path for the snapshot resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot)
// instead.
func SubscriberSnapshotPath(project, snapshot string) string {
return "" +
"projects/" +
project +
"/snapshots/" +
snapshot +
""
}

// SubscriberSubscriptionPath returns the path for the subscription resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription)
// instead.
func SubscriberSubscriptionPath(project, subscription string) string {
return "" +
"projects/" +
project +
"/subscriptions/" +
subscription +
""
}

// SubscriberTopicPath returns the path for the topic resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/topics/%s", project, topic)
// instead.
func SubscriberTopicPath(project, topic string) string {
return "" +
"projects/" +
project +
"/topics/" +
topic +
""
}

+ 417
- 0
vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go View File

@@ -0,0 +1,417 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package pubsub

import (
"context"
"fmt"
"math"
"net/url"
"time"

"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// PublisherCallOptions contains the retry settings for each method of PublisherClient.
type PublisherCallOptions struct {
CreateTopic []gax.CallOption
UpdateTopic []gax.CallOption
Publish []gax.CallOption
GetTopic []gax.CallOption
ListTopics []gax.CallOption
ListTopicSubscriptions []gax.CallOption
DeleteTopic []gax.CallOption
}

func defaultPublisherClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("pubsub.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
}

func defaultPublisherCallOptions() *PublisherCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Aborted,
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"messaging", "publish"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Aborted,
codes.Canceled,
codes.DeadlineExceeded,
codes.Internal,
codes.ResourceExhausted,
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &PublisherCallOptions{
CreateTopic: retry[[2]string{"default", "non_idempotent"}],
UpdateTopic: retry[[2]string{"default", "non_idempotent"}],
Publish: retry[[2]string{"messaging", "publish"}],
GetTopic: retry[[2]string{"default", "idempotent"}],
ListTopics: retry[[2]string{"default", "idempotent"}],
ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}],
DeleteTopic: retry[[2]string{"default", "non_idempotent"}],
}
}
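
// The table above only supplies defaults. A caller can replace the retry
// policy of any method after constructing the client. The sketch below is
// illustrative and hedged: the backoff numbers are assumptions, not
// recommendations, and it reuses only gax symbols already imported here.
//
//	client, err := NewPublisherClient(ctx)
//	if err != nil {
//		// TODO: handle err
//	}
//	client.CallOptions.Publish = []gax.CallOption{
//		gax.WithRetry(func() gax.Retryer {
//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//				Initial:    250 * time.Millisecond,
//				Max:        30 * time.Second,
//				Multiplier: 2,
//			})
//		}),
//	}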

// PublisherClient is a client for interacting with Google Cloud Pub/Sub API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type PublisherClient struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
publisherClient pubsubpb.PublisherClient

// The call options for this service.
CallOptions *PublisherCallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewPublisherClient creates a new publisher client.
//
// The service that an application uses to manipulate topics, and to send
// messages to a topic.
func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &PublisherClient{
conn: conn,
CallOptions: defaultPublisherCallOptions(),

publisherClient: pubsubpb.NewPublisherClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}

// Connection returns the client's connection to the API service.
func (c *PublisherClient) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *PublisherClient) Close() error {
return c.conn.Close()
}

// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateTopic creates the given topic with the given name. See the
// <a href="https://cloud.google.com/pubsub/docs/admin#resource_names">
// resource name rules</a>.
func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateTopic updates an existing topic. Note that certain properties of a
// topic are not modifiable.
func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic
// does not exist.
func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
var resp *pubsubpb.PublishResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
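
// Example (hedged sketch): publishing a small batch with this generated
// client. The project and topic names below are hypothetical placeholders.
//
//	resp, err := client.Publish(ctx, &pubsubpb.PublishRequest{
//		Topic:    "projects/my-project/topics/my-topic",
//		Messages: []*pubsubpb.PubsubMessage{{Data: []byte("hello")}},
//	})
//	if err != nil {
//		// TODO: handle err
//	}
//	// resp.MessageIds holds one server-assigned ID per message.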

// GetTopic gets the configuration of a topic.
func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...)
var resp *pubsubpb.Topic
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListTopics lists matching topics.
func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
it := &TopicIterator{}
req = proto.Clone(req).(*pubsubpb.ListTopicsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {
var resp *pubsubpb.ListTopicsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Topics, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
it.pageInfo.Token = req.PageToken
return it
}
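
// Example (hedged sketch): draining the returned iterator. iterator.Done is
// the sentinel from google.golang.org/api/iterator; the project name is a
// placeholder.
//
//	it := client.ListTopics(ctx, &pubsubpb.ListTopicsRequest{
//		Project: "projects/my-project",
//	})
//	for {
//		topic, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: handle err
//		}
//		_ = topic // use the topic
//	}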

// ListTopicSubscriptions lists the names of the subscriptions on this topic.
func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
it := &StringIterator{}
req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
var resp *pubsubpb.ListTopicSubscriptionsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Subscriptions, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
it.pageInfo.Token = req.PageToken
return it
}

// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic
// does not exist. After a topic is deleted, a new topic may be created with
// the same name; this is an entirely new topic with none of the old
// configuration or subscriptions. Existing subscriptions to this topic are
// not deleted, but their topic field is set to _deleted-topic_.
func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// StringIterator manages a stream of string.
type StringIterator struct {
items []string
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *StringIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *StringIterator) Next() (string, error) {
var item string
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *StringIterator) bufLen() int {
return len(it.items)
}

func (it *StringIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

// TopicIterator manages a stream of *pubsubpb.Topic.
type TopicIterator struct {
items []*pubsubpb.Topic
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TopicIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TopicIterator) Next() (*pubsubpb.Topic, error) {
var item *pubsubpb.Topic
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *TopicIterator) bufLen() int {
return len(it.items)
}

func (it *TopicIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

+ 635
- 0
vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go

@@ -0,0 +1,635 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by gapic-generator. DO NOT EDIT.

package pubsub

import (
"context"
"fmt"
"math"
"net/url"
"time"

"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
type SubscriberCallOptions struct {
CreateSubscription []gax.CallOption
GetSubscription []gax.CallOption
UpdateSubscription []gax.CallOption
ListSubscriptions []gax.CallOption
DeleteSubscription []gax.CallOption
ModifyAckDeadline []gax.CallOption
Acknowledge []gax.CallOption
Pull []gax.CallOption
StreamingPull []gax.CallOption
ModifyPushConfig []gax.CallOption
ListSnapshots []gax.CallOption
CreateSnapshot []gax.CallOption
UpdateSnapshot []gax.CallOption
DeleteSnapshot []gax.CallOption
Seek []gax.CallOption
}

func defaultSubscriberClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("pubsub.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
}

func defaultSubscriberCallOptions() *SubscriberCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Aborted,
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"messaging", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Aborted,
codes.Unavailable,
codes.Unknown,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
{"messaging", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &SubscriberCallOptions{
CreateSubscription: retry[[2]string{"default", "idempotent"}],
GetSubscription: retry[[2]string{"default", "idempotent"}],
UpdateSubscription: retry[[2]string{"default", "non_idempotent"}],
ListSubscriptions: retry[[2]string{"default", "idempotent"}],
DeleteSubscription: retry[[2]string{"default", "non_idempotent"}],
ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}],
Acknowledge: retry[[2]string{"messaging", "non_idempotent"}],
Pull: retry[[2]string{"messaging", "idempotent"}],
StreamingPull: retry[[2]string{"streaming_messaging", "none"}],
ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}],
ListSnapshots: retry[[2]string{"default", "idempotent"}],
CreateSnapshot: retry[[2]string{"default", "non_idempotent"}],
UpdateSnapshot: retry[[2]string{"default", "non_idempotent"}],
DeleteSnapshot: retry[[2]string{"default", "non_idempotent"}],
Seek: retry[[2]string{"default", "idempotent"}],
}
}

// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type SubscriberClient struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
subscriberClient pubsubpb.SubscriberClient

// The call options for this service.
CallOptions *SubscriberCallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewSubscriberClient creates a new subscriber client.
//
// The service that an application uses to manipulate subscriptions and to
// consume messages from a subscription via the Pull method or by
// establishing a bi-directional stream using the StreamingPull method.
func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &SubscriberClient{
conn: conn,
CallOptions: defaultSubscriberCallOptions(),

subscriberClient: pubsubpb.NewSubscriberClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}

// Connection returns the client's connection to the API service.
func (c *SubscriberClient) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *SubscriberClient) Close() error {
return c.conn.Close()
}

// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", versionGo()}, keyval...)
kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// CreateSubscription creates a subscription to a given topic. See the
// <a href="https://cloud.google.com/pubsub/docs/admin#resource_names">
// resource name rules</a>.
// If the subscription already exists, returns ALREADY_EXISTS.
// If the corresponding topic doesn't exist, returns NOT_FOUND.
//
// If the name is not provided in the request, the server will assign a random
// name for this subscription on the same project as the topic, conforming
// to the
// resource name
// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The
// generated name is populated in the returned Subscription object. Note that
// for REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
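
// Example (hedged sketch): creating a pull subscription with a 10-second ack
// deadline. The resource names are hypothetical.
//
//	sub, err := client.CreateSubscription(ctx, &pubsubpb.Subscription{
//		Name:               "projects/my-project/subscriptions/my-sub",
//		Topic:              "projects/my-project/topics/my-topic",
//		AckDeadlineSeconds: 10,
//	})
//	if err != nil {
//		// TODO: handle err
//	}
//	_ = sub // the server echoes back the created subscription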

// GetSubscription gets the configuration details of a subscription.
func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateSubscription updates an existing subscription. Note that certain properties of a
// subscription, such as its topic, are not modifiable.
func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
var resp *pubsubpb.Subscription
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListSubscriptions lists matching subscriptions.
func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
it := &SubscriptionIterator{}
req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {
var resp *pubsubpb.ListSubscriptionsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Subscriptions, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
it.pageInfo.Token = req.PageToken
return it
}

// DeleteSubscription deletes an existing subscription. All messages retained in the subscription
// are immediately dropped. Calls to Pull after deletion will return
// NOT_FOUND. After a subscription is deleted, a new one may be created with
// the same name, but the new one has no association with the old
// subscription or its topic unless the same topic is specified.
func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful
// to indicate that more time is needed to process a message by the
// subscriber, or to make the message available for redelivery if the
// processing was interrupted. Note that this does not modify the
// subscription-level ackDeadlineSeconds used for subsequent messages.
func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// Acknowledge acknowledges the messages associated with the ack_ids in the
// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages
// from the subscription.
//
// Acknowledging a message whose ack deadline has expired may succeed,
// but such a message may be redelivered later. Acknowledging a message more
// than once will not result in an error.
func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// Pull pulls messages from the server. The server may return UNAVAILABLE if
// there are too many concurrent pull requests pending for the given
// subscription.
func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...)
var resp *pubsubpb.PullResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
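
// Example (hedged sketch): one synchronous pull followed by an Acknowledge
// for everything received. The subscription name is a placeholder.
//
//	const sub = "projects/my-project/subscriptions/my-sub"
//	resp, err := client.Pull(ctx, &pubsubpb.PullRequest{
//		Subscription: sub,
//		MaxMessages:  10,
//	})
//	if err != nil {
//		// TODO: handle err
//	}
//	var ackIDs []string
//	for _, m := range resp.ReceivedMessages {
//		// process m.Message.Data ...
//		ackIDs = append(ackIDs, m.AckId)
//	}
//	if len(ackIDs) > 0 {
//		if err := client.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{
//			Subscription: sub,
//			AckIds:       ackIDs,
//		}); err != nil {
//			// TODO: handle err
//		}
//	}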

// StreamingPull establishes a stream with the server, which sends messages down to the
// client. The client streams acknowledgements and ack deadline modifications
// back to the server. The server will close the stream and return the status
// on any error. The server may close the stream with status UNAVAILABLE to
// reassign server-side resources, in which case, the client should
// re-establish the stream. Flow control can be achieved by configuring the
// underlying RPC channel.
func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...)
var resp pubsubpb.Subscriber_StreamingPullClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
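
// Example (hedged sketch): the first StreamingPullRequest on the stream names
// the subscription and sets the stream-wide ack deadline; later requests
// carry acks. Field usage follows the pubsubpb types and is illustrative.
//
//	stream, err := client.StreamingPull(ctx)
//	if err != nil {
//		// TODO: handle err
//	}
//	if err := stream.Send(&pubsubpb.StreamingPullRequest{
//		Subscription:             "projects/my-project/subscriptions/my-sub",
//		StreamAckDeadlineSeconds: 10,
//	}); err != nil {
//		// TODO: handle err
//	}
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			// TODO: handle err
//		}
//		for _, m := range resp.ReceivedMessages {
//			_ = stream.Send(&pubsubpb.StreamingPullRequest{AckIds: []string{m.AckId}})
//		}
//	}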

// ModifyPushConfig modifies the PushConfig for a specified subscription.
//
// This may be used to change a push subscription to a pull one (signified by
// an empty PushConfig) or vice versa, or change the endpoint URL and other
// attributes of a push subscription. Messages will accumulate for delivery
// continuously through the call regardless of changes to the PushConfig.
func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// ListSnapshots lists the existing snapshots. Snapshots are used in
// <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
// operations, which allow
// you to manage message acknowledgments in bulk. That is, you can set the
// acknowledgment state of messages in an existing subscription to the state
// captured by a snapshot.
func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...)
it := &SnapshotIterator{}
req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest)
it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) {
var resp *pubsubpb.ListSnapshotsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Snapshots, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
it.pageInfo.MaxSize = int(req.PageSize)
it.pageInfo.Token = req.PageToken
return it
}

// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in
// <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
// operations, which allow
// you to manage message acknowledgments in bulk. That is, you can set the
// acknowledgment state of messages in an existing subscription to the state
// captured by a snapshot.
// <br><br>If the snapshot already exists, returns ALREADY_EXISTS.
// If the requested subscription doesn't exist, returns NOT_FOUND.
// If the backlog in the subscription is too old -- and the resulting snapshot
// would expire in less than 1 hour -- then FAILED_PRECONDITION is returned.
// See also the Snapshot.expire_time field. If the name is not provided in
// the request, the server will assign a random
// name for this snapshot on the same project as the subscription, conforming
// to the
// resource name
// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The
// generated name is populated in the returned Snapshot object. Note that for
// REST API requests, you must specify a name in the request.
func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...)
var resp *pubsubpb.Snapshot
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateSnapshot updates an existing snapshot. Snapshots are used in
// <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
// operations, which allow
// you to manage message acknowledgments in bulk. That is, you can set the
// acknowledgment state of messages in an existing subscription to the state
// captured by a snapshot.
func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...)
var resp *pubsubpb.Snapshot
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// DeleteSnapshot removes an existing snapshot. Snapshots are used in
// <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
// operations, which allow
// you to manage message acknowledgments in bulk. That is, you can set the
// acknowledgment state of messages in an existing subscription to the state
// captured by a snapshot.<br><br>
// When the snapshot is deleted, all messages retained in the snapshot
// are immediately dropped. After a snapshot is deleted, a new one may be
// created with the same name, but the new one has no association with the old
// snapshot or its subscription, unless the same subscription is specified.
func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// Seek seeks an existing subscription to a point in time or to a given snapshot,
// whichever is provided in the request. Snapshots are used in
// <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a>
// operations, which allow
// you to manage message acknowledgments in bulk. That is, you can set the
// acknowledgment state of messages in an existing subscription to the state
// captured by a snapshot. Note that both the subscription and the snapshot
// must be on the same topic.
func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) {
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...)
var resp *pubsubpb.SeekResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
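
// Example (hedged sketch): seeking a subscription back one hour. The oneof
// target uses pubsubpb.SeekRequest_Time; ptypes.TimestampProto is assumed
// from github.com/golang/protobuf/ptypes, and names are placeholders.
//
//	ts, err := ptypes.TimestampProto(time.Now().Add(-time.Hour))
//	if err != nil {
//		// TODO: handle err
//	}
//	_, err = client.Seek(ctx, &pubsubpb.SeekRequest{
//		Subscription: "projects/my-project/subscriptions/my-sub",
//		Target:       &pubsubpb.SeekRequest_Time{Time: ts},
//	})
//	if err != nil {
//		// TODO: handle err
//	}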

// SnapshotIterator manages a stream of *pubsubpb.Snapshot.
type SnapshotIterator struct {
items []*pubsubpb.Snapshot
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) {
var item *pubsubpb.Snapshot
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *SnapshotIterator) bufLen() int {
return len(it.items)
}

func (it *SnapshotIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

// SubscriptionIterator manages a stream of *pubsubpb.Subscription.
type SubscriptionIterator struct {
items []*pubsubpb.Subscription
pageInfo *iterator.PageInfo
nextFunc func() error

// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) {
var item *pubsubpb.Subscription
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}

func (it *SubscriptionIterator) bufLen() int {
return len(it.items)
}

func (it *SubscriptionIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}

+ 72
- 0
vendor/cloud.google.com/go/pubsub/debug.go

@@ -0,0 +1,72 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build psdebug

package pubsub

import (
"sync"
"time"
)

var (
dmu sync.Mutex
msgTraces = map[string][]Event{}
ackIDToMsgID = map[string]string{}
)

type Event struct {
Desc string
At time.Time
}

func MessageEvents(msgID string) []Event {
dmu.Lock()
defer dmu.Unlock()
return msgTraces[msgID]
}

func addRecv(msgID, ackID string, t time.Time) {
dmu.Lock()
defer dmu.Unlock()
ackIDToMsgID[ackID] = msgID
addEvent(msgID, "recv", t)
}

func addAcks(ackIDs []string) {
dmu.Lock()
defer dmu.Unlock()
now := time.Now()
for _, id := range ackIDs {
addEvent(ackIDToMsgID[id], "ack", now)
}
}

func addModAcks(ackIDs []string, deadlineSecs int32) {
dmu.Lock()
defer dmu.Unlock()
desc := "modack"
if deadlineSecs == 0 {
desc = "nack"
}
now := time.Now()
for _, id := range ackIDs {
addEvent(ackIDToMsgID[id], desc, now)
}
}

func addEvent(msgID, desc string, t time.Time) {
msgTraces[msgID] = append(msgTraces[msgID], Event{desc, t})
}

+ 140
- 0
vendor/cloud.google.com/go/pubsub/doc.go

@@ -0,0 +1,140 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
messages, hiding the details of the underlying server RPCs. Google Cloud
Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
and receivers.

More information about Google Cloud Pub/Sub is available at
https://cloud.google.com/pubsub/docs

See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.


Publishing

Google Cloud Pub/Sub messages are published to topics. Topics may be created
using the pubsub package like so:

topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")

Messages may then be published to a topic:

res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})

Publish queues the message for publishing and returns immediately. When enough
messages have accumulated, or enough time has elapsed, the batch of messages is
sent to the Pub/Sub service.

Publish returns a PublishResult, which behaves like a future: its Get method
blocks until the message has been sent to the service.

The first time you call Publish on a topic, goroutines are started in the
background. To clean up these goroutines, call Stop:

topic.Stop()


Receiving

To receive messages published to a topic, clients create subscriptions
to the topic. There may be more than one subscription per topic; each message
that is published to the topic will be delivered to all of its subscriptions.

Subscriptions may be created like so:

sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
pubsub.SubscriptionConfig{Topic: topic})

Messages are then consumed from a subscription via callback.

err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
log.Printf("Got message: %s", m.Data)
m.Ack()
})
if err != nil {
// Handle error.
}

The callback is invoked concurrently by multiple goroutines, maximizing
throughput. To terminate a call to Receive, cancel its context.

Once client code has processed the message, it must call Message.Ack or
Message.Nack; otherwise the message will eventually be redelivered. If the
client cannot or doesn't want to process the message, it can call Message.Nack
to speed redelivery. For more information and configuration options, see
"Deadlines" below.

Note: It is possible for Messages to be redelivered, even if Message.Ack has
been called. Client code must be robust to multiple deliveries of messages.

Note: This uses pubsub's streaming pull feature. This feature has properties that
may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull
for more details on how streaming pull behaves compared to the synchronous
pull method.


Deadlines

The default pubsub deadlines are suitable for most use cases, but may be
overridden. This section describes the tradeoffs that should be considered
when overriding the defaults.

Behind the scenes, each message returned by the Pub/Sub server has an
associated lease, known as an "ACK deadline". Unless a message is
acknowledged within the ACK deadline, or the client requests that
the ACK deadline be extended, the message will become eligible for redelivery.

As a convenience, the pubsub client will automatically extend deadlines until
either:
* Message.Ack or Message.Nack is called, or
* The "MaxExtension" period elapses from the time the message is fetched from the server.

ACK deadlines are extended periodically by the client. The initial ACK
deadline given to messages is 10s. The period between extensions, as well as the
length of the extension, automatically adjust depending on the time it takes to ack
messages, up to 10m. This has the effect that subscribers that process messages
quickly have their message ack deadlines extended for a short amount, whereas
subscribers that process messages slowly have their message ack deadlines extended
for a large amount. The net effect is fewer RPCs sent from the client library.

For example, consider a subscriber that takes 3 minutes to process each message.
Since the library has already recorded several 3 minute "time to ack"s in a
percentile distribution, future message extensions are sent with a value of 3
minutes, every 3 minutes. Suppose the application crashes 5 seconds after the
library sends such an extension: the Pub/Sub server would wait the remaining
2m55s before re-sending the messages out to other subscribers.

Please note that the client library does not use the subscription's AckDeadline
by default. To enforce the subscription AckDeadline, set MaxExtension to the
subscription's AckDeadline:

cfg, err := sub.Config(ctx)
if err != nil {
// TODO: handle err
}

sub.ReceiveSettings.MaxExtension = cfg.AckDeadline


Slow Message Processing

For use cases where message processing exceeds 30 minutes, we recommend using
the base client in a pull model, since long-lived streams are periodically killed
by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing
*/
package pubsub // import "cloud.google.com/go/pubsub"
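
// Example (hedged sketch): a complete publish round-trip with the high-level
// client described above. The project ID is a placeholder; res.Get blocks
// until the service has assigned the message an ID.
//
//	client, err := pubsub.NewClient(ctx, "my-project")
//	if err != nil {
//		// TODO: handle err
//	}
//	topic := client.Topic("topic-name")
//	defer topic.Stop()
//	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
//	id, err := res.Get(ctx)
//	if err != nil {
//		// TODO: handle err
//	}
//	_ = id // server-assigned message ID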

+ 122
- 0
vendor/cloud.google.com/go/pubsub/flow_controller.go

@@ -0,0 +1,122 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"sync/atomic"

"golang.org/x/sync/semaphore"
)

// flowController implements flow control for Subscription.Receive.
type flowController struct {
maxCount int
maxSize int // max total size of messages
semCount, semSize *semaphore.Weighted // enforces max number and size of messages
// Number of calls to acquire - number of calls to release. This can go
// negative if semCount == nil and a large acquire is followed by multiple
// small releases.
// Atomic.
countRemaining int64
}

// newFlowController creates a new flowController that ensures no more than
// maxCount messages or maxSize bytes are outstanding at once. If maxCount or
// maxSize is < 1, then an unlimited number of messages or bytes is permitted,
// respectively.
func newFlowController(maxCount, maxSize int) *flowController {
fc := &flowController{
maxCount: maxCount,
maxSize: maxSize,
semCount: nil,
semSize: nil,
}
if maxCount > 0 {
fc.semCount = semaphore.NewWeighted(int64(maxCount))
}
if maxSize > 0 {
fc.semSize = semaphore.NewWeighted(int64(maxSize))
}
return fc
}

// acquire blocks until one message of size bytes can proceed or ctx is done.
// It returns nil in the first case, or ctx.Err() in the second.
//
// acquire allows large messages to proceed by treating a size greater than maxSize
// as if it were equal to maxSize.
func (f *flowController) acquire(ctx context.Context, size int) error {
if f.semCount != nil {
if err := f.semCount.Acquire(ctx, 1); err != nil {
return err
}
}
if f.semSize != nil {
if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil {
if f.semCount != nil {
f.semCount.Release(1)
}
return err
}
}
atomic.AddInt64(&f.countRemaining, 1)
return nil
}

// tryAcquire returns false if acquire would block. Otherwise, it behaves like
// acquire and returns true.
//
// tryAcquire allows large messages to proceed by treating a size greater than
// maxSize as if it were equal to maxSize.
func (f *flowController) tryAcquire(size int) bool {
if f.semCount != nil {
if !f.semCount.TryAcquire(1) {
return false
}
}
if f.semSize != nil {
if !f.semSize.TryAcquire(f.bound(size)) {
if f.semCount != nil {
f.semCount.Release(1)
}
return false
}
}
atomic.AddInt64(&f.countRemaining, 1)
return true
}

// release notes that one message of size bytes is no longer outstanding.
func (f *flowController) release(size int) {
atomic.AddInt64(&f.countRemaining, -1)
if f.semCount != nil {
f.semCount.Release(1)
}
if f.semSize != nil {
f.semSize.Release(f.bound(size))
}
}

func (f *flowController) bound(size int) int64 {
if size > f.maxSize {
return int64(f.maxSize)
}
return int64(size)
}

func (f *flowController) count() int {
return int(atomic.LoadInt64(&f.countRemaining))
}
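
// Example (hedged, in-package sketch): how Receive-style code might gate
// message dispatch on the controller. handle and msg are hypothetical.
//
//	fc := newFlowController(10, 1<<20) // at most 10 messages and 1 MiB outstanding
//	if err := fc.acquire(ctx, len(msg.Data)); err != nil {
//		return err // ctx was cancelled while waiting for capacity
//	}
//	go func() {
//		defer fc.release(len(msg.Data))
//		handle(msg)
//	}()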

+ 79
- 0
vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go

@@ -0,0 +1,79 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package distribution

import (
"log"
"math"
"sort"
"sync"
"sync/atomic"
)

// D is a distribution. Methods of D can be called concurrently by multiple
// goroutines.
type D struct {
buckets []uint64
// sumsReuse is the scratch space that is reused
// to store sums during invocations of Percentile.
// After an invocation of New(n):
// len(buckets) == len(sumsReuse) == n
sumsReuse []uint64
mu sync.Mutex
}

// New creates a new distribution capable of holding values from 0 to n-1.
func New(n int) *D {
return &D{
buckets: make([]uint64, n),
sumsReuse: make([]uint64, n),
}
}

// Record records value v to the distribution.
// To help with distributions with long tails, if v is larger than the maximum value,
// Record records the maximum value instead.
// If v is negative, Record panics.
func (d *D) Record(v int) {
if v < 0 {
log.Panicf("Record: value out of range: %d", v)
} else if v >= len(d.buckets) {
v = len(d.buckets) - 1
}
atomic.AddUint64(&d.buckets[v], 1)
}

// Percentile computes the p-th percentile of the distribution where
// p is between 0 and 1. This method may be called by multiple goroutines.
func (d *D) Percentile(p float64) int {
// NOTE: This implementation uses the nearest-rank method.
// https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method

if p < 0 || p > 1 {
log.Panicf("Percentile: percentile out of range: %f", p)
}

d.mu.Lock()
defer d.mu.Unlock()

var sum uint64
for i := range d.sumsReuse {
sum += atomic.LoadUint64(&d.buckets[i])
d.sumsReuse[i] = sum
}

target := uint64(math.Ceil(float64(sum) * p))
return sort.Search(len(d.sumsReuse), func(i int) bool { return d.sumsReuse[i] >= target })
}
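
// Example (hedged sketch): the pubsub package records ack latencies in
// seconds and reads a high percentile back. Note this is an internal
// package, so it is only importable from within cloud.google.com/go.
//
//	d := distribution.New(61) // buckets for values 0..60 seconds
//	for _, secs := range []int{3, 5, 5, 8, 120} {
//		d.Record(secs) // 120 is clamped into the top bucket
//	}
//	p99 := d.Percentile(0.99) // nearest-rank 99th percentile
//	_ = p99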

+ 527
- 0
vendor/cloud.google.com/go/pubsub/iterator.go

@@ -0,0 +1,527 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"io"
"sync"
"time"

vkit "cloud.google.com/go/pubsub/apiv1"
"cloud.google.com/go/pubsub/internal/distribution"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// Between message receipt and ack (that is, the time spent processing a message) we want to extend the message
// deadline by way of modack. However, we don't want to wait until the deadline is about to expire before
// extending it; instead, we extend the deadline a little bit of time ahead. gracePeriod is that amount of time
// ahead of the actual deadline.
const gracePeriod = 5 * time.Second

type messageIterator struct {
ctx context.Context
cancel func() // the function that will cancel ctx; called in stop
po *pullOptions
ps *pullStream
subc *vkit.SubscriberClient
subName string
kaTick <-chan time.Time // keep-alive (deadline extensions)
ackTicker *time.Ticker // message acks
nackTicker *time.Ticker // message nacks (more frequent than acks)
pingTicker *time.Ticker // sends to the stream to keep it open
failed chan struct{} // closed on stream error
drained chan struct{} // closed when stopped && no more pending messages
wg sync.WaitGroup

mu sync.Mutex
ackTimeDist *distribution.D // dist uses seconds

// keepAliveDeadlines is a map of id to expiration time. This map is used in conjunction with
// subscription.ReceiveSettings.MaxExtension to record the maximum amount of time (the
// deadline, more specifically) we're willing to extend a message's ack deadline. As each
// message arrives, we'll record now+MaxExtension in this table; whenever we have a chance
// to update ack deadlines (via modack), we'll consult this table and only include IDs
// that are not beyond their deadline.
keepAliveDeadlines map[string]time.Time
pendingAcks map[string]bool
pendingNacks map[string]bool
pendingModAcks map[string]bool // ack IDs whose ack deadline is to be modified
err error // error from stream failure
}

// newMessageIterator starts and returns a new messageIterator.
// subName is the full name of the subscription to pull messages from.
// Stop must be called on the messageIterator when it is no longer needed.
// The iterator always uses the background context for acking messages and extending message deadlines.
func newMessageIterator(subc *vkit.SubscriberClient, subName string, po *pullOptions) *messageIterator {
var ps *pullStream
if !po.synchronous {
ps = newPullStream(context.Background(), subc.StreamingPull, subName)
}
// The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending
// the first keepAlive halfway towards the minimum ack deadline.
keepAlivePeriod := minAckDeadline / 2

// Ack promptly so users don't lose work if client crashes.
ackTicker := time.NewTicker(100 * time.Millisecond)
nackTicker := time.NewTicker(100 * time.Millisecond)
pingTicker := time.NewTicker(30 * time.Second)
cctx, cancel := context.WithCancel(context.Background())
it := &messageIterator{
ctx: cctx,
cancel: cancel,
ps: ps,
po: po,
subc: subc,
subName: subName,
kaTick: time.After(keepAlivePeriod),
ackTicker: ackTicker,
nackTicker: nackTicker,
pingTicker: pingTicker,
failed: make(chan struct{}),
drained: make(chan struct{}),
ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1),
keepAliveDeadlines: map[string]time.Time{},
pendingAcks: map[string]bool{},
pendingNacks: map[string]bool{},
pendingModAcks: map[string]bool{},
}
it.wg.Add(1)
go it.sender()
return it
}

// Subscription.receive will call stop on its messageIterator when finished with it.
// Stop will block until Done has been called on all Messages that have been
// returned by Next, or until the context with which the messageIterator was created
// is cancelled or exceeds its deadline.
func (it *messageIterator) stop() {
it.cancel()
it.mu.Lock()
it.checkDrained()
it.mu.Unlock()
it.wg.Wait()
}

// checkDrained closes the drained channel if the iterator has been stopped and all
// pending messages have either been n/acked or expired.
//
// Called with the lock held.
func (it *messageIterator) checkDrained() {
select {
case <-it.drained:
return
default:
}
select {
case <-it.ctx.Done():
if len(it.keepAliveDeadlines) == 0 {
close(it.drained)
}
default:
}
}

// Called when a message is acked/nacked.
func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) {
it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second))
it.mu.Lock()
defer it.mu.Unlock()
delete(it.keepAliveDeadlines, ackID)
if ack {
it.pendingAcks[ackID] = true
} else {
it.pendingNacks[ackID] = true
}
it.checkDrained()
}

// fail is called when a stream method returns a permanent error.
// fail returns it.err. This may be err, or it may be the error
// set by an earlier call to fail.
func (it *messageIterator) fail(err error) error {
it.mu.Lock()
defer it.mu.Unlock()
if it.err == nil {
it.err = err
close(it.failed)
}
return it.err
}

// receive makes a call to the stream's Recv method, or the Pull RPC, and returns
// its messages.
// maxToPull is the maximum number of messages for the Pull RPC.
func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) {
it.mu.Lock()
ierr := it.err
it.mu.Unlock()
if ierr != nil {
return nil, ierr
}

// Stop retrieving messages if the iterator's Stop method was called.
select {
case <-it.ctx.Done():
it.wg.Wait()
return nil, io.EOF
default:
}

var rmsgs []*pb.ReceivedMessage
var err error
if it.po.synchronous {
rmsgs, err = it.pullMessages(maxToPull)
} else {
rmsgs, err = it.recvMessages()
}
// Any error here is fatal.
if err != nil {
return nil, it.fail(err)
}
msgs, err := convertMessages(rmsgs)
if err != nil {
return nil, it.fail(err)
}
// We received some messages. Remember them so we can keep them alive. Also,
// do a receipt mod-ack when streaming.
maxExt := time.Now().Add(it.po.maxExtension)
ackIDs := map[string]bool{}
it.mu.Lock()
now := time.Now()
for _, m := range msgs {
m.receiveTime = now
addRecv(m.ID, m.ackID, now)
m.doneFunc = it.done
it.keepAliveDeadlines[m.ackID] = maxExt
// Don't change the mod-ack if the message is going to be nacked. This is
// possible if there are retries.
if !it.pendingNacks[m.ackID] {
ackIDs[m.ackID] = true
}
}
deadline := it.ackDeadline()
it.mu.Unlock()
if len(ackIDs) > 0 {
if !it.sendModAck(ackIDs, deadline) {
return nil, it.err
}
}
return msgs, nil
}

// Get messages using the Pull RPC.
// This may block indefinitely. It may also return zero messages, after some time waiting.
func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, error) {
// Use it.ctx as the RPC context, so that if the iterator is stopped, the call
// will return immediately.
res, err := it.subc.Pull(it.ctx, &pb.PullRequest{
Subscription: it.subName,
MaxMessages: maxToPull,
}, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
switch {
case err == context.Canceled:
return nil, nil
case err != nil:
return nil, err
default:
return res.ReceivedMessages, nil
}
}

func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) {
res, err := it.ps.Recv()
if err != nil {
return nil, err
}
return res.ReceivedMessages, nil
}

// sender runs in a goroutine and handles all sends to the stream.
func (it *messageIterator) sender() {
defer it.wg.Done()
defer it.ackTicker.Stop()
defer it.nackTicker.Stop()
defer it.pingTicker.Stop()
defer func() {
if it.ps != nil {
it.ps.CloseSend()
}
}()

done := false
for !done {
sendAcks := false
sendNacks := false
sendModAcks := false
sendPing := false

dl := it.ackDeadline()

select {
case <-it.failed:
// Stream failed: nothing to do, so stop immediately.
return

case <-it.drained:
// All outstanding messages have been marked done:
// nothing left to do except make the final calls.
it.mu.Lock()
sendAcks = (len(it.pendingAcks) > 0)
sendNacks = (len(it.pendingNacks) > 0)
// No point in sending modacks.
done = true

case <-it.kaTick:
it.mu.Lock()
it.handleKeepAlives()
sendModAcks = (len(it.pendingModAcks) > 0)

nextTick := dl - gracePeriod
if nextTick <= 0 {
// If the deadline is <= gracePeriod, let's tick again halfway to
// the deadline.
nextTick = dl / 2
}
it.kaTick = time.After(nextTick)

case <-it.nackTicker.C:
it.mu.Lock()
sendNacks = (len(it.pendingNacks) > 0)

case <-it.ackTicker.C:
it.mu.Lock()
sendAcks = (len(it.pendingAcks) > 0)

case <-it.pingTicker.C:
it.mu.Lock()
// Ping only if we are processing messages via streaming.
sendPing = !it.po.synchronous && (len(it.keepAliveDeadlines) > 0)
}
// Lock is held here.
var acks, nacks, modAcks map[string]bool
if sendAcks {
acks = it.pendingAcks
it.pendingAcks = map[string]bool{}
}
if sendNacks {
nacks = it.pendingNacks
it.pendingNacks = map[string]bool{}
}
if sendModAcks {
modAcks = it.pendingModAcks
it.pendingModAcks = map[string]bool{}
}
it.mu.Unlock()
// Make Ack and ModAck RPCs.
if sendAcks {
if !it.sendAck(acks) {
return
}
}
if sendNacks {
// Nack indicated by modifying the deadline to zero.
if !it.sendModAck(nacks, 0) {
return
}
}
if sendModAcks {
if !it.sendModAck(modAcks, dl) {
return
}
}
if sendPing {
it.pingStream()
}
}
}

// handleKeepAlives modifies the pending request to include deadline extensions
// for live messages. It also purges expired messages.
//
// Called with the lock held.
func (it *messageIterator) handleKeepAlives() {
now := time.Now()
for id, expiry := range it.keepAliveDeadlines {
if expiry.Before(now) {
// This delete will not result in skipping any map items, as implied by
// the spec at https://golang.org/ref/spec#For_statements, "For
// statements with range clause", note 3, and stated explicitly at
// https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ.
delete(it.keepAliveDeadlines, id)
} else {
// This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines.
it.pendingModAcks[id] = true
}
}
it.checkDrained()
}

func (it *messageIterator) sendAck(m map[string]bool) bool {
// Account for the Subscription field.
overhead := calcFieldSizeString(it.subName)
return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error {
recordStat(it.ctx, AckCount, int64(len(ids)))
addAcks(ids)
// Use context.Background() as the call's context, not it.ctx. We don't
// want to cancel this RPC when the iterator is stopped.
return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{
Subscription: it.subName,
AckIds: ids,
})
})
}

// The receipt mod-ack amount is derived from a percentile distribution based
// on the time it takes to process messages. The percentile chosen is the 99th
// percentile, in order to capture the longest processing times needed without
// considering the top 1% of outliers.
func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool {
deadlineSec := int32(deadline / time.Second)
// Account for the Subscription and AckDeadlineSeconds fields.
overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec))
return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error {
if deadline == 0 {
recordStat(it.ctx, NackCount, int64(len(ids)))
} else {
recordStat(it.ctx, ModAckCount, int64(len(ids)))
}
addModAcks(ids, deadlineSec)
// Retry this RPC on Unavailable for a short amount of time, then give up
// without returning a fatal error. The utility of this RPC is by nature
// transient (since the deadline is relative to the current time) and it
// isn't crucial for correctness (since expired messages will just be
// resent).
cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
bo := gax.Backoff{
Initial: 100 * time.Millisecond,
Max: time.Second,
Multiplier: 2,
}
for {
err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{
Subscription: it.subName,
AckDeadlineSeconds: deadlineSec,
AckIds: ids,
})
switch status.Code(err) {
case codes.Unavailable:
if err := gax.Sleep(cctx, bo.Pause()); err == nil {
continue
}
// Treat sleep timeout like RPC timeout.
fallthrough
case codes.DeadlineExceeded:
// Timeout. Not a fatal error, but note that it happened.
recordStat(it.ctx, ModAckTimeoutCount, 1)
return nil
default:
// Any other error is fatal.
return err
}
}
})
}

func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool {
ackIDs := make([]string, 0, len(ackIDSet))
for k := range ackIDSet {
ackIDs = append(ackIDs, k)
}
var toSend []string
for len(ackIDs) > 0 {
toSend, ackIDs = splitRequestIDs(ackIDs, maxSize)
if err := call(toSend); err != nil {
// The underlying client handles retries, so any error is fatal to the
// iterator.
it.fail(err)
return false
}
}
return true
}

// Send a message to the stream to keep it open. The stream will close if there's no
// traffic on it for a while. By keeping it open, we delay the start of the
// expiration timer on messages that are buffered by gRPC or elsewhere in the
// network. This matters if it takes a long time to process messages relative to the
// default ack deadline, and if the messages are small enough so that many can fit
// into the buffer.
func (it *messageIterator) pingStream() {
// Ignore error; if the stream is broken, this doesn't matter anyway.
_ = it.ps.Send(&pb.StreamingPullRequest{})
}

// calcFieldSizeString returns the number of bytes string fields
// will take up in an encoded proto message.
func calcFieldSizeString(fields ...string) int {
overhead := 0
for _, field := range fields {
overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field)))
}
return overhead
}

// calcFieldSizeInt returns the number of bytes int fields
// will take up in an encoded proto message.
func calcFieldSizeInt(fields ...int) int {
overhead := 0
for _, field := range fields {
overhead += 1 + proto.SizeVarint(uint64(field))
}
return overhead
}

// splitRequestIDs takes a slice of ackIDs and returns two slices such that the first
// ackID slice can be used in a request where the payload does not exceed maxSize.
func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) {
size := 0
i := 0
// TODO(hongalex): Use binary search to find split index, since ackIDs are
// fairly constant.
for size < maxSize && i < len(ids) {
size += calcFieldSizeString(ids[i])
i++
}
if size > maxSize {
i--
}
return ids[:i], ids[i:]
}
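
To see the splitting behavior concretely, here is a standalone sketch (not the vendored code) that mirrors the greedy packing above; fieldSize copies calcFieldSizeString's varint accounting, and the IDs and maxSize are made up for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// fieldSize mirrors calcFieldSizeString for a single string field:
// 1 tag byte + varint length prefix + payload bytes.
func fieldSize(s string) int {
	return 1 + len(s) + proto.SizeVarint(uint64(len(s)))
}

// split mirrors splitRequestIDs: pack IDs greedily until maxSize,
// then back off one if the last ID overshot the budget.
func split(ids []string, maxSize int) (prefix, remainder []string) {
	size, i := 0, 0
	for size < maxSize && i < len(ids) {
		size += fieldSize(ids[i])
		i++
	}
	if size > maxSize {
		i--
	}
	return ids[:i], ids[i:]
}

func main() {
	ids := []string{"ack-1", "ack-2", "ack-3", "ack-4"}
	head, rest := split(ids, 16) // 16 bytes: room for roughly two short IDs
	fmt.Println(head, rest)      // [ack-1 ack-2] [ack-3 ack-4]
}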

// The deadline to ack is derived from a percentile distribution based
// on the time it takes to process messages. The percentile chosen is the 99th
// percentile - that is, processing times up to the 99th percentile of the
// longest processing times should be safe. The highest 1% may expire. This
// number was chosen as a way to cover most users' use cases without losing
// the value of expiration.
func (it *messageIterator) ackDeadline() time.Duration {
pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second

if pt > maxAckDeadline {
return maxAckDeadline
}
if pt < minAckDeadline {
return minAckDeadline
}
return pt
}

+ 100
- 0
vendor/cloud.google.com/go/pubsub/message.go

@@ -0,0 +1,100 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"time"

"github.com/golang/protobuf/ptypes"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)

// Message represents a Pub/Sub message.
type Message struct {
// ID identifies this message.
// This ID is assigned by the server and is populated for Messages obtained from a subscription.
// This field is read-only.
ID string

// Data is the actual data in the message.
Data []byte

// Attributes represents the key-value pairs the current message
// is labelled with.
Attributes map[string]string

// ackID is the identifier to acknowledge this message.
ackID string

// The time at which the message was published.
// This is populated by the server for Messages obtained from a subscription.
// This field is read-only.
PublishTime time.Time

// receiveTime is the time the message was received by the client.
receiveTime time.Time

// size is the approximate size of the message's data and attributes.
size int

calledDone bool

// The done method of the iterator that created this Message.
doneFunc func(string, bool, time.Time)
}

func toMessage(resp *pb.ReceivedMessage) (*Message, error) {
if resp.Message == nil {
return &Message{ackID: resp.AckId}, nil
}

pubTime, err := ptypes.Timestamp(resp.Message.PublishTime)
if err != nil {
return nil, err
}
return &Message{
ackID: resp.AckId,
Data: resp.Message.Data,
Attributes: resp.Message.Attributes,
ID: resp.Message.MessageId,
PublishTime: pubTime,
}, nil
}

// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// If message acknowledgement fails, the Message will be redelivered.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Ack() {
m.done(true)
}

// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Nack() {
m.done(false)
}

func (m *Message) done(ack bool) {
if m.calledDone {
return
}
m.calledDone = true
m.doneFunc(m.ackID, ack, m.receiveTime)
}
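
The Ack/Nack contract documented above (call exactly one per message; later calls are no-ops) is easiest to see inside a Receive callback. A hedged sketch, where the project and subscription IDs and the process helper are hypothetical:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-sub") // hypothetical subscription ID
	err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		if err := process(m.Data); err != nil {
			m.Nack() // redeliver promptly instead of waiting for the deadline
			return
		}
		m.Ack() // a second Ack or Nack here would be a no-op
	})
	if err != nil {
		log.Fatal(err)
	}
}

// process stands in for real message handling.
func process(data []byte) error { return nil }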

+ 25
- 0
vendor/cloud.google.com/go/pubsub/nodebug.go

@@ -0,0 +1,25 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !psdebug

package pubsub

import "time"

func addRecv(string, string, time.Time) {}

func addAcks([]string) {}

func addModAcks([]string, int32) {}

+ 108
- 0
vendor/cloud.google.com/go/pubsub/pubsub.go

@@ -0,0 +1,108 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub // import "cloud.google.com/go/pubsub"

import (
"context"
"fmt"
"os"
"runtime"
"time"

"cloud.google.com/go/internal/version"
vkit "cloud.google.com/go/pubsub/apiv1"
"google.golang.org/api/option"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)

const (
// ScopePubSub grants permissions to view and manage Pub/Sub
// topics and subscriptions.
ScopePubSub = "https://www.googleapis.com/auth/pubsub"

// ScopeCloudPlatform grants permissions to view and manage your data
// across Google Cloud Platform services.
ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"

maxAckDeadline = 10 * time.Minute
)

// Client is a Google Pub/Sub client scoped to a single project.
//
// Clients should be reused rather than being created as needed.
// A Client may be shared by multiple goroutines.
type Client struct {
projectID string
pubc *vkit.PublisherClient
subc *vkit.SubscriberClient
}

// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) {
var o []option.ClientOption
// Environment variables for gcloud emulator:
// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
// Create multiple connections to increase throughput.
option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 5 * time.Minute,
})),
}
o = append(o, openCensusOptions()...)
}
o = append(o, opts...)
pubc, err := vkit.NewPublisherClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("pubsub: %v", err)
}
subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection()))
if err != nil {
// Should never happen, since we are passing in the connection.
// If it does, we cannot close, because the user may have passed in their
// own connection originally.
return nil, fmt.Errorf("pubsub: %v", err)
}
pubc.SetGoogleClientInfo("gccl", version.Repo)
return &Client{
projectID: projectID,
pubc: pubc,
subc: subc,
}, nil
}

// Close releases any resources held by the client,
// such as memory and goroutines.
//
// If the client is available for the lifetime of the program, then Close need not be
// called at exit.
func (c *Client) Close() error {
// Return the first error, because the first call closes the connection.
err := c.pubc.Close()
_ = c.subc.Close()
return err
}

func (c *Client) fullyQualifiedProjectName() string {
return fmt.Sprintf("projects/%s", c.projectID)
}
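
Because NewClient checks PUBSUB_EMULATOR_HOST before dialing, the same code can target the gcloud emulator in tests. A minimal sketch, assuming the emulator listens on localhost:8085 (the address gcloud prints may differ):

package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/pubsub"
)

func main() {
	// Assumed emulator address; `gcloud beta emulators pubsub start` prints the real one.
	os.Setenv("PUBSUB_EMULATOR_HOST", "localhost:8085")

	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "test-project") // any project ID works against the emulator
	if err != nil {
		log.Fatal(err)
	}
	// The first Close closes the shared gRPC connection; its error is the one reported.
	defer client.Close()
}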

+ 192
- 0
vendor/cloud.google.com/go/pubsub/pullstream.go

@@ -0,0 +1,192 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"io"
"sync"
"time"

gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc"
)

// A pullStream supports the methods of a StreamingPullClient, but re-opens
// the stream on a retryable error.
type pullStream struct {
ctx context.Context
open func() (pb.Subscriber_StreamingPullClient, error)

mu sync.Mutex
spc *pb.Subscriber_StreamingPullClient
err error // permanent error
}

// for testing
type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error)

func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream {
ctx = withSubscriptionKey(ctx, subName)
return &pullStream{
ctx: ctx,
open: func() (pb.Subscriber_StreamingPullClient, error) {
spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
if err == nil {
recordStat(ctx, StreamRequestCount, 1)
err = spc.Send(&pb.StreamingPullRequest{
Subscription: subName,
// We modack messages when we receive them, so this value doesn't matter too much.
StreamAckDeadlineSeconds: 60,
})
}
if err != nil {
return nil, err
}
return spc, nil
},
}
}

// get returns either a valid *StreamingPullClient (SPC), or a permanent error.
// If the argument is nil, this is the first call for an RPC, and the current
// SPC will be returned (or a new one will be opened). Otherwise, this call is a
// request to re-open the stream because of a retryable error, and the argument
// is a pointer to the SPC that returned the error.
func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber_StreamingPullClient, error) {
s.mu.Lock()
defer s.mu.Unlock()
// A stored error is permanent.
if s.err != nil {
return nil, s.err
}
// If the context is done, so are we.
s.err = s.ctx.Err()
if s.err != nil {
return nil, s.err
}

// If the current and argument SPCs differ, return the current one. This subsumes two cases:
// 1. We have an SPC and the caller is getting the stream for the first time.
// 2. The caller wants to retry, but they have an older SPC; we've already retried.
if spc != s.spc {
return s.spc, nil
}
// Either this is the very first call on this stream (s.spc == nil), or we have a valid
// retry request. Either way, open a new stream.
// The lock is held here for a long time, but it doesn't matter because no callers could get
// anything done anyway.
s.spc = new(pb.Subscriber_StreamingPullClient)
*s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent.
return s.spc, s.err
}

func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) {
r := defaultRetryer{}
for {
recordStat(s.ctx, StreamOpenCount, 1)
spc, err := s.open()
bo, shouldRetry := r.Retry(err)
if err != nil && shouldRetry {
recordStat(s.ctx, StreamRetryCount, 1)
if err := gax.Sleep(s.ctx, bo); err != nil {
return nil, err
}
continue
}
return spc, err
}
}

func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error, opts ...gax.CallOption) error {
var settings gax.CallSettings
for _, opt := range opts {
opt.Resolve(&settings)
}
var r gax.Retryer = &defaultRetryer{}
if settings.Retry != nil {
r = settings.Retry()
}

var (
spc *pb.Subscriber_StreamingPullClient
err error
)
for {
spc, err = s.get(spc)
if err != nil {
return err
}
start := time.Now()
err = f(*spc)
if err != nil {
bo, shouldRetry := r.Retry(err)
if shouldRetry {
recordStat(s.ctx, StreamRetryCount, 1)
if time.Since(start) < 30*time.Second { // don't sleep if we've been blocked for a while
if err := gax.Sleep(s.ctx, bo); err != nil {
return err
}
}
continue
}
s.mu.Lock()
s.err = err
s.mu.Unlock()
}
return err
}
}

func (s *pullStream) Send(req *pb.StreamingPullRequest) error {
return s.call(func(spc pb.Subscriber_StreamingPullClient) error {
recordStat(s.ctx, AckCount, int64(len(req.AckIds)))
zeroes := 0
for _, mds := range req.ModifyDeadlineSeconds {
if mds == 0 {
zeroes++
}
}
recordStat(s.ctx, NackCount, int64(zeroes))
recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes))
recordStat(s.ctx, StreamRequestCount, 1)
return spc.Send(req)
})
}

func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) {
var res *pb.StreamingPullResponse
err := s.call(func(spc pb.Subscriber_StreamingPullClient) error {
var err error
recordStat(s.ctx, StreamResponseCount, 1)
res, err = spc.Recv()
if err == nil {
recordStat(s.ctx, PullCount, int64(len(res.ReceivedMessages)))
}
return err
}, gax.WithRetry(func() gax.Retryer { return &streamingPullRetryer{defaultRetryer: &defaultRetryer{}} }))
return res, err
}

func (s *pullStream) CloseSend() error {
err := s.call(func(spc pb.Subscriber_StreamingPullClient) error {
return spc.CloseSend()
})
s.mu.Lock()
s.err = io.EOF // should not be retried
s.mu.Unlock()
return err
}

+ 100
- 0
vendor/cloud.google.com/go/pubsub/service.go

@@ -0,0 +1,100 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"fmt"
"math"
"strings"
"time"

gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// maxPayload is the maximum number of bytes to devote to the
// encoded AcknowledgementRequest / ModifyAckDeadline proto message.
//
// With gRPC there is no way for the client to know the server's max message size (it is
// configurable on the server). We know from experience that it
// is 512K.
const (
maxPayload = 512 * 1024
maxSendRecvBytes = 20 * 1024 * 1024 // 20M
)

func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) {
msgs := make([]*Message, 0, len(rms))
for i, m := range rms {
msg, err := toMessage(m)
if err != nil {
return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
}
msgs = append(msgs, msg)
}
return msgs, nil
}

func trunc32(i int64) int32 {
if i > math.MaxInt32 {
i = math.MaxInt32
}
return int32(i)
}

type defaultRetryer struct {
bo gax.Backoff
}

// Logic originally from
// https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java
func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) {
s, ok := status.FromError(err)
if !ok { // includes io.EOF, normal stream close, which causes us to reopen
return r.bo.Pause(), true
}
switch s.Code() {
case codes.DeadlineExceeded, codes.Internal, codes.ResourceExhausted, codes.Aborted:
return r.bo.Pause(), true
case codes.Unavailable:
c := strings.Contains(s.Message(), "Server shutdownNow invoked")
if !c {
return r.bo.Pause(), true
}
return 0, false
default:
return 0, false
}
}

type streamingPullRetryer struct {
defaultRetryer gax.Retryer
}

// Does not retry ResourceExhausted. See: https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1166#issuecomment-443744705
func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) {
s, ok := status.FromError(err)
if !ok { // call defaultRetryer so that its backoff can be used
return r.defaultRetryer.Retry(err)
}
switch s.Code() {
case codes.ResourceExhausted:
return 0, false
default:
return r.defaultRetryer.Retry(err)
}
}

+ 160
- 0
vendor/cloud.google.com/go/pubsub/snapshot.go

@@ -0,0 +1,160 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"fmt"
"strings"
"time"

"github.com/golang/protobuf/ptypes"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
)

// Snapshot is a reference to a PubSub snapshot.
type Snapshot struct {
c *Client

// The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
name string
}

// ID returns the unique identifier of the snapshot within its project.
func (s *Snapshot) ID() string {
slash := strings.LastIndex(s.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad snapshot name")
}
return s.name[slash+1:]
}

// SnapshotConfig contains the details of a Snapshot.
type SnapshotConfig struct {
*Snapshot
Topic *Topic
Expiration time.Time
}

// Snapshot creates a reference to a snapshot.
func (c *Client) Snapshot(id string) *Snapshot {
return &Snapshot{
c: c,
name: fmt.Sprintf("projects/%s/snapshots/%s", c.projectID, id),
}
}

// Snapshots returns an iterator which returns snapshots for this project.
func (c *Client) Snapshots(ctx context.Context) *SnapshotConfigIterator {
it := c.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{
Project: c.fullyQualifiedProjectName(),
})
next := func() (*SnapshotConfig, error) {
snap, err := it.Next()
if err != nil {
return nil, err
}
return toSnapshotConfig(snap, c)
}
return &SnapshotConfigIterator{next: next}
}

// SnapshotConfigIterator is an iterator that returns a series of snapshots.
type SnapshotConfigIterator struct {
next func() (*SnapshotConfig, error)
}

// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results.
// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
func (snaps *SnapshotConfigIterator) Next() (*SnapshotConfig, error) {
return snaps.next()
}
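
The iterator.Done convention above follows the standard google.golang.org/api/iterator pattern. A hedged, library-style sketch of draining the snapshot list (client construction omitted):

package example

import (
	"context"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/iterator"
)

// listSnapshotIDs drains the iterator, treating iterator.Done as
// end-of-results rather than a failure.
func listSnapshotIDs(ctx context.Context, client *pubsub.Client) ([]string, error) {
	var ids []string
	it := client.Snapshots(ctx)
	for {
		snap, err := it.Next()
		if err == iterator.Done {
			return ids, nil
		}
		if err != nil {
			return nil, err
		}
		ids = append(ids, snap.ID()) // ID comes from the embedded *Snapshot
	}
}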

// Delete deletes a snapshot.
func (s *Snapshot) Delete(ctx context.Context) error {
return s.c.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: s.name})
}

// SeekToTime seeks the subscription to a point in time.
//
// Messages retained in the subscription that were published before this
// time are marked as acknowledged, and messages retained in the
// subscription that were published after this time are marked as
// unacknowledged. Note that this operation affects only those messages
// retained in the subscription (configured by SnapshotConfig). For example,
// if `time` corresponds to a point before the message retention
// window (or to a point before the system's notion of the subscription
// creation time), only retained messages will be marked as unacknowledged,
// and already-expunged messages will not be restored.
func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error {
ts, err := ptypes.TimestampProto(t)
if err != nil {
return err
}
_, err = s.c.subc.Seek(ctx, &pb.SeekRequest{
Subscription: s.name,
Target: &pb.SeekRequest_Time{Time: ts},
})
return err
}

// CreateSnapshot creates a new snapshot from this subscription.
// The snapshot will be for the topic this subscription is subscribed to.
// If the name is empty string, a unique name is assigned.
//
// The created snapshot is guaranteed to retain:
// (a) The existing backlog on the subscription. More precisely, this is
// defined as the messages in the subscription's backlog that are
// unacknowledged when Snapshot returns without error.
// (b) Any messages published to the subscription's topic following
// Snapshot returning without error.
func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) {
if name != "" {
name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name)
}
snap, err := s.c.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{
Name: name,
Subscription: s.name,
})
if err != nil {
return nil, err
}
return toSnapshotConfig(snap, s.c)
}

// SeekToSnapshot seeks the subscription to a snapshot.
//
// The snapshot need not be created from this subscription,
// but it must be for the topic this subscription is subscribed to.
func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error {
_, err := s.c.subc.Seek(ctx, &pb.SeekRequest{
Subscription: s.name,
Target: &pb.SeekRequest_Snapshot{Snapshot: snap.name},
})
return err
}

func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) {
exp, err := ptypes.Timestamp(snap.ExpireTime)
if err != nil {
return nil, err
}
return &SnapshotConfig{
Snapshot: &Snapshot{c: c, name: snap.Name},
Topic: newTopic(c, snap.Topic),
Expiration: exp,
}, nil
}

+ 741
- 0
vendor/cloud.google.com/go/pubsub/subscription.go

@@ -0,0 +1,741 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"errors"
"fmt"
"io"
"strings"
"sync"
"time"

"cloud.google.com/go/iam"
"cloud.google.com/go/internal/optional"
"github.com/golang/protobuf/ptypes"
durpb "github.com/golang/protobuf/ptypes/duration"
gax "github.com/googleapis/gax-go/v2"
"golang.org/x/sync/errgroup"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
fmpb "google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

// Subscription is a reference to a PubSub subscription.
type Subscription struct {
c *Client

// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
name string

// Settings for pulling messages. Configure these before calling Receive.
ReceiveSettings ReceiveSettings

mu sync.Mutex
receiveActive bool
}

// Subscription creates a reference to a subscription.
func (c *Client) Subscription(id string) *Subscription {
return c.SubscriptionInProject(id, c.projectID)
}

// SubscriptionInProject creates a reference to a subscription in a given project.
func (c *Client) SubscriptionInProject(id, projectID string) *Subscription {
return &Subscription{
c: c,
name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id),
}
}

// String returns the globally unique printable name of the subscription.
func (s *Subscription) String() string {
return s.name
}

// ID returns the unique identifier of the subscription within its project.
func (s *Subscription) ID() string {
slash := strings.LastIndex(s.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad subscription name")
}
return s.name[slash+1:]
}

// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
it := c.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
Project: c.fullyQualifiedProjectName(),
})
return &SubscriptionIterator{
c: c,
next: func() (string, error) {
sub, err := it.Next()
if err != nil {
return "", err
}
return sub.Name, nil
},
}
}

// SubscriptionIterator is an iterator that returns a series of subscriptions.
type SubscriptionIterator struct {
c *Client
next func() (string, error)
}

// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
func (subs *SubscriptionIterator) Next() (*Subscription, error) {
subName, err := subs.next()
if err != nil {
return nil, err
}
return &Subscription{c: subs.c, name: subName}, nil
}

// PushConfig contains configuration for subscriptions that operate in push mode.
type PushConfig struct {
// A URL locating the endpoint to which messages should be pushed.
Endpoint string

// Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
Attributes map[string]string

// AuthenticationMethod is used by push endpoints to verify the source
// of push requests.
// It can be used with push endpoints that are private by default to
// allow requests only from the Cloud Pub/Sub system, for example.
// This field is optional and should be set only by users interested in
// authenticated push.
//
// It is EXPERIMENTAL and a part of a closed alpha that may not be
// accessible to all users. This field is subject to change or removal
// without notice.
AuthenticationMethod AuthenticationMethod
}

func (pc *PushConfig) toProto() *pb.PushConfig {
if pc == nil {
return nil
}
pbCfg := &pb.PushConfig{
Attributes: pc.Attributes,
PushEndpoint: pc.Endpoint,
}
if authMethod := pc.AuthenticationMethod; authMethod != nil {
switch am := authMethod.(type) {
case *OIDCToken:
pbCfg.AuthenticationMethod = am.toProto()
default: // TODO: add others here when GAIC adds more definitions.
}
}
return pbCfg
}

// AuthenticationMethod is used by push endpoints to verify the source of push requests.
// This interface defines fields that are part of a closed alpha that may not be accessible
// to all users.
type AuthenticationMethod interface {
isAuthMethod() bool
}

// OIDCToken allows PushConfigs to be authenticated using
// the OpenID Connect protocol https://openid.net/connect/
type OIDCToken struct {
// Audience to be used when generating OIDC token. The audience claim
// identifies the recipients that the JWT is intended for. The audience
// value is a single case-sensitive string. Having multiple values (array)
// for the audience field is not supported. More info about the OIDC JWT
// token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3
// Note: if not specified, the Push endpoint URL will be used.
Audience string

// The service account email to be used for generating the OpenID Connect token.
// The caller of:
// * CreateSubscription
// * UpdateSubscription
// * ModifyPushConfig
// calls must have the iam.serviceAccounts.actAs permission for the service account.
// See https://cloud.google.com/iam/docs/understanding-roles#service-accounts-roles.
ServiceAccountEmail string
}

var _ AuthenticationMethod = (*OIDCToken)(nil)

func (oidcToken *OIDCToken) isAuthMethod() bool { return true }

func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ {
if oidcToken == nil {
return nil
}
return &pb.PushConfig_OidcToken_{
OidcToken: &pb.PushConfig_OidcToken{
Audience: oidcToken.Audience,
ServiceAccountEmail: oidcToken.ServiceAccountEmail,
},
}
}

// SubscriptionConfig describes the configuration of a subscription.
type SubscriptionConfig struct {
Topic *Topic
PushConfig PushConfig

// The default maximum time after a subscriber receives a message before
// the subscriber should acknowledge the message. Note: messages which are
// obtained via Subscription.Receive need not be acknowledged within this
// deadline, as the deadline will be automatically extended.
AckDeadline time.Duration

// Whether to retain acknowledged messages. If true, acknowledged messages
// will not be expunged until they fall out of the RetentionDuration window.
RetainAckedMessages bool

// How long to retain messages in backlog, from the time of publish. If
// RetainAckedMessages is true, this duration affects the retention of
// acknowledged messages, otherwise only unacknowledged messages are retained.
// Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
RetentionDuration time.Duration

// Expiration policy specifies the conditions for a subscription's expiration.
// A subscription is considered active as long as any connected subscriber is
// successfully consuming messages from the subscription or is issuing
// operations on the subscription. If `expiration_policy` is not set, a
// *default policy* with `ttl` of 31 days will be used. The minimum allowed
// value for `expiration_policy.ttl` is 1 day.
//
// Use time.Duration(0) to indicate that the subscription should never expire.
//
// It is EXPERIMENTAL and subject to change or removal without notice.
ExpirationPolicy optional.Duration

// The set of labels for the subscription.
Labels map[string]string
}

func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription {
var pbPushConfig *pb.PushConfig
if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil {
pbPushConfig = cfg.PushConfig.toProto()
}
var retentionDuration *durpb.Duration
if cfg.RetentionDuration != 0 {
retentionDuration = ptypes.DurationProto(cfg.RetentionDuration)
}
return &pb.Subscription{
Name: name,
Topic: cfg.Topic.name,
PushConfig: pbPushConfig,
AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())),
RetainAckedMessages: cfg.RetainAckedMessages,
MessageRetentionDuration: retentionDuration,
Labels: cfg.Labels,
ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy),
}
}

func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) {
rd := time.Hour * 24 * 7
var err error
if pbSub.MessageRetentionDuration != nil {
rd, err = ptypes.Duration(pbSub.MessageRetentionDuration)
if err != nil {
return SubscriptionConfig{}, err
}
}
var expirationPolicy time.Duration
if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil {
expirationPolicy, err = ptypes.Duration(ttl)
if err != nil {
return SubscriptionConfig{}, err
}
}
subC := SubscriptionConfig{
Topic: newTopic(c, pbSub.Topic),
AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds),
RetainAckedMessages: pbSub.RetainAckedMessages,
RetentionDuration: rd,
Labels: pbSub.Labels,
ExpirationPolicy: expirationPolicy,
}
pc := protoToPushConfig(pbSub.PushConfig)
if pc != nil {
subC.PushConfig = *pc
}
return subC, nil
}

func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig {
if pbPc == nil {
return nil
}
pc := &PushConfig{
Endpoint: pbPc.PushEndpoint,
Attributes: pbPc.Attributes,
}
if am := pbPc.AuthenticationMethod; am != nil {
if oidcToken, ok := am.(*pb.PushConfig_OidcToken_); ok && oidcToken != nil && oidcToken.OidcToken != nil {
pc.AuthenticationMethod = &OIDCToken{
Audience: oidcToken.OidcToken.GetAudience(),
ServiceAccountEmail: oidcToken.OidcToken.GetServiceAccountEmail(),
}
}
}
return pc
}

// ReceiveSettings configure the Receive method.
// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings.
type ReceiveSettings struct {
// MaxExtension is the maximum period for which the Subscription should
// automatically extend the ack deadline for each message.
//
// The Subscription will automatically extend the ack deadline of all
// fetched Messages up to the duration specified. Automatic deadline
// extension beyond the initial receipt may be disabled by specifying a
// duration less than 0.
MaxExtension time.Duration

// MaxOutstandingMessages is the maximum number of unprocessed messages
// (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it
// will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages.
// If the value is negative, then there will be no limit on the number of
// unprocessed messages.
MaxOutstandingMessages int

// MaxOutstandingBytes is the maximum size of unprocessed messages
// (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will
// be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If
// the value is negative, then there will be no limit on the number of bytes
// for unprocessed messages.
MaxOutstandingBytes int

// NumGoroutines is the number of goroutines Receive will spawn to pull
// messages concurrently. If NumGoroutines is less than 1, it will be treated
// as if it were DefaultReceiveSettings.NumGoroutines.
//
// NumGoroutines does not limit the number of messages that can be processed
// concurrently. Even with one goroutine, many messages might be processed at
// once, because that goroutine may continually receive messages and invoke the
// function passed to Receive on them. To limit the number of messages being
// processed concurrently, set MaxOutstandingMessages.
NumGoroutines int

// If Synchronous is true, then no more than MaxOutstandingMessages will be in
// memory at one time. (In contrast, when Synchronous is false, more than
// MaxOutstandingMessages may have been received from the service and in memory
// before being processed.) MaxOutstandingBytes still refers to the total bytes
// processed, rather than in memory. NumGoroutines is ignored.
// The default is false.
Synchronous bool
}

// For synchronous receive, the time to wait if we are already processing
// MaxOutstandingMessages. There is no point calling Pull and asking for zero
// messages, so we pause to allow some message-processing callbacks to finish.
//
// The wait time is large enough to avoid consuming significant CPU, but
// small enough to provide decent throughput. Users who want better
// throughput should not be using synchronous mode.
//
// Waiting might seem like polling, so it's natural to think we could do better by
// noticing when a callback is finished and immediately calling Pull. But if
// callbacks finish in quick succession, this will result in frequent Pull RPCs that
// request a single message, which wastes network bandwidth. Better to wait for a few
// callbacks to finish, so we make fewer RPCs fetching more messages.
//
// This value is unexported so the user doesn't have another knob to think about. Note that
// it is the same value as the one used for nackTicker, so it matches this client's
// idea of a duration that is short, but not so short that we perform excessive RPCs.
const synchronousWaitTime = 100 * time.Millisecond

// This is a var so that tests can change it.
var minAckDeadline = 10 * time.Second

// DefaultReceiveSettings holds the default values for ReceiveSettings.
var DefaultReceiveSettings = ReceiveSettings{
MaxExtension: 10 * time.Minute,
MaxOutstandingMessages: 1000,
MaxOutstandingBytes: 1e9, // 1G
NumGoroutines: 1,
}
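
To make the knobs above concrete, here is a hedged sketch of bounding in-flight work before calling Receive; the IDs are hypothetical and the numbers are illustrative, not recommendations:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-sub") // hypothetical subscription ID
	sub.ReceiveSettings.MaxOutstandingMessages = 100    // cap concurrent callbacks
	sub.ReceiveSettings.MaxOutstandingBytes = 50 << 20  // ~50 MiB of in-flight data
	sub.ReceiveSettings.MaxExtension = 20 * time.Minute // keep slow handlers alive

	if err := sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		m.Ack()
	}); err != nil {
		log.Fatal(err)
	}
}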

// Delete deletes the subscription.
func (s *Subscription) Delete(ctx context.Context) error {
return s.c.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.name})
}

// Exists reports whether the subscription exists on the server.
func (s *Subscription) Exists(ctx context.Context) (bool, error) {
_, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name})
if err == nil {
return true, nil
}
if status.Code(err) == codes.NotFound {
return false, nil
}
return false, err
}

// Config fetches the current configuration for the subscription.
func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) {
pbSub, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name})
if err != nil {
return SubscriptionConfig{}, err
}
cfg, err := protoToSubscriptionConfig(pbSub, s.c)
if err != nil {
return SubscriptionConfig{}, err
}
return cfg, nil
}

// SubscriptionConfigToUpdate describes how to update a subscription.
type SubscriptionConfigToUpdate struct {
// If non-nil, the push config is changed.
PushConfig *PushConfig

// If non-zero, the ack deadline is changed.
AckDeadline time.Duration

// If set, RetainAckedMessages is changed.
RetainAckedMessages optional.Bool

// If non-zero, RetentionDuration is changed.
RetentionDuration time.Duration

// If non-zero, Expiration is changed.
ExpirationPolicy optional.Duration

// If non-nil, the current set of labels is completely
// replaced by the new set.
// This field has beta status. It is not subject to the stability guarantee
// and may change.
Labels map[string]string
}

// Update changes an existing subscription according to the fields set in cfg.
// It returns the new SubscriptionConfig.
//
// Update returns an error if no fields were modified.
func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) {
req := s.updateRequest(&cfg)
if err := cfg.validate(); err != nil {
return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err)
}
if len(req.UpdateMask.Paths) == 0 {
return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update")
}
rpsub, err := s.c.subc.UpdateSubscription(ctx, req)
if err != nil {
return SubscriptionConfig{}, err
}
return protoToSubscriptionConfig(rpsub, s.c)
}
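
A hedged sketch of the partial-update behavior: only the fields set in SubscriptionConfigToUpdate end up in the field mask built below, so changing one setting leaves the others untouched (IDs hypothetical):

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-sub") // hypothetical subscription ID
	// Only ack_deadline_seconds lands in the update mask; other fields are unchanged.
	cfg, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
		AckDeadline: 60 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("ack deadline now %v", cfg.AckDeadline)
}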

func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.UpdateSubscriptionRequest {
psub := &pb.Subscription{Name: s.name}
var paths []string
if cfg.PushConfig != nil {
psub.PushConfig = cfg.PushConfig.toProto()
paths = append(paths, "push_config")
}
if cfg.AckDeadline != 0 {
psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds()))
paths = append(paths, "ack_deadline_seconds")
}
if cfg.RetainAckedMessages != nil {
psub.RetainAckedMessages = optional.ToBool(cfg.RetainAckedMessages)
paths = append(paths, "retain_acked_messages")
}
if cfg.RetentionDuration != 0 {
psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration)
paths = append(paths, "message_retention_duration")
}
if cfg.ExpirationPolicy != nil {
psub.ExpirationPolicy = expirationPolicyToProto(cfg.ExpirationPolicy)
paths = append(paths, "expiration_policy")
}
if cfg.Labels != nil {
psub.Labels = cfg.Labels
paths = append(paths, "labels")
}
return &pb.UpdateSubscriptionRequest{
Subscription: psub,
UpdateMask: &fmpb.FieldMask{Paths: paths},
}
}

const (
// The minimum expiration policy duration is 1 day as per:
// https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L606-L607
minExpirationPolicy = 24 * time.Hour

// If an expiration policy is not specified, the default of 31 days is used as per:
// https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L605-L606
defaultExpirationPolicy = 31 * 24 * time.Hour
)

func (cfg *SubscriptionConfigToUpdate) validate() error {
if cfg == nil || cfg.ExpirationPolicy == nil {
return nil
}
policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy
if policy == 0 || policy >= min {
return nil
}
return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min)
}

func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy {
if expirationPolicy == nil {
return nil
}

dur := optional.ToDuration(expirationPolicy)
var ttl *durpb.Duration
// As per:
// https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl
// if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire.
if dur != 0 {
ttl = ptypes.DurationProto(dur)
}
return &pb.ExpirationPolicy{
Ttl: ttl,
}
}

// IAM returns the subscription's IAM handle.
func (s *Subscription) IAM() *iam.Handle {
return iam.InternalNewHandle(s.c.subc.Connection(), s.name)
}

// CreateSubscription creates a new subscription on a topic.
//
// id is the name of the subscription to create. It must start with a letter,
// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-),
// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It
// must be between 3 and 255 characters in length, and must not start with
// "goog".
//
// cfg.Topic is the topic from which the subscription should receive messages. It
// need not belong to the same project as the subscription. This field is required.
//
// cfg.AckDeadline is the maximum time after a subscriber receives a message before
// the subscriber should acknowledge the message. It must be between 10 and 600
// seconds (inclusive), and is rounded down to the nearest second. If the
// provided ackDeadline is 0, then the default value of 10 seconds is used.
// Note: messages which are obtained via Subscription.Receive need not be
// acknowledged within this deadline, as the deadline will be automatically
// extended.
//
// cfg.PushConfig may be set to configure this subscription for push delivery.
//
// If the subscription already exists an error will be returned.
func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) {
if cfg.Topic == nil {
return nil, errors.New("pubsub: require non-nil Topic")
}
if cfg.AckDeadline == 0 {
cfg.AckDeadline = 10 * time.Second
}
if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second {
return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d)
}

sub := c.Subscription(id)
_, err := c.subc.CreateSubscription(ctx, cfg.toProto(sub.name))
if err != nil {
return nil, err
}
return sub, nil
}
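
Putting the constraints above together, a hedged sketch of creating a subscription on an existing topic; the project, topic, and subscription IDs are hypothetical:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	topic := client.Topic("my-topic") // hypothetical existing topic
	sub, err := client.CreateSubscription(ctx, "my-sub", pubsub.SubscriptionConfig{
		Topic:       topic,
		AckDeadline: 30 * time.Second, // must be within [10s, 600s]
	})
	if err != nil {
		log.Fatal(err) // e.g. AlreadyExists if the subscription is present
	}
	log.Println("created", sub)
}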

var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription")

// Receive calls f with the outstanding messages from the subscription.
// It blocks until ctx is done, or the service returns a non-retryable error.
//
// The standard way to terminate a Receive is to cancel its context:
//
// cctx, cancel := context.WithCancel(ctx)
// err := sub.Receive(cctx, callback)
// // Call cancel from callback, or another goroutine.
//
// If the service returns a non-retryable error, Receive returns that error after
// all of the outstanding calls to f have returned. If ctx is done, Receive
// returns nil after all of the outstanding calls to f have returned and
// all messages have been acknowledged or have expired.
//
// Receive calls f concurrently from multiple goroutines. It is encouraged to
// process messages synchronously in f, even if that processing is relatively
// time-consuming; Receive will spawn new goroutines for incoming messages,
// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings.
//
// The context passed to f will be canceled when ctx is Done or there is a
// fatal service error.
//
// Receive will send an ack deadline extension on message receipt, then
// automatically extend the ack deadline of all fetched Messages up to the
// period specified by s.ReceiveSettings.MaxExtension.
//
// Each Subscription may have only one invocation of Receive active at a time.
func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error {
s.mu.Lock()
if s.receiveActive {
s.mu.Unlock()
return errReceiveInProgress
}
s.receiveActive = true
s.mu.Unlock()
defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }()

maxCount := s.ReceiveSettings.MaxOutstandingMessages
if maxCount == 0 {
maxCount = DefaultReceiveSettings.MaxOutstandingMessages
}
maxBytes := s.ReceiveSettings.MaxOutstandingBytes
if maxBytes == 0 {
maxBytes = DefaultReceiveSettings.MaxOutstandingBytes
}
maxExt := s.ReceiveSettings.MaxExtension
if maxExt == 0 {
maxExt = DefaultReceiveSettings.MaxExtension
} else if maxExt < 0 {
// If MaxExtension is negative, disable automatic extension.
maxExt = 0
}
var numGoroutines int
switch {
case s.ReceiveSettings.Synchronous:
numGoroutines = 1
case s.ReceiveSettings.NumGoroutines >= 1:
numGoroutines = s.ReceiveSettings.NumGoroutines
default:
numGoroutines = DefaultReceiveSettings.NumGoroutines
}
// TODO(jba): add tests that verify that ReceiveSettings are correctly processed.
po := &pullOptions{
maxExtension: maxExt,
maxPrefetch: trunc32(int64(maxCount)),
synchronous: s.ReceiveSettings.Synchronous,
}
fc := newFlowController(maxCount, maxBytes)

// Wait for all goroutines started by Receive to return, so instead of an
// obscure goroutine leak we have an obvious blocked call to Receive.
group, gctx := errgroup.WithContext(ctx)
for i := 0; i < numGoroutines; i++ {
group.Go(func() error {
return s.receive(gctx, po, fc, f)
})
}
return group.Wait()
}

func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error {
// Cancel a sub-context when we return, to kick the context-aware callbacks
// and the goroutine below.
ctx2, cancel := context.WithCancel(ctx)
// The iterator does not use the context passed to Receive. If it did, canceling
// that context would immediately stop the iterator without waiting for unacked
// messages.
iter := newMessageIterator(s.c.subc, s.name, po)

// We cannot use errgroup from Receive here. Receive might already be calling group.Wait,
// and group.Wait cannot be called concurrently with group.Go. We give each receive() its
// own WaitGroup instead.
// Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed
// to be called after all Adds.
var wg sync.WaitGroup
wg.Add(1)
go func() {
<-ctx2.Done()
// Call stop when Receive's context is done.
// Stop will block until all outstanding messages have been acknowledged
// or there was a fatal service error.
iter.stop()
wg.Done()
}()
defer wg.Wait()

defer cancel()
for {
var maxToPull int32 // maximum number of messages to pull
if po.synchronous {
if po.maxPrefetch < 0 {
// If there is no limit on the number of messages to pull, use a reasonable default.
maxToPull = 1000
} else {
// Limit the number of messages in memory to MaxOutstandingMessages
// (here, po.maxPrefetch). For each message currently in memory, we have
// called fc.acquire but not fc.release: this is fc.count(). The next
// call to Pull should fetch no more than the difference between these
// values.
maxToPull = po.maxPrefetch - int32(fc.count())
if maxToPull <= 0 {
// Wait for some callbacks to finish.
if err := gax.Sleep(ctx, synchronousWaitTime); err != nil {
// Return nil if the context is done, not err.
return nil
}
continue
}
}
}
msgs, err := iter.receive(maxToPull)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
for i, msg := range msgs {
msg := msg
// TODO(jba): call acquire closer to when the message is allocated.
if err := fc.acquire(ctx, len(msg.Data)); err != nil {
// TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done.
for _, m := range msgs[i:] {
m.Nack()
}
// Return nil if the context is done, not err.
return nil
}
old := msg.doneFunc
msgLen := len(msg.Data)
msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) {
defer fc.release(msgLen)
old(ackID, ack, receiveTime)
}
wg.Add(1)
go func() {
defer wg.Done()
f(ctx2, msg)
}()
}
}
}

type pullOptions struct {
maxExtension time.Duration
maxPrefetch int32
// If true, use unary Pull instead of StreamingPull, and never pull more
// than maxPrefetch messages.
synchronous bool
}
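
The Receive path above is driven entirely by ReceiveSettings: Synchronous selects unary Pull, MaxOutstandingMessages/MaxOutstandingBytes feed the flow controller, and NumGoroutines sets the fan-out. A minimal caller-side sketch, assuming a hypothetical project ID "my-project" and subscription ID "my-sub":

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-sub") // hypothetical subscription ID
	// Feed the flow controller and force the unary-Pull path shown above.
	sub.ReceiveSettings.MaxOutstandingMessages = 10
	sub.ReceiveSettings.Synchronous = true

	cctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	err = sub.Receive(cctx, func(ctx context.Context, m *pubsub.Message) {
		log.Printf("got message: %s", m.Data)
		m.Ack()
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Receive returns only after every handler goroutine has finished, matching the errgroup.Wait at the end of the function above.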

+ 550
- 0
vendor/cloud.google.com/go/pubsub/topic.go View File

@@ -0,0 +1,550 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"errors"
"fmt"
"log"
"runtime"
"strings"
"sync"
"time"

"cloud.google.com/go/iam"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go/v2"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"google.golang.org/api/support/bundler"
pb "google.golang.org/genproto/googleapis/pubsub/v1"
fmpb "google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

const (
// MaxPublishRequestCount is the maximum number of messages that can be in
// a single publish request, as defined by the PubSub service.
MaxPublishRequestCount = 1000

// MaxPublishRequestBytes is the maximum size of a single publish request
// in bytes, as defined by the PubSub service.
MaxPublishRequestBytes = 1e7
)

// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
var ErrOversizedMessage = bundler.ErrOversizedItem

// Topic is a reference to a PubSub topic.
//
// The methods of Topic are safe for use by multiple goroutines.
type Topic struct {
c *Client
// The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
name string

// Settings for publishing messages. All changes must be made before the
// first call to Publish. The default is DefaultPublishSettings.
PublishSettings PublishSettings

mu sync.RWMutex
stopped bool
bundler *bundler.Bundler
}

// PublishSettings control the bundling of published messages.
type PublishSettings struct {

// Publish a non-empty batch after this delay has passed.
DelayThreshold time.Duration

// Publish a batch when it has this many messages. The maximum is
// MaxPublishRequestCount.
CountThreshold int

// Publish a batch when its size in bytes reaches this value.
ByteThreshold int

// The number of goroutines that invoke the Publish RPC concurrently.
//
// Defaults to a multiple of GOMAXPROCS.
NumGoroutines int

// The maximum time that the client will attempt to publish a bundle of messages.
Timeout time.Duration

// The maximum number of bytes that the Bundler will keep in memory before
// returning ErrOverflow.
//
// Defaults to DefaultPublishSettings.BufferedByteLimit.
BufferedByteLimit int
}

// DefaultPublishSettings holds the default values for topics' PublishSettings.
var DefaultPublishSettings = PublishSettings{
DelayThreshold: 1 * time.Millisecond,
CountThreshold: 100,
ByteThreshold: 1e6,
Timeout: 60 * time.Second,
// By default, limit the bundler to 10 times the max message size. The number 10 is
// chosen as a reasonable number of messages in the worst case while still
// capping memory at a low enough value to not OOM users.
BufferedByteLimit: 10 * MaxPublishRequestBytes,
}
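
Because all PublishSettings changes must land before the first Publish, batching is typically tuned right after the Topic is obtained. A sketch with illustrative thresholds (the topic ID is hypothetical):

```go
import (
	"time"

	"cloud.google.com/go/pubsub"
)

// configureBatching widens the batch window relative to the defaults above.
// The thresholds are examples, not recommendations.
func configureBatching(client *pubsub.Client) *pubsub.Topic {
	t := client.Topic("my-topic") // hypothetical topic ID
	t.PublishSettings.DelayThreshold = 100 * time.Millisecond // wait up to 100ms
	t.PublishSettings.CountThreshold = 500                    // or 500 messages
	t.PublishSettings.ByteThreshold = 1e6                     // or ~1MB
	return t
}
```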

// CreateTopic creates a new topic.
//
// The specified topic ID must start with a letter, and contain only letters
// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
// characters in length, and must not start with "goog". For more information,
// see: https://cloud.google.com/pubsub/docs/admin#resource_names
//
// If the topic already exists an error will be returned.
func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error) {
t := c.Topic(topicID)
_, err := c.pubc.CreateTopic(ctx, &pb.Topic{Name: t.name})
if err != nil {
return nil, err
}
return t, nil
}

// CreateTopicWithConfig creates a topic from TopicConfig.
//
// The specified topic ID must start with a letter, and contain only letters
// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
// characters in length, and must not start with "goog". For more information,
// see: https://cloud.google.com/pubsub/docs/admin#resource_names.
//
// If the topic already exists, an error will be returned.
func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) {
t := c.Topic(topicID)
_, err := c.pubc.CreateTopic(ctx, &pb.Topic{
Name: t.name,
Labels: tc.Labels,
MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy),
KmsKeyName: tc.KMSKeyName,
})
if err != nil {
return nil, err
}
return t, nil
}
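
Creating a topic with configuration attached follows directly; a sketch assuming hypothetical IDs and a placeholder KMS key name in the documented format:

```go
import (
	"context"

	"cloud.google.com/go/pubsub"
)

// createLabelledTopic creates a topic with labels and a CMEK key.
// All identifiers here are placeholders.
func createLabelledTopic(ctx context.Context, client *pubsub.Client) (*pubsub.Topic, error) {
	return client.CreateTopicWithConfig(ctx, "my-topic", &pubsub.TopicConfig{
		Labels:     map[string]string{"team": "data"},
		KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
	})
}
```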

// Topic creates a reference to a topic in the client's project.
//
// If a Topic's Publish method is called, it has background goroutines
// associated with it. Clean them up by calling Topic.Stop.
//
// Avoid creating many Topic instances if you use them to publish.
func (c *Client) Topic(id string) *Topic {
return c.TopicInProject(id, c.projectID)
}

// TopicInProject creates a reference to a topic in the given project.
//
// If a Topic's Publish method is called, it has background goroutines
// associated with it. Clean them up by calling Topic.Stop.
//
// Avoid creating many Topic instances if you use them to publish.
func (c *Client) TopicInProject(id, projectID string) *Topic {
return newTopic(c, fmt.Sprintf("projects/%s/topics/%s", projectID, id))
}

func newTopic(c *Client, name string) *Topic {
return &Topic{
c: c,
name: name,
PublishSettings: DefaultPublishSettings,
}
}

// TopicConfig describes the configuration of a topic.
type TopicConfig struct {
// The set of labels for the topic.
Labels map[string]string

// The topic's message storage policy.
MessageStoragePolicy MessageStoragePolicy

// The name of the Cloud KMS key to be used to protect access to messages
// published to this topic, in the format
// "projects/P/locations/L/keyRings/R/cryptoKeys/K".
KMSKeyName string
}

// TopicConfigToUpdate describes how to update a topic.
type TopicConfigToUpdate struct {
// If non-nil, the current set of labels is completely
// replaced by the new set.
Labels map[string]string

// If non-nil, the existing policy (containing the list of regions)
// is completely replaced by the new policy.
//
// Use the zero value &MessageStoragePolicy{} to reset the topic back to
// using the organization's Resource Location Restriction policy.
//
// If nil, the policy remains unchanged.
//
// This field has beta status. It is not subject to the stability guarantee
// and may change.
MessageStoragePolicy *MessageStoragePolicy
}

func protoToTopicConfig(pbt *pb.Topic) TopicConfig {
return TopicConfig{
Labels: pbt.Labels,
MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy),
KMSKeyName: pbt.KmsKeyName,
}
}

// MessageStoragePolicy constrains how messages published to the topic may be stored. It
// is determined when the topic is created based on the policy configured at
// the project level.
type MessageStoragePolicy struct {
// AllowedPersistenceRegions is the list of GCP regions where messages that are published
// to the topic may be persisted in storage. Messages published by publishers running in
// non-allowed GCP regions (or running outside of GCP altogether) will be
// routed for storage in one of the allowed regions.
//
// If empty, it indicates a misconfiguration at the project or organization level, which
// will result in all Publish operations failing. This field cannot be empty in updates.
//
// If nil, then the policy is not defined on a topic level. When used in updates, it resets
// the regions back to the organization level Resource Location Restriction policy.
//
// For more information, see
// https://cloud.google.com/pubsub/docs/resource-location-restriction#pubsub-storage-locations.
AllowedPersistenceRegions []string
}

func protoToMessageStoragePolicy(msp *pb.MessageStoragePolicy) MessageStoragePolicy {
if msp == nil {
return MessageStoragePolicy{}
}
return MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions}
}

func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePolicy {
if msp == nil || msp.AllowedPersistenceRegions == nil {
return nil
}
return &pb.MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions}
}

// Config returns the TopicConfig for the topic.
func (t *Topic) Config(ctx context.Context) (TopicConfig, error) {
pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
if err != nil {
return TopicConfig{}, err
}
return protoToTopicConfig(pbt), nil
}

// Update changes an existing topic according to the fields set in cfg. It returns
// the new TopicConfig.
func (t *Topic) Update(ctx context.Context, cfg TopicConfigToUpdate) (TopicConfig, error) {
req := t.updateRequest(cfg)
if len(req.UpdateMask.Paths) == 0 {
return TopicConfig{}, errors.New("pubsub: UpdateTopic call with nothing to update")
}
rpt, err := t.c.pubc.UpdateTopic(ctx, req)
if err != nil {
return TopicConfig{}, err
}
return protoToTopicConfig(rpt), nil
}
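
Since a nil field in TopicConfigToUpdate means "leave unchanged", callers set only what they intend to replace. A sketch that swaps the label set (values illustrative) and notes the documented storage-policy reset:

```go
import (
	"context"

	"cloud.google.com/go/pubsub"
)

// replaceLabels replaces the topic's entire label set, per the
// TopicConfigToUpdate semantics above.
func replaceLabels(ctx context.Context, t *pubsub.Topic) (pubsub.TopicConfig, error) {
	return t.Update(ctx, pubsub.TopicConfigToUpdate{
		Labels: map[string]string{"env": "prod"},
		// To reset storage back to the organization-level policy instead:
		// MessageStoragePolicy: &pubsub.MessageStoragePolicy{},
	})
}
```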

func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest {
pt := &pb.Topic{Name: t.name}
var paths []string
if cfg.Labels != nil {
pt.Labels = cfg.Labels
paths = append(paths, "labels")
}
if cfg.MessageStoragePolicy != nil {
pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy)
paths = append(paths, "message_storage_policy")
}
return &pb.UpdateTopicRequest{
Topic: pt,
UpdateMask: &fmpb.FieldMask{Paths: paths},
}
}

// Topics returns an iterator which returns all of the topics for the client's project.
func (c *Client) Topics(ctx context.Context) *TopicIterator {
it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()})
return &TopicIterator{
c: c,
next: func() (string, error) {
topic, err := it.Next()
if err != nil {
return "", err
}
return topic.Name, nil
},
}
}

// TopicIterator is an iterator that returns a series of topics.
type TopicIterator struct {
c *Client
next func() (string, error)
}

// Next returns the next topic. If there are no more topics, iterator.Done will be returned.
func (tps *TopicIterator) Next() (*Topic, error) {
topicName, err := tps.next()
if err != nil {
return nil, err
}
return newTopic(tps.c, topicName), nil
}
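
The iterator.Done sentinel mentioned above comes from google.golang.org/api/iterator, so a listing loop looks like this sketch:

```go
import (
	"context"
	"fmt"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/iterator"
)

// listTopicIDs drains the iterator until the Done sentinel.
func listTopicIDs(ctx context.Context, client *pubsub.Client) error {
	it := client.Topics(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(t.ID())
	}
}
```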

// ID returns the unique identifier of the topic within its project.
func (t *Topic) ID() string {
slash := strings.LastIndex(t.name, "/")
if slash == -1 {
// name is not a fully-qualified name.
panic("bad topic name")
}
return t.name[slash+1:]
}

// String returns the printable globally unique name for the topic.
func (t *Topic) String() string {
return t.name
}

// Delete deletes the topic.
func (t *Topic) Delete(ctx context.Context) error {
return t.c.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: t.name})
}

// Exists reports whether the topic exists on the server.
func (t *Topic) Exists(ctx context.Context) (bool, error) {
if t.name == "_deleted-topic_" {
return false, nil
}
_, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
if err == nil {
return true, nil
}
if status.Code(err) == codes.NotFound {
return false, nil
}
return false, err
}

// IAM returns the topic's IAM handle.
func (t *Topic) IAM() *iam.Handle {
return iam.InternalNewHandle(t.c.pubc.Connection(), t.name)
}

// Subscriptions returns an iterator which returns the subscriptions for this topic.
//
// Some of the returned subscriptions may belong to a project other than t.
func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
it := t.c.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
Topic: t.name,
})
return &SubscriptionIterator{
c: t.c,
next: it.Next,
}
}

var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")

// Publish publishes msg to the topic asynchronously. Messages are batched and
// sent according to the topic's PublishSettings. Publish never blocks.
//
// Publish returns a non-nil PublishResult which will be ready when the
// message has been sent (or has failed to be sent) to the server.
//
// Publish creates goroutines for batching and sending messages. These goroutines
// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
// will immediately return a PublishResult with an error.
func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
// TODO(jba): if this turns out to take significant time, try to approximate it.
// Or, convert the messages to protos in Publish, instead of in the service.
msg.size = proto.Size(&pb.PubsubMessage{
Data: msg.Data,
Attributes: msg.Attributes,
})
r := &PublishResult{ready: make(chan struct{})}
t.initBundler()
t.mu.RLock()
defer t.mu.RUnlock()
// TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
if t.stopped {
r.set("", errTopicStopped)
return r
}

// TODO(jba) [from bcmills] consider using a shared channel per bundle
// (requires Bundler API changes; would reduce allocations)
err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
if err != nil {
r.set("", err)
}
return r
}
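
The asynchronous contract above pairs naturally with PublishResult.Get for callers that need the server ID. A minimal sketch:

```go
import (
	"context"

	"cloud.google.com/go/pubsub"
)

// publishOne sends a single message and blocks until its batch is sent,
// returning the server-generated message ID.
func publishOne(ctx context.Context, t *pubsub.Topic) (string, error) {
	res := t.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	return res.Get(ctx) // blocks until sent or ctx is done
}
```

Once no further messages will be published, call t.Stop() (below) to flush outstanding batches and release the bundler's goroutines.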

// Stop sends all remaining published messages and stops goroutines created for handling
// publishing. Returns once all outstanding messages have been sent or have
// failed to be sent.
func (t *Topic) Stop() {
t.mu.Lock()
noop := t.stopped || t.bundler == nil
t.stopped = true
t.mu.Unlock()
if noop {
return
}
t.bundler.Flush()
}

// A PublishResult holds the result from a call to Publish.
type PublishResult struct {
ready chan struct{}
serverID string
err error
}

// Ready returns a channel that is closed when the result is ready.
// When the Ready channel is closed, Get is guaranteed not to block.
func (r *PublishResult) Ready() <-chan struct{} { return r.ready }

// Get returns the server-generated message ID and/or error result of a Publish call.
// Get blocks until the Publish call completes or the context is done.
func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) {
// If the result is already ready, return it even if the context is done.
select {
case <-r.Ready():
return r.serverID, r.err
default:
}
select {
case <-ctx.Done():
return "", ctx.Err()
case <-r.Ready():
return r.serverID, r.err
}
}

func (r *PublishResult) set(sid string, err error) {
r.serverID = sid
r.err = err
close(r.ready)
}

type bundledMessage struct {
msg *Message
res *PublishResult
}

func (t *Topic) initBundler() {
t.mu.RLock()
noop := t.stopped || t.bundler != nil
t.mu.RUnlock()
if noop {
return
}
t.mu.Lock()
defer t.mu.Unlock()
// Must re-check, since we released the lock.
if t.stopped || t.bundler != nil {
return
}

timeout := t.PublishSettings.Timeout
t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) {
// TODO(jba): use a context detached from the one passed to NewClient.
ctx := context.TODO()
if timeout != 0 {
var cancel func()
ctx, cancel = context.WithTimeout(ctx, timeout)
defer cancel()
}
t.publishMessageBundle(ctx, items.([]*bundledMessage))
})
t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold
t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold
if t.bundler.BundleCountThreshold > MaxPublishRequestCount {
t.bundler.BundleCountThreshold = MaxPublishRequestCount
}
t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold

bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit
if t.PublishSettings.BufferedByteLimit > 0 {
bufferedByteLimit = t.PublishSettings.BufferedByteLimit
}
t.bundler.BufferedByteLimit = bufferedByteLimit

t.bundler.BundleByteLimit = MaxPublishRequestBytes
// Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently.
// The default value was determined via extensive load testing (see the loadtest subdirectory).
if t.PublishSettings.NumGoroutines > 0 {
t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines
} else {
t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0)
}
}

func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) {
ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name))
if err != nil {
log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err)
}
pbMsgs := make([]*pb.PubsubMessage, len(bms))
for i, bm := range bms {
pbMsgs[i] = &pb.PubsubMessage{
Data: bm.msg.Data,
Attributes: bm.msg.Attributes,
}
bm.msg = nil // release bm.msg for GC
}
start := time.Now()
res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{
Topic: t.name,
Messages: pbMsgs,
}, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)))
end := time.Now()
if err != nil {
// Update context with error tag for OpenCensus,
// using same stats.Record() call as success case.
ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"),
tag.Upsert(keyError, err.Error()))
}
stats.Record(ctx,
PublishLatency.M(float64(end.Sub(start)/time.Millisecond)),
PublishedMessages.M(int64(len(bms))))
for i, bm := range bms {
if err != nil {
bm.res.set("", err)
} else {
bm.res.set(res.MessageIds[i], nil)
}
}
}

+ 217
- 0
vendor/cloud.google.com/go/pubsub/trace.go View File

@@ -0,0 +1,217 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
"context"
"log"
"sync"

"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"google.golang.org/api/option"
"google.golang.org/grpc"
)

func openCensusOptions() []option.ClientOption {
return []option.ClientOption{
option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
}
}

// The following keys are used to tag requests with a specific topic/subscription ID.
var (
keyTopic = tag.MustNewKey("topic")
keySubscription = tag.MustNewKey("subscription")
)

// In the following, the error key is set only when the status is not "OK".
var (
keyStatus = tag.MustNewKey("status")
keyError = tag.MustNewKey("error")
)

const statsPrefix = "cloud.google.com/go/pubsub/"

// The following are measures recorded in publish/subscribe flows.
var (
// PublishedMessages is a measure of the number of messages published, which may include errors.
// It is EXPERIMENTAL and subject to change or removal without notice.
PublishedMessages = stats.Int64(statsPrefix+"published_messages", "Number of PubSub messages published", stats.UnitDimensionless)

// PublishLatency is a measure of the number of milliseconds it took to publish a bundle,
// which may consist of one or more messages.
// It is EXPERIMENTAL and subject to change or removal without notice.
PublishLatency = stats.Float64(statsPrefix+"publish_roundtrip_latency", "The latency in milliseconds per publish batch", stats.UnitMilliseconds)

// PullCount is a measure of the number of messages pulled.
// It is EXPERIMENTAL and subject to change or removal without notice.
PullCount = stats.Int64(statsPrefix+"pull_count", "Number of PubSub messages pulled", stats.UnitDimensionless)

// AckCount is a measure of the number of messages acked.
// It is EXPERIMENTAL and subject to change or removal without notice.
AckCount = stats.Int64(statsPrefix+"ack_count", "Number of PubSub messages acked", stats.UnitDimensionless)

// NackCount is a measure of the number of messages nacked.
// It is EXPERIMENTAL and subject to change or removal without notice.
NackCount = stats.Int64(statsPrefix+"nack_count", "Number of PubSub messages nacked", stats.UnitDimensionless)

// ModAckCount is a measure of the number of messages whose ack-deadline was modified.
// It is EXPERIMENTAL and subject to change or removal without notice.
ModAckCount = stats.Int64(statsPrefix+"mod_ack_count", "Number of ack-deadlines modified", stats.UnitDimensionless)

// ModAckTimeoutCount is a measure of the number of ModifyAckDeadline RPCs that timed out.
// It is EXPERIMENTAL and subject to change or removal without notice.
ModAckTimeoutCount = stats.Int64(statsPrefix+"mod_ack_timeout_count", "Number of ModifyAckDeadline RPCs that timed out", stats.UnitDimensionless)

// StreamOpenCount is a measure of the number of times a streaming-pull stream was opened.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamOpenCount = stats.Int64(statsPrefix+"stream_open_count", "Number of calls opening a new streaming pull", stats.UnitDimensionless)

// StreamRetryCount is a measure of the number of times a streaming-pull operation was retried.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamRetryCount = stats.Int64(statsPrefix+"stream_retry_count", "Number of retries of a stream send or receive", stats.UnitDimensionless)

// StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamRequestCount = stats.Int64(statsPrefix+"stream_request_count", "Number of gRPC StreamingPull request messages sent", stats.UnitDimensionless)

// StreamResponseCount is a measure of the number of responses received on a streaming-pull stream.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless)
)

var (
// PublishedMessagesView is a cumulative sum of PublishedMessages.
// It is EXPERIMENTAL and subject to change or removal without notice.
PublishedMessagesView *view.View

// PublishLatencyView is a distribution of PublishLatency.
// It is EXPERIMENTAL and subject to change or removal without notice.
PublishLatencyView *view.View

// PullCountView is a cumulative sum of PullCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
PullCountView *view.View

// AckCountView is a cumulative sum of AckCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
AckCountView *view.View

// NackCountView is a cumulative sum of NackCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
NackCountView *view.View

// ModAckCountView is a cumulative sum of ModAckCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
ModAckCountView *view.View

// ModAckTimeoutCountView is a cumulative sum of ModAckTimeoutCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
ModAckTimeoutCountView *view.View

// StreamOpenCountView is a cumulative sum of StreamOpenCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamOpenCountView *view.View

// StreamRetryCountView is a cumulative sum of StreamRetryCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamRetryCountView *view.View

// StreamRequestCountView is a cumulative sum of StreamRequestCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamRequestCountView *view.View

// StreamResponseCountView is a cumulative sum of StreamResponseCount.
// It is EXPERIMENTAL and subject to change or removal without notice.
StreamResponseCountView *view.View
)

func init() {
PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError)
PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError)
PullCountView = createCountView(PullCount, keySubscription)
AckCountView = createCountView(AckCount, keySubscription)
NackCountView = createCountView(NackCount, keySubscription)
ModAckCountView = createCountView(ModAckCount, keySubscription)
ModAckTimeoutCountView = createCountView(ModAckTimeoutCount, keySubscription)
StreamOpenCountView = createCountView(StreamOpenCount, keySubscription)
StreamRetryCountView = createCountView(StreamRetryCount, keySubscription)
StreamRequestCountView = createCountView(StreamRequestCount, keySubscription)
StreamResponseCountView = createCountView(StreamResponseCount, keySubscription)

DefaultPublishViews = []*view.View{
PublishedMessagesView,
PublishLatencyView,
}

DefaultSubscribeViews = []*view.View{
PullCountView,
AckCountView,
NackCountView,
ModAckCountView,
ModAckTimeoutCountView,
StreamOpenCountView,
StreamRetryCountView,
StreamRequestCountView,
StreamResponseCountView,
}
}

// The following slices are the default views related to publish/subscribe operations provided by this package.
// They are EXPERIMENTAL and subject to change or removal without notice.
var (
DefaultPublishViews []*view.View
DefaultSubscribeViews []*view.View
)
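
To actually export these measures, the views must be registered with OpenCensus; a sketch using view.Register from go.opencensus.io/stats/view:

```go
import (
	"log"

	"cloud.google.com/go/pubsub"
	"go.opencensus.io/stats/view"
)

// enablePubsubViews registers the experimental default views so any
// installed OpenCensus exporter picks up the measures above.
func enablePubsubViews() {
	if err := view.Register(pubsub.DefaultPublishViews...); err != nil {
		log.Fatalf("registering publish views: %v", err)
	}
	if err := view.Register(pubsub.DefaultSubscribeViews...); err != nil {
		log.Fatalf("registering subscribe views: %v", err)
	}
}
```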

func createCountView(m stats.Measure, keys ...tag.Key) *view.View {
return &view.View{
Name: m.Name(),
Description: m.Description(),
TagKeys: keys,
Measure: m,
Aggregation: view.Sum(),
}
}

func createDistView(m stats.Measure, keys ...tag.Key) *view.View {
return &view.View{
Name: m.Name(),
Description: m.Description(),
TagKeys: keys,
Measure: m,
Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000),
}
}

var logOnce sync.Once

// withSubscriptionKey returns a new context modified with the subscriptionKey tag map.
func withSubscriptionKey(ctx context.Context, subName string) context.Context {
ctx, err := tag.New(ctx, tag.Upsert(keySubscription, subName))
if err != nil {
logOnce.Do(func() {
log.Printf("pubsub: error creating tag map for 'subscribe' key: %v", err)
})
}
return ctx
}

func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
stats.Record(ctx, m.M(n))
}

+ 1
- 0
vendor/github.com/RichardKnop/logging/.gitignore View File

@@ -0,0 +1 @@
coverage*

+ 17
- 0
vendor/github.com/RichardKnop/logging/.travis.yml View File

@@ -0,0 +1,17 @@
---
language: go

go:
- 1.11.x

env:
- GO111MODULE=on

services:
- docker

script:
- make test-with-coverage

after_success:
- bash <(curl -s https://codecov.io/bash)

+ 354
- 0
vendor/github.com/RichardKnop/logging/LICENSE View File

@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor”

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. “Contributor Version”

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.

1.3. “Contribution”

means Covered Software of a particular Contributor.

1.4. “Covered Software”

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. “Incompatible With Secondary Licenses”
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.

1.6. “Executable Form”

means any form of the work other than Source Code Form.

1.7. “Larger Work”

means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.

1.8. “License”

means this document.

1.9. “Licensable”

means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.

1.10. “Modifications”

means any of the following:

a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor

means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.

1.12. “Secondary License”

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. “Source Code Form”

means the form of the work preferred for making modifications.

1.14. “You” (or “Your”)

means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.

This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.


+ 34
- 0
vendor/github.com/RichardKnop/logging/Makefile View File

@@ -0,0 +1,34 @@
.PHONY: update-deps install-deps fmt lint golint test test-with-coverage
# TODO: When Go 1.9 is released vendor folder should be ignored automatically
PACKAGES=`go list ./... | grep -v vendor | grep -v mocks`

fmt:
for pkg in ${PACKAGES}; do \
go fmt $$pkg; \
done;

lint:
gometalinter --exclude=vendor/ --tests --config=gometalinter.json --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode ./...

golint:
for pkg in ${PACKAGES}; do \
golint $$pkg; \
done;

test:
TEST_FAILED= ; \
for pkg in ${PACKAGES}; do \
go test $$pkg || TEST_FAILED=1; \
done; \
[ -z "$$TEST_FAILED" ]

test-with-coverage:
echo "" > coverage.out
echo "mode: set" > coverage-all.out
TEST_FAILED= ; \
for pkg in ${PACKAGES}; do \
go test -coverprofile=coverage.out -covermode=set $$pkg || TEST_FAILED=1; \
tail -n +2 coverage.out >> coverage-all.out; \
done; \
[ -z "$$TEST_FAILED" ]
#go tool cover -html=coverage-all.out

+ 58
- 0
vendor/github.com/RichardKnop/logging/README.md View File

@@ -0,0 +1,58 @@
## Logging

A simple leveled logging library with coloured output.

[![Travis Status for RichardKnop/logging](https://travis-ci.org/RichardKnop/logging.svg?branch=master&label=linux+build)](https://travis-ci.org/RichardKnop/logging)
[![godoc for RichardKnop/logging](https://godoc.org/github.com/RichardKnop/logging?status.svg)](http://godoc.org/github.com/RichardKnop/logging)
[![codecov for RichardKnop/logging](https://codecov.io/gh/RichardKnop/logging/branch/master/graph/badge.svg)](https://codecov.io/gh/RichardKnop/logging)

---

Log levels:

- `INFO` (blue)
- `WARNING` (pink)
- `ERROR` (red)
- `FATAL` (red)

Formatters:

- `DefaultFormatter`
- `ColouredFormatter`

Example usage. Create a new package `log` in your app such that:

```go
package log

import (
"github.com/RichardKnop/logging"
)

var (
logger = logging.New(nil, nil, new(logging.ColouredFormatter))

// INFO ...
INFO = logger[logging.INFO]
// WARNING ...
WARNING = logger[logging.WARNING]
// ERROR ...
ERROR = logger[logging.ERROR]
// FATAL ...
FATAL = logger[logging.FATAL]
)
```

Then from your app you could do:

```go
package main

import (
"github.com/yourusername/yourapp/log"
)

func main() {
log.INFO.Print("log message")
}
```

+ 40
- 0
vendor/github.com/RichardKnop/logging/coloured_formatter.go View File

@@ -0,0 +1,40 @@
package logging

import (
"fmt"
)

const (
// For colouring
resetSeq = "\033[0m"
colourSeq = "\033[0;%dm"
)

// Colour map
var colour = map[level]string{
INFO: fmt.Sprintf(colourSeq, 94), // blue
WARNING: fmt.Sprintf(colourSeq, 95), // pink
ERROR: fmt.Sprintf(colourSeq, 91), // red
FATAL: fmt.Sprintf(colourSeq, 91), // red
}

// ColouredFormatter colours log messages with ANSI escape codes
// and adds filename and line number before the log message
// See https://en.wikipedia.org/wiki/ANSI_escape_code
type ColouredFormatter struct {
}

// GetPrefix returns colour escape code
func (f *ColouredFormatter) GetPrefix(lvl level) string {
return colour[lvl]
}

// GetSuffix returns reset sequence code
func (f *ColouredFormatter) GetSuffix(lvl level) string {
return resetSeq
}

// Format adds filename and line number before the log message
func (f *ColouredFormatter) Format(lvl level, v ...interface{}) []interface{} {
return append([]interface{}{header()}, v...)
}

+ 20
- 0
vendor/github.com/RichardKnop/logging/default_formatter.go View File

@@ -0,0 +1,20 @@
package logging

// DefaultFormatter adds filename and line number before the log message
type DefaultFormatter struct {
}

// GetPrefix returns ""
func (f *DefaultFormatter) GetPrefix(lvl level) string {
return ""
}

// GetSuffix returns ""
func (f *DefaultFormatter) GetSuffix(lvl level) string {
return ""
}

// Format adds filename and line number before the log message
func (f *DefaultFormatter) Format(lvl level, v ...interface{}) []interface{} {
return append([]interface{}{header()}, v...)
}

+ 30
- 0
vendor/github.com/RichardKnop/logging/formatter_interface.go View File

@@ -0,0 +1,30 @@
package logging

import (
"fmt"
"path/filepath"
"runtime"
)

const (
// Runtime caller depth
depth = 3
)

// Formatter interface
type Formatter interface {
GetPrefix(lvl level) string
Format(lvl level, v ...interface{}) []interface{}
GetSuffix(lvl level) string
}

// Returns header including filename and line number
func header() string {
_, fn, line, ok := runtime.Caller(depth)
if !ok {
fn = "???"
line = 1
}

return fmt.Sprintf("%s:%d ", filepath.Base(fn), line)
}

+ 7
- 0
vendor/github.com/RichardKnop/logging/go.mod View File

@@ -0,0 +1,7 @@
module github.com/RichardKnop/logging

require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
)

+ 6
- 0
vendor/github.com/RichardKnop/logging/go.sum View File

@@ -0,0 +1,6 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

+ 9
- 0
vendor/github.com/RichardKnop/logging/gometalinter.json View File

@@ -0,0 +1,9 @@
{
"Linters":
{
"vet":
{
"Command": "go tool vet"
}
}
}

+ 17
- 0
vendor/github.com/RichardKnop/logging/interface.go View File

@@ -0,0 +1,17 @@
package logging

// LoggerInterface will accept the stdlib logger as well as a custom logger.
// There's no standard interface; this is the closest we get, unfortunately.
type LoggerInterface interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})

Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})

Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
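
The stdlib *log.Logger already provides all nine methods with these signatures, which a compile-time assertion can confirm; a sketch:

```go
import (
	"log"
	"os"

	"github.com/RichardKnop/logging"
)

// The stdlib logger satisfies LoggerInterface without any adapter.
var _ logging.LoggerInterface = log.New(os.Stdout, "", log.LstdFlags)
```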

+ 134
- 0
vendor/github.com/RichardKnop/logging/logger.go View File

@@ -0,0 +1,134 @@
package logging

import (
"io"
"log"
"os"
)

// Level type
type level int

const (
// DEBUG level
DEBUG level = iota
// INFO level
INFO
// WARNING level
WARNING
// ERROR level
ERROR
// FATAL level
FATAL

flag = log.Ldate | log.Ltime
)

// Log level prefix map
var prefix = map[level]string{
DEBUG: "DEBUG: ",
INFO: "INFO: ",
WARNING: "WARNING: ",
ERROR: "ERROR: ",
FATAL: "FATAL: ",
}

// Logger ...
type Logger map[level]LoggerInterface

// New returns instance of Logger
func New(out, errOut io.Writer, f Formatter) Logger {
// Fall back to stdout if out not set
if out == nil {
out = os.Stdout
}

// Fall back to stderr if errOut not set
if errOut == nil {
errOut = os.Stderr
}

// Fall back to DefaultFormatter if f not set
if f == nil {
f = new(DefaultFormatter)
}

l := make(map[level]LoggerInterface, 5)
l[DEBUG] = &Wrapper{lvl: DEBUG, formatter: f, logger: log.New(out, f.GetPrefix(DEBUG)+prefix[DEBUG], flag)}
l[INFO] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(out, f.GetPrefix(INFO)+prefix[INFO], flag)}
l[WARNING] = &Wrapper{lvl: WARNING, formatter: f, logger: log.New(out, f.GetPrefix(WARNING)+prefix[WARNING], flag)}
l[ERROR] = &Wrapper{lvl: ERROR, formatter: f, logger: log.New(errOut, f.GetPrefix(ERROR)+prefix[ERROR], flag)}
l[FATAL] = &Wrapper{lvl: FATAL, formatter: f, logger: log.New(errOut, f.GetPrefix(FATAL)+prefix[FATAL], flag)}

return Logger(l)
}
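
New's nil fallbacks make it easy to split destinations; a sketch that keeps non-error levels in a file while ERROR/FATAL fall back to stderr (the file path is illustrative):

```go
import (
	"os"

	"github.com/RichardKnop/logging"
)

// newFileLogger writes non-error levels to app.log; errOut is nil, so
// ERROR and FATAL fall back to stderr as documented above.
func newFileLogger() (logging.Logger, error) {
	f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	return logging.New(f, nil, new(logging.DefaultFormatter)), nil
}
```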

// Wrapper ...
type Wrapper struct {
lvl level
formatter Formatter
logger LoggerInterface
}

// Print ...
func (w *Wrapper) Print(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Print(v...)
}

// Printf ...
func (w *Wrapper) Printf(format string, v ...interface{}) {
suffix := w.formatter.GetSuffix(w.lvl)
v = w.formatter.Format(w.lvl, v...)
w.logger.Printf("%s"+format+suffix, v...)
}

// Println ...
func (w *Wrapper) Println(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Println(v...)
}

// Fatal ...
func (w *Wrapper) Fatal(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Fatal(v...)
}

// Fatalf ...
func (w *Wrapper) Fatalf(format string, v ...interface{}) {
suffix := w.formatter.GetSuffix(w.lvl)
v = w.formatter.Format(w.lvl, v...)
w.logger.Fatalf("%s"+format+suffix, v...)
}

// Fatalln ...
func (w *Wrapper) Fatalln(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Fatalln(v...)
}

// Panic ...
func (w *Wrapper) Panic(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Panic(v...)
}

// Panicf ...
func (w *Wrapper) Panicf(format string, v ...interface{}) {
suffix := w.formatter.GetSuffix(w.lvl)
v = w.formatter.Format(w.lvl, v...)
w.logger.Panicf("%s"+format+suffix, v...)
}

// Panicln ...
func (w *Wrapper) Panicln(v ...interface{}) {
v = w.formatter.Format(w.lvl, v...)
v = append(v, w.formatter.GetSuffix(w.lvl))
w.logger.Panicln(v...)
}

+ 354
- 0
vendor/github.com/RichardKnop/machinery/LICENSE View File

@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0

1. Definitions

1.1. “Contributor”

means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.

1.2. “Contributor Version”

means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.

1.3. “Contribution”

means Covered Software of a particular Contributor.

1.4. “Covered Software”

means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.

1.5. “Incompatible With Secondary Licenses”
means

a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or

b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.

1.6. “Executable Form”

means any form of the work other than Source Code Form.

1.7. “Larger Work”

means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.

1.8. “License”

means this document.

1.9. “Licensable”

means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.

1.10. “Modifications”

means any of the following:

a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or

b. any new file in Source Code Form that contains any Covered Software.

1.11. “Patent Claims” of a Contributor

means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.

1.12. “Secondary License”

means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.

1.13. “Source Code Form”

means the form of the work preferred for making modifications.

1.14. “You” (or “Your”)

means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.


2. License Grants and Conditions

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and

b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:

a. for any code that a Contributor has removed from Covered Software; or

b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or

c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.

This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.


3. Responsibilities

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and

b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.


10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.


+ 393
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/amqp/amqp.go

@@ -0,0 +1,393 @@
package amqp

// NOTE: Using AMQP as a result backend is quite tricky since every time we
// read a message from the queue keeping task states, the message is removed
// from the queue. This leads to problems with keeping a reliable state of a
// group of tasks since concurrent processes updating the group state cause
// race conditions and inconsistent state.
//
// This is avoided by a "clever" hack. A special queue identified by a group
// UUID is created and we store serialised TaskState objects of successfully
// completed tasks. By inspecting the queue we can then say:
// 1) If all group tasks finished (number of unacked messages = group task count)
// 2) If all group tasks finished AND succeeded (by consuming the queue)
//
// It is important to consume the queue exclusively to avoid race conditions.
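//
// For illustration (numbers hypothetical): with a group of 3 tasks, each
// successful task publishes one persistent message carrying its serialised
// TaskState to the queue named after the group UUID, so completion can be
// checked passively:
//
//	queueState, _ := b.InspectQueue(channel, groupUUID)
//	done := queueState.Messages == 3 // all 3 task states recorded
//
// GroupTaskStates then consumes that queue exclusively to read the states.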

import (
"bytes"
"encoding/json"
"errors"
"fmt"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/streadway/amqp"
)

// Backend represents an AMQP result backend
type Backend struct {
common.Backend
common.AMQPConnector
}

// New creates Backend instance
func New(cnf *config.Config) iface.Backend {
return &Backend{Backend: common.NewBackend(cnf), AMQPConnector: common.AMQPConnector{}}
}

// InitGroup is a noop for the AMQP backend, since group state is derived
// from the group's queue rather than a stored meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
return nil
}

// GroupCompleted returns true if all tasks in a group finished
// NOTE: Given AMQP limitations this will only return true if all finished
// tasks were successful, as we do not keep track of failed tasks
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
if err != nil {
return false, err
}
defer b.Close(channel, conn)

queueState, err := b.InspectQueue(channel, groupUUID)
if err != nil {
return false, nil
}

return queueState.Messages == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
if err != nil {
return nil, err
}
defer b.Close(channel, conn)

queueState, err := b.InspectQueue(channel, groupUUID)
if err != nil {
return nil, err
}

if queueState.Messages != groupTaskCount {
return nil, fmt.Errorf("Already consumed: only %d of %d task states remain in the queue", queueState.Messages, groupTaskCount)
}

deliveries, err := channel.Consume(
groupUUID, // queue name
"", // consumer tag
false, // auto-ack
true, // exclusive
false, // no-local
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Queue consume error: %s", err)
}

states := make([]*tasks.TaskState, groupTaskCount)
for i := 0; i < groupTaskCount; i++ {
d := <-deliveries

state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body)))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
d.Nack(false, false) // multiple, requeue
return nil, err
}

d.Ack(false) // multiple

states[i] = state
}

return states, nil
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger the chord (true) or not if it has been
// triggered already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
if err != nil {
return false, err
}
defer b.Close(channel, conn)

_, err = b.InspectQueue(channel, amqmChordTriggeredQueue(groupUUID))
if err != nil {
return true, nil
}

return false, nil
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
taskState := tasks.NewPendingTaskState(signature)
return b.updateState(taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
taskState := tasks.NewReceivedTaskState(signature)
return b.updateState(taskState)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
taskState := tasks.NewStartedTaskState(signature)
return b.updateState(taskState)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
state := tasks.NewRetryTaskState(signature)
return b.updateState(state)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
taskState := tasks.NewSuccessTaskState(signature, results)

if err := b.updateState(taskState); err != nil {
return err
}

if signature.GroupUUID == "" {
return nil
}

return b.markTaskCompleted(signature, taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
taskState := tasks.NewFailureTaskState(signature, err)

if err := b.updateState(taskState); err != nil {
return err
}

if signature.GroupUUID == "" {
return nil
}

return b.markTaskCompleted(signature, taskState)
}

// GetState returns the latest task state. It will only return the status once
// as the message will get consumed and removed from the queue.
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
declareQueueArgs := amqp.Table{
// Time in milliseconds after which the message will expire
"x-message-ttl": int32(b.getExpiresIn()),
// Time in milliseconds after which an unused queue will be deleted
"x-expires": int32(b.getExpiresIn()),
}
conn, channel, _, _, _, err := b.Connect(
b.GetConfig().ResultBackend,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
taskUUID, // queue name
false, // queue durable
true, // queue delete when unused
taskUUID, // queue binding key
nil, // exchange declare args
declareQueueArgs, // queue declare args
nil, // queue binding args
)
if err != nil {
return nil, err
}
defer b.Close(channel, conn)

d, ok, err := channel.Get(
taskUUID, // queue name
false, // auto-ack
)
if err != nil {
return nil, err
}
if !ok {
return nil, errors.New("No state ready")
}

d.Ack(false)

state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body)))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
log.ERROR.Printf("Failed to unmarshal task state: %s", string(d.Body))
log.ERROR.Print(err)
return nil, err
}

return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
if err != nil {
return err
}
defer b.Close(channel, conn)

return b.DeleteQueue(channel, taskUUID)
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
if err != nil {
return err
}
defer b.Close(channel, conn)

b.DeleteQueue(channel, amqmChordTriggeredQueue(groupUUID))

return b.DeleteQueue(channel, groupUUID)
}

// updateState saves current task state
func (b *Backend) updateState(taskState *tasks.TaskState) error {
message, err := json.Marshal(taskState)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

declareQueueArgs := amqp.Table{
// Time in milliseconds after which the message will expire
"x-message-ttl": int32(b.getExpiresIn()),
// Time in milliseconds after which an unused queue will be deleted
"x-expires": int32(b.getExpiresIn()),
}
conn, channel, queue, confirmsChan, _, err := b.Connect(
b.GetConfig().ResultBackend,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
taskState.TaskUUID, // queue name
false, // queue durable
true, // queue delete when unused
taskState.TaskUUID, // queue binding key
nil, // exchange declare args
declareQueueArgs, // queue declare args
nil, // queue binding args
)
if err != nil {
return err
}
defer b.Close(channel, conn)

if err := channel.Publish(
b.GetConfig().AMQP.Exchange, // exchange
queue.Name, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/json",
Body: message,
DeliveryMode: amqp.Persistent, // persistent rather than transient delivery
},
); err != nil {
return err
}

confirmed := <-confirmsChan

if confirmed.Ack {
return nil
}

return fmt.Errorf("Failed delivery of delivery tag: %d", confirmed.DeliveryTag)
}

// getExpiresIn returns expiration time
func (b *Backend) getExpiresIn() int {
resultsExpireIn := b.GetConfig().ResultsExpireIn * 1000
if resultsExpireIn == 0 {
// expire results after 1 hour by default
resultsExpireIn = config.DefaultResultsExpireIn * 1000
}
return resultsExpireIn
}

// markTaskCompleted records a completed task by publishing its serialised
// state to the queue named after the group UUID. GroupCompleted and
// GroupTaskStates rely on this queue to track group progress
func (b *Backend) markTaskCompleted(signature *tasks.Signature, taskState *tasks.TaskState) error {
if signature.GroupUUID == "" || signature.GroupTaskCount == 0 {
return nil
}

message, err := json.Marshal(taskState)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

declareQueueArgs := amqp.Table{
// Time in milliseconds after which the message will expire
"x-message-ttl": int32(b.getExpiresIn()),
// Time in milliseconds after which an unused queue will be deleted
"x-expires": int32(b.getExpiresIn()),
}
conn, channel, queue, confirmsChan, _, err := b.Connect(
b.GetConfig().ResultBackend,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
signature.GroupUUID, // queue name
false, // queue durable
true, // queue delete when unused
signature.GroupUUID, // queue binding key
nil, // exchange declare args
declareQueueArgs, // queue declare args
nil, // queue binding args
)
if err != nil {
return err
}
defer b.Close(channel, conn)

if err := channel.Publish(
b.GetConfig().AMQP.Exchange, // exchange
queue.Name, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/json",
Body: message,
DeliveryMode: amqp.Persistent, // persistent rather than transient delivery
},
); err != nil {
return err
}

confirmed := <-confirmsChan

if !confirmed.Ack {
return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
}

return nil
}

func amqmChordTriggeredQueue(groupUUID string) string {
return fmt.Sprintf("%s_chord_triggered", groupUUID)
}
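
For reference, the group-completion check implemented above can be reproduced against the broker directly, since the backend stores exactly one persistent message per finished task in a queue named after the group UUID. Below is a minimal sketch using the same streadway/amqp client this file imports; the broker URL, queue name, and group size are placeholder assumptions, not values from this repository:

package main

import (
	"fmt"
	"log"

	"github.com/streadway/amqp"
)

func main() {
	// Assumed broker URL for illustration only.
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672/")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}
	defer ch.Close()

	// Passively inspect the queue holding serialised TaskState messages
	// for the group; the queue name is the (assumed) group UUID.
	q, err := ch.QueueInspect("group-uuid-1234")
	if err != nil {
		log.Fatal(err)
	}

	groupTaskCount := 3 // assumed group size
	fmt.Println("group completed:", q.Messages == groupTaskCount)
}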

+ 512
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/dynamodb/dynamodb.go

@@ -0,0 +1,512 @@
package dynamodb

import (
"errors"
"fmt"
"time"

"github.com/aws/aws-sdk-go/aws/session"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
)

// Backend represents a DynamoDB result backend
type Backend struct {
common.Backend
cnf *config.Config
client dynamodbiface.DynamoDBAPI
}

// New creates a Backend instance
func New(cnf *config.Config) iface.Backend {
backend := &Backend{Backend: common.NewBackend(cnf), cnf: cnf}

if cnf.DynamoDB != nil && cnf.DynamoDB.Client != nil {
backend.client = cnf.DynamoDB.Client
} else {
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
backend.client = dynamodb.New(sess)
}

// Check if needed tables exist
err := backend.checkRequiredTablesIfExist()
if err != nil {
log.FATAL.Printf("Failed to prepare tables. Error: %v", err)
}
return backend
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
meta := tasks.GroupMeta{
GroupUUID: groupUUID,
TaskUUIDs: taskUUIDs,
CreatedAt: time.Now().UTC(),
}
av, err := dynamodbattribute.MarshalMap(meta)
if err != nil {
log.ERROR.Printf("Error when marshaling Dynamodb attributes. Err: %v", err)
return err
}
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
}
_, err = b.client.PutItem(input)

if err != nil {
log.ERROR.Printf("Got error when calling PutItem: %v; Error: %v", input, err)
return err
}
return nil
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}
taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
if err != nil {
return false, err
}
var countSuccessTasks = 0
for _, taskState := range taskStates {
if taskState.IsCompleted() {
countSuccessTasks++
}
}

return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return nil, err
}

return b.getStates(groupMeta.TaskUUIDs...)
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger the chord (true) or not if it has been
// triggered already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
// Get the group meta data
groupMeta, err := b.getGroupMeta(groupUUID)

if err != nil {
return false, err
}

// Chord has already been triggered, return false (should not trigger again)
if groupMeta.ChordTriggered {
return false, nil
}

// If group meta is locked, wait until it's unlocked
for groupMeta.Lock {
groupMeta, _ = b.getGroupMeta(groupUUID)
log.WARNING.Print("Group meta locked, waiting")
time.Sleep(time.Millisecond * 5)
}

// Acquire lock
if err = b.lockGroupMeta(groupUUID); err != nil {
return false, err
}
defer b.unlockGroupMeta(groupUUID)

// update group meta data
err = b.chordTriggered(groupUUID)
if err != nil {
return false, err
}
return true, err
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
taskState := tasks.NewPendingTaskState(signature)
// TaskUUID is the primary key of the table, so a new item needs to be
// created first instead of using dynamodb.UpdateItemInput directly
return b.initTaskState(taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
taskState := tasks.NewReceivedTaskState(signature)
return b.setTaskState(taskState)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
taskState := tasks.NewStartedTaskState(signature)
return b.setTaskState(taskState)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
taskState := tasks.NewRetryTaskState(signature)
return b.setTaskState(taskState)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
taskState := tasks.NewSuccessTaskState(signature, results)
return b.setTaskState(taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
taskState := tasks.NewFailureTaskState(signature, err)
return b.updateToFailureStateWithError(taskState)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
result, err := b.client.GetItem(&dynamodb.GetItemInput{
TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
Key: map[string]*dynamodb.AttributeValue{
"TaskUUID": {
S: aws.String(taskUUID),
},
},
})
if err != nil {
return nil, err
}
return b.unmarshalTaskStateGetItemResult(result)
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
input := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"TaskUUID": {
S: aws.String(taskUUID),
},
},
TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
}
_, err := b.client.DeleteItem(input)

if err != nil {
return err
}
return nil
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
input := &dynamodb.DeleteItemInput{
Key: map[string]*dynamodb.AttributeValue{
"GroupUUID": {
S: aws.String(groupUUID),
},
},
TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
}
_, err := b.client.DeleteItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
result, err := b.client.GetItem(&dynamodb.GetItemInput{
TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
Key: map[string]*dynamodb.AttributeValue{
"GroupUUID": {
S: aws.String(groupUUID),
},
},
})
if err != nil {
log.ERROR.Printf("Error when getting group meta. Error: %v", err)
return nil, err
}
item, err := b.unmarshalGroupMetaGetItemResult(result)
if err != nil {
log.ERROR.Printf("Failed to unmarshal group meta %v: %v", result, err)
return nil, err
}
return item, nil
}

func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
states := make([]*tasks.TaskState, 0, len(taskUUIDs))
stateChan := make(chan *tasks.TaskState, len(taskUUIDs))
errChan := make(chan error, len(taskUUIDs))
// DynamoDB has no way of querying items by an `in` list of primary keys,
// so each item is fetched concurrently in its own goroutine
for _, id := range taskUUIDs {
go func(id string) {
state, err := b.GetState(id)
if err != nil {
errChan <- err
return
}
stateChan <- state
}(id)
}

for i := 0; i < len(taskUUIDs); i++ {
select {
case err := <-errChan:
return nil, err
case s := <-stateChan:
states = append(states, s)
}
}
return states, nil
}

func (b *Backend) lockGroupMeta(groupUUID string) error {
err := b.updateGroupMetaLock(groupUUID, true)
if err != nil {
return err
}
return nil
}

func (b *Backend) unlockGroupMeta(groupUUID string) error {
err := b.updateGroupMetaLock(groupUUID, false)
if err != nil {
return err
}
return nil
}

func (b *Backend) updateGroupMetaLock(groupUUID string, status bool) error {
input := &dynamodb.UpdateItemInput{
ExpressionAttributeNames: map[string]*string{
"#L": aws.String("Lock"),
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":l": {
BOOL: aws.Bool(status),
},
},
Key: map[string]*dynamodb.AttributeValue{
"GroupUUID": {
S: aws.String(groupUUID),
},
},
ReturnValues: aws.String("UPDATED_NEW"),
TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
UpdateExpression: aws.String("SET #L = :l"),
}

_, err := b.client.UpdateItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) chordTriggered(groupUUID string) error {
input := &dynamodb.UpdateItemInput{
ExpressionAttributeNames: map[string]*string{
"#CT": aws.String("ChordTriggered"),
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":ct": {
BOOL: aws.Bool(true),
},
},
Key: map[string]*dynamodb.AttributeValue{
"GroupUUID": {
S: aws.String(groupUUID),
},
},
ReturnValues: aws.String("UPDATED_NEW"),
TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
UpdateExpression: aws.String("SET #CT = :ct"),
}

_, err := b.client.UpdateItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) setTaskState(taskState *tasks.TaskState) error {
expAttributeNames := map[string]*string{
"#S": aws.String("State"),
}
expAttributeValues := map[string]*dynamodb.AttributeValue{
":s": {
S: aws.String(taskState.State),
},
}
keyAttributeValues := map[string]*dynamodb.AttributeValue{
"TaskUUID": {
S: aws.String(taskState.TaskUUID),
},
}
exp := "SET #S = :s"
if !taskState.CreatedAt.IsZero() {
expAttributeNames["#C"] = aws.String("CreatedAt")
expAttributeValues[":c"] = &dynamodb.AttributeValue{
S: aws.String(taskState.CreatedAt.String()),
}
exp += ", #C = :c"
}
if taskState.Results != nil && len(taskState.Results) != 0 {
expAttributeNames["#R"] = aws.String("Results")
var results []*dynamodb.AttributeValue
for _, r := range taskState.Results {
avMap := map[string]*dynamodb.AttributeValue{
"Type": {
S: aws.String(r.Type),
},
"Value": {
S: aws.String(fmt.Sprintf("%v", r.Value)),
},
}
rs := &dynamodb.AttributeValue{
M: avMap,
}
results = append(results, rs)
}
expAttributeValues[":r"] = &dynamodb.AttributeValue{
L: results,
}
exp += ", #R = :r"
}
input := &dynamodb.UpdateItemInput{
ExpressionAttributeNames: expAttributeNames,
ExpressionAttributeValues: expAttributeValues,
Key: keyAttributeValues,
ReturnValues: aws.String("UPDATED_NEW"),
TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
UpdateExpression: aws.String(exp),
}

_, err := b.client.UpdateItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) initTaskState(taskState *tasks.TaskState) error {
av, err := dynamodbattribute.MarshalMap(taskState)
input := &dynamodb.PutItemInput{
Item: av,
TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
}
if err != nil {
return err
}
_, err = b.client.PutItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) updateToFailureStateWithError(taskState *tasks.TaskState) error {
input := &dynamodb.UpdateItemInput{
ExpressionAttributeNames: map[string]*string{
"#S": aws.String("State"),
"#E": aws.String("Error"),
},
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
":s": {
S: aws.String(taskState.State),
},
":e": {
S: aws.String(taskState.Error),
},
},
Key: map[string]*dynamodb.AttributeValue{
"TaskUUID": {
S: aws.String(taskState.TaskUUID),
},
},
ReturnValues: aws.String("UPDATED_NEW"),
TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
UpdateExpression: aws.String("SET #S = :s, #E = :e"),
}

_, err := b.client.UpdateItem(input)

if err != nil {
return err
}
return nil
}

func (b *Backend) unmarshalGroupMetaGetItemResult(result *dynamodb.GetItemOutput) (*tasks.GroupMeta, error) {
if result == nil {
err := errors.New("task state is nil")
log.ERROR.Printf("Got error when unmarshal map. Error: %v", err)
return nil, err
}
item := tasks.GroupMeta{}
err := dynamodbattribute.UnmarshalMap(result.Item, &item)
if err != nil {
log.ERROR.Printf("Got error when unmarshal map. Error: %v", err)
return nil, err
}
return &item, err
}

func (b *Backend) unmarshalTaskStateGetItemResult(result *dynamodb.GetItemOutput) (*tasks.TaskState, error) {
if result == nil {
err := errors.New("task state is nil")
log.ERROR.Printf("Got error when unmarshal map. Error: %v", err)
return nil, err
}
state := tasks.TaskState{}
err := dynamodbattribute.UnmarshalMap(result.Item, &state)
if err != nil {
log.ERROR.Printf("Got error when unmarshal map. Error: %v", err)
return nil, err
}
return &state, nil
}

func (b *Backend) checkRequiredTablesIfExist() error {
var (
taskTableName = b.cnf.DynamoDB.TaskStatesTable
groupTableName = b.cnf.DynamoDB.GroupMetasTable
)
result, err := b.client.ListTables(&dynamodb.ListTablesInput{})
if err != nil {
return err
}
if !b.tableExists(taskTableName, result.TableNames) {
return errors.New("task table doesn't exist")
}
if !b.tableExists(groupTableName, result.TableNames) {
return errors.New("group table doesn't exist")
}
return nil
}

func (b *Backend) tableExists(tableName string, tableNames []*string) bool {
for _, t := range tableNames {
if tableName == *t {
return true
}
}
return false
}
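
Note that New only verifies that the two required tables already exist (via checkRequiredTablesIfExist); it never creates them. The sketch below provisions them with the same aws-sdk-go client; the table names task_states and group_metas are assumptions for illustration and must match cnf.DynamoDB.TaskStatesTable and cnf.DynamoDB.GroupMetasTable:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// createTable provisions a table with a single string hash key, matching
// how TaskUUID and GroupUUID are used by the backend above.
func createTable(client *dynamodb.DynamoDB, table, key string) error {
	_, err := client.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String(table),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String(key), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String(key), KeyType: aws.String("HASH")},
		},
		BillingMode: aws.String("PAY_PER_REQUEST"),
	})
	return err
}

func main() {
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	client := dynamodb.New(sess)

	// Assumed table names; adjust to the configured values.
	if err := createTable(client, "task_states", "TaskUUID"); err != nil {
		log.Fatal(err)
	}
	if err := createTable(client, "group_metas", "GroupUUID"); err != nil {
		log.Fatal(err)
	}
}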

+ 210
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/eager/eager.go

@@ -0,0 +1,210 @@
package eager

import (
"bytes"
"encoding/json"
"fmt"
"sync"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/tasks"
)

// ErrGroupNotFound is returned when the given group UUID is unknown to the backend
type ErrGroupNotFound struct {
groupUUID string
}

// NewErrGroupNotFound returns new instance of ErrGroupNotFound
func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
return ErrGroupNotFound{groupUUID: groupUUID}
}

// Error implements error interface
func (e ErrGroupNotFound) Error() string {
return fmt.Sprintf("Group not found: %v", e.groupUUID)
}

// ErrTasknotFound is returned when the given task UUID is unknown to the backend
type ErrTasknotFound struct {
taskUUID string
}

// NewErrTasknotFound returns new instance of ErrTasknotFound
func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
return ErrTasknotFound{taskUUID: taskUUID}
}

// Error implements error interface
func (e ErrTasknotFound) Error() string {
return fmt.Sprintf("Task not found: %v", e.taskUUID)
}

// Backend represents an "eager" in-memory result backend
type Backend struct {
common.Backend
groups map[string][]string
tasks map[string][]byte
stateMutex sync.Mutex
}

// New creates an eager Backend instance
func New() iface.Backend {
return &Backend{
Backend: common.NewBackend(new(config.Config)),
groups: make(map[string][]string),
tasks: make(map[string][]byte),
}
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
tasks := make([]string, 0, len(taskUUIDs))
// copy every task
for _, v := range taskUUIDs {
tasks = append(tasks, v)
}

b.groups[groupUUID] = tasks
return nil
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
tasks, ok := b.groups[groupUUID]
if !ok {
return false, NewErrGroupNotFound(groupUUID)
}

var countSuccessTasks = 0
for _, v := range tasks {
t, err := b.GetState(v)
if err != nil {
return false, err
}

if t.IsCompleted() {
countSuccessTasks++
}
}

return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
taskUUIDs, ok := b.groups[groupUUID]
if !ok {
return nil, NewErrGroupNotFound(groupUUID)
}

ret := make([]*tasks.TaskState, 0, groupTaskCount)
for _, taskUUID := range taskUUIDs {
t, err := b.GetState(taskUUID)
if err != nil {
return nil, err
}

ret = append(ret, t)
}

return ret, nil
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger the chord (true) or not if it has been
// triggered already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
return true, nil
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
state := tasks.NewPendingTaskState(signature)
return b.updateState(state)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
state := tasks.NewReceivedTaskState(signature)
return b.updateState(state)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
state := tasks.NewStartedTaskState(signature)
return b.updateState(state)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
state := tasks.NewRetryTaskState(signature)
return b.updateState(state)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
state := tasks.NewSuccessTaskState(signature, results)
return b.updateState(state)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
state := tasks.NewFailureTaskState(signature, err)
return b.updateState(state)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
taskStateBytes, ok := b.tasks[taskUUID]
if !ok {
return nil, NewErrTasknotFound(taskUUID)
}

state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader(taskStateBytes))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
return nil, fmt.Errorf("Failed to unmarshal task state for task %s: %v", taskUUID, err)
}

return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
_, ok := b.tasks[taskUUID]
if !ok {
return NewErrTasknotFound(taskUUID)
}

delete(b.tasks, taskUUID)
return nil
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
_, ok := b.groups[groupUUID]
if !ok {
return NewErrGroupNotFound(groupUUID)
}

delete(b.groups, groupUUID)
return nil
}

func (b *Backend) updateState(s *tasks.TaskState) error {
// simulate the behavior of json marshal/unmarshal
b.stateMutex.Lock()
defer b.stateMutex.Unlock()
msg, err := json.Marshal(s)
if err != nil {
return fmt.Errorf("Marshal task state error: %v", err)
}

b.tasks[s.TaskUUID] = msg
return nil
}
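
Because the eager backend keeps everything in process memory, it is convenient for tests that exercise task-state transitions without an external store. A minimal round-trip sketch; the task UUID and name are arbitrary placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/RichardKnop/machinery/v1/backends/eager"
	"github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
	backend := eager.New()

	sig := &tasks.Signature{UUID: "task_1", Name: "add"} // arbitrary values
	if err := backend.SetStatePending(sig); err != nil {
		log.Fatal(err)
	}
	if err := backend.SetStateStarted(sig); err != nil {
		log.Fatal(err)
	}

	state, err := backend.GetState("task_1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state.State) // STARTED
}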

+ 28
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/iface/interfaces.go

@@ -0,0 +1,28 @@
package iface

import (
"github.com/RichardKnop/machinery/v1/tasks"
)

// Backend - a common interface for all result backends
type Backend interface {
// Group related functions
InitGroup(groupUUID string, taskUUIDs []string) error
GroupCompleted(groupUUID string, groupTaskCount int) (bool, error)
GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error)
TriggerChord(groupUUID string) (bool, error)

// Setting / getting task state
SetStatePending(signature *tasks.Signature) error
SetStateReceived(signature *tasks.Signature) error
SetStateStarted(signature *tasks.Signature) error
SetStateRetry(signature *tasks.Signature) error
SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error
SetStateFailure(signature *tasks.Signature, err string) error
GetState(taskUUID string) (*tasks.TaskState, error)

// Purging stored task states and group meta data
IsAMQP() bool
PurgeState(taskUUID string) error
PurgeGroupMeta(groupUUID string) error
}
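
Every backend in this vendor tree is constructed behind this interface, so worker code can swap result stores without changing call sites. As a sketch of that design, a caller might select a backend from the configured URI roughly as follows; newBackend is a hypothetical helper for illustration, not machinery's actual factory:

package main

import (
	"fmt"
	"strings"

	"github.com/RichardKnop/machinery/v1/backends/eager"
	"github.com/RichardKnop/machinery/v1/backends/iface"
	"github.com/RichardKnop/machinery/v1/backends/memcache"
	"github.com/RichardKnop/machinery/v1/config"
)

// newBackend is a hypothetical selector; machinery ships its own factory.
func newBackend(cnf *config.Config) (iface.Backend, error) {
	switch {
	case strings.HasPrefix(cnf.ResultBackend, "memcache://"):
		servers := strings.Split(strings.TrimPrefix(cnf.ResultBackend, "memcache://"), ",")
		return memcache.New(cnf, servers), nil
	case cnf.ResultBackend == "eager":
		return eager.New(), nil
	default:
		return nil, fmt.Errorf("unsupported result backend: %s", cnf.ResultBackend)
	}
}

func main() {
	backend, err := newBackend(&config.Config{ResultBackend: "eager"})
	fmt.Println(backend, err)
}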

+ 292
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/memcache/memcache.go

@@ -0,0 +1,292 @@
package memcache

import (
"bytes"
"encoding/json"
"time"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"

gomemcache "github.com/bradfitz/gomemcache/memcache"
)

// Backend represents a Memcache result backend
type Backend struct {
common.Backend
servers []string
client *gomemcache.Client
}

// New creates Backend instance
func New(cnf *config.Config, servers []string) iface.Backend {
return &Backend{
Backend: common.NewBackend(cnf),
servers: servers,
}
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
groupMeta := &tasks.GroupMeta{
GroupUUID: groupUUID,
TaskUUIDs: taskUUIDs,
CreatedAt: time.Now().UTC(),
}

encoded, err := json.Marshal(&groupMeta)
if err != nil {
return err
}

return b.getClient().Set(&gomemcache.Item{
Key: groupUUID,
Value: encoded,
Expiration: b.getExpirationTimestamp(),
})
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}

taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
if err != nil {
return false, err
}

var countSuccessTasks = 0
for _, taskState := range taskStates {
if taskState.IsCompleted() {
countSuccessTasks++
}
}

return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return []*tasks.TaskState{}, err
}

return b.getStates(groupMeta.TaskUUIDs...)
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger the chord (true) or not if it has been
// triggered already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}

// Chord has already been triggered, return false (should not trigger again)
if groupMeta.ChordTriggered {
return false, nil
}

// If group meta is locked, wait until it's unlocked
for groupMeta.Lock {
groupMeta, _ = b.getGroupMeta(groupUUID)
log.WARNING.Print("Group meta locked, waiting")
time.Sleep(time.Millisecond * 5)
}

// Acquire lock
if err = b.lockGroupMeta(groupMeta); err != nil {
return false, err
}
defer b.unlockGroupMeta(groupMeta)

// Update the group meta data
groupMeta.ChordTriggered = true
encoded, err := json.Marshal(&groupMeta)
if err != nil {
return false, err
}
if err = b.getClient().Replace(&gomemcache.Item{
Key: groupUUID,
Value: encoded,
Expiration: b.getExpirationTimestamp(),
}); err != nil {
return false, err
}

return true, nil
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
taskState := tasks.NewPendingTaskState(signature)
return b.updateState(taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
taskState := tasks.NewReceivedTaskState(signature)
return b.updateState(taskState)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
taskState := tasks.NewStartedTaskState(signature)
return b.updateState(taskState)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
state := tasks.NewRetryTaskState(signature)
return b.updateState(state)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
taskState := tasks.NewSuccessTaskState(signature, results)
return b.updateState(taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
taskState := tasks.NewFailureTaskState(signature, err)
return b.updateState(taskState)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
item, err := b.getClient().Get(taskUUID)
if err != nil {
return nil, err
}

state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader(item.Value))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
return nil, err
}

return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
return b.getClient().Delete(taskUUID)
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
return b.getClient().Delete(groupUUID)
}

// updateState saves current task state
func (b *Backend) updateState(taskState *tasks.TaskState) error {
encoded, err := json.Marshal(taskState)
if err != nil {
return err
}

return b.getClient().Set(&gomemcache.Item{
Key: taskState.TaskUUID,
Value: encoded,
Expiration: b.getExpirationTimestamp(),
})
}

// lockGroupMeta acquires lock on group meta data
func (b *Backend) lockGroupMeta(groupMeta *tasks.GroupMeta) error {
groupMeta.Lock = true
encoded, err := json.Marshal(groupMeta)
if err != nil {
return err
}

return b.getClient().Set(&gomemcache.Item{
Key: groupMeta.GroupUUID,
Value: encoded,
Expiration: b.getExpirationTimestamp(),
})
}

// unlockGroupMeta releases lock on group meta data
func (b *Backend) unlockGroupMeta(groupMeta *tasks.GroupMeta) error {
groupMeta.Lock = false
encoded, err := json.Marshal(groupMeta)
if err != nil {
return err
}

return b.getClient().Set(&gomemcache.Item{
Key: groupMeta.GroupUUID,
Value: encoded,
Expiration: b.getExpirationTimestamp(),
})
}

// getGroupMeta retrieves group meta data, convenience function to avoid repetition
func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
item, err := b.getClient().Get(groupUUID)
if err != nil {
return nil, err
}

groupMeta := new(tasks.GroupMeta)
decoder := json.NewDecoder(bytes.NewReader(item.Value))
decoder.UseNumber()
if err := decoder.Decode(groupMeta); err != nil {
return nil, err
}

return groupMeta, nil
}

// getStates returns multiple task states
func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
states := make([]*tasks.TaskState, len(taskUUIDs))

for i, taskUUID := range taskUUIDs {
item, err := b.getClient().Get(taskUUID)
if err != nil {
return nil, err
}

state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader(item.Value))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
return nil, err
}

states[i] = state
}

return states, nil
}

// getExpirationTimestamp returns expiration timestamp
func (b *Backend) getExpirationTimestamp() int32 {
expiresIn := b.GetConfig().ResultsExpireIn
if expiresIn == 0 {
// expire results after 1 hour by default
expiresIn = config.DefaultResultsExpireIn
}
return int32(time.Now().Unix() + int64(expiresIn))
}

// getClient returns or creates instance of Memcache client
func (b *Backend) getClient() *gomemcache.Client {
if b.client == nil {
b.client = gomemcache.New(b.servers...)
}
return b.client
}
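
Construction takes the config plus a server list, and the gomemcache client itself is created lazily by getClient on first use. A minimal usage sketch, assuming a memcached instance on the default local port:

package main

import (
	"fmt"
	"log"

	"github.com/RichardKnop/machinery/v1/backends/memcache"
	"github.com/RichardKnop/machinery/v1/config"
	"github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
	cnf := &config.Config{
		ResultsExpireIn: 3600, // keep results for one hour
	}
	// Assumed local memcached server.
	backend := memcache.New(cnf, []string{"127.0.0.1:11211"})

	sig := &tasks.Signature{UUID: "task_42"} // arbitrary UUID
	if err := backend.SetStatePending(sig); err != nil {
		log.Fatal(err)
	}

	state, err := backend.GetState("task_42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state.State) // PENDING
}

Note that getExpirationTimestamp passes an absolute Unix timestamp: memcached treats any expiration value larger than 30 days' worth of seconds as an absolute timestamp rather than a relative TTL, which is why adding ResultsExpireIn to time.Now().Unix() works.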

+ 358
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/mongo/mongodb.go

@@ -0,0 +1,358 @@
package mongo

import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"sync"
"time"

"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
)

// Backend represents a MongoDB result backend
type Backend struct {
common.Backend
client *mongo.Client
tc *mongo.Collection
gmc *mongo.Collection
once sync.Once
}

// New creates Backend instance
func New(cnf *config.Config) (iface.Backend, error) {
backend := &Backend{
Backend: common.NewBackend(cnf),
once: sync.Once{},
}

return backend, nil
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
groupMeta := &tasks.GroupMeta{
GroupUUID: groupUUID,
TaskUUIDs: taskUUIDs,
CreatedAt: time.Now().UTC(),
}
_, err := b.groupMetasCollection().InsertOne(context.Background(), groupMeta)
return err
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}

taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
if err != nil {
return false, err
}

var countSuccessTasks = 0
for _, taskState := range taskStates {
if taskState.IsCompleted() {
countSuccessTasks++
}
}

return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return []*tasks.TaskState{}, err
}

return b.getStates(groupMeta.TaskUUIDs...)
}

// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger the chord (true) or not if it has been
// triggered already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
query := bson.M{
"_id": groupUUID,
"chord_triggered": false,
}
change := bson.M{
"$set": bson.M{
"chord_triggered": true,
},
}

_, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update())

if err != nil {
if err == mongo.ErrNoDocuments {
log.WARNING.Printf("Chord already triggered for group %s", groupUUID)
return false, nil
}
return false, err
}
return true, nil
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
update := bson.M{
"state": tasks.StatePending,
"task_name": signature.Name,
"created_at": time.Now().UTC(),
}
return b.updateState(signature, update)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
update := bson.M{"state": tasks.StateReceived}
return b.updateState(signature, update)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
update := bson.M{"state": tasks.StateStarted}
return b.updateState(signature, update)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
update := bson.M{"state": tasks.StateRetry}
return b.updateState(signature, update)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
decodedResults := b.decodeResults(results)
update := bson.M{
"state": tasks.StateSuccess,
"results": decodedResults,
}
return b.updateState(signature, update)
}

// decodeResults detects & decodes json strings in TaskResult.Value and returns a new slice
func (b *Backend) decodeResults(results []*tasks.TaskResult) []*tasks.TaskResult {
l := len(results)
jsonResults := make([]*tasks.TaskResult, l, l)
for i, result := range results {
jsonResult := new(bson.M)
resultType := reflect.TypeOf(result.Value).Kind()
if resultType == reflect.String {
err := json.NewDecoder(strings.NewReader(result.Value.(string))).Decode(&jsonResult)
if err == nil {
jsonResults[i] = &tasks.TaskResult{
Type: "json",
Value: jsonResult,
}
continue
}
}
jsonResults[i] = result
}
return jsonResults
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
update := bson.M{"state": tasks.StateFailure, "error": err}
return b.updateState(signature, update)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
state := &tasks.TaskState{}
err := b.tasksCollection().FindOne(context.Background(), bson.M{"_id": taskUUID}).Decode(state)

if err != nil {
return nil, err
}
return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
_, err := b.tasksCollection().DeleteOne(context.Background(), bson.M{"_id": taskUUID})
return err
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
_, err := b.groupMetasCollection().DeleteOne(context.Background(), bson.M{"_id": groupUUID})
return err
}

// lockGroupMeta acquires lock on groupUUID document
func (b *Backend) lockGroupMeta(groupUUID string) error {
query := bson.M{
"_id": groupUUID,
"lock": false,
}
change := bson.M{
"$set": bson.M{
"lock": true,
},
}

_, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update().SetUpsert(true))

return err
}

// unlockGroupMeta releases lock on groupUUID document
func (b *Backend) unlockGroupMeta(groupUUID string) error {
update := bson.M{"$set": bson.M{"lock": false}}
_, err := b.groupMetasCollection().UpdateOne(context.Background(), bson.M{"_id": groupUUID}, update, options.Update())
return err
}

// getGroupMeta retrieves group meta data, convenience function to avoid repetition
func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
groupMeta := &tasks.GroupMeta{}
query := bson.M{"_id": groupUUID}

err := b.groupMetasCollection().FindOne(context.Background(), query).Decode(groupMeta)
if err != nil {
return nil, err
}
return groupMeta, nil
}

// getStates returns multiple task states
func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
states := make([]*tasks.TaskState, 0, len(taskUUIDs))
cur, err := b.tasksCollection().Find(context.Background(), bson.M{"_id": bson.M{"$in": taskUUIDs}})
if err != nil {
return nil, err
}
defer cur.Close(context.Background())

for cur.Next(context.Background()) {
state := &tasks.TaskState{}
if err := cur.Decode(state); err != nil {
return nil, err
}
states = append(states, state)
}
if err := cur.Err(); err != nil {
return nil, err
}
return states, nil
}

// updateState saves current task state
func (b *Backend) updateState(signature *tasks.Signature, update bson.M) error {
update = bson.M{"$set": update}
_, err := b.tasksCollection().UpdateOne(context.Background(), bson.M{"_id": signature.UUID}, update, options.Update().SetUpsert(true))
return err
}

func (b *Backend) tasksCollection() *mongo.Collection {
b.once.Do(func() {
b.connect()
})

return b.tc
}

func (b *Backend) groupMetasCollection() *mongo.Collection {
b.once.Do(func() {
b.connect()
})

return b.gmc
}

// connect creates the underlying mongo connection if it doesn't exist and
// creates the required indexes for our collections
func (b *Backend) connect() error {
client, err := b.dial()
if err != nil {
return err
}
b.client = client

database := "machinery"

if b.GetConfig().MongoDB != nil {
database = b.GetConfig().MongoDB.Database
}

b.tc = b.client.Database(database).Collection("tasks")
b.gmc = b.client.Database(database).Collection("group_metas")

err = b.createMongoIndexes(database)
if err != nil {
return err
}
return nil
}

// dial returns the pre-configured mongo client if one was provided in the
// config, otherwise it connects via the ResultBackend URI
func (b *Backend) dial() (*mongo.Client, error) {

if b.GetConfig().MongoDB != nil && b.GetConfig().MongoDB.Client != nil {
return b.GetConfig().MongoDB.Client, nil
}

uri := b.GetConfig().ResultBackend
if !strings.HasPrefix(uri, "mongodb://") &&
!strings.HasPrefix(uri, "mongodb+srv://") {
uri = fmt.Sprintf("mongodb://%s", uri)
}

client, err := mongo.NewClient(options.Client().ApplyURI(uri))
if err != nil {
return nil, err
}

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

if err := client.Connect(ctx); err != nil {
return nil, err
}

return client, nil
}

// createMongoIndexes ensures all indexes are in place
func (b *Backend) createMongoIndexes(database string) error {

tasksCollection := b.client.Database(database).Collection("tasks")

expireIn := int32(b.GetConfig().ResultsExpireIn)

_, err := tasksCollection.Indexes().CreateMany(context.Background(), []mongo.IndexModel{
{
Keys: bson.M{"state": 1},
Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn),
},
{
Keys: bson.M{"lock": 1},
Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn),
},
})
if err != nil {
return err
}

return nil
}
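
Wiring this backend up needs only a ResultBackend URI (or a pre-built client via cnf.MongoDB.Client); the connection, collections, and TTL indexes are all created lazily on first use through the sync.Once guard. A minimal sketch, assuming a local mongod:

package main

import (
	"fmt"
	"log"

	mongobackend "github.com/RichardKnop/machinery/v1/backends/mongo"
	"github.com/RichardKnop/machinery/v1/config"
	"github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
	cnf := &config.Config{
		ResultBackend:   "mongodb://127.0.0.1:27017", // assumed local mongod
		ResultsExpireIn: 3600,
	}
	backend, err := mongobackend.New(cnf)
	if err != nil {
		log.Fatal(err)
	}

	sig := &tasks.Signature{UUID: "task_7"} // arbitrary UUID
	if err := backend.SetStatePending(sig); err != nil {
		log.Fatal(err)
	}

	state, err := backend.GetState("task_7")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state.State) // PENDING
}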

+ 150
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/null/null.go

@@ -0,0 +1,150 @@
package null

import (
"fmt"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/tasks"
)

// ErrGroupNotFound is returned when the given group UUID is unknown to the backend
type ErrGroupNotFound struct {
groupUUID string
}

// NewErrGroupNotFound returns new instance of ErrGroupNotFound
func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
return ErrGroupNotFound{groupUUID: groupUUID}
}

// Error implements error interface
func (e ErrGroupNotFound) Error() string {
return fmt.Sprintf("Group not found: %v", e.groupUUID)
}

// ErrTasknotFound is returned when the given task UUID is unknown to the backend
type ErrTasknotFound struct {
taskUUID string
}

// NewErrTasknotFound returns new instance of ErrTasknotFound
func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
return ErrTasknotFound{taskUUID: taskUUID}
}

// Error implements error interface
func (e ErrTasknotFound) Error() string {
return fmt.Sprintf("Task not found: %v", e.taskUUID)
}

// Backend represents a null result backend which tracks group membership
// only and discards all task state
type Backend struct {
common.Backend
groups map[string]struct{}
}

// New creates a null Backend instance
func New() iface.Backend {
return &Backend{
Backend: common.NewBackend(new(config.Config)),
groups: make(map[string]struct{}),
}
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
b.groups[groupUUID] = struct{}{}
return nil
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
_, ok := b.groups[groupUUID]
if !ok {
return false, NewErrGroupNotFound(groupUUID)
}

return true, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
_, ok := b.groups[groupUUID]
if !ok {
return nil, NewErrGroupNotFound(groupUUID)
}

ret := make([]*tasks.TaskState, 0, groupTaskCount)
return ret, nil
}

// TriggerChord flags the chord as triggered in the backend storage to make
// sure the chord is never triggered multiple times. Returns a boolean flag
// indicating whether the worker should trigger the chord (true) or not,
// because it has already been triggered (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
return true, nil
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
state := tasks.NewPendingTaskState(signature)
return b.updateState(state)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
state := tasks.NewReceivedTaskState(signature)
return b.updateState(state)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
state := tasks.NewStartedTaskState(signature)
return b.updateState(state)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
state := tasks.NewRetryTaskState(signature)
return b.updateState(state)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
state := tasks.NewSuccessTaskState(signature, results)
return b.updateState(state)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
state := tasks.NewFailureTaskState(signature, err)
return b.updateState(state)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
return nil, NewErrTasknotFound(taskUUID)
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
return NewErrTasknotFound(taskUUID)
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
_, ok := b.groups[groupUUID]
if !ok {
return NewErrGroupNotFound(groupUUID)
}

return nil
}

func (b *Backend) updateState(s *tasks.TaskState) error {
// the null backend intentionally discards all task state
return nil
}
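The contract is easiest to see end to end; a short sketch using only the functions defined in this file:

package main

import (
    "fmt"

    "github.com/RichardKnop/machinery/v1/backends/null"
    "github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
    b := null.New()

    // updateState is a no-op, so this "succeeds" without storing anything.
    _ = b.SetStateSuccess(&tasks.Signature{UUID: "task_1"}, nil)

    // GetState always reports ErrTasknotFound.
    if _, err := b.GetState("task_1"); err != nil {
        fmt.Println(err) // prints: Task not found: task_1
    }
}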

+ 338
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/redis/redis.go View File

@@ -0,0 +1,338 @@
package redis

import (
"bytes"
"encoding/json"
"fmt"
"sync"
"time"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/RichardKnop/redsync"
"github.com/gomodule/redigo/redis"
)

// Backend represents a Redis result backend
type Backend struct {
common.Backend
host string
password string
db int
pool *redis.Pool
// If set, path to a socket file overrides hostname
socketPath string
redsync *redsync.Redsync
redisOnce sync.Once
common.RedisConnector
}

// New creates a Backend instance
func New(cnf *config.Config, host, password, socketPath string, db int) iface.Backend {
return &Backend{
Backend: common.NewBackend(cnf),
host: host,
db: db,
password: password,
socketPath: socketPath,
}
}

// InitGroup creates and saves a group meta data object
func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
groupMeta := &tasks.GroupMeta{
GroupUUID: groupUUID,
TaskUUIDs: taskUUIDs,
CreatedAt: time.Now().UTC(),
}

encoded, err := json.Marshal(groupMeta)
if err != nil {
return err
}

conn := b.open()
defer conn.Close()

_, err = conn.Do("SET", groupUUID, encoded)
if err != nil {
return err
}

return b.setExpirationTime(groupUUID)
}

// GroupCompleted returns true if all tasks in a group finished
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}

taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
if err != nil {
return false, err
}

var countSuccessTasks = 0
for _, taskState := range taskStates {
if taskState.IsCompleted() {
countSuccessTasks++
}
}

return countSuccessTasks == groupTaskCount, nil
}

// GroupTaskStates returns states of all tasks in the group
func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return []*tasks.TaskState{}, err
}

return b.getStates(groupMeta.TaskUUIDs...)
}

// TriggerChord flags the chord as triggered in the backend storage to make
// sure the chord is never triggered multiple times. Returns a boolean flag
// indicating whether the worker should trigger the chord (true) or not,
// because it has already been triggered (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
conn := b.open()
defer conn.Close()

m := b.redsync.NewMutex("TriggerChordMutex")
if err := m.Lock(); err != nil {
return false, err
}
defer m.Unlock()

groupMeta, err := b.getGroupMeta(groupUUID)
if err != nil {
return false, err
}

// Chord has already been triggered, return false (should not trigger again)
if groupMeta.ChordTriggered {
return false, nil
}

// Set flag to true
groupMeta.ChordTriggered = true

// Update the group meta
encoded, err := json.Marshal(&groupMeta)
if err != nil {
return false, err
}

_, err = conn.Do("SET", groupUUID, encoded)
if err != nil {
return false, err
}

return true, b.setExpirationTime(groupUUID)
}

func (b *Backend) mergeNewTaskState(newState *tasks.TaskState) {
state, err := b.GetState(newState.TaskUUID)
if err == nil {
newState.CreatedAt = state.CreatedAt
newState.TaskName = state.TaskName
}
}

// SetStatePending updates task state to PENDING
func (b *Backend) SetStatePending(signature *tasks.Signature) error {
taskState := tasks.NewPendingTaskState(signature)
return b.updateState(taskState)
}

// SetStateReceived updates task state to RECEIVED
func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
taskState := tasks.NewReceivedTaskState(signature)
b.mergeNewTaskState(taskState)
return b.updateState(taskState)
}

// SetStateStarted updates task state to STARTED
func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
taskState := tasks.NewStartedTaskState(signature)
b.mergeNewTaskState(taskState)
return b.updateState(taskState)
}

// SetStateRetry updates task state to RETRY
func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
taskState := tasks.NewRetryTaskState(signature)
b.mergeNewTaskState(taskState)
return b.updateState(taskState)
}

// SetStateSuccess updates task state to SUCCESS
func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
taskState := tasks.NewSuccessTaskState(signature, results)
b.mergeNewTaskState(taskState)
return b.updateState(taskState)
}

// SetStateFailure updates task state to FAILURE
func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
taskState := tasks.NewFailureTaskState(signature, err)
b.mergeNewTaskState(taskState)
return b.updateState(taskState)
}

// GetState returns the latest task state
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
conn := b.open()
defer conn.Close()

item, err := redis.Bytes(conn.Do("GET", taskUUID))
if err != nil {
return nil, err
}
state := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader(item))
decoder.UseNumber()
if err := decoder.Decode(state); err != nil {
return nil, err
}

return state, nil
}

// PurgeState deletes stored task state
func (b *Backend) PurgeState(taskUUID string) error {
conn := b.open()
defer conn.Close()

_, err := conn.Do("DEL", taskUUID)
if err != nil {
return err
}

return nil
}

// PurgeGroupMeta deletes stored group meta data
func (b *Backend) PurgeGroupMeta(groupUUID string) error {
conn := b.open()
defer conn.Close()

_, err := conn.Do("DEL", groupUUID)
if err != nil {
return err
}

return nil
}

// getGroupMeta retrieves group meta data, convenience function to avoid repetition
func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
conn := b.open()
defer conn.Close()

item, err := redis.Bytes(conn.Do("GET", groupUUID))
if err != nil {
return nil, err
}

groupMeta := new(tasks.GroupMeta)
decoder := json.NewDecoder(bytes.NewReader(item))
decoder.UseNumber()
if err := decoder.Decode(groupMeta); err != nil {
return nil, err
}

return groupMeta, nil
}

// getStates returns multiple task states
func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
taskStates := make([]*tasks.TaskState, len(taskUUIDs))

conn := b.open()
defer conn.Close()

// conn.Do requires []interface{}... can't pass []string unfortunately
taskUUIDInterfaces := make([]interface{}, len(taskUUIDs))
for i, taskUUID := range taskUUIDs {
taskUUIDInterfaces[i] = interface{}(taskUUID)
}

reply, err := redis.Values(conn.Do("MGET", taskUUIDInterfaces...))
if err != nil {
return taskStates, err
}

for i, value := range reply {
stateBytes, ok := value.([]byte)
if !ok {
return taskStates, fmt.Errorf("Expected byte array, instead got: %v", value)
}

taskState := new(tasks.TaskState)
decoder := json.NewDecoder(bytes.NewReader(stateBytes))
decoder.UseNumber()
if err := decoder.Decode(taskState); err != nil {
log.ERROR.Print(err)
return taskStates, err
}

taskStates[i] = taskState
}

return taskStates, nil
}

// updateState saves current task state
func (b *Backend) updateState(taskState *tasks.TaskState) error {
conn := b.open()
defer conn.Close()

encoded, err := json.Marshal(taskState)
if err != nil {
return err
}

_, err = conn.Do("SET", taskState.TaskUUID, encoded)
if err != nil {
return err
}

return b.setExpirationTime(taskState.TaskUUID)
}

// setExpirationTime sets expiration timestamp on a stored task state
func (b *Backend) setExpirationTime(key string) error {
expiresIn := b.GetConfig().ResultsExpireIn
if expiresIn == 0 {
// expire results after 1 hour by default
expiresIn = config.DefaultResultsExpireIn
}
expirationTimestamp := int32(time.Now().Unix() + int64(expiresIn))

conn := b.open()
defer conn.Close()

_, err := conn.Do("EXPIREAT", key, expirationTimestamp)
if err != nil {
return err
}

return nil
}

// open returns or creates instance of Redis connection
func (b *Backend) open() redis.Conn {
b.redisOnce.Do(func() {
b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
b.redsync = redsync.New([]redsync.Pool{b.pool})
})
return b.pool.Get()
}
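The expiration behaviour above is driven entirely by ResultsExpireIn; a sketch of a config giving task states a two-hour TTL (the addresses are placeholders):

package backendconf

import "github.com/RichardKnop/machinery/v1/config"

var cnf = &config.Config{
    Broker:        "redis://localhost:6379",
    DefaultQueue:  "machinery_tasks",
    ResultBackend: "redis://localhost:6379",
    // setExpirationTime issues EXPIREAT now+7200s on every stored state key;
    // left at 0, it falls back to config.DefaultResultsExpireIn (one hour).
    ResultsExpireIn: 7200,
}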

+ 256
- 0
vendor/github.com/RichardKnop/machinery/v1/backends/result/async_result.go View File

@@ -0,0 +1,256 @@
package result

import (
"errors"
"reflect"
"time"

"github.com/RichardKnop/machinery/v1/backends/iface"
"github.com/RichardKnop/machinery/v1/tasks"
)

var (
// ErrBackendNotConfigured ...
ErrBackendNotConfigured = errors.New("Result backend not configured")
// ErrTimeoutReached ...
ErrTimeoutReached = errors.New("Timeout reached")
)

// AsyncResult represents a task result
type AsyncResult struct {
Signature *tasks.Signature
taskState *tasks.TaskState
backend iface.Backend
}

// ChordAsyncResult represents a result of a chord
type ChordAsyncResult struct {
groupAsyncResults []*AsyncResult
chordAsyncResult *AsyncResult
backend iface.Backend
}

// ChainAsyncResult represents a result of a chain of tasks
type ChainAsyncResult struct {
asyncResults []*AsyncResult
backend iface.Backend
}

// NewAsyncResult creates an AsyncResult instance
func NewAsyncResult(signature *tasks.Signature, backend iface.Backend) *AsyncResult {
return &AsyncResult{
Signature: signature,
taskState: new(tasks.TaskState),
backend: backend,
}
}

// NewChordAsyncResult creates a ChordAsyncResult instance
func NewChordAsyncResult(groupTasks []*tasks.Signature, chordCallback *tasks.Signature, backend iface.Backend) *ChordAsyncResult {
asyncResults := make([]*AsyncResult, len(groupTasks))
for i, task := range groupTasks {
asyncResults[i] = NewAsyncResult(task, backend)
}
return &ChordAsyncResult{
groupAsyncResults: asyncResults,
chordAsyncResult: NewAsyncResult(chordCallback, backend),
backend: backend,
}
}

// NewChainAsyncResult creates a ChainAsyncResult instance
func NewChainAsyncResult(tasks []*tasks.Signature, backend iface.Backend) *ChainAsyncResult {
asyncResults := make([]*AsyncResult, len(tasks))
for i, task := range tasks {
asyncResults[i] = NewAsyncResult(task, backend)
}
return &ChainAsyncResult{
asyncResults: asyncResults,
backend: backend,
}
}

// Touch the state and don't wait
func (asyncResult *AsyncResult) Touch() ([]reflect.Value, error) {
if asyncResult.backend == nil {
return nil, ErrBackendNotConfigured
}

asyncResult.GetState()

// Purge state if we are using AMQP backend
if asyncResult.backend.IsAMQP() && asyncResult.taskState.IsCompleted() {
asyncResult.backend.PurgeState(asyncResult.taskState.TaskUUID)
}

if asyncResult.taskState.IsFailure() {
return nil, errors.New(asyncResult.taskState.Error)
}

if asyncResult.taskState.IsSuccess() {
return tasks.ReflectTaskResults(asyncResult.taskState.Results)
}

return nil, nil
}

// Get returns task results (synchronous blocking call)
func (asyncResult *AsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
for {
results, err := asyncResult.Touch()

if results == nil && err == nil {
time.Sleep(sleepDuration)
} else {
return results, err
}
}
}
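Get is a poll loop over Touch; a usage sketch, assuming a configured *machinery.Server with a registered "add" task (the server, task name and poll interval are all assumptions):

package producer

import (
    "fmt"
    "time"

    machinery "github.com/RichardKnop/machinery/v1"
    "github.com/RichardKnop/machinery/v1/tasks"
)

func waitForSum(server *machinery.Server) error {
    sig := &tasks.Signature{
        Name: "add",
        Args: []tasks.Arg{{Type: "int64", Value: 1}, {Type: "int64", Value: 2}},
    }
    asyncResult, err := server.SendTask(sig)
    if err != nil {
        return err
    }
    // Poll the backend every 200ms until the task succeeds or fails.
    results, err := asyncResult.Get(200 * time.Millisecond)
    if err != nil {
        return err
    }
    fmt.Printf("results: %v\n", results)
    return nil
}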

// GetWithTimeout returns task results with a timeout (synchronous blocking call)
func (asyncResult *AsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
timeout := time.NewTimer(timeoutDuration)

for {
select {
case <-timeout.C:
return nil, ErrTimeoutReached
default:
results, err := asyncResult.Touch()

if results == nil && err == nil {
time.Sleep(sleepDuration)
} else {
return results, err
}
}
}
}

// GetState returns latest task state
func (asyncResult *AsyncResult) GetState() *tasks.TaskState {
if asyncResult.taskState.IsCompleted() {
return asyncResult.taskState
}

taskState, err := asyncResult.backend.GetState(asyncResult.Signature.UUID)
if err == nil {
asyncResult.taskState = taskState
}

return asyncResult.taskState
}

// Get returns results of a chain of tasks (synchronous blocking call)
func (chainAsyncResult *ChainAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
if chainAsyncResult.backend == nil {
return nil, ErrBackendNotConfigured
}

var (
results []reflect.Value
err error
)

for _, asyncResult := range chainAsyncResult.asyncResults {
results, err = asyncResult.Get(sleepDuration)
if err != nil {
return nil, err
}
}

return results, err
}

// Get returns result of a chord (synchronous blocking call)
func (chordAsyncResult *ChordAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
if chordAsyncResult.backend == nil {
return nil, ErrBackendNotConfigured
}

var err error
for _, asyncResult := range chordAsyncResult.groupAsyncResults {
_, err = asyncResult.Get(sleepDuration)
if err != nil {
return nil, err
}
}

return chordAsyncResult.chordAsyncResult.Get(sleepDuration)
}

// GetWithTimeout returns results of a chain of tasks with timeout (synchronous blocking call)
func (chainAsyncResult *ChainAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
if chainAsyncResult.backend == nil {
return nil, ErrBackendNotConfigured
}

var (
results []reflect.Value
err error
)

timeout := time.NewTimer(timeoutDuration)
ln := len(chainAsyncResult.asyncResults)
lastResult := chainAsyncResult.asyncResults[ln-1]

for {
select {
case <-timeout.C:
return nil, ErrTimeoutReached
default:

for _, asyncResult := range chainAsyncResult.asyncResults {
_, errcur := asyncResult.Touch()
if errcur != nil {
return nil, errcur
}
}

results, err = lastResult.Touch()
if err != nil {
return nil, err
}
if results != nil {
return results, err
}
time.Sleep(sleepDuration)
}
}
}

// GetWithTimeout returns result of a chord with a timeout (synchronous blocking call)
func (chordAsyncResult *ChordAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
if chordAsyncResult.backend == nil {
return nil, ErrBackendNotConfigured
}

var (
results []reflect.Value
err error
)

timeout := time.NewTimer(timeoutDuration)
for {
select {
case <-timeout.C:
return nil, ErrTimeoutReached
default:
for _, asyncResult := range chordAsyncResult.groupAsyncResults {
_, errcur := asyncResult.Touch()
if errcur != nil {
return nil, errcur
}
}

results, err = chordAsyncResult.chordAsyncResult.Touch()
if err != nil {
return nil, err
}
if results != nil {
return results, err
}
time.Sleep(sleepDuration)
}
}
}

+ 424
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/amqp/amqp.go View File

@@ -0,0 +1,424 @@
package amqp

import (
"bytes"
"context"
"encoding/json"
"fmt"
"sync"
"time"

"github.com/RichardKnop/machinery/v1/brokers/errs"
"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/pkg/errors"
"github.com/streadway/amqp"
)

// AMQPConnection is a cached AMQP connection/channel pair for a single queue
type AMQPConnection struct {
queueName string
connection *amqp.Connection
channel *amqp.Channel
queue amqp.Queue
confirmation <-chan amqp.Confirmation
errorchan <-chan *amqp.Error
cleanup chan struct{}
}

// Broker represents an AMQP broker
type Broker struct {
common.Broker
common.AMQPConnector
processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal

connections map[string]*AMQPConnection
connectionsMutex sync.RWMutex
}

// New creates a new Broker instance
func New(cnf *config.Config) iface.Broker {
return &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)}
}

// StartConsuming enters a loop and waits for incoming messages
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)

queueName := taskProcessor.CustomQueue()
if queueName == "" {
queueName = b.GetConfig().DefaultQueue
}

conn, channel, queue, _, amqpCloseChan, err := b.Connect(
b.GetConfig().Broker,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
queueName, // queue name
true, // queue durable
false, // queue delete when unused
b.GetConfig().AMQP.BindingKey, // queue binding key
nil, // exchange declare args
nil, // queue declare args
amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
)
if err != nil {
b.GetRetryFunc()(b.GetRetryStopChan())
return b.GetRetry(), err
}
defer b.Close(channel, conn)

if err = channel.Qos(
b.GetConfig().AMQP.PrefetchCount,
0, // prefetch size
false, // global
); err != nil {
return b.GetRetry(), fmt.Errorf("Channel qos error: %s", err)
}

deliveries, err := channel.Consume(
queue.Name, // queue
consumerTag, // consumer tag
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // arguments
)
if err != nil {
return b.GetRetry(), fmt.Errorf("Queue consume error: %s", err)
}

log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")

if err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil {
return b.GetRetry(), err
}

// Waiting for any tasks being processed to finish
b.processingWG.Wait()

return b.GetRetry(), nil
}

// StopConsuming quits the loop
func (b *Broker) StopConsuming() {
b.Broker.StopConsuming()

// Waiting for any tasks being processed to finish
b.processingWG.Wait()
}

// GetOrOpenConnection will return a connection for a particular queue name. Open connections
// are cached to avoid having to reopen connections for multiple queues
func (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) {
var err error

b.connectionsMutex.Lock()
defer b.connectionsMutex.Unlock()

conn, ok := b.connections[queueName]
if !ok {
conn = &AMQPConnection{
queueName: queueName,
cleanup: make(chan struct{}),
}
conn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect(
b.GetConfig().Broker,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
queueName, // queue name
true, // queue durable
false, // queue delete when unused
queueBindingKey, // queue binding key
exchangeDeclareArgs, // exchange declare args
queueDeclareArgs, // queue declare args
queueBindingArgs, // queue binding args
)
if err != nil {
return nil, errors.Wrapf(err, "Failed to connect to queue %s", queueName)
}

// Reconnect to the channel if it disconnects/errors out
go func() {
select {
case err = <-conn.errorchan:
log.INFO.Printf("Error occured on queue: %s. Reconnecting", queueName)
b.connectionsMutex.Lock()
delete(b.connections, queueName)
b.connectionsMutex.Unlock()
_, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs)
if err != nil {
log.ERROR.Printf("Failed to reopen queue: %s.", queueName)
}
case <-conn.cleanup:
return
}
}()
b.connections[queueName] = conn
}
return conn, nil
}

func (b *Broker) CloseConnections() error {
b.connectionsMutex.Lock()
defer b.connectionsMutex.Unlock()

for key, conn := range b.connections {
if err := b.Close(conn.channel, conn.connection); err != nil {
log.ERROR.Print("Failed to close channel")
return nil
}
close(conn.cleanup)
delete(b.connections, key)
}
return nil
}

// Publish places a new message on the default queue
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
// Adjust routing key (this decides which queue the message will be published to)
b.AdjustRoutingKey(signature)

msg, err := json.Marshal(signature)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

// Check the ETA signature field, if it is set and it is in the future,
// delay the task
if signature.ETA != nil {
now := time.Now().UTC()

if signature.ETA.After(now) {
delayMs := int64(signature.ETA.Sub(now) / time.Millisecond)

return b.delay(signature, delayMs)
}
}

queue := b.GetConfig().DefaultQueue
bindingKey := b.GetConfig().AMQP.BindingKey // queue binding key
if b.isDirectExchange() {
queue = signature.RoutingKey
bindingKey = signature.RoutingKey
}

connection, err := b.GetOrOpenConnection(
queue,
bindingKey, // queue binding key
nil, // exchange declare args
nil, // queue declare args
amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
)
if err != nil {
return errors.Wrapf(err, "Failed to get a connection for queue %s", queue)
}

channel := connection.channel
confirmsChan := connection.confirmation

if err := channel.Publish(
b.GetConfig().AMQP.Exchange, // exchange name
signature.RoutingKey, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
Headers: amqp.Table(signature.Headers),
ContentType: "application/json",
Body: msg,
DeliveryMode: amqp.Persistent,
},
); err != nil {
return errors.Wrap(err, "Failed to publish task")
}

confirmed := <-confirmsChan

if confirmed.Ack {
return nil
}

return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
}

// consume takes delivered messages from the channel and manages a worker pool
// to process tasks concurrently
func (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error {
pool := make(chan struct{}, concurrency)

// initialize worker pool with maxWorkers workers
go func() {
for i := 0; i < concurrency; i++ {
pool <- struct{}{}
}
}()

errorsChan := make(chan error)

for {
select {
case amqpErr := <-amqpCloseChan:
return amqpErr
case err := <-errorsChan:
return err
case d := <-deliveries:
if concurrency > 0 {
// get worker from pool (blocks until one is available)
<-pool
}

b.processingWG.Add(1)

// Consume the task inside a goroutine so multiple tasks
// can be processed concurrently
go func() {
if err := b.consumeOne(d, taskProcessor); err != nil {
errorsChan <- err
}

b.processingWG.Done()

if concurrency > 0 {
// give worker back to pool
pool <- struct{}{}
}
}()
case <-b.GetStopChan():
return nil
}
}
}

// consumeOne processes a single message using TaskProcessor
func (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor) error {
if len(delivery.Body) == 0 {
delivery.Nack(true, false) // multiple, requeue
return errors.New("Received an empty message") // RabbitMQ down?
}

var multiple, requeue = false, false

// Unmarshal message body into signature struct
signature := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewReader(delivery.Body))
decoder.UseNumber()
if err := decoder.Decode(signature); err != nil {
delivery.Nack(multiple, requeue)
return errs.NewErrCouldNotUnmarshaTaskSignature(delivery.Body, err)
}

// If the task is not registered, we nack it and requeue,
// there might be different workers for processing specific tasks
if !b.IsTaskRegistered(signature.Name) {
if !delivery.Redelivered {
requeue = true
log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery.Body)
}
delivery.Nack(multiple, requeue)
return nil
}

log.INFO.Printf("Received new message: %s", delivery.Body)

err := taskProcessor.Process(signature)
delivery.Ack(multiple)
return err
}

// delay a task by delayMs milliseconds: a new queue without any consumers is
// created and the message is published to it with the appropriate TTL
// expiration headers; after expiration the message is dead-lettered to the
// proper queue with consumers
func (b *Broker) delay(signature *tasks.Signature, delayMs int64) error {
if delayMs <= 0 {
return errors.New("Cannot delay task by 0ms")
}

message, err := json.Marshal(signature)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

// It's necessary to redeclare the queue each time (to zero its TTL timer).
queueName := fmt.Sprintf(
"delay.%d.%s.%s",
delayMs, // delay duration in milliseconds
b.GetConfig().AMQP.Exchange,
signature.RoutingKey, // routing key
)
declareQueueArgs := amqp.Table{
// Exchange to which messages are sent after TTL expiration.
"x-dead-letter-exchange": b.GetConfig().AMQP.Exchange,
// Routing key to use when resending expired messages.
"x-dead-letter-routing-key": signature.RoutingKey,
// Time in milliseconds
// after which the message will expire and be sent to the destination.
"x-message-ttl": delayMs,
// Time after which the queue will be deleted.
"x-expires": delayMs * 2,
}
conn, channel, _, _, _, err := b.Connect(
b.GetConfig().Broker,
b.GetConfig().TLSConfig,
b.GetConfig().AMQP.Exchange, // exchange name
b.GetConfig().AMQP.ExchangeType, // exchange type
queueName, // queue name
true, // queue durable
b.GetConfig().AMQP.AutoDelete, // queue delete when unused
queueName, // queue binding key
nil, // exchange declare args
declareQueueArgs, // queue declare args
amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
)
if err != nil {
return err
}

defer b.Close(channel, conn)

if err := channel.Publish(
b.GetConfig().AMQP.Exchange, // exchange
queueName, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
Headers: amqp.Table(signature.Headers),
ContentType: "application/json",
Body: message,
DeliveryMode: amqp.Persistent,
},
); err != nil {
return err
}

return nil
}
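From the producer's side this mechanism is opaque: setting Signature.ETA is all that is required, as in this sketch (the task name is a placeholder):

package producer

import (
    "time"

    "github.com/RichardKnop/machinery/v1/tasks"
)

// newDelayedSignature builds a signature that Publish will route through the
// delay() mechanism above once it sees a future ETA.
func newDelayedSignature() *tasks.Signature {
    eta := time.Now().UTC().Add(30 * time.Second)
    return &tasks.Signature{
        Name: "send_email",
        ETA:  &eta, // Publish computes delayMs = ETA - now
    }
}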

func (b *Broker) isDirectExchange() bool {
return b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == "direct"
}

// AdjustRoutingKey makes sure the routing key is correct.
// If the routing key is an empty string:
// a) set it to binding key for direct exchange type
// b) set it to default queue name
func (b *Broker) AdjustRoutingKey(s *tasks.Signature) {
if s.RoutingKey != "" {
return
}

if b.isDirectExchange() {
// The routing algorithm behind a direct exchange is simple - a message goes
// to the queues whose binding key exactly matches the routing key of the message.
s.RoutingKey = b.GetConfig().AMQP.BindingKey
return
}

s.RoutingKey = b.GetConfig().DefaultQueue
}

+ 73
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/eager/eager.go View File

@@ -0,0 +1,73 @@
package eager

import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"

"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/tasks"
)

// Broker represents an "eager" in-memory broker
type Broker struct {
worker iface.TaskProcessor
common.Broker
}

// New creates a new Broker instance
func New() iface.Broker {
return new(Broker)
}

// Mode interface with methods specific for this broker
type Mode interface {
AssignWorker(p iface.TaskProcessor)
}

// StartConsuming enters a loop and waits for incoming messages
func (eagerBroker *Broker) StartConsuming(consumerTag string, concurrency int, p iface.TaskProcessor) (bool, error) {
return true, nil
}

// StopConsuming quits the loop
func (eagerBroker *Broker) StopConsuming() {
// do nothing
}

// Publish places a new message on the default queue
func (eagerBroker *Broker) Publish(ctx context.Context, task *tasks.Signature) error {
if eagerBroker.worker == nil {
return errors.New("worker is not assigned in eager-mode")
}

// faking the behavior to marshal input into json
// and unmarshal it back
message, err := json.Marshal(task)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

signature := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewReader(message))
decoder.UseNumber()
if err := decoder.Decode(signature); err != nil {
return fmt.Errorf("JSON unmarshal error: %s", err)
}

// blocking call to the task directly
return eagerBroker.worker.Process(signature)
}

// GetPendingTasks returns a slice of task.Signatures waiting in the queue
func (eagerBroker *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
return []*tasks.Signature{}, errors.New("Not implemented")
}

// AssignWorker assigns a worker to the eager broker
func (eagerBroker *Broker) AssignWorker(w iface.TaskProcessor) {
eagerBroker.worker = w
}
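A minimal sketch of eager mode end to end, using only the types in this file plus a hypothetical stub processor:

package main

import (
    "context"

    "github.com/RichardKnop/machinery/v1/brokers/eager"
    "github.com/RichardKnop/machinery/v1/tasks"
)

// inlineProcessor is a stub TaskProcessor that accepts every task.
type inlineProcessor struct{}

func (inlineProcessor) Process(sig *tasks.Signature) error { return nil }
func (inlineProcessor) CustomQueue() string                { return "" }

func main() {
    broker := eager.New()

    // The returned iface.Broker must be asserted to eager.Mode before a
    // worker can be attached.
    broker.(eager.Mode).AssignWorker(inlineProcessor{})

    // Publish round-trips the signature through JSON and then calls
    // Process synchronously; useful for tests without a real broker.
    _ = broker.Publish(context.Background(), &tasks.Signature{Name: "noop"})
}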

+ 25
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/errs/errors.go View File

@@ -0,0 +1,25 @@
package errs

import (
"errors"
"fmt"
)

// ErrCouldNotUnmarshaTaskSignature ...
type ErrCouldNotUnmarshaTaskSignature struct {
msg []byte
reason string
}

// Error implements the error interface
func (e ErrCouldNotUnmarshaTaskSignature) Error() string {
return fmt.Sprintf("Could not unmarshal '%s' into a task signature: %v", e.msg, e.reason)
}

// NewErrCouldNotUnmarshaTaskSignature returns new ErrCouldNotUnmarshaTaskSignature instance
func NewErrCouldNotUnmarshaTaskSignature(msg []byte, err error) ErrCouldNotUnmarshaTaskSignature {
return ErrCouldNotUnmarshaTaskSignature{msg: msg, reason: err.Error()}
}

// ErrConsumerStopped indicates that the operation is now illegal because of the consumer being stopped.
var ErrConsumerStopped = errors.New("the server has been stopped")

+ 196
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/gcppubsub/gcp_pubsub.go View File

@@ -0,0 +1,196 @@
package gcppubsub

import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"

"cloud.google.com/go/pubsub"
"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
)

// Broker represents a Google Cloud Pub/Sub broker
type Broker struct {
common.Broker

service *pubsub.Client
subscriptionName string
MaxExtension time.Duration

stopDone chan struct{}
}

// New creates a new Broker instance
func New(cnf *config.Config, projectID, subscriptionName string) (iface.Broker, error) {
b := &Broker{Broker: common.NewBroker(cnf), stopDone: make(chan struct{})}
b.subscriptionName = subscriptionName

ctx := context.Background()

if cnf.GCPPubSub != nil {
b.MaxExtension = cnf.GCPPubSub.MaxExtension
}

if cnf.GCPPubSub != nil && cnf.GCPPubSub.Client != nil {
b.service = cnf.GCPPubSub.Client
} else {
pubsubClient, err := pubsub.NewClient(ctx, projectID)
if err != nil {
return nil, err
}
b.service = pubsubClient
cnf.GCPPubSub = &config.GCPPubSubConfig{
Client: pubsubClient,
}
}

// Validate topic exists
defaultQueue := b.GetConfig().DefaultQueue
topic := b.service.Topic(defaultQueue)
defer topic.Stop()

topicExists, err := topic.Exists(ctx)
if err != nil {
return nil, err
}
if !topicExists {
return nil, fmt.Errorf("topic does not exist, instead got %s", defaultQueue)
}

// Validate subscription exists
sub := b.service.Subscription(b.subscriptionName)

if b.MaxExtension != 0 {
sub.ReceiveSettings.MaxExtension = b.MaxExtension
}

subscriptionExists, err := sub.Exists(ctx)
if err != nil {
return nil, err
}
if !subscriptionExists {
return nil, fmt.Errorf("subscription does not exist, instead got %s", b.subscriptionName)
}

return b, nil
}

// StartConsuming enters a loop and waits for incoming messages
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)

sub := b.service.Subscription(b.subscriptionName)

if b.MaxExtension != 0 {
sub.ReceiveSettings.MaxExtension = b.MaxExtension
}

sub.ReceiveSettings.NumGoroutines = concurrency
log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")

ctx, cancel := context.WithCancel(context.Background())
go func() {
<-b.GetStopChan()
cancel()
}()

for {
err := sub.Receive(ctx, func(_ctx context.Context, msg *pubsub.Message) {
b.consumeOne(msg, taskProcessor)
})
if err == nil {
break
}

log.ERROR.Printf("Error when receiving messages. Error: %v", err)
continue
}

close(b.stopDone)

return b.GetRetry(), nil
}

// StopConsuming quits the loop
func (b *Broker) StopConsuming() {
b.Broker.StopConsuming()

// Waiting for any tasks being processed to finish
<-b.stopDone
}

// Publish places a new message on the default queue or the queue pointed to
// by the routing key
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
// Adjust routing key (this decides which queue the message will be published to)
b.AdjustRoutingKey(signature)

msg, err := json.Marshal(signature)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

topic := b.service.Topic(signature.RoutingKey)
defer topic.Stop()

// Check the ETA signature field, if it is set and it is in the future,
// delay the task
if signature.ETA != nil {
now := time.Now().UTC()

if signature.ETA.After(now) {
topic.PublishSettings.DelayThreshold = signature.ETA.Sub(now)
}
}

result := topic.Publish(ctx, &pubsub.Message{
Data: msg,
})

id, err := result.Get(ctx)
if err != nil {
log.ERROR.Printf("Error when sending a message: %v", err)
return err
}

log.INFO.Printf("Sending a message successfully, server-generated message ID %v", id)
return nil
}

// consumeOne processes a single message using TaskProcessor
func (b *Broker) consumeOne(delivery *pubsub.Message, taskProcessor iface.TaskProcessor) {
if len(delivery.Data) == 0 {
delivery.Nack()
log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
return
}

sig := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewBuffer(delivery.Data))
decoder.UseNumber()
if err := decoder.Decode(sig); err != nil {
delivery.Nack()
log.ERROR.Printf("unmarshal error. the delivery is %v", delivery)
return
}

// If the task is not registered, nack the message
// and leave it in the queue
if !b.IsTaskRegistered(sig.Name) {
delivery.Nack()
log.ERROR.Printf("task %s is not registered", sig.Name)
return
}

err := taskProcessor.Process(sig)
if err != nil {
delivery.Nack()
log.ERROR.Printf("Failed to process task: %v", err)
return
}

// Call Ack() after successfully consuming and processing the message
delivery.Ack()
}
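A sketch of constructing this broker directly via New; the project and subscription names are placeholders, and New fails unless both the DefaultQueue topic and the subscription already exist:

package main

import (
    "log"

    "github.com/RichardKnop/machinery/v1/brokers/gcppubsub"
    "github.com/RichardKnop/machinery/v1/config"
)

func main() {
    cnf := &config.Config{DefaultQueue: "machinery_tasks"} // must exist as a topic
    broker, err := gcppubsub.New(cnf, "my-project", "machinery-sub")
    if err != nil {
        log.Fatal(err) // missing topic/subscription or no GCP credentials
    }
    _ = broker
}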

+ 27
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/iface/interfaces.go View File

@@ -0,0 +1,27 @@
package iface

import (
"context"

"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/tasks"
)

// Broker - a common interface for all brokers
type Broker interface {
GetConfig() *config.Config
SetRegisteredTaskNames(names []string)
IsTaskRegistered(name string) bool
StartConsuming(consumerTag string, concurrency int, p TaskProcessor) (bool, error)
StopConsuming()
Publish(ctx context.Context, task *tasks.Signature) error
GetPendingTasks(queue string) ([]*tasks.Signature, error)
AdjustRoutingKey(s *tasks.Signature)
}

// TaskProcessor - can process a delivered task
// This will probably always be a worker instance
type TaskProcessor interface {
Process(signature *tasks.Signature) error
CustomQueue() string
}
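Any type with these two methods is a valid TaskProcessor; a minimal hypothetical implementation:

package worker

import (
    "fmt"

    "github.com/RichardKnop/machinery/v1/brokers/iface"
    "github.com/RichardKnop/machinery/v1/tasks"
)

// loggingProcessor is a stub TaskProcessor that only logs deliveries.
type loggingProcessor struct{}

// Compile-time check that the interface is satisfied.
var _ iface.TaskProcessor = loggingProcessor{}

func (loggingProcessor) Process(sig *tasks.Signature) error {
    fmt.Printf("processing %s (%s)\n", sig.Name, sig.UUID)
    return nil
}

// CustomQueue returns "" so brokers fall back to the configured DefaultQueue.
func (loggingProcessor) CustomQueue() string { return "" }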

+ 418
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/redis/redis.go View File

@@ -0,0 +1,418 @@
package redis

import (
"bytes"
"context"
"encoding/json"
"fmt"
"sync"
"time"

"github.com/RichardKnop/machinery/v1/brokers/errs"
"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/RichardKnop/redsync"
"github.com/gomodule/redigo/redis"
)

var redisDelayedTasksKey = "delayed_tasks"

// Broker represents a Redis broker
type Broker struct {
common.Broker
common.RedisConnector
host string
password string
db int
pool *redis.Pool
consumingWG sync.WaitGroup // wait group to make sure whole consumption completes
processingWG sync.WaitGroup // use wait group to make sure task processing completes
delayedWG sync.WaitGroup
// If set, path to a socket file overrides hostname
socketPath string
redsync *redsync.Redsync
redisOnce sync.Once
}

// New creates a new Broker instance
func New(cnf *config.Config, host, password, socketPath string, db int) iface.Broker {
b := &Broker{Broker: common.NewBroker(cnf)}
b.host = host
b.db = db
b.password = password
b.socketPath = socketPath

return b
}

// StartConsuming enters a loop and waits for incoming messages
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
b.consumingWG.Add(1)
defer b.consumingWG.Done()

if concurrency < 1 {
concurrency = 1
}

b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)

conn := b.open()
defer conn.Close()

// Ping the server to make sure connection is live
_, err := conn.Do("PING")
if err != nil {
b.GetRetryFunc()(b.GetRetryStopChan())

// Return err if retry is still true.
// If retry is false, broker.StopConsuming() has been called and
// therefore Redis might have been stopped. Return nil exit
// StartConsuming()
if b.GetRetry() {
return b.GetRetry(), err
}
return b.GetRetry(), errs.ErrConsumerStopped
}

// Channel to which we will push tasks ready for processing by worker
deliveries := make(chan []byte, concurrency)
pool := make(chan struct{}, concurrency)

// initialize worker pool with maxWorkers workers
for i := 0; i < concurrency; i++ {
pool <- struct{}{}
}

// A receiving goroutine keeps popping messages from the queue by BLPOP
// If the message is valid and can be unmarshaled into a proper structure
// we send it to the deliveries channel
go func() {

log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")

for {
select {
// A way to stop this goroutine from b.StopConsuming
case <-b.GetStopChan():
close(deliveries)
return
case <-pool:
task, _ := b.nextTask(getQueue(b.GetConfig(), taskProcessor))
//TODO: should this error be ignored?
if len(task) > 0 {
deliveries <- task
}

pool <- struct{}{}
}
}
}()

// A goroutine to watch for delayed tasks and push them to deliveries
// channel for consumption by the worker
b.delayedWG.Add(1)
go func() {
defer b.delayedWG.Done()

for {
select {
// A way to stop this goroutine from b.StopConsuming
case <-b.GetStopChan():
return
default:
task, err := b.nextDelayedTask(redisDelayedTasksKey)
if err != nil {
continue
}

signature := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewReader(task))
decoder.UseNumber()
if err := decoder.Decode(signature); err != nil {
log.ERROR.Print(errs.NewErrCouldNotUnmarshaTaskSignature(task, err))
}

if err := b.Publish(context.Background(), signature); err != nil {
log.ERROR.Print(err)
}
}
}
}()

if err := b.consume(deliveries, concurrency, taskProcessor); err != nil {
return b.GetRetry(), err
}

// Waiting for any tasks being processed to finish
b.processingWG.Wait()

return b.GetRetry(), nil
}

// StopConsuming quits the loop
func (b *Broker) StopConsuming() {
b.Broker.StopConsuming()
// Waiting for the delayed tasks goroutine to have stopped
b.delayedWG.Wait()
// Waiting for consumption to finish
b.consumingWG.Wait()

if b.pool != nil {
b.pool.Close()
}
}

// Publish places a new message on the default queue
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
// Adjust routing key (this decides which queue the message will be published to)
b.Broker.AdjustRoutingKey(signature)

msg, err := json.Marshal(signature)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

conn := b.open()
defer conn.Close()

// Check the ETA signature field, if it is set and it is in the future,
// delay the task
if signature.ETA != nil {
now := time.Now().UTC()

if signature.ETA.After(now) {
score := signature.ETA.UnixNano()
_, err = conn.Do("ZADD", redisDelayedTasksKey, score, msg)
return err
}
}

_, err = conn.Do("RPUSH", signature.RoutingKey, msg)
return err
}

// GetPendingTasks returns a slice of task signatures waiting in the queue
func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
conn := b.open()
defer conn.Close()

if queue == "" {
queue = b.GetConfig().DefaultQueue
}
dataBytes, err := conn.Do("LRANGE", queue, 0, -1)
if err != nil {
return nil, err
}
results, err := redis.ByteSlices(dataBytes, err)
if err != nil {
return nil, err
}

taskSignatures := make([]*tasks.Signature, len(results))
for i, result := range results {
signature := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewReader(result))
decoder.UseNumber()
if err := decoder.Decode(signature); err != nil {
return nil, err
}
taskSignatures[i] = signature
}
return taskSignatures, nil
}

// consume takes delivered messages from the channel and manages a worker pool
// to process tasks concurrently
func (b *Broker) consume(deliveries <-chan []byte, concurrency int, taskProcessor iface.TaskProcessor) error {
errorsChan := make(chan error, concurrency*2)
pool := make(chan struct{}, concurrency)

// init pool for Worker tasks execution, as many slots as Worker concurrency param
go func() {
for i := 0; i < concurrency; i++ {
pool <- struct{}{}
}
}()

for {
select {
case err := <-errorsChan:
return err
case d, open := <-deliveries:
if !open {
return nil
}
if concurrency > 0 {
// get execution slot from pool (blocks until one is available)
<-pool
}

b.processingWG.Add(1)

// Consume the task inside a goroutine so multiple tasks
// can be processed concurrently
go func() {
if err := b.consumeOne(d, taskProcessor); err != nil {
errorsChan <- err
}

b.processingWG.Done()

if concurrency > 0 {
// give slot back to pool
pool <- struct{}{}
}
}()
}
}
}

// consumeOne processes a single message using TaskProcessor
func (b *Broker) consumeOne(delivery []byte, taskProcessor iface.TaskProcessor) error {
signature := new(tasks.Signature)
decoder := json.NewDecoder(bytes.NewReader(delivery))
decoder.UseNumber()
if err := decoder.Decode(signature); err != nil {
return errs.NewErrCouldNotUnmarshaTaskSignature(delivery, err)
}

// If the task is not registered, we requeue it,
// there might be different workers for processing specific tasks
if !b.IsTaskRegistered(signature.Name) {
log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery)

conn := b.open()
defer conn.Close()

conn.Do("RPUSH", getQueue(b.GetConfig(), taskProcessor), delivery)
return nil
}

log.DEBUG.Printf("Received new message: %s", delivery)

return taskProcessor.Process(signature)
}

// nextTask pops next available task from the default queue
func (b *Broker) nextTask(queue string) (result []byte, err error) {
conn := b.open()
defer conn.Close()

pollPeriodMilliseconds := 1000 // default poll period for normal tasks
if b.GetConfig().Redis != nil {
configuredPollPeriod := b.GetConfig().Redis.NormalTasksPollPeriod
if configuredPollPeriod > 0 {
pollPeriodMilliseconds = configuredPollPeriod
}
}
pollPeriod := time.Duration(pollPeriodMilliseconds) * time.Millisecond

items, err := redis.ByteSlices(conn.Do("BLPOP", queue, pollPeriod.Seconds()))
if err != nil {
return []byte{}, err
}

// items[0] - the name of the key where an element was popped
// items[1] - the value of the popped element
if len(items) != 2 {
return []byte{}, redis.ErrNil
}

result = items[1]

return result, nil
}

// nextDelayedTask pops a value from the ZSET key using WATCH/MULTI/EXEC commands.
// https://github.com/gomodule/redigo/blob/master/redis/zpop_example_test.go
func (b *Broker) nextDelayedTask(key string) (result []byte, err error) {
conn := b.open()
defer conn.Close()

defer func() {
// Return connection to normal state on error.
// https://redis.io/commands/discard
if err != nil {
conn.Do("DISCARD")
}
}()

var (
items [][]byte
reply interface{}
)

pollPeriod := 500 // default poll period for delayed tasks
if b.GetConfig().Redis != nil {
configuredPollPeriod := b.GetConfig().Redis.DelayedTasksPollPeriod
// a configured period of 0 would bombard redis with requests, which is
// exactly what this polling loop is meant to avoid
if configuredPollPeriod > 0 {
pollPeriod = configuredPollPeriod
}
}

for {
// Space out queries to ZSET so we don't bombard redis
// server with relentless ZRANGEBYSCOREs
time.Sleep(time.Duration(pollPeriod) * time.Millisecond)
if _, err = conn.Do("WATCH", key); err != nil {
return
}

now := time.Now().UTC().UnixNano()

// https://redis.io/commands/zrangebyscore
items, err = redis.ByteSlices(conn.Do(
"ZRANGEBYSCORE",
key,
0,
now,
"LIMIT",
0,
1,
))
if err != nil {
return
}
if len(items) != 1 {
err = redis.ErrNil
return
}

_ = conn.Send("MULTI")
_ = conn.Send("ZREM", key, items[0])
reply, err = conn.Do("EXEC")
if err != nil {
return
}

if reply != nil {
result = items[0]
break
}
}

return
}

// open returns or creates instance of Redis connection
func (b *Broker) open() redis.Conn {
b.redisOnce.Do(func() {
b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
b.redsync = redsync.New([]redsync.Pool{b.pool})
})

return b.pool.Get()
}

func getQueue(config *config.Config, taskProcessor iface.TaskProcessor) string {
customQueue := taskProcessor.CustomQueue()
if customQueue == "" {
return config.DefaultQueue
}
return customQueue
}
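Both poll loops above take their periods, in milliseconds, from the Redis section of the config; a sketch (the addresses and the config.RedisConfig field layout are assumptions based on the fields read above):

package brokerconf

import "github.com/RichardKnop/machinery/v1/config"

var cnf = &config.Config{
    Broker:       "redis://localhost:6379",
    DefaultQueue: "machinery_tasks",
    Redis: &config.RedisConfig{
        NormalTasksPollPeriod:  1000, // BLPOP timeout used by nextTask
        DelayedTasksPollPeriod: 500,  // sleep between ZRANGEBYSCORE polls in nextDelayedTask
    },
}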

+ 361
- 0
vendor/github.com/RichardKnop/machinery/v1/brokers/sqs/sqs.go View File

@@ -0,0 +1,361 @@
package sqs

import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"sync"
"time"

"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/common"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/tasks"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"

awssqs "github.com/aws/aws-sdk-go/service/sqs"
)

const (
maxAWSSQSDelay = time.Minute * 15 // Max supported SQS delay is 15 min: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
)

// Broker represents an AWS SQS broker
// There are examples on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sqs-example-create-queue.html
type Broker struct {
common.Broker
processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal
receivingWG sync.WaitGroup
stopReceivingChan chan int
sess *session.Session
service sqsiface.SQSAPI
queueUrl *string
}

// New creates a new Broker instance
func New(cnf *config.Config) iface.Broker {
b := &Broker{Broker: common.NewBroker(cnf)}
if cnf.SQS != nil && cnf.SQS.Client != nil {
// Use provided *SQS client
b.service = cnf.SQS.Client
} else {
// Initialize a session that the SDK will use to load credentials from the shared credentials file, ~/.aws/credentials.
// See details on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
// The AWS_REGION environment variable is also required.
b.sess = session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
b.service = awssqs.New(b.sess)
}

return b
}

// GetPendingTasks returns a slice of task.Signatures waiting in the queue
func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
return nil, errors.New("Not implemented")
}

// StartConsuming enters a loop and waits for incoming messages
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
qURL := b.getQueueURL(taskProcessor)
// save it so that it can be used later when attempting to delete a task
b.queueUrl = qURL

deliveries := make(chan *awssqs.ReceiveMessageOutput, concurrency)
pool := make(chan struct{}, concurrency)

// initialize worker pool with maxWorkers workers
for i := 0; i < concurrency; i++ {
pool <- struct{}{}
}
b.stopReceivingChan = make(chan int)
b.receivingWG.Add(1)

go func() {
defer b.receivingWG.Done()

log.INFO.Printf("[*] Waiting for messages on queue: %s. To exit press CTRL+C\n", *qURL)

for {
select {
// A way to stop this goroutine from b.StopConsuming
case <-b.stopReceivingChan:
close(deliveries)
return
case <-pool:
output, err := b.receiveMessage(qURL)
if err == nil && len(output.Messages) > 0 {
deliveries <- output

} else {
//return back to pool right away
pool <- struct{}{}
if err != nil {
log.ERROR.Printf("Queue consume error: %s", err)
}

}
}

}
}()

if err := b.consume(deliveries, concurrency, taskProcessor, pool); err != nil {
return b.GetRetry(), err
}

return b.GetRetry(), nil
}

// StopConsuming quits the loop
func (b *Broker) StopConsuming() {
b.Broker.StopConsuming()

b.stopReceiving()

// Waiting for any tasks being processed to finish
b.processingWG.Wait()

// Waiting for the receiving goroutine to have stopped
b.receivingWG.Wait()
}

// Publish places a new message on the default queue
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
msg, err := json.Marshal(signature)
if err != nil {
return fmt.Errorf("JSON marshal error: %s", err)
}

// Check that signature.RoutingKey is set, if not switch to DefaultQueue
b.AdjustRoutingKey(signature)

MsgInput := &awssqs.SendMessageInput{
MessageBody: aws.String(string(msg)),
QueueUrl: aws.String(b.GetConfig().Broker + "/" + signature.RoutingKey),
}

// If this is a FIFO queue, some additional parameters are required.
if strings.HasSuffix(signature.RoutingKey, ".fifo") {
// Use Machinery's signature Task UUID as SQS Message Group ID.
MsgDedupID := signature.UUID
MsgInput.MessageDeduplicationId = aws.String(MsgDedupID)

// Do not Use Machinery's signature Group UUID as SQS Message Group ID, instead use BrokerMessageGroupId
MsgGroupID := signature.BrokerMessageGroupId
if MsgGroupID == "" {
return fmt.Errorf("please specify BrokerMessageGroupId attribute for task Signature when submitting a task to FIFO queue")
}
MsgInput.MessageGroupId = aws.String(MsgGroupID)
}

// Check the ETA signature field, if it is set and it is in the future,
// and is not a fifo queue, set a delay in seconds for the task.
if signature.ETA != nil && !strings.HasSuffix(signature.RoutingKey, ".fifo") {
now := time.Now().UTC()
delay := signature.ETA.Sub(now)
if delay > 0 {
if delay > maxAWSSQSDelay {
return errors.New("Max AWS SQS delay exceeded")
}
MsgInput.DelaySeconds = aws.Int64(int64(delay.Seconds()))
}
}

result, err := b.service.SendMessageWithContext(ctx, MsgInput)

if err != nil {
log.ERROR.Printf("Error when sending a message: %v", err)
return err

}
log.INFO.Printf("Sending a message successfully, the messageId is %v", *result.MessageId)
return nil

}
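The FIFO branch above imposes two requirements on the signature; a sketch of a signature aimed at a hypothetical FIFO queue (the UUID becomes the SQS deduplication ID automatically):

package producer

import "github.com/RichardKnop/machinery/v1/tasks"

var sig = &tasks.Signature{
    Name:                 "process_order",
    RoutingKey:           "orders.fifo", // the ".fifo" suffix triggers the FIFO branch
    BrokerMessageGroupId: "customer-42", // required; Publish errors when empty
    Args:                 []tasks.Arg{{Type: "string", Value: "order-123"}},
}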

// consume is a method which keeps consuming deliveries from a channel, until there is an error or a stop signal
func (b *Broker) consume(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}) error {

errorsChan := make(chan error)

for {
whetherContinue, err := b.consumeDeliveries(deliveries, concurrency, taskProcessor, pool, errorsChan)
if err != nil {
return err
}
if !whetherContinue {
return nil
}
}
}

// consumeOne consumes a single delivery. If the delivery is consumed successfully, it is deleted from AWS SQS
func (b *Broker) consumeOne(delivery *awssqs.ReceiveMessageOutput, taskProcessor iface.TaskProcessor) error {
if len(delivery.Messages) == 0 {
log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
return errors.New("received empty message, the delivery is " + delivery.GoString())
}

sig := new(tasks.Signature)
decoder := json.NewDecoder(strings.NewReader(*delivery.Messages[0].Body))
decoder.UseNumber()
if err := decoder.Decode(sig); err != nil {
log.ERROR.Printf("unmarshal error. the delivery is %v", delivery)
return err
}
if delivery.Messages[0].ReceiptHandle != nil {
sig.SQSReceiptHandle = *delivery.Messages[0].ReceiptHandle
}

// If the task is not registered return an error
// and leave the message in the queue
if !b.IsTaskRegistered(sig.Name) {
return fmt.Errorf("task %s is not registered", sig.Name)
}

err := taskProcessor.Process(sig)
if err != nil {
return err
}
// Delete message after successfully consuming and processing the message
if err = b.deleteOne(delivery); err != nil {
log.ERROR.Printf("error when deleting the delivery. delivery is %v, Error=%s", delivery, err)
}
return err
}

// deleteOne deletes a delivery from AWS SQS
func (b *Broker) deleteOne(delivery *awssqs.ReceiveMessageOutput) error {
qURL := b.defaultQueueURL()
_, err := b.service.DeleteMessage(&awssqs.DeleteMessageInput{
QueueUrl: qURL,
ReceiptHandle: delivery.Messages[0].ReceiptHandle,
})

if err != nil {
return err
}
return nil
}

// defaultQueueURL returns the default queue URL
func (b *Broker) defaultQueueURL() *string {
if b.queueUrl != nil {
return b.queueUrl
}
return aws.String(b.GetConfig().Broker + "/" + b.GetConfig().DefaultQueue)
}

// receiveMessage receives a message from the specified queue URL
func (b *Broker) receiveMessage(qURL *string) (*awssqs.ReceiveMessageOutput, error) {
var waitTimeSeconds int
var visibilityTimeout *int
if b.GetConfig().SQS != nil {
waitTimeSeconds = b.GetConfig().SQS.WaitTimeSeconds
visibilityTimeout = b.GetConfig().SQS.VisibilityTimeout
} else {
waitTimeSeconds = 0
}
input := &awssqs.ReceiveMessageInput{
AttributeNames: []*string{
aws.String(awssqs.MessageSystemAttributeNameSentTimestamp),
},
MessageAttributeNames: []*string{
aws.String(awssqs.QueueAttributeNameAll),
},
QueueUrl: qURL,
MaxNumberOfMessages: aws.Int64(1),
WaitTimeSeconds: aws.Int64(int64(waitTimeSeconds)),
}
if visibilityTimeout != nil {
input.VisibilityTimeout = aws.Int64(int64(*visibilityTimeout))
}
result, err := b.service.ReceiveMessage(input)
if err != nil {
return nil, err
}
return result, err
}

// initializePool initializes the concurrency pool
func (b *Broker) initializePool(pool chan struct{}, concurrency int) {
for i := 0; i < concurrency; i++ {
pool <- struct{}{}
}
}

// consumeDeliveries consumes deliveries from the deliveries channel
func (b *Broker) consumeDeliveries(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}, errorsChan chan error) (bool, error) {
select {
case err := <-errorsChan:
return false, err
case d := <-deliveries:

b.processingWG.Add(1)

// Consume the task inside a goroutine so multiple tasks
// can be processed concurrently
go func() {

if err := b.consumeOne(d, taskProcessor); err != nil {
errorsChan <- err
}

b.processingWG.Done()

if concurrency > 0 {
// give worker back to pool
pool <- struct{}{}
}
}()
case <-b.GetStopChan():
return false, nil
}
return true, nil
}

// continueReceivingMessages reports whether the receiving loop should keep going
func (b *Broker) continueReceivingMessages(qURL *string, deliveries chan *awssqs.ReceiveMessageOutput) (bool, error) {
select {
// A way to stop this goroutine from b.StopConsuming
case <-b.stopReceivingChan:
return false, nil
default:
output, err := b.receiveMessage(qURL)
if err != nil {
return true, err
}
if len(output.Messages) == 0 {
return true, nil
}
go func() { deliveries <- output }()
}
return true, nil
}

// stopReceiving signals stopReceivingChan to stop the receiving goroutine
func (b *Broker) stopReceiving() {
// Stop the receiving goroutine
b.stopReceivingChan <- 1
}

// getQueueURL returns the queue URL, using the task processor's custom queue if one is set
// and falling back to the default queue name from the config otherwise
func (b *Broker) getQueueURL(taskProcessor iface.TaskProcessor) *string {
queueName := b.GetConfig().DefaultQueue
if taskProcessor.CustomQueue() != "" {
queueName = taskProcessor.CustomQueue()
}

return aws.String(b.GetConfig().Broker + "/" + queueName)
}
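The `pool` channel used above is a token bucket: initializePool seeds one token per worker, and the goroutine spawned in consumeDeliveries hands its token back once processing finishes. A minimal standalone sketch of that throttling pattern, with an illustrative take-before-dispatch loop (the task count and body are placeholders, not part of the vendored code):

package main

import (
    "fmt"
    "sync"
)

func main() {
    const concurrency = 3
    pool := make(chan struct{}, concurrency)
    for i := 0; i < concurrency; i++ {
        pool <- struct{}{} // seed one token per worker, as initializePool does
    }

    var wg sync.WaitGroup
    for task := 1; task <= 10; task++ {
        <-pool // take a token before dispatching, throttling to `concurrency`
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            fmt.Println("processing task", n)
            pool <- struct{}{} // hand the token back, as consumeDeliveries does
        }(task)
    }
    wg.Wait()
}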

+ 129
- 0
vendor/github.com/RichardKnop/machinery/v1/common/amqp.go View File

@@ -0,0 +1,129 @@
package common

import (
"crypto/tls"
"fmt"

"github.com/streadway/amqp"
)

// AMQPConnector opens and manages RabbitMQ connections
type AMQPConnector struct{}

// Connect opens a connection to RabbitMQ, declares an exchange, opens a channel,
// declares and binds the queue, and enables publish confirmations
func (ac *AMQPConnector) Connect(url string, tlsConfig *tls.Config, exchange, exchangeType, queueName string, queueDurable, queueDelete bool, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*amqp.Connection, *amqp.Channel, amqp.Queue, <-chan amqp.Confirmation, <-chan *amqp.Error, error) {
// Connect to server
conn, channel, err := ac.Open(url, tlsConfig)
if err != nil {
return nil, nil, amqp.Queue{}, nil, nil, err
}

if exchange != "" {
// Declare an exchange
if err = channel.ExchangeDeclare(
exchange, // name of the exchange
exchangeType, // type
true, // durable
false, // delete when complete
false, // internal
false, // noWait
exchangeDeclareArgs, // arguments
); err != nil {
return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Exchange declare error: %s", err)
}
}

var queue amqp.Queue
if queueName != "" {
// Declare a queue
queue, err = channel.QueueDeclare(
queueName, // name
queueDurable, // durable
queueDelete, // delete when unused
false, // exclusive
false, // no-wait
queueDeclareArgs, // arguments
)
if err != nil {
return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Queue declare error: %s", err)
}

// Bind the queue
if err = channel.QueueBind(
queue.Name, // name of the queue
queueBindingKey, // binding key
exchange, // source exchange
false, // noWait
queueBindingArgs, // arguments
); err != nil {
return conn, channel, queue, nil, nil, fmt.Errorf("Queue bind error: %s", err)
}
}

// Enable publish confirmations
if err = channel.Confirm(false); err != nil {
return conn, channel, queue, nil, nil, fmt.Errorf("Channel could not be put into confirm mode: %s", err)
}

return conn, channel, queue, channel.NotifyPublish(make(chan amqp.Confirmation, 1)), conn.NotifyClose(make(chan *amqp.Error, 1)), nil
}

// DeleteQueue deletes a queue by name
func (ac *AMQPConnector) DeleteQueue(channel *amqp.Channel, queueName string) error {
// First return value is number of messages removed
_, err := channel.QueueDelete(
queueName, // name
false, // ifUnused
false, // ifEmpty
false, // noWait
)

return err
}

// InspectQueue provides information about a specific queue
func (*AMQPConnector) InspectQueue(channel *amqp.Channel, queueName string) (*amqp.Queue, error) {
queueState, err := channel.QueueInspect(queueName)
if err != nil {
return nil, fmt.Errorf("Queue inspect error: %s", err)
}

return &queueState, nil
}

// Open opens a new RabbitMQ connection and channel
func (ac *AMQPConnector) Open(url string, tlsConfig *tls.Config) (*amqp.Connection, *amqp.Channel, error) {
// Connect
// From amqp docs: DialTLS will use the provided tls.Config when it encounters an amqps:// scheme
// and will dial a plain connection when it encounters an amqp:// scheme.
conn, err := amqp.DialTLS(url, tlsConfig)
if err != nil {
return nil, nil, fmt.Errorf("Dial error: %s", err)
}

// Open a channel
channel, err := conn.Channel()
if err != nil {
return nil, nil, fmt.Errorf("Open channel error: %s", err)
}

return conn, channel, nil
}

// Close closes the channel and connection
func (ac *AMQPConnector) Close(channel *amqp.Channel, conn *amqp.Connection) error {
if channel != nil {
if err := channel.Close(); err != nil {
return fmt.Errorf("Close channel error: %s", err)
}
}

if conn != nil {
if err := conn.Close(); err != nil {
return fmt.Errorf("Close connection error: %s", err)
}
}

return nil
}
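A hedged usage sketch of AMQPConnector.Connect and Close. The broker URL assumes a local RabbitMQ instance; the exchange, queue, and binding-key names mirror the machinery defaults declared in config.go further down in this diff:

package main

import (
    "log"

    "github.com/RichardKnop/machinery/v1/common"
)

func main() {
    connector := &common.AMQPConnector{}
    conn, channel, queue, confirms, errs, err := connector.Connect(
        "amqp://guest:guest@localhost:5672/", // assumed local broker
        nil,                                  // no TLS for plain amqp://
        "machinery_exchange",                 // exchange
        "direct",                             // exchange type
        "machinery_tasks",                    // queue name
        true,                                 // durable queue
        false,                                // do not delete when unused
        "machinery_task",                     // binding key
        nil, nil, nil,                        // no extra declare/bind arguments
    )
    if err != nil {
        log.Fatal(err)
    }
    defer connector.Close(channel, conn)

    log.Printf("declared queue %q; confirmations enabled: %v", queue.Name, confirms != nil)
    _ = errs // a real worker would watch this for connection-close notifications
}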

+ 25
- 0
vendor/github.com/RichardKnop/machinery/v1/common/backend.go View File

@@ -0,0 +1,25 @@
package common

import (
"github.com/RichardKnop/machinery/v1/config"
)

// Backend represents a base backend structure
type Backend struct {
cnf *config.Config
}

// NewBackend creates a new Backend instance
func NewBackend(cnf *config.Config) Backend {
return Backend{cnf: cnf}
}

// GetConfig returns config
func (b *Backend) GetConfig() *config.Config {
return b.cnf
}

// IsAMQP returns whether this backend is AMQP based (always false for the base Backend)
func (b *Backend) IsAMQP() bool {
return false
}
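Concrete backends are expected to embed this base struct to inherit GetConfig and IsAMQP; a minimal hypothetical sketch:

package custombackend

import (
    "github.com/RichardKnop/machinery/v1/common"
    "github.com/RichardKnop/machinery/v1/config"
)

// CustomBackend is hypothetical; it inherits GetConfig and IsAMQP
// from the embedded common.Backend.
type CustomBackend struct {
    common.Backend
}

// New wires the shared config into the embedded base struct.
func New(cnf *config.Config) *CustomBackend {
    return &CustomBackend{Backend: common.NewBackend(cnf)}
}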

+ 121
- 0
vendor/github.com/RichardKnop/machinery/v1/common/broker.go View File

@@ -0,0 +1,121 @@
package common

import (
"errors"

"github.com/RichardKnop/machinery/v1/brokers/iface"
"github.com/RichardKnop/machinery/v1/config"
"github.com/RichardKnop/machinery/v1/log"
"github.com/RichardKnop/machinery/v1/retry"
"github.com/RichardKnop/machinery/v1/tasks"
)

// Broker represents a base broker structure
type Broker struct {
cnf *config.Config
registeredTaskNames []string
retry bool
retryFunc func(chan int)
retryStopChan chan int
stopChan chan int
}

// NewBroker creates a new Broker instance
func NewBroker(cnf *config.Config) Broker {
return Broker{
cnf: cnf,
retry: true,
stopChan: make(chan int),
retryStopChan: make(chan int),
}
}

// GetConfig returns config
func (b *Broker) GetConfig() *config.Config {
return b.cnf
}

// GetRetry returns whether the broker should keep retrying after failures
func (b *Broker) GetRetry() bool {
return b.retry
}

// GetRetryFunc returns the retry closure
func (b *Broker) GetRetryFunc() func(chan int) {
return b.retryFunc
}

// GetRetryStopChan returns the channel used to stop the retry closure early
func (b *Broker) GetRetryStopChan() chan int {
return b.retryStopChan
}

// GetStopChan returns the channel used to stop message consumption
func (b *Broker) GetStopChan() chan int {
return b.stopChan
}

// Publish places a new message on the default queue; the base broker leaves it unimplemented
func (b *Broker) Publish(signature *tasks.Signature) error {
return errors.New("Not implemented")
}

// SetRegisteredTaskNames sets registered task names
func (b *Broker) SetRegisteredTaskNames(names []string) {
b.registeredTaskNames = names
}

// IsTaskRegistered returns true if the task is registered with this broker
func (b *Broker) IsTaskRegistered(name string) bool {
for _, registeredTaskName := range b.registeredTaskNames {
if registeredTaskName == name {
return true
}
}
return false
}

// GetPendingTasks returns a slice of task.Signatures waiting in the queue
func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
return nil, errors.New("Not implemented")
}

// StartConsuming is the common part of the StartConsuming logic shared by concrete brokers
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) {
if b.retryFunc == nil {
b.retryFunc = retry.Closure()
}
}

// StopConsuming is the common part of the StopConsuming logic shared by concrete brokers
func (b *Broker) StopConsuming() {
// Do not retry from now on
b.retry = false
// Stop the retry closure earlier
select {
case b.retryStopChan <- 1:
log.WARNING.Print("Stopping retry closure.")
default:
}
// Notifying the stop channel stops consuming of messages
close(b.stopChan)
log.WARNING.Print("Stop channel")
}

// GetRegisteredTaskNames returns the registered task names
func (b *Broker) GetRegisteredTaskNames() []string {
return b.registeredTaskNames
}

// AdjustRoutingKey makes sure the routing key is correct.
// If the routing key is an empty string, it falls back to the
// default queue name from the config.
func (b *Broker) AdjustRoutingKey(s *tasks.Signature) {
if s.RoutingKey != "" {
return
}

s.RoutingKey = b.GetConfig().DefaultQueue
}
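A small sketch exercising the base broker's routing-key fallback and task registry; the task name "add" is illustrative:

package main

import (
    "fmt"

    "github.com/RichardKnop/machinery/v1/common"
    "github.com/RichardKnop/machinery/v1/config"
    "github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
    broker := common.NewBroker(&config.Config{DefaultQueue: "machinery_tasks"})

    sig := &tasks.Signature{Name: "add"} // empty RoutingKey
    broker.AdjustRoutingKey(sig)
    fmt.Println(sig.RoutingKey) // falls back to "machinery_tasks"

    broker.SetRegisteredTaskNames([]string{"add"})
    fmt.Println(broker.IsTaskRegistered("add")) // true
}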

+ 84
- 0
vendor/github.com/RichardKnop/machinery/v1/common/redis.go View File

@@ -0,0 +1,84 @@
package common

import (
"crypto/tls"
"time"

"github.com/RichardKnop/machinery/v1/config"
"github.com/gomodule/redigo/redis"
)

var (
defaultConfig = &config.RedisConfig{
MaxIdle: 3,
IdleTimeout: 240,
ReadTimeout: 15,
WriteTimeout: 15,
ConnectTimeout: 15,
NormalTasksPollPeriod: 1000,
DelayedTasksPollPeriod: 20,
}
)

// RedisConnector opens and pools Redis connections
type RedisConnector struct{}

// NewPool returns a new pool of Redis connections
func (rc *RedisConnector) NewPool(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) *redis.Pool {
if cnf == nil {
cnf = defaultConfig
}
return &redis.Pool{
MaxIdle: cnf.MaxIdle,
IdleTimeout: time.Duration(cnf.IdleTimeout) * time.Second,
MaxActive: cnf.MaxActive,
Wait: cnf.Wait,
Dial: func() (redis.Conn, error) {
c, err := rc.open(socketPath, host, password, db, cnf, tlsConfig)
if err != nil {
return nil, err
}

if db != 0 {
_, err = c.Do("SELECT", db)
if err != nil {
return nil, err
}
}

return c, err
},
// PINGs connections that have been idle more than 10 seconds
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < 10*time.Second {
return nil
}
_, err := c.Do("PING")
return err
},
}
}

// Open a new Redis connection
func (rc *RedisConnector) open(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) (redis.Conn, error) {
var opts = []redis.DialOption{
redis.DialDatabase(db),
redis.DialReadTimeout(time.Duration(cnf.ReadTimeout) * time.Second),
redis.DialWriteTimeout(time.Duration(cnf.WriteTimeout) * time.Second),
redis.DialConnectTimeout(time.Duration(cnf.ConnectTimeout) * time.Second),
}

if tlsConfig != nil {
opts = append(opts, redis.DialTLSConfig(tlsConfig), redis.DialUseTLS(true))
}

if password != "" {
opts = append(opts, redis.DialPassword(password))
}

if socketPath != "" {
return redis.Dial("unix", socketPath, opts...)
}

return redis.Dial("tcp", host, opts...)
}
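A hedged usage sketch of NewPool; the Redis address is an assumption, and passing a nil *config.RedisConfig falls back to the defaultConfig declared above:

package main

import (
    "log"

    "github.com/RichardKnop/machinery/v1/common"
)

func main() {
    connector := &common.RedisConnector{}
    // host is an assumed local Redis; nil cnf falls back to defaultConfig
    pool := connector.NewPool("", "localhost:6379", "", 0, nil, nil)
    defer pool.Close()

    conn := pool.Get()
    defer conn.Close()

    reply, err := conn.Do("PING")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("redis replied: %v", reply) // expect PONG
}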

+ 161
- 0
vendor/github.com/RichardKnop/machinery/v1/config/config.go View File

@@ -0,0 +1,161 @@
package config

import (
"crypto/tls"
"fmt"
"strings"
"time"

"cloud.google.com/go/pubsub"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/sqs"
"go.mongodb.org/mongo-driver/mongo"
)

const (
// DefaultResultsExpireIn is a default time used to expire task states and group metadata from the backend
DefaultResultsExpireIn = 3600
)

var (
// Start with sensible default values
defaultCnf = &Config{
Broker: "amqp://guest:guest@localhost:5672/",
DefaultQueue: "machinery_tasks",
ResultBackend: "amqp://guest:guest@localhost:5672/",
ResultsExpireIn: DefaultResultsExpireIn,
AMQP: &AMQPConfig{
Exchange: "machinery_exchange",
ExchangeType: "direct",
BindingKey: "machinery_task",
PrefetchCount: 3,
},
DynamoDB: &DynamoDBConfig{
TaskStatesTable: "task_states",
GroupMetasTable: "group_metas",
},
Redis: &RedisConfig{
MaxIdle: 3,
IdleTimeout: 240,
ReadTimeout: 15,
WriteTimeout: 15,
ConnectTimeout: 15,
NormalTasksPollPeriod: 1000,
DelayedTasksPollPeriod: 20,
},
GCPPubSub: &GCPPubSubConfig{
Client: nil,
},
}

reloadDelay = time.Second * 10
)

// Config holds all configuration for our program
type Config struct {
Broker string `yaml:"broker" envconfig:"BROKER"`
DefaultQueue string `yaml:"default_queue" envconfig:"DEFAULT_QUEUE"`
ResultBackend string `yaml:"result_backend" envconfig:"RESULT_BACKEND"`
ResultsExpireIn int `yaml:"results_expire_in" envconfig:"RESULTS_EXPIRE_IN"`
AMQP *AMQPConfig `yaml:"amqp"`
SQS *SQSConfig `yaml:"sqs"`
Redis *RedisConfig `yaml:"redis"`
GCPPubSub *GCPPubSubConfig `yaml:"-" ignored:"true"`
MongoDB *MongoDBConfig `yaml:"-" ignored:"true"`
TLSConfig *tls.Config
// NoUnixSignals - when set disables signal handling in machinery
NoUnixSignals bool `yaml:"no_unix_signals" envconfig:"NO_UNIX_SIGNALS"`
DynamoDB *DynamoDBConfig `yaml:"dynamodb"`
}

// QueueBindingArgs arguments which are used when binding to the exchange
type QueueBindingArgs map[string]interface{}

// AMQPConfig wraps RabbitMQ related configuration
type AMQPConfig struct {
Exchange string `yaml:"exchange" envconfig:"AMQP_EXCHANGE"`
ExchangeType string `yaml:"exchange_type" envconfig:"AMQP_EXCHANGE_TYPE"`
QueueBindingArgs QueueBindingArgs `yaml:"queue_binding_args" envconfig:"AMQP_QUEUE_BINDING_ARGS"`
BindingKey string `yaml:"binding_key" envconfig:"AMQP_BINDING_KEY"`
PrefetchCount int `yaml:"prefetch_count" envconfig:"AMQP_PREFETCH_COUNT"`
AutoDelete bool `yaml:"auto_delete" envconfig:"AMQP_AUTO_DELETE"`
}

// DynamoDBConfig wraps DynamoDB related configuration
type DynamoDBConfig struct {
Client *dynamodb.DynamoDB
TaskStatesTable string `yaml:"task_states_table" envconfig:"TASK_STATES_TABLE"`
GroupMetasTable string `yaml:"group_metas_table" envconfig:"GROUP_METAS_TABLE"`
}

// SQSConfig wraps SQS related configuration
type SQSConfig struct {
Client *sqs.SQS
WaitTimeSeconds int `yaml:"receive_wait_time_seconds" envconfig:"SQS_WAIT_TIME_SECONDS"`
// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html
// visibility timeout should default to nil to use the overall visibility timeout for the queue
VisibilityTimeout *int `yaml:"receive_visibility_timeout" envconfig:"SQS_VISIBILITY_TIMEOUT"`
}

// RedisConfig wraps Redis related configuration
type RedisConfig struct {
// Maximum number of idle connections in the pool.
MaxIdle int `yaml:"max_idle" envconfig:"REDIS_MAX_IDLE"`

// Maximum number of connections allocated by the pool at a given time.
// When zero, there is no limit on the number of connections in the pool.
MaxActive int `yaml:"max_active" envconfig:"REDIS_MAX_ACTIVE"`

// Close connections after remaining idle for this duration in seconds. If the value
// is zero, then idle connections are not closed. Applications should set
// the timeout to a value less than the server's timeout.
IdleTimeout int `yaml:"max_idle_timeout" envconfig:"REDIS_IDLE_TIMEOUT"`

// If Wait is true and the pool is at the MaxActive limit, then Get() waits
// for a connection to be returned to the pool before returning.
Wait bool `yaml:"wait" envconfig:"REDIS_WAIT"`

// ReadTimeout specifies the timeout in seconds for reading a single command reply.
ReadTimeout int `yaml:"read_timeout" envconfig:"REDIS_READ_TIMEOUT"`

// WriteTimeout specifies the timeout in seconds for writing a single command.
WriteTimeout int `yaml:"write_timeout" envconfig:"REDIS_WRITE_TIMEOUT"`

// ConnectTimeout specifies the timeout in seconds for connecting to the Redis server when
// no DialNetDial option is specified.
ConnectTimeout int `yaml:"connect_timeout" envconfig:"REDIS_CONNECT_TIMEOUT"`

// NormalTasksPollPeriod specifies the period in milliseconds when polling redis for normal tasks
NormalTasksPollPeriod int `yaml:"normal_tasks_poll_period" envconfig:"REDIS_NORMAL_TASKS_POLL_PERIOD"`

// DelayedTasksPollPeriod specifies the period in milliseconds when polling redis for delayed tasks
DelayedTasksPollPeriod int `yaml:"delayed_tasks_poll_period" envconfig:"REDIS_DELAYED_TASKS_POLL_PERIOD"`
}

// GCPPubSubConfig wraps GCP PubSub related configuration
type GCPPubSubConfig struct {
Client *pubsub.Client
MaxExtension time.Duration
}

// MongoDBConfig wraps MongoDB related configuration
type MongoDBConfig struct {
Client *mongo.Client
Database string
}

// Decode from yaml to map (any field whose type or pointer-to-type implements
// envconfig.Decoder can control its own deserialization)
func (args *QueueBindingArgs) Decode(value string) error {
pairs := strings.Split(value, ",")
mp := make(map[string]interface{}, len(pairs))
for _, pair := range pairs {
kvpair := strings.Split(pair, ":")
if len(kvpair) != 2 {
return fmt.Errorf("invalid map item: %q", pair)
}
mp[kvpair[0]] = kvpair[1]
}
*args = QueueBindingArgs(mp)
return nil
}
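envconfig calls Decode when, for example, AMQP_QUEUE_BINDING_ARGS is set in the environment; the same parsing can be exercised directly (the key/value pairs here are illustrative):

package main

import (
    "fmt"
    "log"

    "github.com/RichardKnop/machinery/v1/config"
)

func main() {
    var args config.QueueBindingArgs
    // Equivalent to envconfig decoding AMQP_QUEUE_BINDING_ARGS="x-match:all,format:pdf"
    if err := args.Decode("x-match:all,format:pdf"); err != nil {
        log.Fatal(err)
    }
    fmt.Println(args["x-match"], args["format"]) // all pdf
}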

+ 58
- 0
vendor/github.com/RichardKnop/machinery/v1/config/env.go View File

@@ -0,0 +1,58 @@
package config

import (
"time"

"github.com/RichardKnop/machinery/v1/log"
"github.com/kelseyhightower/envconfig"
)

// NewFromEnvironment creates a config object from environment variables
func NewFromEnvironment(keepReloading bool) (*Config, error) {
cnf, err := fromEnvironment()
if err != nil {
return nil, err
}

log.INFO.Print("Successfully loaded config from the environment")

if keepReloading {
// Spawn a goroutine that keeps reloading the config from the environment
go func() {
for {
// Delay after each request
time.Sleep(reloadDelay)

// Attempt to reload the config
newCnf, newErr := fromEnvironment()
if newErr != nil {
log.WARNING.Printf("Failed to reload config from the environment: %v", newErr)
continue
}

*cnf = *newCnf
// log.INFO.Printf("Successfully reloaded config from the environment")
}
}()
}

return cnf, nil
}

func fromEnvironment() (*Config, error) {
loadedCnf, cnf := new(Config), new(Config)
*cnf = *defaultCnf

if err := envconfig.Process("", cnf); err != nil {
return nil, err
}
if err := envconfig.Process("", loadedCnf); err != nil {
return nil, err
}

if loadedCnf.AMQP == nil {
cnf.AMQP = nil
}

return cnf, nil
}
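A minimal caller sketch; with keepReloading set to false the environment is read once and the reload goroutine above is never started:

package main

import (
    "log"

    "github.com/RichardKnop/machinery/v1/config"
)

func main() {
    cnf, err := config.NewFromEnvironment(false) // false: read once, no reload goroutine
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("broker=%s default_queue=%s", cnf.Broker, cnf.DefaultQueue)
}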

+ 83
- 0
vendor/github.com/RichardKnop/machinery/v1/config/file.go View File

@@ -0,0 +1,83 @@
package config

import (
"fmt"
"os"
"time"

"github.com/RichardKnop/machinery/v1/log"
"gopkg.in/yaml.v2"
)

// NewFromYaml creates a config object from a YAML file
func NewFromYaml(cnfPath string, keepReloading bool) (*Config, error) {
cnf, err := fromFile(cnfPath)
if err != nil {
return nil, err
}

log.INFO.Printf("Successfully loaded config from file %s", cnfPath)

if keepReloading {
// Spawn a goroutine that keeps reloading the config file
go func() {
for {
// Delay after each request
time.Sleep(reloadDelay)

// Attempt to reload the config
newCnf, newErr := fromFile(cnfPath)
if newErr != nil {
log.WARNING.Printf("Failed to reload config from file %s: %v", cnfPath, newErr)
continue
}

*cnf = *newCnf
// log.INFO.Printf("Successfully reloaded config from file %s", cnfPath)
}
}()
}

return cnf, nil
}

// ReadFromFile reads data from a file
func ReadFromFile(cnfPath string) ([]byte, error) {
file, err := os.Open(cnfPath)

// Could not open the config file
if err != nil {
return nil, fmt.Errorf("Open file error: %s", err)
}

// Config file found; read at most 1000 bytes (larger files are truncated)
data := make([]byte, 1000)
count, err := file.Read(data)
if err != nil {
return nil, fmt.Errorf("Read from file error: %s", err)
}

return data[:count], nil
}

func fromFile(cnfPath string) (*Config, error) {
loadedCnf, cnf := new(Config), new(Config)
*cnf = *defaultCnf

data, err := ReadFromFile(cnfPath)
if err != nil {
return nil, err
}

if err := yaml.Unmarshal(data, cnf); err != nil {
return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
}
if err := yaml.Unmarshal(data, loadedCnf); err != nil {
return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
}
if loadedCnf.AMQP == nil {
cnf.AMQP = nil
}

return cnf, nil
}
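Note that ReadFromFile reads at most 1000 bytes in a single Read call, so a config larger than that would be truncated. A minimal caller sketch; config.yml is a hypothetical path:

package main

import (
    "log"

    "github.com/RichardKnop/machinery/v1/config"
)

func main() {
    // "config.yml" is a hypothetical path; true starts the 10s reload goroutine above
    cnf, err := config.NewFromYaml("config.yml", true)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("result_backend=%s", cnf.ResultBackend)
}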

Some files were not shown because too many files changed in this diff
