diff --git a/go.mod b/go.mod index d9c12ea3a..8c1844cb4 100755 --- a/go.mod +++ b/go.mod @@ -87,6 +87,7 @@ require ( github.com/niklasfasching/go-org v0.1.9 github.com/oliamb/cutter v0.2.2 github.com/olivere/elastic/v7 v7.0.9 + github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/errors v0.9.1 github.com/pquerna/otp v1.2.0 github.com/prometheus/client_golang v1.1.0 diff --git a/go.sum b/go.sum index 9bae7932d..1d5ce94c7 100755 --- a/go.sum +++ b/go.sum @@ -39,11 +39,9 @@ gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Y gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a h1:aOKEXkDTnh4euoH0so/THLXeHtQuqHmDPb1xEk6Ehok= gitea.com/macaron/inject v0.0.0-20190805023432-d4c86e31027a/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM= gitea.com/macaron/macaron v1.3.3-0.20190803174002-53e005ff4827/go.mod h1:/rvxMjIkOq4BM8uPUb+VHuU02ZfAO6R4+wD//tiCiRw= -gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb h1:amL0md6orTj1tXY16ANzVU9FmzQB+W7aJwp8pVDbrmA= gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb/go.mod h1:0coI+mSPSwbsyAbOuFllVS38awuk9mevhLD52l50Gjs= gitea.com/macaron/macaron v1.4.0 h1:FY1QDGqyuUzs21K6ChkbYbRUfwL7v2aUrhNEJ0IgsAw= gitea.com/macaron/macaron v1.4.0/go.mod h1:P7hfDbQjcW22lkYkXlxdRIfWOXxH2+K4EogN4Q0UlLY= -gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705 h1:mvkQGAlON1Z6Y8pqa/+FpYIskk54mazuECUfZK5oTg0= gitea.com/macaron/session v0.0.0-20190821211443-122c47c5f705/go.mod h1:1ujH0jD6Ca4iK9NL0Q2a7fG2chvXx5hVa7hBfABwpkA= gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d h1:XLww3CvnFZkXVwauN67fniDaIpIqsE+9KVcxlZKlvLU= gitea.com/macaron/session v0.0.0-20191207215012-613cebf0674d/go.mod h1:FanKy3WjWb5iw/iZBPk4ggoQT9FcM6bkBPvmDmsH6tY= @@ -70,7 +68,6 @@ github.com/RichardKnop/machinery v1.6.9 h1:dQu1c7ENgPFrN9qWweEe7xDDvNYGSqEyprK0G github.com/RichardKnop/machinery v1.6.9/go.mod h1:BO7MG/5tvdpgMVkOT8V94SEf8x8H8aceRzTt8Tx1IMc= github.com/RichardKnop/redsync v1.2.0 h1:gK35hR3zZkQigHKm8wOGb9MpJ9BsrW6MzxezwjTcHP0= github.com/RichardKnop/redsync v1.2.0/go.mod h1:9b8nBGAX3bE2uCfJGSnsDvF23mKyHTZzmvmj5FH3Tp0= -github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8= github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= @@ -140,11 +137,9 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k= -github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM= github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8= github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= -github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo= github.com/couchbase/goutils 
v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk= github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= @@ -169,7 +164,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20190924004331-208c0a498538 h1:bpWCJ5MddHsv4Xtl3azkK89mZzd/vvut32mvAnKbyUA= github.com/denisenkom/go-mssqldb v0.0.0-20190924004331-208c0a498538/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= @@ -185,7 +179,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/editorconfig/editorconfig-core-go/v2 v2.1.1 h1:mhPg/0hGebcpiiQLqJD2PWWyoHRLEdZ3sXKaEvT1EQU= github.com/editorconfig/editorconfig-core-go/v2 v2.1.1/go.mod h1:/LuhWJiQ9Gvo1DhVpa4ssm5qeg8rrztdtI7j/iCie2k= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -306,7 +299,6 @@ github.com/go-swagger/go-swagger v0.21.0 h1:AX9mdfzp6eJtUe92nFrWmbK7ocRgkCDPJs0F github.com/go-swagger/go-swagger v0.21.0/go.mod h1:tDb8PdDVFcaE8EPXkMOsuxpL3UEPiwu1UDZar9Z/1RY= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.mod h1:b65mBPzqzZWxOZGxSWrqs4GInLIn+u99Q9q7p+GKni0= -github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -330,13 +322,11 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= @@ -349,7 +339,6 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -375,7 +364,6 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -406,7 +394,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= @@ -458,7 +445,6 @@ github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4 h1:cTxwSmnaqLoo+ github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.2 
h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0= github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -472,7 +458,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -545,7 +530,6 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c h1:3wkDRdxK92dF+c1ke2dtj7ZzemFWBHB9plnJOtlwdFA= github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM= github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= @@ -580,6 +564,8 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= @@ -588,7 +574,6 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -715,7 +700,6 @@ github.com/streadway/amqp v0.0.0-20190214183023-884228600bc9 h1:wR6aLKdbJ5E8m+NZ github.com/streadway/amqp v0.0.0-20190214183023-884228600bc9/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -731,7 +715,6 @@ github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481/go.mod h1:ahpPrc7 github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -746,7 +729,6 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/unknwon/cae v1.0.0 h1:i39lOFaBXZxhGjQOy/RNbi8uzettCs6OQxpR0xXohGU= github.com/unknwon/cae v1.0.0/go.mod h1:QaSeRctcea9fK6piJpAMCCPKxzJ01+xFcr2k1m3WRPU= -github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM= github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM= github.com/unknwon/com v1.0.1 h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs= github.com/unknwon/com v1.0.1/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM= @@ -772,7 +754,6 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yohcop/openid-go v1.0.0 h1:EciJ7ZLETHR3wOtxBvKXx9RV6eyHZpCaSZ1inbBaUXE= github.com/yohcop/openid-go v1.0.0/go.mod h1:/408xiwkeItSPJZSTPF7+VtZxPkPrRRpRNK2vjGh6yI= github.com/yuin/goldmark v1.1.7/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.25 h1:isv+Q6HQAmmL2Ofcmg8QauBmDPlUUnSoNhEcC940Rds= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27 h1:nqDD4MMMQA0lmWq03Z2/myGPYLQoXtmi0rGVs95ntbo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -813,16 +794,15 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190907121410-71b5226ff739/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 
h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a h1:gHevYm0pO4QUbwy8Dmdr01R5r1BuKtfYqRqF0h/Cbh0= golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -832,9 +812,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -865,7 +843,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200513185701-a91f0712d120 h1:EZ3cVSzKOlJxAd8e8YAJ7no8nNypTxexh/YE/xW3ZEY= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -876,7 +853,6 @@ golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ 
-915,13 +891,10 @@ golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190907184412-d223b2b6db03/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -932,7 +905,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -955,14 +927,11 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200225230052-807dcd883420 h1:4RJNOV+2rLxMEfr6QIpC7GEv9MjD6ApGXTCLrNF9+eA= golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224 h1:azwY/v0y0K4mFHVsg5+UrTgchqALYWpqVo6vL5OmkmI= golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53 h1:vmsb6v0zUdmUlXfwKaYrHPPRCV0lHq/IwNIf0ASGjyQ= golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 
h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -984,7 +953,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.4 h1:WiKh4+/eMB2HaY7QhCfW/R7MuRAoA8QMCSJA6jP5/fo= google.golang.org/appengine v1.6.4/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -1016,7 +984,6 @@ google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLY google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= @@ -1027,7 +994,6 @@ gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 h1:nn6Zav2sOQHCFJHEspya8 gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1058,9 +1024,7 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1078,13 +1042,11 @@ sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251 h1:mUcz5b3FJbP5Cvdq7Khzn6J9OCUQJaBwgBkCR+MOwSs= strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY= -xorm.io/builder v0.3.6 h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8= xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU= xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= xorm.io/core v0.7.2 h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw= xorm.io/core v0.7.2/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= -xorm.io/xorm v0.8.0 h1:iALxgJrX8O00f8Jk22GbZwPmxJNgssV5Mv4uc2HL9PM= xorm.io/xorm v0.8.0/go.mod h1:ZkJLEYLoVyg7amJK/5r779bHyzs2AU8f8VMiP6BM7uY= xorm.io/xorm v1.0.1 h1:/lITxpJtkZauNpdzj+L9CN/3OQxZaABrbergMcJu+Cw= xorm.io/xorm v1.0.1/go.mod h1:o4vnEsQ5V2F1/WK6w4XTwmiWJeGj82tqjAnHe44wVHY= diff --git a/models/cloudbrain.go b/models/cloudbrain.go index e64fc8383..efaa9ffeb 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -19,8 +19,8 @@ type JobType string type ModelArtsJobStatus string const ( - NPUResource = "NPU" - GPUResource = "CPU/GPU" + NPUResource = "NPU" + GPUResource = "CPU/GPU" JobWaiting CloudbrainStatus = "WAITING" JobStopped CloudbrainStatus = "STOPPED" @@ -210,6 +210,7 @@ type CloudbrainsOptions struct { JobType string VersionName string IsLatestVersion string + JobTypeNot bool } type TaskPod struct { @@ -894,9 +895,15 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { } if (opts.JobType) != "" { - cond = cond.And( - builder.Eq{"cloudbrain.job_type": opts.JobType}, - ) + if opts.JobTypeNot { + cond = cond.And( + builder.Neq{"cloudbrain.job_type": opts.JobType}, + ) + } else { + cond = cond.And( + builder.Eq{"cloudbrain.job_type": opts.JobType}, + ) + } } if (opts.IsLatestVersion) != "" { @@ -968,7 +975,9 @@ func QueryModelTrainJobList(repoId int64) ([]*CloudbrainInfo, int, error) { cond = cond.And( builder.Eq{"Status": "COMPLETED"}, ) - + cond = cond.And( + builder.Eq{"job_type": "TRAIN"}, + ) cloudbrains := make([]*CloudbrainInfo, 0) if err := sess.Select("job_id,job_name").Table(&Cloudbrain{}).Where(cond).OrderBy("created_unix DESC"). Find(&cloudbrains); err != nil { @@ -1201,3 +1210,28 @@ func GetCloudbrainTrainJobCountByUserID(userID int64) (int, error) { And("job_type = ? and user_id = ? 
and type = ?", JobTypeTrain, userID, TypeCloudBrainTwo).Count(new(Cloudbrain))
 	return int(count), err
 }
+
+func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
+	sess := x.NewSession()
+	defer sess.Close()
+
+	if err = sess.Begin(); err != nil {
+		return err
+	}
+
+	if _, err = sess.Delete(old); err != nil {
+		sess.Rollback()
+		return err
+	}
+
+	if _, err = sess.Insert(new); err != nil {
+		sess.Rollback()
+		return err
+	}
+
+	if err = sess.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/models/models.go b/models/models.go
index a72ebe5db..11f445830 100755
--- a/models/models.go
+++ b/models/models.go
@@ -143,6 +143,12 @@ func init() {
 		new(SummaryStatistic),
 		new(UserBusinessAnalysis),
 		new(UserBusinessAnalysisAll),
+		new(UserBusinessAnalysisCurrentYear),
+		new(UserBusinessAnalysisLast30Day),
+		new(UserBusinessAnalysisLastMonth),
+		new(UserBusinessAnalysisCurrentMonth),
+		new(UserBusinessAnalysisCurrentWeek),
+		new(UserBusinessAnalysisYesterday),
 		new(UserLoginLog),
 	)
diff --git a/models/repo.go b/models/repo.go
index 8070d7442..f393b51b2 100755
--- a/models/repo.go
+++ b/models/repo.go
@@ -6,6 +6,7 @@ package models
 import (
+	"code.gitea.io/gitea/modules/git"
 	"context"
 	"crypto/md5"
 	"errors"
@@ -2519,3 +2520,53 @@ func UpdateRepositoryCommitNum(repo *Repository) error {
 	return nil
 }
+
+type RepoFile struct {
+	CommitId string
+	Content  []byte
+}
+
+// ReadLatestFileInRepo read latest version of file in repository
+// return a RepoFile
+func ReadLatestFileInRepo(userName, repoName, refName, treePath string) (*RepoFile, error) {
+	var err error
+	repoPath := RepoPath(userName, repoName)
+	gitRepo, err := git.OpenRepository(repoPath)
+	if err != nil {
+		log.Error("ReadLatestFileInRepo error when OpenRepository,error=%v", err)
+		return nil, err
+	}
+	commitID, err := gitRepo.GetBranchCommitID(refName)
+	if err != nil {
+		log.Error("ReadLatestFileInRepo error when GetBranchCommitID,error=%v", err)
+		return nil, err
+	}
+	commit, err := gitRepo.GetBranchCommit(refName)
+	if err != nil {
+		log.Error("ReadLatestFileInRepo error when GetBranchCommit,error=%v", err)
+		return nil, err
+	}
+
+	blob, err := commit.GetBlobByPath(treePath)
+	if err != nil {
+		log.Error("ReadLatestFileInRepo error when GetBlobByPath,error=%v", err)
+		return nil, err
+	}
+
+	reader, err := blob.DataAsync()
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err = reader.Close(); err != nil {
+			log.Error("ReadLatestFileInRepo: Close: %v", err)
+		}
+	}()
+
+	buf := make([]byte, 1024)
+	n, _ := reader.Read(buf)
+	if n >= 0 {
+		buf = buf[:n]
+	}
+	return &RepoFile{CommitId: commitID, Content: buf}, nil
+}
diff --git a/models/repo_watch.go b/models/repo_watch.go
index 4a8f1af8a..85a7834bb 100644
--- a/models/repo_watch.go
+++ b/models/repo_watch.go
@@ -287,7 +287,9 @@ func NotifyWatchers(actions ...*Action) error {
 func producer(actions ...*Action) {
 	for _, action := range actions {
-		ActionChan <- action
+		if !action.IsPrivate {
+			ActionChan <- action
+		}
 	}
 }
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go
index a15b9db5f..d04e350c2 100644
--- a/models/user_business_analysis.go
+++ b/models/user_business_analysis.go
@@ -10,10 +10,12 @@ import (
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/timeutil"
 	"xorm.io/builder"
+	"xorm.io/xorm"
 )
 const (
-	Page_SIZE = 2000
+	PAGE_SIZE         = 2000
+	BATCH_INSERT_SIZE = 50
 )
 type UserBusinessAnalysisAll struct {
@@ -163,14 +165,6 @@ func (ulist UserBusinessAnalysisList) Less(i, j int) bool { return
ulist[i].ID > ulist[j].ID } -type UserBusinessAnalysisAllList []*UserBusinessAnalysisAll - -func (ulist UserBusinessAnalysisAllList) Swap(i, j int) { ulist[i], ulist[j] = ulist[j], ulist[i] } -func (ulist UserBusinessAnalysisAllList) Len() int { return len(ulist) } -func (ulist UserBusinessAnalysisAllList) Less(i, j int) bool { - return ulist[i].ID > ulist[j].ID -} - func getLastCountDate() int64 { statictisSess := xStatistic.NewSession() defer statictisSess.Close() @@ -189,6 +183,29 @@ func getLastCountDate() int64 { return pageStartTime.Unix() } +func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, queryObj interface{}, userName string) ([]*UserBusinessAnalysisAll, int64) { + statictisSess := xStatistic.NewSession() + defer statictisSess.Close() + var cond = builder.NewCond() + if len(userName) > 0 { + cond = cond.And( + builder.Like{"name", userName}, + ) + } + allCount, err := statictisSess.Where(cond).Count(queryObj) + if err != nil { + log.Info("query error." + err.Error()) + return nil, 0 + } + log.Info("query return total:" + fmt.Sprint(allCount)) + userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) + if err := statictisSess.Table(tableName).Where(cond).OrderBy("commit_count desc,id desc").Limit(pageSize, start). + Find(&userBusinessAnalysisAllList); err != nil { + return nil, 0 + } + return userBusinessAnalysisAllList, allCount +} + func QueryUserStaticDataAll(opts *UserBusinessAnalysisQueryOptions) ([]*UserBusinessAnalysisAll, int64) { log.Info("query startTime =" + fmt.Sprint(opts.StartTime) + " endTime=" + fmt.Sprint(opts.EndTime) + " isAll=" + fmt.Sprint(opts.IsAll)) @@ -202,9 +219,9 @@ func QueryUserStaticDataAll(opts *UserBusinessAnalysisQueryOptions) ([]*UserBusi } log.Info("query return total:" + fmt.Sprint(allCount)) - pageSize := 1000 + pageSize := PAGE_SIZE totalPage := int(allCount) / pageSize - userBusinessAnalysisReturnList := UserBusinessAnalysisAllList{} + userBusinessAnalysisReturnList := make([]*UserBusinessAnalysisAll, 0) for i := 0; i <= int(totalPage); i++ { userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0) if err := statictisSess.Table("user_business_analysis_all").OrderBy("id desc").Limit(pageSize, i*pageSize). 
@@ -217,7 +234,6 @@ func QueryUserStaticDataAll(opts *UserBusinessAnalysisQueryOptions) ([]*UserBusi } } - sort.Sort(userBusinessAnalysisReturnList) log.Info("return size=" + fmt.Sprint(len(userBusinessAnalysisReturnList))) return userBusinessAnalysisReturnList, allCount } @@ -337,28 +353,24 @@ func QueryUserStaticDataPage(opts *UserBusinessAnalysisQueryOptions) ([]*UserBus return userBusinessAnalysisReturnList, count } -func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats) { - +func refreshUserStaticTable(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats, tableName string, pageStartTime time.Time, pageEndTime time.Time) { sess := x.NewSession() defer sess.Close() statictisSess := xStatistic.NewSession() defer statictisSess.Close() - log.Info("truncate all data from table: user_business_analysis_all") - statictisSess.Exec("TRUNCATE TABLE user_business_analysis_all") + log.Info("truncate all data from table: " + tableName) + statictisSess.Exec("TRUNCATE TABLE " + tableName) - currentTimeNow := time.Now() - - startTime := currentTimeNow.AddDate(0, 0, -1) - - pageStartTime := time.Date(2021, 11, 5, 0, 0, 0, 0, currentTimeNow.Location()) log.Info("pageStartTime:" + pageStartTime.Format("2006-01-02 15:04:05")) - pageEndTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location()) log.Info("pageEndTime time:" + pageEndTime.Format("2006-01-02 15:04:05")) start_unix := pageStartTime.Unix() end_unix := pageEndTime.Unix() + currentTimeNow := time.Now() + startTime := currentTimeNow.AddDate(0, 0, -1) + CodeMergeCountMap := queryPullRequest(start_unix, end_unix) CommitCountMap := queryCommitAction(start_unix, end_unix, 5) IssueCountMap := queryCreateIssue(start_unix, end_unix) @@ -385,12 +397,14 @@ func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap ma } var indexTotal int64 indexTotal = 0 + insertCount := 0 + dateRecordBatch := make([]UserBusinessAnalysisAll, 0) for { - sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) userList := make([]*User, 0) sess.Find(&userList) - for i, userRecord := range userList { - log.Info("insert all static, i=" + fmt.Sprint(i) + " userName=" + userRecord.Name) + + for _, userRecord := range userList { var dateRecordAll UserBusinessAnalysisAll dateRecordAll.ID = userRecord.ID dateRecordAll.Email = userRecord.Email @@ -484,18 +498,85 @@ func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap ma } dateRecordAll.CommitModelCount = 0 - _, err = statictisSess.Insert(&dateRecordAll) - if err != nil { - log.Info("insert all data failed." + err.Error()) + + dateRecordBatch = append(dateRecordBatch, dateRecordAll) + if len(dateRecordBatch) >= BATCH_INSERT_SIZE { + insertTable(dateRecordBatch, tableName, statictisSess) + insertCount += BATCH_INSERT_SIZE + if err != nil { + log.Info("insert all data failed." + err.Error()) + } + dateRecordBatch = make([]UserBusinessAnalysisAll, 0) } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } } + if len(dateRecordBatch) > 0 { + insertTable(dateRecordBatch, tableName, statictisSess) + insertCount += len(dateRecordBatch) + if err != nil { + log.Info("insert all data failed." 
+ err.Error())
+		}
+	}
+
+	log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
+}
+
+func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, statictisSess *xorm.Session) {
+
+	insertBatchSql := "INSERT INTO public." + tableName +
+		"(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " +
+		"commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date) " +
+		"VALUES"
+
+	for i, record := range dateRecords {
+		insertBatchSql += "(" + fmt.Sprint(record.ID) + ", " + fmt.Sprint(record.CountDate) + ", " + fmt.Sprint(record.CodeMergeCount) + ", " + fmt.Sprint(record.CommitCount) +
+			", " + fmt.Sprint(record.IssueCount) + ", " + fmt.Sprint(record.CommentCount) + ", " + fmt.Sprint(record.FocusRepoCount) + ", " + fmt.Sprint(record.StarRepoCount) +
+			", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) +
+			", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) +
+			", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "')"
+		if i < (len(dateRecords) - 1) {
+			insertBatchSql += ","
+		}
+	}
+	statictisSess.Exec(insertBatchSql)
+}
+
+func RefreshUserStaticAllTabel(wikiCountMap map[string]int, CommitCodeSizeMap map[string]*git.UserKPIStats) {
+	currentTimeNow := time.Now()
+	pageStartTime := time.Date(2021, 11, 5, 0, 0, 0, 0, currentTimeNow.Location())
+	pageEndTime := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0, currentTimeNow.Location())
+	refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_all", pageStartTime, pageEndTime)
 	log.Info("refresh all data finished.")
+
+	pageStartTime = time.Date(currentTimeNow.Year(), 1, 1, 0, 0, 0, 0, currentTimeNow.Location())
+	refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_year", pageStartTime, pageEndTime)
+
+	thisMonth := time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 0, 0, 0, 0, currentTimeNow.Location())
+	refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_month", thisMonth, pageEndTime)
+
+	offset := int(time.Monday - currentTimeNow.Weekday())
+	if offset > 0 {
+		offset = -6
+	}
+	pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, offset)
+	refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_current_week", pageStartTime, pageEndTime)
+
+	pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -30)
+	refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last30_day", pageStartTime, pageEndTime)
+
+	pageStartTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 0, 0, 0, 0, time.Local).AddDate(0, 0, -1)
+	pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), currentTimeNow.Day(), 23, 59, 59, 0,
currentTimeNow.Location()).AddDate(0, 0, -1) + refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_yesterday", pageStartTime, pageEndTime) + + pageStartTime = thisMonth.AddDate(0, -1, 0) + pageEndTime = time.Date(currentTimeNow.Year(), currentTimeNow.Month(), 1, 23, 59, 59, 0, currentTimeNow.Location()).AddDate(0, 0, -1) + refreshUserStaticTable(wikiCountMap, CommitCodeSizeMap, "user_business_analysis_last_month", pageStartTime, pageEndTime) + } func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, endTime time.Time, isReCount bool) error { @@ -550,7 +631,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, var indexTotal int64 indexTotal = 0 for { - sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) userList := make([]*User, 0) sess.Find(&userList) @@ -660,7 +741,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -700,7 +781,7 @@ func querySolveIssue(start_unix int64, end_unix int64) map[int64]int { issueAssigneesList := make([]*IssueAssignees, 0) sess.Select("issue_assignees.*").Table("issue_assignees"). Join("inner", "issue", "issue.id=issue_assignees.issue_id"). - Where(cond).OrderBy("issue_assignees.id asc").Limit(Page_SIZE, int(indexTotal)) + Where(cond).OrderBy("issue_assignees.id asc").Limit(PAGE_SIZE, int(indexTotal)) sess.Find(&issueAssigneesList) @@ -712,7 +793,7 @@ func querySolveIssue(start_unix int64, end_unix int64) map[int64]int { resultMap[issueAssigneesRecord.AssigneeID] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -735,7 +816,7 @@ func queryPullRequest(start_unix int64, end_unix int64) map[int64]int { indexTotal = 0 for { issueList := make([]*Issue, 0) - sess.Select("issue.*").Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).OrderBy("issue.id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("issue.*").Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).OrderBy("issue.id asc").Limit(PAGE_SIZE, int(indexTotal)) sess.Find(&issueList) log.Info("query issue(PR) size=" + fmt.Sprint(len(issueList))) for _, issueRecord := range issueList { @@ -745,7 +826,7 @@ func queryPullRequest(start_unix int64, end_unix int64) map[int64]int { resultMap[issueRecord.PosterID] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -768,7 +849,7 @@ func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[i var indexTotal int64 indexTotal = 0 for { - sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,user_id,op_type,act_user_id").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) actionList := make([]*Action, 0) sess.Find(&actionList) @@ -781,7 +862,7 @@ func queryCommitAction(start_unix int64, end_unix int64, actionType int64) map[i } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -805,7 +886,7 @@ func queryCreateIssue(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - 
sess.Select("id,poster_id").Table("issue").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,poster_id").Table("issue").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) issueList := make([]*Issue, 0) sess.Find(&issueList) log.Info("query issue size=" + fmt.Sprint(len(issueList))) @@ -816,7 +897,7 @@ func queryCreateIssue(start_unix int64, end_unix int64) map[int64]int { resultMap[issueRecord.PosterID] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -839,7 +920,7 @@ func queryComment(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - sess.Select("id,type,poster_id").Table("comment").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,type,poster_id").Table("comment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) commentList := make([]*Comment, 0) sess.Find(&commentList) log.Info("query Comment size=" + fmt.Sprint(len(commentList))) @@ -850,7 +931,7 @@ func queryComment(start_unix int64, end_unix int64) map[int64]int { resultMap[commentRecord.PosterID] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -875,7 +956,7 @@ func queryWatch(start_unix int64, end_unix int64) map[int64]int { indexTotal = 0 for { watchList := make([]*Watch, 0) - sess.Select("id,user_id,repo_id").Table("watch").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,user_id,repo_id").Table("watch").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) sess.Find(&watchList) log.Info("query Watch size=" + fmt.Sprint(len(watchList))) @@ -887,7 +968,7 @@ func queryWatch(start_unix int64, end_unix int64) map[int64]int { } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -913,7 +994,7 @@ func queryStar(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - sess.Select("id,uid,repo_id").Table("star").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,uid,repo_id").Table("star").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) starList := make([]*Star, 0) sess.Find(&starList) @@ -926,7 +1007,7 @@ func queryStar(start_unix int64, end_unix int64) map[int64]int { } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -949,7 +1030,7 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - sess.Select("id,user_id,follow_id").Table("follow").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,user_id,follow_id").Table("follow").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) followList := make([]*Follow, 0) sess.Find(&followList) @@ -962,7 +1043,7 @@ func queryFollow(start_unix int64, end_unix int64) map[int64]int { } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -985,7 +1066,7 @@ func queryDatasetSize(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - sess.Select("id,uploader_id,size").Table("attachment").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,uploader_id,size").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) attachmentList := make([]*Attachment, 0) sess.Find(&attachmentList) @@ -998,7 +1079,7 @@ func queryDatasetSize(start_unix int64, 
end_unix int64) map[int64]int { } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -1021,7 +1102,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - sess.Select("id,owner_id,name").Table("repository").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + sess.Select("id,owner_id,name").Table("repository").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) repoList := make([]*Repository, 0) sess.Find(&repoList) log.Info("query Repository size=" + fmt.Sprint(len(repoList))) @@ -1032,7 +1113,7 @@ func queryUserCreateRepo(start_unix int64, end_unix int64) map[int64]int { resultMap[repoRecord.OwnerID] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } @@ -1111,7 +1192,7 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { var indexTotal int64 indexTotal = 0 for { - statictisSess.Select("id,u_id").Table("user_login_log").Where(cond).OrderBy("id asc").Limit(Page_SIZE, int(indexTotal)) + statictisSess.Select("id,u_id").Table("user_login_log").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) userLoginLogList := make([]*UserLoginLog, 0) statictisSess.Find(&userLoginLogList) log.Info("query user login size=" + fmt.Sprint(len(userLoginLogList))) @@ -1122,7 +1203,7 @@ func queryLoginCount(start_unix int64, end_unix int64) map[int64]int { resultMap[loginRecord.UId] += 1 } } - indexTotal += Page_SIZE + indexTotal += PAGE_SIZE if indexTotal >= count { break } diff --git a/models/user_business_struct.go b/models/user_business_struct.go new file mode 100644 index 000000000..c435c0b07 --- /dev/null +++ b/models/user_business_struct.go @@ -0,0 +1,267 @@ +package models + +import "code.gitea.io/gitea/modules/timeutil" + +type UserBusinessAnalysisCurrentYear struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} + +type UserBusinessAnalysisLast30Day struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + 
//comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} + +type UserBusinessAnalysisLastMonth struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} + +type UserBusinessAnalysisCurrentMonth struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL 
DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} + +type UserBusinessAnalysisCurrentWeek struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} + +type UserBusinessAnalysisYesterday struct { + ID int64 `xorm:"pk"` + CountDate int64 `xorm:"pk"` + //action :ActionMergePullRequest // 11 + CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"` + //action :ActionCommitRepo + CommitCount int `xorm:"NOT NULL DEFAULT 0"` + //issue // 10 + IssueCount int `xorm:"NOT NULL DEFAULT 0"` + //comment table current date + CommentCount int `xorm:"NOT NULL DEFAULT 0"` + //watch table current date + FocusRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //star table current date + StarRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //follow table + WatchedCount int `xorm:"NOT NULL DEFAULT 0"` + // user table + GiteaAgeMonth int `xorm:"NOT NULL DEFAULT 0"` + // + CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"` + //attachement table + CommitDatasetSize int `xorm:"NOT NULL DEFAULT 0"` + //0 + CommitModelCount int `xorm:"NOT NULL DEFAULT 0"` + //issue, issueassignees + SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"` + //baike + EncyclopediasCount int `xorm:"NOT NULL DEFAULT 0"` + //user + RegistDate timeutil.TimeStamp `xorm:"NOT NULL"` + //repo + CreateRepoCount int `xorm:"NOT NULL DEFAULT 0"` + //login count, from elk + LoginCount int `xorm:"NOT NULL DEFAULT 0"` + //openi index + OpenIIndex float64 `xorm:"NOT NULL DEFAULT 0"` + //user + Email string `xorm:"NOT NULL"` + //user + Name string `xorm:"NOT NULL"` + DataDate string `xorm:"NULL"` +} diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go index b8aa2e143..74dcbe7b0 100755 --- a/modules/cloudbrain/cloudbrain.go +++ b/modules/cloudbrain/cloudbrain.go @@ -82,7 +82,7 @@ func AdminOrOwnerOrJobCreaterRight(ctx *context.Context) { var jobID = ctx.Params(":jobid") job, err := models.GetCloudbrainByJobID(jobID) - + ctx.Cloudbrain = job if !isAdminOrOwnerOrJobCreater(ctx, job, err) { ctx.NotFound(ctx.Req.URL.RequestURI(), nil) @@ -94,6 +94,7 @@ func AdminOrJobCreaterRight(ctx 
*context.Context) { var jobID = ctx.Params(":jobid") job, err := models.GetCloudbrainByJobID(jobID) + ctx.Cloudbrain = job if !isAdminOrJobCreater(ctx, job, err) { ctx.NotFound(ctx.Req.URL.RequestURI(), nil) @@ -222,7 +223,7 @@ func GenerateTask(ctx *context.Context, jobName, image, command, uuid, codePath, return nil } -func RestartTask(ctx *context.Context, task *models.Cloudbrain) error { +func RestartTask(ctx *context.Context, task *models.Cloudbrain, newJobID *string) error { dataActualPath := setting.Attachment.Minio.RealPath + setting.Attachment.Minio.Bucket + "/" + setting.Attachment.Minio.BasePath + @@ -312,7 +313,7 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain) error { }, }) if err != nil { - log.Error("CreateJob failed:", err.Error(), ctx.Data["MsgID"]) + log.Error("CreateJob failed:%v", err.Error(), ctx.Data["MsgID"]) return err } if jobResult.Code != Success { @@ -321,14 +322,29 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain) error { } var jobID = jobResult.Payload["jobId"].(string) - task.JobID = jobID - task.Status = string(models.JobWaiting) - err = models.UpdateJob(task) + newTask := &models.Cloudbrain{ + Status: string(models.JobWaiting), + UserID: task.UserID, + RepoID: task.RepoID, + JobID: jobID, + JobName: task.JobName, + SubTaskName: task.SubTaskName, + JobType: task.JobType, + Type: task.Type, + Uuid: task.Uuid, + Image: task.Image, + GpuQueue: task.GpuQueue, + ResourceSpecId: task.ResourceSpecId, + ComputeResource: task.ComputeResource, + } + err = models.RestartCloudbrain(task, newTask) if err != nil { - log.Error("UpdateJob(%s) failed:%v", jobName, err.Error(), ctx.Data["MsgID"]) + log.Error("RestartCloudbrain(%s) failed:%v", jobName, err.Error(), ctx.Data["MsgID"]) return err } + *newJobID = jobID + return nil } diff --git a/modules/context/context.go b/modules/context/context.go index 5f09e190d..6cb6c267a 100755 --- a/modules/context/context.go +++ b/modules/context/context.go @@ -6,6 +6,7 @@ package context import ( + "code.gitea.io/gitea/routers/notice" "html" "html/template" "io" @@ -46,6 +47,7 @@ type Context struct { Repo *Repository Org *Organization + Cloudbrain *models.Cloudbrain } // IsUserSiteAdmin returns true if current user is a site admin @@ -345,6 +347,10 @@ func Contexter() macaron.Handler { ctx.Data["EnableSwagger"] = setting.API.EnableSwagger ctx.Data["EnableOpenIDSignIn"] = setting.Service.EnableOpenIDSignIn + notice, _ := notice.GetNewestNotice() + if notice != nil { + ctx.Data["notice"] = *notice + } c.Map(ctx) } } diff --git a/modules/context/repo.go b/modules/context/repo.go index de494c1bc..4da6e9a16 100755 --- a/modules/context/repo.go +++ b/modules/context/repo.go @@ -11,6 +11,7 @@ import ( "net/url" "path" "strings" + "time" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/cache" @@ -517,6 +518,7 @@ func RepoAssignment() macaron.Handler { return } + startTime := time.Now() tags, err := ctx.Repo.GitRepo.GetTags() if err != nil { ctx.ServerError("GetTags", err) @@ -524,11 +526,17 @@ func RepoAssignment() macaron.Handler { } ctx.Data["Tags"] = tags + duration := time.Since(startTime) + log.Info("GetTags cost: %v seconds", duration.Seconds()) brs, _, err := ctx.Repo.GitRepo.GetBranches(0, 0) if err != nil { ctx.ServerError("GetBranches", err) return } + + duration = time.Since(startTime) + log.Info("GetBranches cost: %v seconds", duration.Seconds()) + ctx.Data["Branches"] = brs ctx.Data["BranchesCount"] = len(brs) diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go 
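The models hunks above add a family of per-period statistics tables (UserBusinessAnalysisCurrentMonth, UserBusinessAnalysisLastMonth, UserBusinessAnalysisCurrentWeek, UserBusinessAnalysisYesterday and so on) that share an identical set of xorm-tagged counter columns. The sketch below is editorial illustration, not part of the patch: it shows how one such xorm-mapped table can be created and paged through. The trimmed field list, the xorm import path and the in-memory SQLite engine are assumptions chosen to keep it self-contained; in the project the shared engine lives in the models package.

package main

import (
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"xorm.io/xorm"
)

// Illustrative struct mirroring the column pattern of the UserBusinessAnalysis*
// period tables, trimmed to a handful of fields.
type UserStatsCurrentWeek struct {
	ID          int64  `xorm:"pk"`
	CountDate   int64  `xorm:"pk"`
	CommitCount int    `xorm:"NOT NULL DEFAULT 0"`
	IssueCount  int    `xorm:"NOT NULL DEFAULT 0"`
	Name        string `xorm:"NOT NULL"`
	DataDate    string `xorm:"NULL"`
}

func main() {
	// A throwaway SQLite engine keeps the sketch runnable on its own.
	engine, err := xorm.NewEngine("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	// Sync2 creates or alters the table to match the struct tags.
	if err := engine.Sync2(new(UserStatsCurrentWeek)); err != nil {
		log.Fatal(err)
	}
	// A paged, ordered listing of the kind the query_user_* endpoints return.
	var rows []UserStatsCurrentWeek
	if err := engine.Desc("commit_count").Limit(20, 0).Find(&rows); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows in current week table:", len(rows))
}

The four period structs differ only in the table xorm maps them to; keeping one Go type per table lets the default snake_case name mapping do the routing, though a single shared struct combined with engine.Table(...) would also work.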
index 06eac1665..3f7ebfd91 100755 --- a/modules/modelarts/modelarts.go +++ b/modules/modelarts/modelarts.go @@ -357,7 +357,7 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job CommitID: req.CommitID, IsLatestVersion: req.IsLatestVersion, PreVersionName: req.PreVersionName, - ComputeResource: models.GPUResource, + ComputeResource: models.NPUResource, EngineID: req.EngineID, TrainUrl: req.TrainUrl, BranchName: req.BranchName, diff --git a/modules/repository/elk_pagedata.go b/modules/repository/elk_pagedata.go index ecdbff078..f4538bf62 100644 --- a/modules/repository/elk_pagedata.go +++ b/modules/repository/elk_pagedata.go @@ -162,21 +162,10 @@ func ProjectViewInit(User string, Project string, Gte string, Lte string) (proje inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter[2].FilterMatchPhrase = &projectName //限定页面 var bool Bool - bool.Should = make([]Should, 14) - bool.Should[0].MatchPhrase.TagName = "%{[request][3]}" - bool.Should[1].MatchPhrase.TagName = "datasets?type=0" - bool.Should[2].MatchPhrase.TagName = "datasets?type=1" - bool.Should[3].MatchPhrase.TagName = "issues" - bool.Should[4].MatchPhrase.TagName = "labels" - bool.Should[5].MatchPhrase.TagName = "pulls" - bool.Should[6].MatchPhrase.TagName = "wiki" - bool.Should[7].MatchPhrase.TagName = "activity" - bool.Should[8].MatchPhrase.TagName = "cloudbrain" - bool.Should[9].MatchPhrase.TagName = "modelarts" - bool.Should[10].MatchPhrase.TagName = "blockchain" - bool.Should[11].MatchPhrase.TagName = "watchers" - bool.Should[12].MatchPhrase.TagName = "stars" - bool.Should[13].MatchPhrase.TagName = "forks" + bool.Should = make([]Should, len(setting.PROJECT_LIMIT_PAGES)) + for i, pageName := range setting.PROJECT_LIMIT_PAGES { + bool.Should[i].MatchPhrase.TagName = pageName + } inputStruct.Batch[0].Request.Params.Body.Query.BoolIn.Filter[3].Bool = &bool return inputStruct } diff --git a/modules/setting/setting.go b/modules/setting/setting.go index 2d70e47b1..e7ab0b7d2 100755 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -436,6 +436,14 @@ var ( //home page RecommentRepoAddr string + //notice config + UserNameOfNoticeRepo string + RepoNameOfNoticeRepo string + RefNameOfNoticeRepo string + TreePathOfNoticeRepo string + CacheTimeOutSecond int + CacheOn bool + //labelsystem config LabelTaskName string LabelDatasetDeleteQueue string @@ -508,12 +516,13 @@ var ( TrainJobFLAVORINFOS string //elk config - ElkUrl string - ElkUser string - ElkPassword string - Index string - TimeField string - ElkTimeFormat string + ElkUrl string + ElkUser string + ElkPassword string + Index string + TimeField string + ElkTimeFormat string + PROJECT_LIMIT_PAGES []string //nginx proxy PROXYURL string @@ -1235,6 +1244,14 @@ func NewContext() { sec = Cfg.Section("homepage") RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/") + sec = Cfg.Section("notice") + UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG") + RepoNameOfNoticeRepo = sec.Key("REPO_NAME").MustString("promote") + RefNameOfNoticeRepo = sec.Key("REF_NAME").MustString("master") + TreePathOfNoticeRepo = sec.Key("TREE_PATH").MustString("notice.json") + CacheTimeOutSecond = sec.Key("CACHE_TIME_OUT_SECOND").MustInt(60) + CacheOn = sec.Key("CACHE_ON").MustBool(true) + sec = Cfg.Section("cloudbrain") CBAuthUser = sec.Key("USER").MustString("") CBAuthPassword = sec.Key("PWD").MustString("") @@ -1308,6 +1325,7 @@ func NewContext() { Index = sec.Key("INDEX").MustString("") 
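In modules/repository/elk_pagedata.go the fourteen hard-coded MatchPhrase entries are replaced by a loop over setting.PROJECT_LIMIT_PAGES, which modules/setting/setting.go now fills by splitting the project_limit_pages key on commas. The sketch below reproduces that split-then-build pattern with reduced stand-in types; the literal page list is only an example, not the project's configured value.

package main

import (
	"fmt"
	"strings"
)

// Reduced stand-ins for the query types used in elk_pagedata.go.
type MatchPhrase struct{ TagName string }
type Should struct{ MatchPhrase MatchPhrase }

// buildShould mirrors the patched loop: one Should clause per configured page name.
func buildShould(pages []string) []Should {
	should := make([]Should, len(pages))
	for i, pageName := range pages {
		should[i].MatchPhrase.TagName = pageName
	}
	return should
}

func main() {
	// In the patch this raw value comes from the [elk] section key
	// project_limit_pages; the literal here is just an illustration.
	raw := "issues,labels,pulls,wiki,activity,cloudbrain,modelarts,watchers,stars,forks"
	for _, s := range buildShould(strings.Split(raw, ",")) {
		fmt.Println(s.MatchPhrase.TagName)
	}
}

One behaviour worth noting: strings.Split("", ",") returns a one-element slice containing the empty string, so if project_limit_pages is left unset the query gains a single empty MatchPhrase unless the caller guards against that.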
TimeField = sec.Key("TIMEFIELD").MustString(" @timestamptest") ElkTimeFormat = sec.Key("ELKTIMEFORMAT").MustString("date_time") + PROJECT_LIMIT_PAGES = strings.Split(sec.Key("project_limit_pages").MustString(""), ",") SetRadarMapConfig() diff --git a/modules/storage/minio.go b/modules/storage/minio.go index c6f704df5..7b914817d 100755 --- a/modules/storage/minio.go +++ b/modules/storage/minio.go @@ -95,9 +95,7 @@ func (m *MinioStorage) DeleteDir(dir string) error { } }() - for rErr := range m.client.RemoveObjects(m.bucket, objectsCh) { - log.Error("Error detected during deletion: ", rErr) - } + m.client.RemoveObjects(m.bucket, objectsCh) return nil } diff --git a/modules/util/util.go b/modules/util/util.go old mode 100644 new mode 100755 index 6d02b5f52..017277281 --- a/modules/util/util.go +++ b/modules/util/util.go @@ -6,6 +6,7 @@ package util import ( "bytes" + "strconv" "strings" ) @@ -100,3 +101,12 @@ func NormalizeEOL(input []byte) []byte { } return tmp[:pos] } + +func AddZero(t int64) (m string) { + if t < 10 { + m = "0" + strconv.FormatInt(t, 10) + return m + } else { + return strconv.FormatInt(t, 10) + } +} diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index 0d7daec97..beb968449 100644 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -455,7 +455,13 @@ static.openiindex=OpenI Index static.registdate=Regist Date static.countdate=Count Date static.all=All - +static.public.user_business_analysis_current_month=Current_Month +static.public.user_business_analysis_current_week=Current_Week +static.public.user_business_analysis_current_year=Current_Year +static.public.user_business_analysis_last30_day=Last_30_day +static.public.user_business_analysis_last_month=Last_Month +static.public.user_business_analysis_yesterday=Yesterday +static.public.user_business_analysis_all=All [settings] profile = Profile account = Account @@ -875,6 +881,7 @@ modelarts.current_version=Current version modelarts.parent_version=Parent Version modelarts.run_version=Run Version modelarts.train_job.compute_node=Compute Node +modelarts.create_model = Create Model modelarts.train_job.basic_info=Basic Info @@ -2690,3 +2697,5 @@ foot.member_news = Member news foot.industry_advisory = Industry Advisory foot.help = help foot.copyright= Copyright: New Generation Artificial Intelligence Open Source Open Platform (OpenI) +Platform_Tutorial=Platform Tutorial +foot.advice_feedback=advice feedback diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index de21cf572..6dd44d848 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -459,6 +459,13 @@ static.openiindex=OpenI指数 static.registdate=用户注册时间 static.countdate=系统统计时间 static.all=所有 +static.public.user_business_analysis_current_month=本月 +static.public.user_business_analysis_current_week=本周 +static.public.user_business_analysis_current_year=今年 +static.public.user_business_analysis_last30_day=近30天 +static.public.user_business_analysis_last_month=上月 +static.public.user_business_analysis_yesterday=昨天 +static.public.user_business_analysis_all=所有 [settings] profile=个人信息 account=账号 @@ -880,6 +887,7 @@ modelarts.modify=修改 modelarts.current_version=当前版本 modelarts.parent_version=父版本 modelarts.run_version=运行版本 +modelarts.create_model=创建模型 @@ -1461,7 +1469,7 @@ milestones.open_tab=%d 开启中 milestones.close_tab=%d 已关闭 milestones.closed=于 %s关闭 milestones.no_due_date=暂无截止日期 -milestones.open=开启中 +milestones.open=开启 milestones.close=关闭 
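modules/util/util.go gains AddZero, and the ModelArts API handler (and, later in the patch, SyncCloudbrainStatus) uses it to render a millisecond duration as HH:MM:SS. The wrapper below is hypothetical, since the patch repeats the expression inline at each call site, but it shows the exact arithmetic being applied.

package main

import (
	"fmt"
	"strconv"
)

// AddZero left-pads values below ten with a zero, as in modules/util/util.go.
func AddZero(t int64) string {
	if t < 10 {
		return "0" + strconv.FormatInt(t, 10)
	}
	return strconv.FormatInt(t, 10)
}

// formatTrainDuration is an illustrative wrapper around the expression the
// patch uses at its call sites: milliseconds to "HH:MM:SS".
func formatTrainDuration(ms int64) string {
	if ms == 0 {
		return "00:00:00"
	}
	return AddZero(ms/3600000) + ":" + AddZero(ms%3600000/60000) + ":" + AddZero(ms%60000/1000)
}

func main() {
	fmt.Println(formatTrainDuration(0))       // 00:00:00
	fmt.Println(formatTrainDuration(3723000)) // 1h 2m 3s -> 01:02:03
}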
milestones.new_subheader=里程碑组织任务,合并请求和跟踪进度。 milestones.completeness=%d%% 完成 @@ -2698,3 +2706,5 @@ foot.member_news=成员动态 foot.industry_advisory=行业资讯 foot.help=帮助 foot.copyright= 版权所有:新一代人工智能开源开放平台(OpenI) +Platform_Tutorial=新手指引 +foot.advice_feedback = 意见反馈 diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 518c63e4f..dcea46ed6 100755 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -524,7 +524,7 @@ func RegisterRoutes(m *macaron.Macaron) { Get(notify.GetThread). Patch(notify.ReadThread) }, reqToken()) - + operationReq := context.Toggle(&context.ToggleOptions{SignInRequired: true, OperationRequired: true}) //Project board m.Group("/projectboard", func() { @@ -544,7 +544,13 @@ func RegisterRoutes(m *macaron.Macaron) { }, operationReq) m.Get("/query_user_static_page", operationReq, repo_ext.QueryUserStaticDataPage) - + m.Get("/query_user_current_month", operationReq, repo_ext.QueryUserStaticCurrentMonth) + m.Get("/query_user_current_week", operationReq, repo_ext.QueryUserStaticCurrentWeek) + m.Get("/query_user_current_year", operationReq, repo_ext.QueryUserStaticCurrentYear) + m.Get("/query_user_last30_day", operationReq, repo_ext.QueryUserStaticLast30Day) + m.Get("/query_user_last_month", operationReq, repo_ext.QueryUserStaticLastMonth) + m.Get("/query_user_yesterday", operationReq, repo_ext.QueryUserStaticYesterday) + m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll) // Users m.Group("/users", func() { m.Get("/search", user.Search) diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go index 283696007..05c31b5f5 100755 --- a/routers/api/v1/repo/modelarts.go +++ b/routers/api/v1/repo/modelarts.go @@ -6,6 +6,7 @@ package repo import ( + "code.gitea.io/gitea/modules/util" "net/http" "strconv" "strings" @@ -106,7 +107,7 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) { job.TrainJobDuration = result.TrainJobDuration if result.Duration != 0 { - job.TrainJobDuration = addZero(result.Duration/3600000) + ":" + addZero(result.Duration%3600000/60000) + ":" + addZero(result.Duration%60000/1000) + job.TrainJobDuration = util.AddZero(result.Duration/3600000) + ":" + util.AddZero(result.Duration%3600000/60000) + ":" + util.AddZero(result.Duration%60000/1000) } else { job.TrainJobDuration = "00:00:00" @@ -125,15 +126,6 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) { } -func addZero(t int64) (m string) { - if t < 10 { - m = "0" + strconv.FormatInt(t, 10) - return m - } else { - return strconv.FormatInt(t, 10) - } -} - func TrainJobGetLog(ctx *context.APIContext) { var ( err error diff --git a/routers/notice/notice.go b/routers/notice/notice.go new file mode 100644 index 000000000..f1e996e2d --- /dev/null +++ b/routers/notice/notice.go @@ -0,0 +1,87 @@ +package notice + +import ( + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" + "encoding/json" + "github.com/patrickmn/go-cache" + "time" +) + +var noticeCache = cache.New(2*time.Minute, 1*time.Minute) + +const ( + NOTICE_CACHE_KEY = "notice" +) + +type Notice struct { + Title string + Link string + Visible int //0 invisible, 1 visible + CommitId string +} + +var lock int32 = 0 + +func GetNewestNotice() (*Notice, error) { + defer func() { + if err := recover(); err != nil { + log.Error("recover error", err) + } + }() + + var notice *Notice + var err error + if setting.CacheOn { + notice, err = getNewestNoticeFromCacheAndDisk() + } else { + notice, err = getNewestNoticeFromDisk() + } + + if err != nil { + 
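routers/api/v1/api.go above registers seven new statistics endpoints (query_user_current_month through query_user_all) inside the existing /projectboard group, all guarded by the same operationReq toggle. A stripped-down sketch of that Macaron grouping pattern follows; the handlers and the operationRequired middleware here are stubs, not the project's implementations.

package main

import (
	"gitea.com/macaron/macaron"
)

// operationRequired stands in for the operationReq toggle in the patch, which
// requires a signed-in user with operation rights before the group is reached.
func operationRequired(ctx *macaron.Context) {
	// real access checks would abort the request here when they fail
}

func main() {
	m := macaron.Classic()
	// The new per-period endpoints live under the existing /projectboard group
	// and share its group-level middleware.
	m.Group("/projectboard", func() {
		m.Get("/query_user_yesterday", func() string { return "yesterday stats" })
		m.Get("/query_user_current_week", func() string { return "current-week stats" })
		m.Get("/query_user_all", func() string { return "all-time stats" })
	}, operationRequired)
	m.Run() // listens on 0.0.0.0:4000 by default
}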
return nil, err + } + return notice, nil +} + +func getNoticeTimeout() time.Duration { + return time.Duration(setting.CacheTimeOutSecond) * time.Second +} + +func getNewestNoticeFromDisk() (*Notice, error) { + log.Debug("Get notice from disk") + repoFile, err := models.ReadLatestFileInRepo(setting.UserNameOfNoticeRepo, setting.RepoNameOfNoticeRepo, setting.RefNameOfNoticeRepo, setting.TreePathOfNoticeRepo) + if err != nil { + log.Error("GetNewestNotice failed, error=%v", err) + return nil, err + } + notice := &Notice{} + json.Unmarshal(repoFile.Content, notice) + if notice.Title == "" { + return nil, err + } + notice.CommitId = repoFile.CommitId + return notice, nil +} + +func getNewestNoticeFromCacheAndDisk() (*Notice, error) { + v, success := noticeCache.Get(NOTICE_CACHE_KEY) + if success { + log.Debug("Get notice from cache,value = %v", v) + if v == nil { + return nil, nil + } + n := v.(*Notice) + return n, nil + } + + notice, err := getNewestNoticeFromDisk() + if err != nil { + log.Error("GetNewestNotice failed, error=%v", err) + noticeCache.Set(NOTICE_CACHE_KEY, nil, 30*time.Second) + return nil, err + } + noticeCache.Set(NOTICE_CACHE_KEY, notice, getNoticeTimeout()) + return notice, nil +} diff --git a/routers/repo/ai_model_manage.go b/routers/repo/ai_model_manage.go index 669bdf9fa..845dbbc6b 100644 --- a/routers/repo/ai_model_manage.go +++ b/routers/repo/ai_model_manage.go @@ -99,6 +99,18 @@ func saveModelByParameters(jobId string, versionName string, name string, versio //udpate status and version count models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0) } + var units []models.RepoUnit + var deleteUnitTypes []models.UnitType + units = append(units, models.RepoUnit{ + RepoID: ctx.Repo.Repository.ID, + Type: models.UnitTypeModelManage, + Config: &models.ModelManageConfig{ + EnableModelManage: true, + }, + }) + deleteUnitTypes = append(deleteUnitTypes, models.UnitTypeModelManage) + + models.UpdateRepositoryUnits(ctx.Repo.Repository, units, deleteUnitTypes) log.Info("save model end.") @@ -130,10 +142,13 @@ func SaveModel(ctx *context.Context) { version := ctx.Query("Version") label := ctx.Query("Label") description := ctx.Query("Description") + trainTaskCreate := ctx.QueryBool("trainTaskCreate") - if !ctx.Repo.CanWrite(models.UnitTypeModelManage) { - ctx.ServerError("No right.", errors.New(ctx.Tr("repo.model_noright"))) - return + if !trainTaskCreate { + if !ctx.Repo.CanWrite(models.UnitTypeModelManage) { + ctx.ServerError("No right.", errors.New(ctx.Tr("repo.model_noright"))) + return + } } if JobId == "" || VersionName == "" { @@ -474,6 +489,23 @@ func ShowOneVersionOtherModel(ctx *context.Context) { func ShowModelTemplate(ctx *context.Context) { ctx.Data["isModelManage"] = true + repoId := ctx.Repo.Repository.ID + Type := -1 + _, count, _ := models.QueryModel(&models.AiModelQueryOptions{ + ListOptions: models.ListOptions{ + Page: 1, + PageSize: 2, + }, + RepoID: repoId, + Type: Type, + New: MODEL_LATEST, + }) + ctx.Data["MODEL_COUNT"] = count + + _, trainCount, _ := models.QueryModelTrainJobList(repoId) + log.Info("query train count=" + fmt.Sprint(trainCount)) + + ctx.Data["TRAIN_COUNT"] = trainCount ctx.HTML(200, tplModelManageIndex) } @@ -586,3 +618,67 @@ func ModifyModelInfo(ctx *context.Context) { } } + +func QueryModelListForPredict(ctx *context.Context) { + repoId := ctx.Repo.Repository.ID + modelResult, count, err := models.QueryModel(&models.AiModelQueryOptions{ + ListOptions: models.ListOptions{ + Page: -1, + PageSize: -1, + }, + RepoID: repoId, + Type: -1, 
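routers/notice/notice.go, added above, resolves the newest notice through patrickmn/go-cache: it serves from the cache when possible, otherwise reads notice.json from the configured repository, and on failure caches a nil entry for 30 seconds so a broken source is not re-queried on every page view. A reduced, self-contained version of that read-through pattern is sketched below; loadFromDisk is a stub standing in for models.ReadLatestFileInRepo, and the timeouts are example values rather than the configured CACHE_TIME_OUT_SECOND.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

type Notice struct {
	Title    string
	Link     string
	CommitId string
}

var noticeCache = cache.New(2*time.Minute, 1*time.Minute)

const noticeCacheKey = "notice"

// loadFromDisk is a stub for the real lookup, which reads notice.json from a
// configured repository via models.ReadLatestFileInRepo.
func loadFromDisk() (*Notice, error) {
	return nil, errors.New("notice source unavailable in this sketch")
}

// getNotice mirrors getNewestNoticeFromCacheAndDisk: answer from the cache when
// possible, otherwise load from disk, and briefly cache a nil entry on failure.
func getNotice() (*Notice, error) {
	if v, found := noticeCache.Get(noticeCacheKey); found {
		if v == nil {
			return nil, nil
		}
		return v.(*Notice), nil
	}
	n, err := loadFromDisk()
	if err != nil {
		noticeCache.Set(noticeCacheKey, nil, 30*time.Second)
		return nil, err
	}
	noticeCache.Set(noticeCacheKey, n, 60*time.Second)
	return n, nil
}

func main() {
	if n, err := getNotice(); err != nil {
		fmt.Println("no notice:", err)
	} else if n != nil {
		fmt.Println("newest notice:", n.Title)
	}
}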
+ New: -1, + }) + if err != nil { + ctx.ServerError("Cloudbrain", err) + return + } + log.Info("query return count=" + fmt.Sprint(count)) + + nameList := make([]string, 0) + + nameMap := make(map[string][]*models.AiModelManage) + for _, model := range modelResult { + if _, value := nameMap[model.Name]; !value { + models := make([]*models.AiModelManage, 0) + models = append(models, model) + nameMap[model.Name] = models + nameList = append(nameList, model.Name) + } else { + nameMap[model.Name] = append(nameMap[model.Name], model) + } + } + + mapInterface := make(map[string]interface{}) + mapInterface["nameList"] = nameList + mapInterface["nameMap"] = nameMap + ctx.JSON(http.StatusOK, mapInterface) +} + +func QueryModelFileForPredict(ctx *context.Context) { + id := ctx.Query("ID") + model, err := models.QueryModelById(id) + if err != nil { + log.Error("no such model!", err.Error()) + ctx.ServerError("no such model:", err) + return + } + prefix := model.Path[len(setting.Bucket)+1:] + fileinfos, err := storage.GetAllObjectByBucketAndPrefix(setting.Bucket, prefix) + ctx.JSON(http.StatusOK, fileinfos) +} + +func QueryOneLevelModelFile(ctx *context.Context) { + id := ctx.Query("ID") + parentDir := ctx.Query("parentDir") + model, err := models.QueryModelById(id) + if err != nil { + log.Error("no such model!", err.Error()) + ctx.ServerError("no such model:", err) + return + } + prefix := model.Path[len(setting.Bucket)+1:] + fileinfos, err := storage.GetOneLevelAllObjectUnderDir(setting.Bucket, prefix, parentDir) + ctx.JSON(http.StatusOK, fileinfos) +} diff --git a/routers/repo/blame.go b/routers/repo/blame.go index 00ef9a99e..6ab357c09 100644 --- a/routers/repo/blame.go +++ b/routers/repo/blame.go @@ -6,13 +6,6 @@ package repo import ( "bytes" - "container/list" - "fmt" - "html" - gotemplate "html/template" - "net/url" - "strings" - "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/context" @@ -22,6 +15,12 @@ import ( "code.gitea.io/gitea/modules/markup" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" + "container/list" + "fmt" + "html" + gotemplate "html/template" + "net/url" + "strings" ) const ( @@ -35,7 +34,52 @@ func RefBlame(ctx *context.Context) { ctx.NotFound("Blame FileName", nil) return } - + //get repo contributors info + contributors, err := git.GetContributors(ctx.Repo.Repository.RepoPath(), ctx.Repo.BranchName) + if err == nil && contributors != nil { + var contributorInfos []*ContributorInfo + contributorInfoHash := make(map[string]*ContributorInfo) + count := 0 + for _, c := range contributors { + if count >= 25 { + continue + } + if strings.Compare(c.Email, "") == 0 { + continue + } + // get user info from committer email + user, err := models.GetUserByActivateEmail(c.Email) + if err == nil { + // committer is system user, get info through user's primary email + if existedContributorInfo, ok := contributorInfoHash[user.Email]; ok { + // existed: same primary email, different committer name + existedContributorInfo.CommitCnt += c.CommitCnt + } else { + // new committer info + var newContributor = &ContributorInfo{ + user, user.RelAvatarLink(), user.Name, user.Email, c.CommitCnt, + } + count++ + contributorInfos = append(contributorInfos, newContributor) + contributorInfoHash[user.Email] = newContributor + } + } else { + // committer is not system user + if existedContributorInfo, ok := contributorInfoHash[c.Email]; ok { + // existed: same primary email, different committer name + existedContributorInfo.CommitCnt 
+= c.CommitCnt + } else { + var newContributor = &ContributorInfo{ + user, "", "", c.Email, c.CommitCnt, + } + count++ + contributorInfos = append(contributorInfos, newContributor) + contributorInfoHash[c.Email] = newContributor + } + } + } + ctx.Data["ContributorInfo"] = contributorInfos + } userName := ctx.Repo.Owner.Name repoName := ctx.Repo.Repository.Name commitID := ctx.Repo.CommitID diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index ab3303408..ca1d08101 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -14,18 +14,17 @@ import ( "strings" "time" - "code.gitea.io/gitea/modules/modelarts" - - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/storage" - "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/cloudbrain" "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/modelarts" "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/storage" + "code.gitea.io/gitea/modules/util" ) const ( @@ -220,7 +219,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { gpuType = gpuInfo.Value } } - downloadRateCode(repo, jobName, setting.BenchmarkOwner, setting.BrainScoreName, benchmarkPath, form.BenchmarkCategory, gpuType) + downloadRateCode(repo, jobName, setting.BenchmarkOwner, setting.BenchmarkName, benchmarkPath, form.BenchmarkCategory, gpuType) uploadCodeToMinio(benchmarkPath+"/", jobName, cloudbrain.BenchMarkMountPath+"/") } @@ -252,17 +251,10 @@ func CloudBrainRestart(ctx *context.Context) { var jobID = ctx.Params(":jobid") var resultCode = "0" var errorMsg = "" - var status = "" + var status = string(models.JobWaiting) + task := ctx.Cloudbrain for { - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - log.Error("GetCloudbrainByJobID(%s) failed:%v", jobID, err.Error(), ctx.Data["MsgID"]) - resultCode = "-1" - errorMsg = "system error" - break - } - if task.Status != string(models.JobStopped) && task.Status != string(models.JobSucceeded) && task.Status != string(models.JobFailed) { log.Error("the job(%s) is not stopped", task.JobName, ctx.Data["MsgID"]) resultCode = "-1" @@ -277,7 +269,7 @@ func CloudBrainRestart(ctx *context.Context) { break } - if !ctx.IsSigned || (ctx.User.ID != task.UserID && !ctx.IsUserSiteAdmin()){ + if !ctx.IsSigned || (ctx.User.ID != task.UserID && !ctx.IsUserSiteAdmin()) { log.Error("the user has no right ro restart the job", task.JobName, ctx.Data["MsgID"]) resultCode = "-1" errorMsg = "you have no right to restart the job" @@ -299,7 +291,7 @@ func CloudBrainRestart(ctx *context.Context) { } } - err = cloudbrain.RestartTask(ctx, task) + err = cloudbrain.RestartTask(ctx, task, &jobID) if err != nil { log.Error("RestartTask failed:%v", err.Error(), ctx.Data["MsgID"]) resultCode = "-1" @@ -307,9 +299,6 @@ func CloudBrainRestart(ctx *context.Context) { break } - status = task.Status - jobID = task.JobID - break } @@ -370,46 +359,19 @@ func CloudBrainShow(ctx *context.Context) { } func CloudBrainDebug(ctx *context.Context) { - var jobID = ctx.Params(":jobid") - if !ctx.IsSigned { - log.Error("the user has not signed in") - ctx.Error(http.StatusForbidden, "", "the user has not signed in") - return - } - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - ctx.ServerError("GetCloudbrainByJobID failed", err) - return - } - - debugUrl := setting.DebugServerHost + "jpylab_" + 
task.JobID + "_" + task.SubTaskName + debugUrl := setting.DebugServerHost + "jpylab_" + ctx.Cloudbrain.JobID + "_" + ctx.Cloudbrain.SubTaskName ctx.Redirect(debugUrl) } func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrainForm) { - var jobID = ctx.Params(":jobid") - if !ctx.IsSigned { - log.Error("the user has not signed in") - ctx.Error(http.StatusForbidden, "", "the user has not signed in") - return - } - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - ctx.JSON(200, map[string]string{ - "result_code": "-1", - "error_msg": "GetCloudbrainByJobID failed", - }) - return - } - - err = cloudbrain.CommitImage(jobID, models.CommitImageParams{ - Ip: task.ContainerIp, - TaskContainerId: task.ContainerID, + err := cloudbrain.CommitImage(ctx.Cloudbrain.JobID, models.CommitImageParams{ + Ip: ctx.Cloudbrain.ContainerIp, + TaskContainerId: ctx.Cloudbrain.ContainerID, ImageDescription: form.Description, ImageTag: form.Tag, }) if err != nil { - log.Error("CommitImage(%s) failed:%v", task.JobName, err.Error(), ctx.Data["msgID"]) + log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"]) ctx.JSON(200, map[string]string{ "result_code": "-1", "error_msg": "CommitImage failed", @@ -429,15 +391,8 @@ func CloudBrainStop(ctx *context.Context) { var errorMsg = "" var status = "" + task := ctx.Cloudbrain for { - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - log.Error("GetCloudbrainByJobID(%s) failed:%v", task.JobName, err, ctx.Data["msgID"]) - resultCode = "-1" - errorMsg = "system error" - break - } - if task.Status == string(models.JobStopped) || task.Status == string(models.JobFailed) { log.Error("the job(%s) has been stopped", task.JobName, ctx.Data["msgID"]) resultCode = "-1" @@ -445,7 +400,7 @@ func CloudBrainStop(ctx *context.Context) { break } - err = cloudbrain.StopJob(jobID) + err := cloudbrain.StopJob(jobID) if err != nil { log.Error("StopJob(%s) failed:%v", task.JobName, err, ctx.Data["msgID"]) resultCode = "-1" @@ -555,12 +510,7 @@ func logErrorAndUpdateJobStatus(err error, taskInfo *models.Cloudbrain) { } func CloudBrainDel(ctx *context.Context) { - var jobID = ctx.Params(":jobid") - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - ctx.ServerError("GetCloudbrainByJobID failed", err) - return - } + task := ctx.Cloudbrain if task.Status != string(models.JobStopped) && task.Status != string(models.JobFailed) { log.Error("the job(%s) has not been stopped", task.JobName, ctx.Data["msgID"]) @@ -568,7 +518,7 @@ func CloudBrainDel(ctx *context.Context) { return } - err = models.DeleteJob(task) + err := models.DeleteJob(task) if err != nil { ctx.ServerError("DeleteJob failed", err) return @@ -949,6 +899,13 @@ func SyncCloudbrainStatus() { task.Duration = result.Duration task.TrainJobDuration = result.TrainJobDuration + if result.Duration != 0 { + task.TrainJobDuration = util.AddZero(result.Duration/3600000) + ":" + util.AddZero(result.Duration%3600000/60000) + ":" + util.AddZero(result.Duration%60000/1000) + + } else { + task.TrainJobDuration = "00:00:00" + } + err = models.UpdateJob(task) if err != nil { log.Error("UpdateJob(%s) failed:%v", task.JobName, err) diff --git a/routers/repo/issue.go b/routers/repo/issue.go index 7ab5eb283..42a6b9609 100755 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -13,6 +13,7 @@ import ( "net/http" "strconv" "strings" + "time" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" @@ -336,6 +337,7 @@ func issues(ctx 
*context.Context, milestoneID int64, isPullOption util.OptionalB // Issues render issues page func Issues(ctx *context.Context) { + startTime := time.Now() isPullList := ctx.Params(":type") == "pulls" if isPullList { MustAllowPulls(ctx) @@ -366,6 +368,9 @@ func Issues(ctx *context.Context) { ctx.Data["CanWriteIssuesOrPulls"] = ctx.Repo.CanWriteIssuesOrPulls(isPullList) + duration := time.Since(startTime) + log.Info("Issues cost: %v seconds", duration.Seconds()) + ctx.HTML(200, tplIssues) } diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index 31b0b196b..7a952394e 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -49,12 +49,9 @@ func DebugJobIndex(ctx *context.Context) { page = 1 } debugType := modelarts.DebugType - jobType := string(models.JobTypeDebug) if debugListType == models.GPUResource { debugType = models.TypeCloudBrainOne - jobType = "" - } - if debugListType == models.NPUResource { + } else if debugListType == models.NPUResource { debugType = models.TypeCloudBrainTwo } @@ -63,9 +60,10 @@ func DebugJobIndex(ctx *context.Context) { Page: page, PageSize: setting.UI.IssuePagingNum, }, - RepoID: repo.ID, - Type: debugType, - JobType: jobType, + RepoID: repo.ID, + Type: debugType, + JobTypeNot: true, + JobType: string(models.JobTypeTrain), }) if err != nil { ctx.ServerError("Get debugjob faild:", err) @@ -142,6 +140,20 @@ func NotebookCreate(ctx *context.Context, form auth.CreateModelArtsNotebookForm) return } } + _, err = models.GetCloudbrainByName(jobName) + if err == nil { + log.Error("the job name did already exist", ctx.Data["MsgID"]) + cloudBrainNewDataPrepare(ctx) + ctx.RenderWithErr("the job name did already exist", tplModelArtsNotebookNew, &form) + return + } else { + if !models.IsErrJobNotExist(err) { + log.Error("system error, %v", err, ctx.Data["MsgID"]) + cloudBrainNewDataPrepare(ctx) + ctx.RenderWithErr("system error", tplModelArtsNotebookNew, &form) + return + } + } err = modelarts.GenerateTask(ctx, jobName, uuid, description, flavor) if err != nil { @@ -194,11 +206,6 @@ func NotebookShow(ctx *context.Context) { func NotebookDebug(ctx *context.Context) { var jobID = ctx.Params(":jobid") - _, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - ctx.ServerError("GetCloudbrainByJobID failed", err) - return - } result, err := modelarts.GetJob(jobID) if err != nil { @@ -327,11 +334,7 @@ func NotebookManage(ctx *context.Context) { func NotebookDel(ctx *context.Context) { var jobID = ctx.Params(":jobid") - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - ctx.ServerError("GetCloudbrainByJobID failed", err) - return - } + task := ctx.Cloudbrain if task.Status != string(models.ModelArtsCreateFailed) && task.Status != string(models.ModelArtsStartFailed) && task.Status != string(models.ModelArtsStopped) { log.Error("the job(%s) has not been stopped", task.JobName) @@ -339,7 +342,7 @@ func NotebookDel(ctx *context.Context) { return } - _, err = modelarts.DelNotebook(jobID) + _, err := modelarts.DelNotebook(jobID) if err != nil { log.Error("DelJob(%s) failed:%v", task.JobName, err.Error()) ctx.ServerError("DelJob failed", err) @@ -371,6 +374,7 @@ func TrainJobIndex(ctx *context.Context) { }, RepoID: repo.ID, Type: models.TypeCloudBrainTwo, + JobTypeNot: false, JobType: string(models.JobTypeTrain), IsLatestVersion: modelarts.IsLatestVersion, }) @@ -1252,6 +1256,10 @@ func paramCheckCreateTrainJob(form auth.CreateModelArtsTrainJobForm) error { log.Error("the WorkServerNumber(%d) must be in (1,25)", 
form.WorkServerNumber) return errors.New("计算节点数必须在1-25之间") } + if form.BranchName == "" { + log.Error("the branch must not be null!", form.BranchName) + return errors.New("代码分支不能为空!") + } return nil } @@ -1422,14 +1430,9 @@ func TrainJobDel(ctx *context.Context) { func TrainJobStop(ctx *context.Context) { var jobID = ctx.Params(":jobid") - task, err := models.GetCloudbrainByJobID(jobID) - if err != nil { - log.Error("GetCloudbrainByJobID(%s) failed:%v", task.JobName, err.Error()) - ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobIndex, nil) - return - } + task := ctx.Cloudbrain - _, err = modelarts.StopTrainJob(jobID, strconv.FormatInt(task.VersionID, 10)) + _, err := modelarts.StopTrainJob(jobID, strconv.FormatInt(task.VersionID, 10)) if err != nil { log.Error("StopTrainJob(%s) failed:%v", task.JobName, err.Error()) ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobIndex, nil) diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go index 42189c57f..7df384cc4 100755 --- a/routers/repo/user_data_analysis.go +++ b/routers/repo/user_data_analysis.go @@ -15,6 +15,132 @@ import ( "github.com/360EntSecGroup-Skylar/excelize/v2" ) +const ( + PAGE_SIZE = 2000 +) + +func queryUserDataPage(ctx *context.Context, tableName string, queryObj interface{}) { + page := ctx.QueryInt("page") + if page <= 0 { + page = 1 + } + pageSize := ctx.QueryInt("pageSize") + if pageSize <= 0 { + pageSize = setting.UI.IssuePagingNum + } + userName := ctx.Query("userName") + IsReturnFile := ctx.QueryBool("IsReturnFile") + + if IsReturnFile { + //writer exec file. + xlsx := excelize.NewFile() + sheetName := ctx.Tr("user.static.sheetname") + index := xlsx.NewSheet(sheetName) + xlsx.DeleteSheet("Sheet1") + dataHeader := map[string]string{ + "A1": ctx.Tr("user.static.id"), + "B1": ctx.Tr("user.static.name"), + "C1": ctx.Tr("user.static.codemergecount"), + "D1": ctx.Tr("user.static.commitcount"), + "E1": ctx.Tr("user.static.issuecount"), + "F1": ctx.Tr("user.static.commentcount"), + "G1": ctx.Tr("user.static.focusrepocount"), + "H1": ctx.Tr("user.static.starrepocount"), + "I1": ctx.Tr("user.static.logincount"), + "J1": ctx.Tr("user.static.watchedcount"), + "K1": ctx.Tr("user.static.commitcodesize"), + "L1": ctx.Tr("user.static.solveissuecount"), + "M1": ctx.Tr("user.static.encyclopediascount"), + "N1": ctx.Tr("user.static.createrepocount"), + "O1": ctx.Tr("user.static.openiindex"), + "P1": ctx.Tr("user.static.registdate"), + "Q1": ctx.Tr("user.static.countdate"), + } + for k, v := range dataHeader { + //设置单元格的值 + xlsx.SetCellValue(sheetName, k, v) + } + _, count := models.QueryUserStaticDataByTableName(1, 1, tableName, queryObj, userName) + var indexTotal int64 + indexTotal = 0 + for { + re, _ := models.QueryUserStaticDataByTableName(int(indexTotal), PAGE_SIZE, tableName, queryObj, "") + log.Info("return count=" + fmt.Sprint(count)) + for i, userRecord := range re { + rows := fmt.Sprint(i + 2) + xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID) + xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name) + xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount) + xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount) + xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount) + xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount) + xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount) + xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount) + xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount) + xlsx.SetCellValue(sheetName, 
"J"+rows, userRecord.WatchedCount) + xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize) + xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount) + xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount) + xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount) + xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex)) + + formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05") + xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3]) + + formatTime = userRecord.DataDate + xlsx.SetCellValue(sheetName, "Q"+rows, formatTime+" 00:01") + } + + //设置默认打开的表单 + xlsx.SetActiveSheet(index) + filename := sheetName + "_" + ctx.Tr("user.static."+tableName) + ".xlsx" + ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(filename)) + ctx.Resp.Header().Set("Content-Type", "application/octet-stream") + if _, err := xlsx.WriteTo(ctx.Resp); err != nil { + log.Info("writer exel error." + err.Error()) + } + indexTotal += PAGE_SIZE + if indexTotal >= count { + break + } + } + } else { + re, count := models.QueryUserStaticDataByTableName((page-1)*pageSize, pageSize, tableName, queryObj, userName) + mapInterface := make(map[string]interface{}) + mapInterface["data"] = re + mapInterface["count"] = count + ctx.JSON(http.StatusOK, mapInterface) + } +} + +func QueryUserStaticCurrentMonth(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_current_month", new(models.UserBusinessAnalysisCurrentMonth)) +} + +func QueryUserStaticCurrentWeek(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_current_week", new(models.UserBusinessAnalysisCurrentWeek)) +} + +func QueryUserStaticCurrentYear(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_current_year", new(models.UserBusinessAnalysisCurrentYear)) +} + +func QueryUserStaticLast30Day(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_last30_day", new(models.UserBusinessAnalysisLast30Day)) +} + +func QueryUserStaticLastMonth(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_last_month", new(models.UserBusinessAnalysisLastMonth)) +} + +func QueryUserStaticYesterday(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_yesterday", new(models.UserBusinessAnalysisYesterday)) +} + +func QueryUserStaticAll(ctx *context.Context) { + queryUserDataPage(ctx, "public.user_business_analysis_all", new(models.UserBusinessAnalysisAll)) +} + func QueryUserStaticDataPage(ctx *context.Context) { startDate := ctx.Query("startDate") endDate := ctx.Query("endDate") diff --git a/routers/routes/routes.go b/routers/routes/routes.go index 95931c723..439c17a92 100755 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -969,20 +969,20 @@ func RegisterRoutes(m *macaron.Macaron) { m.Group("/:jobid", func() { m.Get("", reqRepoCloudBrainReader, repo.CloudBrainShow) m.Get("/debug", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDebug) - m.Post("/commit_image", cloudbrain.AdminOrOwnerOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage) + m.Post("/commit_image", cloudbrain.AdminOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage) m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainStop) m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDel) - m.Post("/restart", 
reqRepoCloudBrainWriter, repo.CloudBrainRestart) + m.Post("/restart", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainRestart) m.Get("/rate", reqRepoCloudBrainReader, repo.GetRate) m.Get("/models", reqRepoCloudBrainReader, repo.CloudBrainShowModels) - m.Get("/download_model", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDownloadModel) + m.Get("/download_model", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDownloadModel) }) m.Get("/create", reqRepoCloudBrainWriter, repo.CloudBrainNew) m.Post("/create", reqRepoCloudBrainWriter, bindIgnErr(auth.CreateCloudBrainForm{}), repo.CloudBrainCreate) }, context.RepoRef()) m.Group("/modelmanage", func() { m.Post("/create_model", reqRepoModelManageWriter, repo.SaveModel) - m.Post("/create_new_model", reqRepoModelManageWriter, repo.SaveNewNameModel) + m.Post("/create_new_model", repo.SaveNewNameModel) m.Delete("/delete_model", repo.DeleteModel) m.Put("/modify_model", repo.ModifyModelInfo) m.Get("/show_model", reqRepoModelManageReader, repo.ShowModelTemplate) @@ -992,6 +992,9 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/show_model_child_api", repo.ShowOneVersionOtherModel) m.Get("/query_train_job", reqRepoCloudBrainReader, repo.QueryTrainJobList) m.Get("/query_train_job_version", reqRepoCloudBrainReader, repo.QueryTrainJobVersionList) + m.Get("/query_model_for_predict", reqRepoCloudBrainReader, repo.QueryModelListForPredict) + m.Get("/query_modelfile_for_predict", reqRepoCloudBrainReader, repo.QueryModelFileForPredict) + m.Get("/query_onelevel_modelfile", reqRepoCloudBrainReader, repo.QueryOneLevelModelFile) m.Group("/:ID", func() { m.Get("", repo.ShowSingleModel) m.Get("/downloadsingle", repo.DownloadSingleModelFile) @@ -1021,7 +1024,7 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", reqRepoCloudBrainReader, repo.TrainJobShow) m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.TrainJobStop) m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.TrainJobDel) - m.Get("/model_download", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.ModelDownload) + m.Get("/model_download", cloudbrain.AdminOrJobCreaterRight, repo.ModelDownload) m.Get("/create_version", cloudbrain.AdminOrJobCreaterRight, repo.TrainJobNewVersion) m.Post("/create_version", cloudbrain.AdminOrJobCreaterRight, bindIgnErr(auth.CreateModelArtsTrainJobForm{}), repo.TrainJobCreateVersion) }) diff --git a/services/repository/repository.go b/services/repository/repository.go index f50b98b64..eafad988e 100644 --- a/services/repository/repository.go +++ b/services/repository/repository.go @@ -5,13 +5,12 @@ package repository import ( - "fmt" - "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/notification" repo_module "code.gitea.io/gitea/modules/repository" pull_service "code.gitea.io/gitea/services/pull" + "fmt" ) // CreateRepository creates a repository for the user/organization. 
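Back on the Go side of the patch, routers/repo/user_data_analysis.go (further up) adds queryUserDataPage, which either returns the per-user statistics as JSON or streams them as an .xlsx built with excelize, filling rows in PAGE_SIZE batches. A reduced sketch of that export loop follows; the row type and the fetchPage stub stand in for the real models.QueryUserStaticDataByTableName call, and the workbook goes to stdout instead of ctx.Resp.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/360EntSecGroup-Skylar/excelize/v2"
)

// row is a stand-in for the per-user statistics record the patch exports;
// only a couple of the real columns are kept.
type row struct {
	ID          int64
	Name        string
	CommitCount int
}

// fetchPage is a stub for the paged database query, returning one batch of
// records starting at offset.
func fetchPage(offset, pageSize int) []row {
	if offset > 0 {
		return nil // pretend there is only one page of data
	}
	return []row{{1, "alice", 12}, {2, "bob", 7}}
}

func main() {
	const pageSize = 2000 // PAGE_SIZE in the patch

	xlsx := excelize.NewFile()
	sheet := "UserStatistics"
	index := xlsx.NewSheet(sheet)
	xlsx.DeleteSheet("Sheet1")

	// Header row, mirroring the dataHeader map in queryUserDataPage.
	xlsx.SetCellValue(sheet, "A1", "ID")
	xlsx.SetCellValue(sheet, "B1", "Name")
	xlsx.SetCellValue(sheet, "C1", "CommitCount")

	// Fetch and write the data in fixed-size batches, as the patch does.
	for offset := 0; ; offset += pageSize {
		batch := fetchPage(offset, pageSize)
		if len(batch) == 0 {
			break
		}
		for i, r := range batch {
			n := fmt.Sprint(offset + i + 2) // row 1 is the header
			xlsx.SetCellValue(sheet, "A"+n, r.ID)
			xlsx.SetCellValue(sheet, "B"+n, r.Name)
			xlsx.SetCellValue(sheet, "C"+n, r.CommitCount)
		}
	}

	xlsx.SetActiveSheet(index)
	// The handler streams this to ctx.Resp with a Content-Disposition header;
	// stdout is used here only to keep the sketch self-contained.
	if _, err := xlsx.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}

One detail worth flagging in the hunk itself: the SetActiveSheet and WriteTo calls appear to sit inside the paging loop, so a result spanning more than one PAGE_SIZE batch would be written to the response once per batch; the sketch defers the write until after the loop.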
diff --git a/templates/base/footer_content.tmpl b/templates/base/footer_content.tmpl index 86ef0d98e..9a708fc2c 100755 --- a/templates/base/footer_content.tmpl +++ b/templates/base/footer_content.tmpl @@ -26,7 +26,14 @@ {{end}} + {{.i18n.Tr "custom.Platform_Tutorial"}} {{if .EnableSwagger}}API{{end}} + {{if .IsSigned}} + {{.i18n.Tr "custom.foot.advice_feedback"}} + {{else}} + {{.i18n.Tr "custom.foot.advice_feedback"}} + {{end}} + {{template "custom/extra_links_footer" .}} diff --git a/templates/base/footer_content_fluid.tmpl b/templates/base/footer_content_fluid.tmpl index e07a4d5ab..8d9f2ba40 100644 --- a/templates/base/footer_content_fluid.tmpl +++ b/templates/base/footer_content_fluid.tmpl @@ -26,7 +26,13 @@ {{end}} + {{.i18n.Tr "custom.Platform_Tutorial"}} {{if .EnableSwagger}}API{{end}} + {{if .IsSigned}} + {{.i18n.Tr "custom.foot.advice_feedback"}} + {{else}} + {{.i18n.Tr "custom.foot.advice_feedback"}} + {{end}} {{template "custom/extra_links_footer" .}} diff --git a/templates/base/head.tmpl b/templates/base/head.tmpl index 58b7c63c8..4525ab57e 100755 --- a/templates/base/head.tmpl +++ b/templates/base/head.tmpl @@ -194,14 +194,57 @@ var _hmt = _hmt || []; {{template "custom/body_inner_pre" .}} - + {{if not .PageIsInstall}} +
+ + + {{.notice.Title}} + + + {{svg "octicon-x" 16}} +
{{end}} {{/* */}} + + \ No newline at end of file diff --git a/templates/base/head_fluid.tmpl b/templates/base/head_fluid.tmpl index d16d42200..227c50211 100644 --- a/templates/base/head_fluid.tmpl +++ b/templates/base/head_fluid.tmpl @@ -200,9 +200,52 @@ var _hmt = _hmt || []; +
+ + + {{.notice.Title}} + + + {{svg "octicon-x" 16}} +
{{end}} {{/* */}} + + \ No newline at end of file diff --git a/templates/base/head_navbar.tmpl b/templates/base/head_navbar.tmpl index 53d0f8839..d1d40d1d6 100755 --- a/templates/base/head_navbar.tmpl +++ b/templates/base/head_navbar.tmpl @@ -168,6 +168,14 @@ {{svg "octicon-question" 16}} {{.i18n.Tr "help"}} + + + + + + + {{.i18n.Tr "custom.Platform_Tutorial"}} + {{if .IsAdmin}}
@@ -213,4 +221,6 @@ {{end}} + + diff --git a/templates/base/head_navbar_fluid.tmpl b/templates/base/head_navbar_fluid.tmpl index 32ad7ca2f..f364518cc 100644 --- a/templates/base/head_navbar_fluid.tmpl +++ b/templates/base/head_navbar_fluid.tmpl @@ -166,6 +166,14 @@ {{svg "octicon-question" 16}} {{.i18n.Tr "help"}} + + + + + + + {{.i18n.Tr "custom.Platform_Tutorial"}} + {{if .IsAdmin}}
diff --git a/templates/base/head_navbar_home.tmpl b/templates/base/head_navbar_home.tmpl index 4cd35da76..539b8bc21 100644 --- a/templates/base/head_navbar_home.tmpl +++ b/templates/base/head_navbar_home.tmpl @@ -148,6 +148,14 @@ {{svg "octicon-question" 16}} {{.i18n.Tr "help"}} + + + + + + + {{.i18n.Tr "custom.Platform_Tutorial"}} + {{if .IsAdmin}}
diff --git a/templates/base/head_navbar_pro.tmpl b/templates/base/head_navbar_pro.tmpl index bacd19944..45b1e7925 100644 --- a/templates/base/head_navbar_pro.tmpl +++ b/templates/base/head_navbar_pro.tmpl @@ -169,6 +169,14 @@ {{svg "octicon-question" 16}} {{.i18n.Tr "help"}} + + + + + + + {{.i18n.Tr "custom.Platform_Tutorial"}} + {{if .IsAdmin}}
diff --git a/templates/base/head_pro.tmpl b/templates/base/head_pro.tmpl index 0b92016d1..13ea305c4 100644 --- a/templates/base/head_pro.tmpl +++ b/templates/base/head_pro.tmpl @@ -200,9 +200,53 @@ var _hmt = _hmt || []; +
+ + + {{.notice.Title}} + + + {{svg "octicon-x" 16}} +
{{end}} {{/* */}} + + + \ No newline at end of file diff --git a/templates/base/paginate.tmpl b/templates/base/paginate.tmpl index da07fd2ba..fb3babf11 100644 --- a/templates/base/paginate.tmpl +++ b/templates/base/paginate.tmpl @@ -1,7 +1,7 @@ {{$paginationLink := .Page.GetParams}} {{with .Page.Paginater}} {{if gt .TotalPages 1}} -
+
diff --git a/templates/explore/repo_search.tmpl b/templates/explore/repo_search.tmpl index d30ad5625..e865208c5 100644 --- a/templates/explore/repo_search.tmpl +++ b/templates/explore/repo_search.tmpl @@ -10,6 +10,7 @@
+
diff --git a/templates/org/select_pro.tmpl b/templates/org/select_pro.tmpl index 56af489fd..647bccd57 100755 --- a/templates/org/select_pro.tmpl +++ b/templates/org/select_pro.tmpl @@ -13,7 +13,8 @@ .header_card{ /* color:#003A8C !important; */ color:#0366D6 !important; - margin: 10px 0; + margin: 10px 0 0px 0; + height: 25px; } .marg{ margin: 0 5px !important; @@ -28,6 +29,7 @@ } .descript_height{ color: #101010 !important; + margin: 10px 0; height: 40px !important; word-break:break-all; line-height: 20px; @@ -44,9 +46,23 @@ .full_height{ height: 100%; } + .omit{ + overflow: hidden; white-space: nowrap; text-overflow: ellipsis; + } /deep/ ui.checkbox input[type=checkbox]::after{ border: 1px solid #0366D6 !important; } + .nowrap-2 { + /* height: 2.837em; */ + /* line-height: 1.4285em; */ + overflow: hidden; + overflow: hidden; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + } + +
@@ -69,25 +85,30 @@ {{ range .RepoList}}
-
-
- {{.Name}} +
+ -
+ +
{{.Description}}
- -
- {{if .Topics }} -
+ +
+ {{if .Topics }} +
{{range .Topics}} - {{if ne . "" }}
{{.}}
{{end}} + {{if ne . "" }}{{.}}{{end}} {{end}} +
- {{end}} -
- + {{end}} + +
+ +
+
{{end}} @@ -133,7 +151,7 @@

-
+
` + pro_html += `
` pro_html += '
' } else{ - pro_html += `
` + pro_html += `
` pro_html += '
' } } @@ -214,7 +232,7 @@ // console.log("数据:",saveData) $.ajax({ type:"POST", - url:"/org/{{.Org.DisplayName}}/org_tag/repo_submit?tagId="+typeTag, + url:"/org/{{.Org.Name}}/org_tag/repo_submit?tagId="+typeTag, contentType:'application/json', dataType:"json", async:false, diff --git a/templates/repo/debugjob/index.tmpl b/templates/repo/debugjob/index.tmpl index f38b915a4..a50c35eda 100755 --- a/templates/repo/debugjob/index.tmpl +++ b/templates/repo/debugjob/index.tmpl @@ -321,8 +321,8 @@
{{$.CsrfTokenHtml}} {{if .CanDebug}} - {{if eq .Status "RUNNING"}} - + {{if eq .Status "RUNNING" "WAITING" "CREATING" "STARTING"}} + {{$.i18n.Tr "repo.debug"}} {{else}} @@ -331,7 +331,7 @@ {{end}} {{else}} - {{if eq .Status "RUNNING"}} + {{if eq .Status "RUNNING" "WAITING" "CREATING" "STARTING"}} {{$.i18n.Tr "repo.debug"}} @@ -348,11 +348,11 @@ {{$.CsrfTokenHtml}} {{if .CanDel}} {{if eq .ComputeResource "CPU/GPU" }} - + {{$.i18n.Tr "repo.stop"}} {{else}} - + {{$.i18n.Tr "repo.stop"}} {{end}} @@ -402,6 +402,13 @@ {{$.i18n.Tr "repo.download"}} {{end}}
+ {{if and (ne .JobType "DEBUG") (eq .Cloudbrain.Type 0)}} + + {{end}}
@@ -546,6 +553,7 @@ $('#' + JobID+ '-text').text(res.status) $('#model-debug-'+JobID).removeClass('blue').addClass('disabled') $('#model-delete-'+JobID).removeClass('blue').addClass('disabled') + $('#model-debug-'+JobID).text("调试").css("margin","0 1rem") } }else{ $('.alert').html(res.error_msg).removeClass('alert-success').addClass('alert-danger').show().delay(2000).fadeOut(); @@ -629,7 +637,7 @@ if(["RUNNING","WAITING"].includes(status)){ $('#stop-model-debug-'+jobID).removeClass('disabled').addClass('blue') } - if(["CREATING","STOPPING","STARTING","STOPPED","FAILED","START_FAILED"].includes(status)){ + if(["CREATING","STOPPING","STARTING","STOPPED","FAILED","START_FAILED","SUCCEEDED"].includes(status)){ $('#stop-model-debug-'+jobID).removeClass('blue').addClass('disabled') } if(status==="STOPPED" || status==="FAILED"|| status==="START_FAILED"){ diff --git a/templates/repo/header.tmpl b/templates/repo/header.tmpl index 138a323e1..2091610df 100755 --- a/templates/repo/header.tmpl +++ b/templates/repo/header.tmpl @@ -51,7 +51,7 @@
{{if not .IsBeingCreated}}
- + {{$.CsrfTokenHtml}}
-
+ {{$.CsrfTokenHtml}}
- + diff --git a/templates/repo/modelarts/trainjob/show.tmpl b/templates/repo/modelarts/trainjob/show.tmpl index 956853d94..0be93fc8f 100755 --- a/templates/repo/modelarts/trainjob/show.tmpl +++ b/templates/repo/modelarts/trainjob/show.tmpl @@ -161,6 +161,15 @@ td, th { padding-top: 0.5rem ; } +
+
+
+
+
+
+
+
+
{{template "repo/header" .}}
@@ -186,6 +195,12 @@ td, th {
+ {{$.CsrfTokenHtml}} + {{if .CanModify}} + {{$.i18n.Tr "repo.modelarts.create_model"}} + {{else}} + {{$.i18n.Tr "repo.modelarts.create_model"}} + {{end}} {{$.CsrfTokenHtml}} {{if .CanModify}} {{$.i18n.Tr "repo.modelarts.modify"}} @@ -446,6 +461,62 @@ td, th {
+ +
+
{{template "base/footer" .}} @@ -479,7 +550,61 @@ td, th { } let timeid = window.setInterval(loadJobStatus, 30000); $(document).ready(loadJobStatus); - + function showcreate(obj){ + $('.ui.modal.second') + .modal({ + centered: false, + onShow:function(){ + $('input[name="Version"]').addClass('model_disabled') + // $('input[name="JobId"]').text(obj.JobName) + $('#JobName').val(obj.JobName).addClass('model_disabled') + $('input[name="JobId"]').val(obj.JobID) + $('input[name="VersionName"]').val(obj.VersionName).addClass('model_disabled') + $('.ui.dimmer').css({"background-color":"rgb(136, 136, 136,0.7)"}) + createModelName() + + + }, + onHide:function(){ + document.getElementById("formId").reset(); + $('.ui.dimmer').css({"background-color":""}) + $('.ui.error.message').text() + $('.ui.error.message').css('display','none') + + } + }) + .modal('show') + } + function createModel(){ + let url_href = `/${userName}/${repoPath}/modelmanage/create_new_model` + let data = $("#formId").serialize() + $("#mask").css({"display":"block","z-index":"9999"}) + $.ajax({ + url:url_href, + type:'POST', + data:data, + success:function(res){ + location.href=`/${userName}/${repoPath}/modelmanage/show_model` + $('.ui.modal.second').modal('hide') + }, + error: function(xhr){ + // 隐藏 loading + // 只有请求不正常(状态码不为200)才会执行 + $('.ui.error.message').text(xhr.responseText) + $('.ui.error.message').css('display','block') + }, + complete:function(xhr){ + $("#mask").css({"display":"none","z-index":"1"}) + } + }) + + } + function createModelName(){ + let repoName = location.pathname.split('/')[2] + let modelName = repoName + '_model_' + Math.random().toString(36).substr(2, 4) + $('#name').val(modelName) + $('#version').val("0.0.1") + } function renderSize(value){ if(null==value||value==''){ return "0 Bytes"; diff --git a/templates/repo/modelmanage/index.tmpl b/templates/repo/modelmanage/index.tmpl index 5cd7bbcdb..96ff36a42 100644 --- a/templates/repo/modelmanage/index.tmpl +++ b/templates/repo/modelmanage/index.tmpl @@ -17,16 +17,30 @@
{{template "repo/header" .}} -
+
{{template "base/alert" .}} - + {{end}}
@@ -89,7 +104,6 @@
-
@@ -103,7 +117,6 @@
-
diff --git a/templates/repo/modelmanage/showinfo.tmpl b/templates/repo/modelmanage/showinfo.tmpl index a6b35500a..e3cbee271 100644 --- a/templates/repo/modelmanage/showinfo.tmpl +++ b/templates/repo/modelmanage/showinfo.tmpl @@ -78,100 +78,121 @@
-
- 基本信息 - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
{{$.i18n.Tr "repo.model.manage.model_name"}}
{{$.i18n.Tr "repo.model.manage.version"}}
{{$.i18n.Tr "repo.migrate_items_labels"}} -
+ +
+
+ 基本信息 + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{{$.i18n.Tr "repo.model.manage.model_name"}}
{{$.i18n.Tr "repo.model.manage.version"}}
{{$.i18n.Tr "repo.migrate_items_labels"}} +
+ +
- - - -
{{$.i18n.Tr "repo.modelarts.model_size"}}
{{$.i18n.Tr "repo.modelarts.createtime"}}
{{$.i18n.Tr "repo.model.manage.description"}} -
- - -
-
{{$.i18n.Tr "repo.modelarts.code_version"}}
{{$.i18n.Tr "repo.modelarts.train_job.start_file"}}
{{$.i18n.Tr "repo.modelarts.train_job.train_dataset"}}
{{$.i18n.Tr "repo.modelarts.train_job.run_parameter"}}
{{$.i18n.Tr "repo.modelarts.train_job.AI_driver"}}
{{$.i18n.Tr "repo.modelarts.train_job.standard"}}
{{$.i18n.Tr "repo.modelarts.train_job.compute_node"}}
+ +
{{$.i18n.Tr "repo.modelarts.model_size"}}
{{$.i18n.Tr "repo.modelarts.createtime"}}
{{$.i18n.Tr "repo.model.manage.description"}} +
+ + +
+
{{$.i18n.Tr "repo.modelarts.code_version"}}
{{$.i18n.Tr "repo.modelarts.train_job.start_file"}}
{{$.i18n.Tr "repo.modelarts.train_job.train_dataset"}}
{{$.i18n.Tr "repo.modelarts.train_job.run_parameter"}}
{{$.i18n.Tr "repo.modelarts.train_job.AI_driver"}}
{{$.i18n.Tr "repo.modelarts.train_job.standard"}}
{{$.i18n.Tr "repo.modelarts.train_job.compute_node"}}
+
+
+ {{$.i18n.Tr "repo.model.manage.model_accuracy"}} + + + + + + + + + + + + + + + + + + + +
{{$.i18n.Tr "repo.model.manage.Accuracy"}}
F1
{{$.i18n.Tr "repo.model.manage.Precision"}}
{{$.i18n.Tr "repo.model.manage.Recall"}}
+
+
-
- {{$.i18n.Tr "repo.model.manage.model_accuracy"}} - - - - - - - - - - - - - - - - - - - -
{{$.i18n.Tr "repo.model.manage.Accuracy"}}
F1
{{$.i18n.Tr "repo.model.manage.Precision"}}
{{$.i18n.Tr "repo.model.manage.Recall"}}
+
+ + + +
+ +
-
+
@@ -179,6 +200,9 @@ \ No newline at end of file diff --git a/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS b/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS new file mode 100644 index 000000000..2b16e9974 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/CONTRIBUTORS @@ -0,0 +1,9 @@ +This is a list of people who have contributed code to go-cache. They, or their +employers, are the copyright holders of the contributed code. Contributed code +is subject to the license restrictions listed in LICENSE (as they were when the +code was contributed.) + +Dustin Sallings +Jason Mooberry +Sergey Shepelev +Alex Edwards diff --git a/vendor/github.com/patrickmn/go-cache/LICENSE b/vendor/github.com/patrickmn/go-cache/LICENSE new file mode 100644 index 000000000..db9903c75 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/patrickmn/go-cache/README.md b/vendor/github.com/patrickmn/go-cache/README.md new file mode 100644 index 000000000..c5789cc66 --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/README.md @@ -0,0 +1,83 @@ +# go-cache + +go-cache is an in-memory key:value store/cache similar to memcached that is +suitable for applications running on a single machine. Its major advantage is +that, being essentially a thread-safe `map[string]interface{}` with expiration +times, it doesn't need to serialize or transmit its contents over the network. + +Any object can be stored, for a given duration or forever, and the cache can be +safely used by multiple goroutines. + +Although go-cache isn't meant to be used as a persistent datastore, the entire +cache can be saved to and loaded from a file (using `c.Items()` to retrieve the +items map to serialize, and `NewFrom()` to create a cache from a deserialized +one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.) 
+ +### Installation + +`go get github.com/patrickmn/go-cache` + +### Usage + +```go +import ( + "fmt" + "github.com/patrickmn/go-cache" + "time" +) + +func main() { + // Create a cache with a default expiration time of 5 minutes, and which + // purges expired items every 10 minutes + c := cache.New(5*time.Minute, 10*time.Minute) + + // Set the value of the key "foo" to "bar", with the default expiration time + c.Set("foo", "bar", cache.DefaultExpiration) + + // Set the value of the key "baz" to 42, with no expiration time + // (the item won't be removed until it is re-set, or removed using + // c.Delete("baz") + c.Set("baz", 42, cache.NoExpiration) + + // Get the string associated with the key "foo" from the cache + foo, found := c.Get("foo") + if found { + fmt.Println(foo) + } + + // Since Go is statically typed, and cache values can be anything, type + // assertion is needed when values are being passed to functions that don't + // take arbitrary types, (i.e. interface{}). The simplest way to do this for + // values which will only be used once--e.g. for passing to another + // function--is: + foo, found := c.Get("foo") + if found { + MyFunction(foo.(string)) + } + + // This gets tedious if the value is used several times in the same function. + // You might do either of the following instead: + if x, found := c.Get("foo"); found { + foo := x.(string) + // ... + } + // or + var foo string + if x, found := c.Get("foo"); found { + foo = x.(string) + } + // ... + // foo can then be passed around freely as a string + + // Want performance? Store pointers! + c.Set("foo", &MyStruct, cache.DefaultExpiration) + if x, found := c.Get("foo"); found { + foo := x.(*MyStruct) + // ... + } +} +``` + +### Reference + +`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache) diff --git a/vendor/github.com/patrickmn/go-cache/cache.go b/vendor/github.com/patrickmn/go-cache/cache.go new file mode 100644 index 000000000..db88d2f2c --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/cache.go @@ -0,0 +1,1161 @@ +package cache + +import ( + "encoding/gob" + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +type Item struct { + Object interface{} + Expiration int64 +} + +// Returns true if the item has expired. +func (item Item) Expired() bool { + if item.Expiration == 0 { + return false + } + return time.Now().UnixNano() > item.Expiration +} + +const ( + // For use with functions that take an expiration time. + NoExpiration time.Duration = -1 + // For use with functions that take an expiration time. Equivalent to + // passing in the same expiration duration as was given to New() or + // NewFrom() when the cache was created (e.g. 5 minutes.) + DefaultExpiration time.Duration = 0 +) + +type Cache struct { + *cache + // If this is confusing, see the comment at the bottom of New() +} + +type cache struct { + defaultExpiration time.Duration + items map[string]Item + mu sync.RWMutex + onEvicted func(string, interface{}) + janitor *janitor +} + +// Add an item to the cache, replacing any existing item. If the duration is 0 +// (DefaultExpiration), the cache's default expiration time is used. If it is -1 +// (NoExpiration), the item never expires. 
+func (c *cache) Set(k string, x interface{}, d time.Duration) { + // "Inlining" of set + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.mu.Lock() + c.items[k] = Item{ + Object: x, + Expiration: e, + } + // TODO: Calls to mu.Unlock are currently not deferred because defer + // adds ~200 ns (as of go1.) + c.mu.Unlock() +} + +func (c *cache) set(k string, x interface{}, d time.Duration) { + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.items[k] = Item{ + Object: x, + Expiration: e, + } +} + +// Add an item to the cache, replacing any existing item, using the default +// expiration. +func (c *cache) SetDefault(k string, x interface{}) { + c.Set(k, x, DefaultExpiration) +} + +// Add an item to the cache only if an item doesn't already exist for the given +// key, or if the existing item has expired. Returns an error otherwise. +func (c *cache) Add(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if found { + c.mu.Unlock() + return fmt.Errorf("Item %s already exists", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Set a new value for the cache key only if it already exists, and the existing +// item hasn't expired. Returns an error otherwise. +func (c *cache) Replace(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if !found { + c.mu.Unlock() + return fmt.Errorf("Item %s doesn't exist", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Get an item from the cache. Returns the item or nil, and a bool indicating +// whether the key was found. +func (c *cache) Get(k string) (interface{}, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, false + } + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, false + } + } + c.mu.RUnlock() + return item.Object, true +} + +// GetWithExpiration returns an item and its expiration time from the cache. +// It returns the item or nil, the expiration time if one is set (if the item +// never expires a zero value for time.Time is returned), and a bool indicating +// whether the key was found. +func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + // Return the item and the expiration time + c.mu.RUnlock() + return item.Object, time.Unix(0, item.Expiration), true + } + + // If expiration <= 0 (i.e. no expiration time set) then return the item + // and a zeroed time.Time + c.mu.RUnlock() + return item.Object, time.Time{}, true +} + +func (c *cache) get(k string) (interface{}, bool) { + item, found := c.items[k] + if !found { + return nil, false + } + // "Inlining" of Expired + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + return nil, false + } + } + return item.Object, true +} + +// Increment an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to increment it by n. 
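The write helpers defined above have slightly different contracts that are easy to miss in the doc comments: Add writes only when the key is absent or expired, Replace writes only when an unexpired entry already exists, and GetWithExpiration also reports the entry's deadline. A minimal sketch of that behavior (the key names and values are illustrative, not taken from this patch):

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	c := cache.New(5*time.Minute, 10*time.Minute)

	// Add succeeds only when the key is absent (or already expired)...
	if err := c.Add("session", "abc123", cache.DefaultExpiration); err != nil {
		fmt.Println("add failed:", err)
	}
	// ...while Replace succeeds only when an unexpired entry exists.
	if err := c.Replace("session", "def456", cache.DefaultExpiration); err != nil {
		fmt.Println("replace failed:", err)
	}

	// GetWithExpiration also returns the expiration deadline;
	// a zero time.Time means the entry never expires.
	if v, deadline, ok := c.GetWithExpiration("session"); ok {
		fmt.Printf("value=%v expires=%v\n", v, deadline)
	}
}
```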
To retrieve the incremented value, use one +// of the specialized methods, e.g. IncrementInt64. +func (c *cache) Increment(k string, n int64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) + int(n) + case int8: + v.Object = v.Object.(int8) + int8(n) + case int16: + v.Object = v.Object.(int16) + int16(n) + case int32: + v.Object = v.Object.(int32) + int32(n) + case int64: + v.Object = v.Object.(int64) + n + case uint: + v.Object = v.Object.(uint) + uint(n) + case uintptr: + v.Object = v.Object.(uintptr) + uintptr(n) + case uint8: + v.Object = v.Object.(uint8) + uint8(n) + case uint16: + v.Object = v.Object.(uint16) + uint16(n) + case uint32: + v.Object = v.Object.(uint32) + uint32(n) + case uint64: + v.Object = v.Object.(uint64) + uint64(n) + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to increment it by n. Pass a negative number to decrement the +// value. To retrieve the incremented value, use one of the specialized methods, +// e.g. IncrementFloat64. +func (c *cache) IncrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the incremented +// value is returned. 
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint8 by n. Returns an error if the item's value +// is not an uint8, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. 
If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to decrement it by n. To retrieve the decremented value, use one +// of the specialized methods, e.g. DecrementInt64. +func (c *cache) Decrement(k string, n int64) error { + // TODO: Implement Increment and Decrement more cleanly. + // (Cannot do Increment(k, n*-1) for uints.) 
+ c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item not found") + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) - int(n) + case int8: + v.Object = v.Object.(int8) - int8(n) + case int16: + v.Object = v.Object.(int16) - int16(n) + case int32: + v.Object = v.Object.(int32) - int32(n) + case int64: + v.Object = v.Object.(int64) - n + case uint: + v.Object = v.Object.(uint) - uint(n) + case uintptr: + v.Object = v.Object.(uintptr) - uintptr(n) + case uint8: + v.Object = v.Object.(uint8) - uint8(n) + case uint16: + v.Object = v.Object.(uint16) - uint16(n) + case uint32: + v.Object = v.Object.(uint32) - uint32(n) + case uint64: + v.Object = v.Object.(uint64) - uint64(n) + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to decrement it by n. Pass a negative number to decrement the +// value. To retrieve the decremented value, use one of the specialized methods, +// e.g. DecrementFloat64. +func (c *cache) DecrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the decremented +// value is returned. 
+func (c *cache) DecrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint8 by n. Returns an error if the item's value is +// not an uint8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. 
If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Delete an item from the cache. Does nothing if the key is not in the cache. +func (c *cache) Delete(k string) { + c.mu.Lock() + v, evicted := c.delete(k) + c.mu.Unlock() + if evicted { + c.onEvicted(k, v) + } +} + +func (c *cache) delete(k string) (interface{}, bool) { + if c.onEvicted != nil { + if v, found := c.items[k]; found { + delete(c.items, k) + return v.Object, true + } + } + delete(c.items, k) + return nil, false +} + +type keyAndValue struct { + key string + value interface{} +} + +// Delete all expired items from the cache. 
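The Increment/Decrement family above mutates numeric values in place; the untyped forms only report errors, while the typed variants (IncrementInt, DecrementInt, and so on) also return the updated value. A short sketch under the same assumptions (the key name is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/patrickmn/go-cache"
)

func main() {
	// No default expiration and no janitor for this example.
	c := cache.New(cache.NoExpiration, 0)
	c.Set("hits", 0, cache.NoExpiration)

	// The untyped form works on any numeric type but only returns an error.
	if err := c.Increment("hits", 3); err != nil {
		log.Fatal(err)
	}

	// The typed form also hands back the new value.
	n, err := c.DecrementInt("hits", 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 2
}
```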
+func (c *cache) DeleteExpired() { + var evictedItems []keyAndValue + now := time.Now().UnixNano() + c.mu.Lock() + for k, v := range c.items { + // "Inlining" of expired + if v.Expiration > 0 && now > v.Expiration { + ov, evicted := c.delete(k) + if evicted { + evictedItems = append(evictedItems, keyAndValue{k, ov}) + } + } + } + c.mu.Unlock() + for _, v := range evictedItems { + c.onEvicted(v.key, v.value) + } +} + +// Sets an (optional) function that is called with the key and value when an +// item is evicted from the cache. (Including when it is deleted manually, but +// not when it is overwritten.) Set to nil to disable. +func (c *cache) OnEvicted(f func(string, interface{})) { + c.mu.Lock() + c.onEvicted = f + c.mu.Unlock() +} + +// Write the cache's items (using Gob) to an io.Writer. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Save(w io.Writer) (err error) { + enc := gob.NewEncoder(w) + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("Error registering item types with Gob library") + } + }() + c.mu.RLock() + defer c.mu.RUnlock() + for _, v := range c.items { + gob.Register(v.Object) + } + err = enc.Encode(&c.items) + return +} + +// Save the cache's items to the given filename, creating the file if it +// doesn't exist, and overwriting it if it does. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) SaveFile(fname string) error { + fp, err := os.Create(fname) + if err != nil { + return err + } + err = c.Save(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Add (Gob-serialized) cache items from an io.Reader, excluding any items with +// keys that already exist (and haven't expired) in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Load(r io.Reader) error { + dec := gob.NewDecoder(r) + items := map[string]Item{} + err := dec.Decode(&items) + if err == nil { + c.mu.Lock() + defer c.mu.Unlock() + for k, v := range items { + ov, found := c.items[k] + if !found || ov.Expired() { + c.items[k] = v + } + } + } + return err +} + +// Load and add cache items from the given filename, excluding any items with +// keys that already exist in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) LoadFile(fname string) error { + fp, err := os.Open(fname) + if err != nil { + return err + } + err = c.Load(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Copies all unexpired items in the cache into a new map and returns it. +func (c *cache) Items() map[string]Item { + c.mu.RLock() + defer c.mu.RUnlock() + m := make(map[string]Item, len(c.items)) + now := time.Now().UnixNano() + for k, v := range c.items { + // "Inlining" of Expired + if v.Expiration > 0 { + if now > v.Expiration { + continue + } + } + m[k] = v + } + return m +} + +// Returns the number of items in the cache. This may include items that have +// expired, but have not yet been cleaned up. +func (c *cache) ItemCount() int { + c.mu.RLock() + n := len(c.items) + c.mu.RUnlock() + return n +} + +// Delete all items from the cache. 
+func (c *cache) Flush() { + c.mu.Lock() + c.items = map[string]Item{} + c.mu.Unlock() +} + +type janitor struct { + Interval time.Duration + stop chan bool +} + +func (j *janitor) Run(c *cache) { + ticker := time.NewTicker(j.Interval) + for { + select { + case <-ticker.C: + c.DeleteExpired() + case <-j.stop: + ticker.Stop() + return + } + } +} + +func stopJanitor(c *Cache) { + c.janitor.stop <- true +} + +func runJanitor(c *cache, ci time.Duration) { + j := &janitor{ + Interval: ci, + stop: make(chan bool), + } + c.janitor = j + go j.Run(c) +} + +func newCache(de time.Duration, m map[string]Item) *cache { + if de == 0 { + de = -1 + } + c := &cache{ + defaultExpiration: de, + items: m, + } + return c +} + +func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache { + c := newCache(de, m) + // This trick ensures that the janitor goroutine (which--granted it + // was enabled--is running DeleteExpired on c forever) does not keep + // the returned C object from being garbage collected. When it is + // garbage collected, the finalizer stops the janitor goroutine, after + // which c can be collected. + C := &Cache{c} + if ci > 0 { + runJanitor(c, ci) + runtime.SetFinalizer(C, stopJanitor) + } + return C +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +func New(defaultExpiration, cleanupInterval time.Duration) *Cache { + items := make(map[string]Item) + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +// +// NewFrom() also accepts an items map which will serve as the underlying map +// for the cache. This is useful for starting from a deserialized cache +// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g. +// make(map[string]Item, 500) to improve startup performance when the cache +// is expected to reach a certain minimum size. +// +// Only the cache's methods synchronize access to this map, so it is not +// recommended to keep any references to the map around after creating a cache. +// If need be, the map can be accessed at a later point using c.Items() (subject +// to the same caveat.) +// +// Note regarding serialization: When using e.g. gob, make sure to +// gob.Register() the individual types stored in the cache before encoding a +// map retrieved with c.Items(), and to register those same types before +// decoding a blob containing an items map. 
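The comments above deprecate Save/Load in favor of snapshotting via c.Items() and rebuilding with NewFrom(). A sketch of that round trip, assuming gob encoding as the doc comments suggest; the package and function names and the 5/10-minute durations are illustrative, and any non-builtin value types must be registered with gob.Register before encoding or decoding:

```go
package cachesnap

import (
	"encoding/gob"
	"io"
	"time"

	"github.com/patrickmn/go-cache"
)

// Snapshot gob-encodes the cache's unexpired items.
// Call gob.Register for each concrete value type stored in the cache
// before using this, unless only builtin types are stored.
func Snapshot(c *cache.Cache, w io.Writer) error {
	return gob.NewEncoder(w).Encode(c.Items())
}

// Restore rebuilds a cache from a snapshot, reusing the decoded map as
// the cache's backing store via NewFrom.
func Restore(r io.Reader) (*cache.Cache, error) {
	items := map[string]cache.Item{}
	if err := gob.NewDecoder(r).Decode(&items); err != nil {
		return nil, err
	}
	return cache.NewFrom(5*time.Minute, 10*time.Minute, items), nil
}
```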
+func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache { + return newCacheWithJanitor(defaultExpiration, cleanupInterval, items) +} diff --git a/vendor/github.com/patrickmn/go-cache/sharded.go b/vendor/github.com/patrickmn/go-cache/sharded.go new file mode 100644 index 000000000..bcc0538bc --- /dev/null +++ b/vendor/github.com/patrickmn/go-cache/sharded.go @@ -0,0 +1,192 @@ +package cache + +import ( + "crypto/rand" + "math" + "math/big" + insecurerand "math/rand" + "os" + "runtime" + "time" +) + +// This is an experimental and unexported (for now) attempt at making a cache +// with better algorithmic complexity than the standard one, namely by +// preventing write locks of the entire cache when an item is added. As of the +// time of writing, the overhead of selecting buckets results in cache +// operations being about twice as slow as for the standard cache with small +// total cache sizes, and faster for larger ones. +// +// See cache_test.go for a few benchmarks. + +type unexportedShardedCache struct { + *shardedCache +} + +type shardedCache struct { + seed uint32 + m uint32 + cs []*cache + janitor *shardedJanitor +} + +// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead. +func djb33(seed uint32, k string) uint32 { + var ( + l = uint32(len(k)) + d = 5381 + seed + l + i = uint32(0) + ) + // Why is all this 5x faster than a for loop? + if l >= 4 { + for i < l-4 { + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + d = (d * 33) ^ uint32(k[i+3]) + i += 4 + } + } + switch l - i { + case 1: + case 2: + d = (d * 33) ^ uint32(k[i]) + case 3: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + case 4: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + } + return d ^ (d >> 16) +} + +func (sc *shardedCache) bucket(k string) *cache { + return sc.cs[djb33(sc.seed, k)%sc.m] +} + +func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) { + sc.bucket(k).Set(k, x, d) +} + +func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Add(k, x, d) +} + +func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Replace(k, x, d) +} + +func (sc *shardedCache) Get(k string) (interface{}, bool) { + return sc.bucket(k).Get(k) +} + +func (sc *shardedCache) Increment(k string, n int64) error { + return sc.bucket(k).Increment(k, n) +} + +func (sc *shardedCache) IncrementFloat(k string, n float64) error { + return sc.bucket(k).IncrementFloat(k, n) +} + +func (sc *shardedCache) Decrement(k string, n int64) error { + return sc.bucket(k).Decrement(k, n) +} + +func (sc *shardedCache) Delete(k string) { + sc.bucket(k).Delete(k) +} + +func (sc *shardedCache) DeleteExpired() { + for _, v := range sc.cs { + v.DeleteExpired() + } +} + +// Returns the items in the cache. This may include items that have expired, +// but have not yet been cleaned up. If this is significant, the Expiration +// fields of the items should be checked. Note that explicit synchronization +// is needed to use a cache and its corresponding Items() return values at +// the same time, as the maps are shared. 
+func (sc *shardedCache) Items() []map[string]Item { + res := make([]map[string]Item, len(sc.cs)) + for i, v := range sc.cs { + res[i] = v.Items() + } + return res +} + +func (sc *shardedCache) Flush() { + for _, v := range sc.cs { + v.Flush() + } +} + +type shardedJanitor struct { + Interval time.Duration + stop chan bool +} + +func (j *shardedJanitor) Run(sc *shardedCache) { + j.stop = make(chan bool) + tick := time.Tick(j.Interval) + for { + select { + case <-tick: + sc.DeleteExpired() + case <-j.stop: + return + } + } +} + +func stopShardedJanitor(sc *unexportedShardedCache) { + sc.janitor.stop <- true +} + +func runShardedJanitor(sc *shardedCache, ci time.Duration) { + j := &shardedJanitor{ + Interval: ci, + } + sc.janitor = j + go j.Run(sc) +} + +func newShardedCache(n int, de time.Duration) *shardedCache { + max := big.NewInt(0).SetUint64(uint64(math.MaxUint32)) + rnd, err := rand.Int(rand.Reader, max) + var seed uint32 + if err != nil { + os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n")) + seed = insecurerand.Uint32() + } else { + seed = uint32(rnd.Uint64()) + } + sc := &shardedCache{ + seed: seed, + m: uint32(n), + cs: make([]*cache, n), + } + for i := 0; i < n; i++ { + c := &cache{ + defaultExpiration: de, + items: map[string]Item{}, + } + sc.cs[i] = c + } + return sc +} + +func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache { + if defaultExpiration == 0 { + defaultExpiration = -1 + } + sc := newShardedCache(shards, defaultExpiration) + SC := &unexportedShardedCache{sc} + if cleanupInterval > 0 { + runShardedJanitor(sc, cleanupInterval) + runtime.SetFinalizer(SC, stopShardedJanitor) + } + return SC +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 92e6c309b..a68535ee2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -677,6 +677,9 @@ github.com/olivere/elastic/v7/uritemplates github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/patrickmn/go-cache v2.1.0+incompatible +## explicit +github.com/patrickmn/go-cache # github.com/pelletier/go-toml v1.4.0 github.com/pelletier/go-toml # github.com/philhofer/fwd v1.0.0 diff --git a/web_src/js/components/UserAnalysis.vue b/web_src/js/components/UserAnalysis.vue index beb4de8e9..c4c5608f1 100755 --- a/web_src/js/components/UserAnalysis.vue +++ b/web_src/js/components/UserAnalysis.vue @@ -27,10 +27,10 @@ - + - 下载报告 + 下载报告 下载报告 @@ -175,6 +175,7 @@ params:{startDate:'',endDate:'',page:1,pageSize:10,userName:''}, tableData: [], totalNum:0, + dataUrl:'../api/v1/query_user_static_page', pickerOptions: { }, value_time: '', @@ -274,68 +275,52 @@ let lastYear = lastMonthDate.getYear(); let lastMonth = lastMonthDate.getMonth(); + this.dataUrl = '../api/v1/query_user_static_page'; + if (typeof type_val=="undefined" || type_val=="null" || type_val==""){ this.params.startDate= this.formatDate(this.value_time[0].getFullYear(),this.value_time[0].getMonth() + 1,this.value_time[0].getDate()); this.params.endDate = this.formatDate(this.value_time[1].getFullYear(),this.value_time[1].getMonth() + 1,this.value_time[1].getDate()); }else{ switch(type_val){ case "yesterday_usr":{ - var now = new Date(); - var tmp = new Date(now.setTime(now.getTime()-24*60*60*1000)); - var yesterday = 
this.formatDate(tmp.getFullYear(),tmp.getMonth()+1,tmp.getDate()); - this.params.startDate = yesterday - this.params.endDate = yesterday this.value_time=[] - // document.getElementById("yesterday_usr").style.backgroundColor="409effd6" - // document.getElementById("current_week_usr") + this.dataUrl = '../api/v1/query_user_yesterday'; break } case "current_week_usr":{ - var now = new Date(); // 当前日期 - var nowDayOfWeek = now.getDay(); // 今天本周的第几天 - var day = nowDayOfWeek || 7; - this.params.startDate = this.formatDate(now.getFullYear(), nowMonth+1, nowDay + 1 - day); - this.params.endDate = today this.value_time=[] + this.dataUrl = '../api/v1/query_user_current_week'; break } case "current_month_usr":{ - this.params.startDate = this.formatDate(nowYear,nowMonth+1,1); - this.params.endDate = today this.value_time=[] + this.dataUrl = '../api/v1/query_user_current_month'; break } case "last_month_usr":{ - this.params.startDate=this.formatDate(nowYear, lastMonth+1, 1); - this.params.endDate=this.formatDate(nowYear, lastMonth+1, this.getMonthDays(nowYear,lastMonth)); this.value_time=[] + this.dataUrl = '../api/v1/query_user_last_month'; break - } case "monthly_usr":{ - var temp=new Date(now - 1000 * 60 * 60 * 24 * 30) - this.params.startDate = this.formatDate(temp.getFullYear(),temp.getMonth()+1,temp.getDate()); - this.params.endDate = today this.value_time=[] + this.dataUrl = '../api/v1/query_user_last30_day'; break } case "current_year_usr":{ - this.params.startDate = this.formatDate(now.getFullYear(), 1, 1); - this.params.endDate = today this.value_time=[] + this.dataUrl = '../api/v1/query_user_current_year'; break } case "all_usr":{ - console.log("e:"+today) - this.params.startDate = 'all'//this.formatDate(2000, 1, 1); //this.recordBeginTime// - this.params.endDate = today this.value_time=[] + this.dataUrl = '../api/v1/query_user_all'; break } } }; - this.$axios.get('../api/v1/query_user_static_page',{ + this.$axios.get(this.dataUrl,{ params:this.params }).then((res)=>{ this.tableData = res.data.data @@ -345,45 +330,17 @@ console.log("res.count:"+res.data.count) }) - // this.$axios.get('../tool/query_user_static',{ - // params:this.params - // }).then((res)=>{ - // this.currentPage = 1 - // this.tableData = res.data - // console.log(" this.tableData:", this.tableData.length) - // for(var i=0;i !search || data.Name.toLowerCase().includes(search.toLowerCase())) - + this.params.userName = this.search this.params.page = 1 this.page=1 this.getUserList(this.type_val, this.dynamic) }, - // goToDetailPage(pro_id,pro_name){ - // sessionStorage.setItem("pro_id",pro_id); - // sessionStorage.setItem("pro_name",pro_name); - // document.getElementById("pro_main").style.display="none"; - // document.getElementById("pro_detail").style.display="block"; - - // }, + tableHeaderStyle({row,column,rowIndex,columnIndex}){ if(rowIndex===0){ @@ -415,12 +372,6 @@ console.log('dateString', dateString); // > dateString 2021-07-06 14:23 return dateString; }, - - // transformTimestamp(timestamp){ - // var dateString= new Date(timestamp); - - // return dateString.toLocaleDateString().replace(/\//g, "-") + " " + dateString.toTimeString().substr(0, 8); - // }, }, mounted() { @@ -434,9 +385,6 @@ }, watch:{ search(val){ - // if(!val){ - // this.getUserList("all_usr",7) - // } if(!val){ this.params.userName = this.search this.params.page = 1 diff --git a/web_src/less/openi.less b/web_src/less/openi.less index dcde874be..304242ea3 100644 --- a/web_src/less/openi.less +++ b/web_src/less/openi.less @@ -232,9 +232,9 @@ footer 
.column{margin-bottom:0!important; padding-bottom:0!important;} .i-bg-used{background-position: -514px -52px;} .icon-bind{background-position: -550px -52px;} .icon-unbind{background-position: -568px -52px;} -.CREATING, .STOPPING, .DELETING, .STARTING, .WAITING ,.INIT,.KILLING{display:inline-block;background-image:url('/img/loading.gif');background-repeat:no-repeat;width:16px;height:16px;background-size:16px 16px;margin-right:5px;} +.CREATING, .STOPPING, .DELETING, .STARTING, i.WAITING ,.INIT,.KILLING{display:inline-block;background-image:url('/img/loading.gif');background-repeat:no-repeat;width:16px;height:16px;background-size:16px 16px;margin-right:5px;} -.COMPLETED{display:inline-block;width:18px;height:18px;background:url("/img/icons.svg");background-position: -496px -52px;background-position: -441px -52px;} +i.COMPLETED,i.SUCCEEDED{display:inline-block;width:18px;height:18px;background:url("/img/icons.svg");background-position: -496px -52px;background-position: -441px -52px;} .text_over{ overflow: hidden; text-overflow: ellipsis; @@ -585,3 +585,22 @@ display: block; margin-top: 0.7rem; color: #888888; } +.tutorial_icon{ + vertical-align: middle; + margin-right: 0.75em; +} +.notic_content{ + height: 50px; + vertical-align: middle; + text-align: center; + line-height: 50px; + background: #E5F4F4 +} +.x_icon{ + float: right; + margin-right: 15px !important; +} +.a_width{ + width: 50%; + display:inline-block +} \ No newline at end of file
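On the front-end side, the UserAnalysis.vue change above stops computing date ranges in the browser and instead calls period-specific endpoints (`../api/v1/query_user_yesterday`, `query_user_current_week`, and so on). The patch does not show the server side of those routes; purely as an illustration of where the newly vendored go-cache could plug in, a hypothetical handler might memoize each period's aggregation. Everything below — the handler, the route wiring, and the queryUserYesterday helper — is an assumption for illustration, not code from this patch:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"

	gocache "github.com/patrickmn/go-cache"
)

// statCache memoizes expensive per-period aggregations between requests.
var statCache = gocache.New(10*time.Minute, 30*time.Minute)

// queryUserYesterday stands in for the real database aggregation behind
// the endpoint; it is assumed here, not taken from this patch.
func queryUserYesterday() ([]map[string]interface{}, error) {
	// ...run the real query here...
	return nil, nil
}

func handleUserYesterday(w http.ResponseWriter, r *http.Request) {
	const key = "user_static:yesterday"
	// Serve a cached result when one is still fresh.
	if v, ok := statCache.Get(key); ok {
		json.NewEncoder(w).Encode(v)
		return
	}
	rows, err := queryUserYesterday()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	statCache.Set(key, rows, gocache.DefaultExpiration)
	json.NewEncoder(w).Encode(rows)
}

func main() {
	http.HandleFunc("/api/v1/query_user_yesterday", handleUserYesterday)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```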