From fff080d8b40a8e69caaccaec20141ed941a3495a Mon Sep 17 00:00:00 2001
From: DragonAura
Date: Sat, 15 Apr 2023 23:00:08 +0800
Subject: [PATCH 01/10] fix(CAPI): :pencil2: fix some missing headers

---
 CAPI/cpp/API/include/state.h      | 1 +
 CAPI/cpp/API/include/structures.h | 2 ++
 CAPI/cpp/API/include/utils.hpp    | 2 ++
 3 files changed, 5 insertions(+)

diff --git a/CAPI/cpp/API/include/state.h b/CAPI/cpp/API/include/state.h
index 1440a2d..02f5543 100644
--- a/CAPI/cpp/API/include/state.h
+++ b/CAPI/cpp/API/include/state.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include

 #include "structures.h"

diff --git a/CAPI/cpp/API/include/structures.h b/CAPI/cpp/API/include/structures.h
index 613e04e..1b10d20 100644
--- a/CAPI/cpp/API/include/structures.h
+++ b/CAPI/cpp/API/include/structures.h
@@ -5,6 +5,8 @@
 #include
 #include
 #include
+#include
+#include

 namespace THUAI6
 {

diff --git a/CAPI/cpp/API/include/utils.hpp b/CAPI/cpp/API/include/utils.hpp
index 3472cf6..3c61f8a 100644
--- a/CAPI/cpp/API/include/utils.hpp
+++ b/CAPI/cpp/API/include/utils.hpp
@@ -5,6 +5,8 @@
 #include
 #include
+#include
+#include

 #include "Message2Clients.pb.h"
 #include "Message2Server.pb.h"
 #include "MessageType.pb.h"

From c430cc60aae9102062a5e16d99c55b411a69af9a Mon Sep 17 00:00:00 2001
From: DragonAura
Date: Sat, 15 Apr 2023 23:33:24 +0800
Subject: [PATCH 02/10] style(CAPI): :bug: fix some warnings

---
 CAPI/cpp/API/include/logic.h      | 4 ++--
 CAPI/cpp/API/include/structures.h | 2 +-
 CAPI/cpp/API/include/utils.hpp    | 7 ++++++-
 CAPI/cpp/API/src/logic.cpp        | 2 +-
 4 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/CAPI/cpp/API/include/logic.h b/CAPI/cpp/API/include/logic.h
index 3e33682..a496a2d 100644
--- a/CAPI/cpp/API/include/logic.h
+++ b/CAPI/cpp/API/include/logic.h
@@ -42,12 +42,12 @@ private:
     std::unique_ptr<Communication> pComm;

     // ID and faction record
-    int64_t playerID;
     THUAI6::PlayerType playerType;
+    int64_t playerID;

     // Type record
-    THUAI6::StudentType studentType;
     THUAI6::TrickerType trickerType;
+    THUAI6::StudentType studentType;

     // GUID info
     std::vector<int64_t> playerGUIDs;

diff --git a/CAPI/cpp/API/include/structures.h b/CAPI/cpp/API/include/structures.h
index 1b10d20..d92a5d2 100644
--- a/CAPI/cpp/API/include/structures.h
+++ b/CAPI/cpp/API/include/structures.h
@@ -177,7 +177,7 @@ namespace THUAI6
         int32_t viewRange;       // view range
         int64_t playerID;        // player ID
         int64_t guid;            // globally unique ID
-        int16_t radius;          // radius of a circular object, or the inscribed-circle radius of a square one
+        int32_t radius;          // radius of a circular object, or the inscribed-circle radius of a square one
         int32_t score;           // score

         double facingDirection;  // facing direction

diff --git a/CAPI/cpp/API/include/utils.hpp b/CAPI/cpp/API/include/utils.hpp
index 3c61f8a..e4f9260 100644
--- a/CAPI/cpp/API/include/utils.hpp
+++ b/CAPI/cpp/API/include/utils.hpp
@@ -23,6 +23,11 @@ namespace AssistFunction
         return grid / numOfGridPerCell;
     }

+    [[nodiscard]] constexpr inline int GridToCell(double grid) noexcept
+    {
+        return int(grid) / numOfGridPerCell;
+    }
+
     inline bool HaveView(int viewRange, int x, int y, int newX, int newY, std::vector<std::vector<THUAI6::PlaceType>>& map)
     {
         int deltaX = newX - x;
@@ -419,7 +424,7 @@ namespace THUAI62Proto
         return playerMsg;
     }

-    inline protobuf::IDMsg THUAI62ProtobufID(int playerID)
+    inline protobuf::IDMsg THUAI62ProtobufID(int64_t playerID)
     {
         protobuf::IDMsg idMsg;
         idMsg.set_player_id(playerID);
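For context on the GridToCell overload added just above: CAPI positions are measured in fine "grid" units while the map is indexed in coarse cells, and the new double overload lets callers convert a floating-point position without an implicit-conversion warning. A minimal standalone sketch of the pair, assuming 1000 grids per cell (the real constant is numOfGridPerCell in utils.hpp):

    #include <iostream>

    constexpr int numOfGridPerCell = 1000;  // assumed value for this sketch

    [[nodiscard]] constexpr inline int GridToCell(int grid) noexcept
    {
        return grid / numOfGridPerCell;
    }

    [[nodiscard]] constexpr inline int GridToCell(double grid) noexcept
    {
        return int(grid) / numOfGridPerCell;  // truncate first, then scale down
    }

    int main()
    {
        std::cout << GridToCell(2500) << ' ' << GridToCell(2500.75) << '\n';  // prints: 2 2
        return 0;
    }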
diff --git a/CAPI/cpp/API/src/logic.cpp b/CAPI/cpp/API/src/logic.cpp
index 799b281..abb4440 100644
--- a/CAPI/cpp/API/src/logic.cpp
+++ b/CAPI/cpp/API/src/logic.cpp
@@ -84,7 +84,7 @@ std::vector<std::vector<THUAI6::PlaceType>> Logic::GetFullMap() const
 THUAI6::PlaceType Logic::GetPlaceType(int32_t cellX, int32_t cellY) const
 {
     std::unique_lock<std::mutex> lock(mtxState);
-    if (cellX < 0 || cellX >= currentState->gameMap.size() || cellY < 0 || cellY >= currentState->gameMap[0].size())
+    if (cellX < 0 || uint64_t(cellX) >= currentState->gameMap.size() || cellY < 0 || uint64_t(cellY) >= currentState->gameMap[0].size())
     {
         logger->warn("Invalid position!");
         return THUAI6::PlaceType::NullPlaceType;
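Both kinds of hunk in patch 02 quiet compiler diagnostics rather than change behavior: the member reorder in logic.h presumably matches declaration order to the constructors' initializer lists, and GetPlaceType now casts its indices before comparing them against the unsigned value returned by size(). A hedged, self-contained illustration of the two warning classes involved under g++/clang -Wall (the Demo struct is invented for the sketch, not taken from the CAPI):

    #include <cstdint>
    #include <vector>

    // -Wreorder: members are initialized in *declaration* order, so the
    // declaration order should match the constructor's initializer list,
    // as it does here after the reorder.
    struct Demo
    {
        int playerType;   // declared first, therefore initialized first
        int64_t playerID;
        Demo(int type, int64_t id) :
            playerType(type),
            playerID(id)
        {
        }
    };

    // -Wsign-compare: size() returns an unsigned type, so check a signed
    // index against 0 first and only then cast, as GetPlaceType now does.
    inline bool IsValidCell(int32_t cellX, const std::vector<int>& row)
    {
        return cellX >= 0 && uint64_t(cellX) < row.size();
    }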
{i}"); endFile = true; } else diff --git a/logic/Client/Properties/launchSettings.json b/logic/Client/Properties/launchSettings.json index a671030..a747e32 100644 --- a/logic/Client/Properties/launchSettings.json +++ b/logic/Client/Properties/launchSettings.json @@ -2,7 +2,7 @@ "profiles": { "Client": { "commandName": "Project", - "commandLineArgs": "--cl --port 8888 --characterID 2031" + "commandLineArgs": "--cl --playbackFile .\\video.thuaipb" } } } \ No newline at end of file diff --git a/logic/Client/StatusBarOfSurvivor.xaml.cs b/logic/Client/StatusBarOfSurvivor.xaml.cs index fa7ca3e..75011d8 100644 --- a/logic/Client/StatusBarOfSurvivor.xaml.cs +++ b/logic/Client/StatusBarOfSurvivor.xaml.cs @@ -53,6 +53,9 @@ namespace Client case StudentType.TechOtaku: serial.Text = "👥" + Convert.ToString(2) + "🧓" + Convert.ToString(obj.PlayerId) + "\nTechOtaku"; break; + case StudentType.Sunshine: + serial.Text = "👥" + Convert.ToString(2) + "🧓" + Convert.ToString(obj.PlayerId) + "\nSunshine"; + break; case StudentType.NullStudentType: serial.Text = "👥" + Convert.ToString(2) + "🧓" + Convert.ToString(obj.PlayerId) + "\nNullStudentType"; break; diff --git a/logic/Preparation/Utility/Transformation.cs b/logic/Preparation/Utility/Transformation.cs index 62fb05d..22b3464 100644 --- a/logic/Preparation/Utility/Transformation.cs +++ b/logic/Preparation/Utility/Transformation.cs @@ -200,6 +200,8 @@ namespace Preparation.Utility return Protobuf.StudentType.Robot; case Preparation.Utility.CharacterType.TechOtaku: return Protobuf.StudentType.TechOtaku; + case Preparation.Utility.CharacterType.Sunshine: + return Protobuf.StudentType.Sunshine; default: return Protobuf.StudentType.NullStudentType; } @@ -218,6 +220,8 @@ namespace Preparation.Utility return Preparation.Utility.CharacterType.Robot; case Protobuf.StudentType.TechOtaku: return Preparation.Utility.CharacterType.TechOtaku; + case Protobuf.StudentType.Sunshine: + return Preparation.Utility.CharacterType.Sunshine; default: return Preparation.Utility.CharacterType.Null; } diff --git a/logic/cmd/gameServer.cmd b/logic/cmd/gameServer.cmd index 3d9e370..22cac6c 100644 --- a/logic/cmd/gameServer.cmd +++ b/logic/cmd/gameServer.cmd @@ -1,12 +1,12 @@ @echo off -start cmd /k ..\Server\bin\Debug\net6.0\Server.exe --ip 0.0.0.0 --port 8888 --studentCount 4 --trickerCount 1 --gameTimeInSecond 600 --fileName test +start cmd /k ..\Server\bin\Debug\net6.0\Server.exe --ip 0.0.0.0 --port 8888 --studentCount 4 --trickerCount 1 --gameTimeInSecond 600 --fileName video ping -n 2 127.0.0.1 > NUL -start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 4 --type 2 --occupation 1 +start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 4 --type 2 --occupation 4 -start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 0 --type 1 --occupation 1 +start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 0 --type 1 --occupation 6 start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 1 --type 1 --occupation 2 diff --git a/logic/cmd/playback.cmd b/logic/cmd/playback.cmd index 32156c7..8e413d5 100644 --- a/logic/cmd/playback.cmd +++ b/logic/cmd/playback.cmd @@ -1,5 +1,5 @@ @echo off -start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --playbackFile .\test.thuaipb --playbackSpeed 1 +start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --playbackFile .\video.thuaipb --playbackSpeed 1 ping -n 2 
diff --git a/logic/cmd/gameServer.cmd b/logic/cmd/gameServer.cmd
index 3d9e370..22cac6c 100644
--- a/logic/cmd/gameServer.cmd
+++ b/logic/cmd/gameServer.cmd
@@ -1,12 +1,12 @@
 @echo off

-start cmd /k ..\Server\bin\Debug\net6.0\Server.exe --ip 0.0.0.0 --port 8888 --studentCount 4 --trickerCount 1 --gameTimeInSecond 600 --fileName test
+start cmd /k ..\Server\bin\Debug\net6.0\Server.exe --ip 0.0.0.0 --port 8888 --studentCount 4 --trickerCount 1 --gameTimeInSecond 600 --fileName video

 ping -n 2 127.0.0.1 > NUL

-start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 4 --type 2 --occupation 1
+start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 4 --type 2 --occupation 4

-start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 0 --type 1 --occupation 1
+start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 0 --type 1 --occupation 6

 start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --port 8888 --characterID 1 --type 1 --occupation 2

diff --git a/logic/cmd/playback.cmd b/logic/cmd/playback.cmd
index 32156c7..8e413d5 100644
--- a/logic/cmd/playback.cmd
+++ b/logic/cmd/playback.cmd
@@ -1,5 +1,5 @@
 @echo off

-start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --playbackFile .\test.thuaipb --playbackSpeed 1
+start cmd /k ..\Client\bin\Debug\net6.0-windows\Client.exe --cl --playbackFile .\video.thuaipb --playbackSpeed 1

 ping -n 2 127.0.0.1 > NUL
\ No newline at end of file
diff --git a/resource/client.png b/resource/client.png
new file mode 100644
index 0000000000000000000000000000000000000000..f63b2840224f45736d844e8994444b9df99a6df1
GIT binary patch
literal 135159
[135159 bytes of base85-encoded PNG data omitted]
zm%jWsz~ukx@IgEZ(dLeq3=D=A%%*SlNk6`2IK(~gT#dE6Se>yDd$A-|Pn7g#276*n zH<6a%0j_1fqWgiZvi;l`2R503ZP>h>Tc&+%+HT0izMAy`J!{8Q<^N;uz2m9w|Nn6< zNhB+K?~avF$Y>ZzcE_%CC7P<=j-`;%=_bh=W6+4jAze9kQe{8#XZV}c4~iUT=jGqUDR@@ zM`MC*_CjApVB&PqC;zbreZ##?*pm2giAPNH?C7HQual{-B2E4D7gk?$NKO72oa$N3 zQW86|`R#tukb0G<9hC%=rLII06-v?j-tCge?pU3DP#t*Pd*nUn-<8|cX?}I=x%b6; zGG!v)VDe#ma-)v2`TBB<_Vrb@xiRZuJ3h1iA$1NLSKo~`2fjVk*M@y#F4WE+t*GLu z^fET@e|wW=-( z&<{mCm`p`Bo9X)JUrTl`+=+_wslLXez)q<=jU%%&*?+q!qL|^Uzszi-#H2aOthI) zK?MTUf5rawOSI{epTh}TDF1+`9wXC@c;(LWx(UOyLWbmuDsi9E69?Sbx4jiSGb?0# zT+@&NJ$`ESV`fp>(5mYVLfS@VB(OdD-{_o%8SIzCCEI61s&@jf(w4la|E3IGuY(w9 zpCwXvv@8_&(3q2z2ZPe}{wbLN=z$XF2WO4M96MFfl*Q%Qg?R|H%pSSTHAiIP;O!!pYV^N{Kixn!PZvdy6 zQVQ(gCL6iN+7?kJ&B2V}LSOBnI3(Y9q^|=et3JSCISYX*9hw8>Z-&%4bJmQzl6m?! z5H&csj~)F>2>m$OC7Af%hRl7*LB|x-cz@ZqtsLksRR?t0x?t+-0~ruOkVqFspxv$W z+fhQP0&<1(QW_FK$WsmB(GH-QKhn{$w;&4+`bR)GOwvb&Iy9B1rAb($?O%;3&IEgnF>NS2w_0%aW80YuYd+O8!`|Iz?~30x7*yPbJpDS zX78ti%}R7^s^td>LEA!!F>3$KIzl=TXuuYZ*dN zVaAX*)?o}H%j?*_S*9u7>m2^t48xbtbQ@i-tR>PZa<%rYt4?21s3IR;@!2)uGn7(i zGx6QE%W?+*_mD;ck|+I#)!kwoh!dgMKO8AM_B|B;ec#K#J9HM+KQ-d-x_%AR9#}Rv zRIiCsuu^qgAMGK}Hr|V>8TMXG6~zX>+7&k_IUs?uXqF|3M~2%I#VM?ZRaO znex0Ea~Bhq&g-*ZI@aDs|6IEix)lHOT2KH_#iRYzzF+&?a=&Usy{Xl_pjfrGY#JCP zY^=Wj3?V0PJrzcRlf>o!Y;+ob? zUD={kcB`w_5-owl1kXi$j30E ze=$L-(-^ZS@~WQSRY{mC0T$BC5v?%o<6RW}kIx(`b9LvmE;uAr1eYQun2YrMoohf61sS#^%R?Bocw`B|^futYfl`8?Qp`@n!t{e!0l|1- zyg!QgzF?PYjT%No@lb`y_j%rleBmxbcJx8BREWQh{crY(w{y8n-LQ}!V=F@daB=Vo z|NQAeDl;2Cf*~va;G5)_y$M`GA~>0i^Op_b~APGTb(DT%&NeT}W(zefI!1-hit`#aXRcQ4Sx*$;!oMqn26 z^BmU$oID4yy}@yX9B7i4){+f!q2qO;&5-n{-$nI=*fBlbUc%WTF0o-&NGo`(O><6m zsYNa(QyjAgl_D8=YUoyV#gH}7iMh^rrF!|cwKUj~bKU1BFeKS!E-kFKEVfy~QdB$* z#R8~VN|FMsNw@f;0KC! zT(6+`=sUL44%bD z?~Jx;IB*nHG7^|oDvit11fEl)?Nei0XO>}>XD?`5AcRi8-LULiTGaZ=JOEhDWC`D! znkHNo{krs;SC6~H?dI^|-rs!ybJ;)WXFhU{UU|ree(W^kh0IlxATH(4tTtSm?Tf${ zV-*Ob=K+8z7zCLQ`fY0*4-zUC?L3#;Et_}Hq@bSkzZT;or0EASf-NB17E3I;+RZq# zR#zS8PgAM#?RAsb4$|B8$2Ef1&Q?j|@S7mKNOer7w@Yl))O+))iXpVq+NDO@1bVct zNG%RV{Q?56VT`3Wb}mB@Bhc%c|J)~M^?*~;J3E_%9se8xBsNAJ(}xj_NDEAvNy^2I zyWg)5=18Gdr>dmfbPX1yYAFwb=VV|*Pexb!%Pm$2#4abTU3)Qvn>dBE3}h;a9Xgy# z9~v*T?Y(akxjE&&DQvyz>9HWSpEqNzmcn)###i8@W26V8>Qvzxx14#WP3`|l`BMp{TfaWgG{>=qvNwYaW0KEvTT z!P>ajU;EVt)0XM{)noh92fNr~#w46Ao@-^D#l6XNTGf$_-K<D#pAaqedmZV^n4 zU1#39xrdafo2xB3y-dMzW*3(y%V-!aCFGIw;i*xx`7!Q7xy8fPvB!7&8Ag^a_#`e| zw!C!zuEN?(*6;sX-`eI{KlN93kc!lW zQKJsOi4I>}zxQbsH&|5bxmq%_THt@y;$F(?5riuTkx2#B0mnZEet zNzXHav(#ln*BV^YNlz16erxkX!#`X1IW3O{+JOCGKq72pBz{RDcS}N{=;tc_&uf+? 
ztXbqMyU>&!Dgvk;@XMV98ZuDSRWUyUi>+k3!m)C8*{B`f%nqcS|L<19oC&=l+%w5U zA{kMEamoEKpui0i=ttGX`;O3|fgA2P>_8G<#9#A!>T9F4w;Pls zKwy72h6DJAIhEHSy_eSE?ULiy?3VUzQwB-V(iwq|xxm z!FJeE=c51(itWO?^x{v86M@4;eg?$Sx8*ic%1eIc6(+8`IFSA!CD)UPxvFfjQqhB6 zpUsEU9Z`WTxJsq#QGpXS7ocDtHgj*5D;Ao-W_AvG{wvEf?@uc*m=(1C@bOLiu6Mn% zHJ>SL#TLCW-|GLJ|HFgp$(2*lrQSK5We3ocw~v9K?N$+@5qK8kKyVOHqDU~Zm;T*i z)|(}XyR1#4^g4!pG^d5ue2cCwh(#w$-a4%EF>B$M_~^1Tc{A>dbuhb28kP9cv2yzS zp@ALyh(%SGspotp-LEYuJ)K{Q9PJcZwKVP3ygxDPKk}h(p&7g$vuF9yn9&F>w1qbc zb11;;iPQ1$5{@dtB5E323;)E$tjnwBk`5cj2U_Rz6_iW%af@9I#*>%;S7pf!pMo)+8@ao;p^>yz^L%rbSC`w~@m;pO=EgVM{`2S+au zSV2r?et9XtsTE0-XDm6KN@~gr95s9{|)4P@j zUW{DWc=^XkgSI_D&i9a8`0L`VPm@{r-nw`UGTx3ppwI^Mw$r;epk^A zmQhw^gVqztSFjD;5pGKW&x5D(t(sfU*K%Atg9C>D3T6`8%F0}XB5HuJjeB^c9WZ*I zjmkdZZ&azt1NlHUaG)TZ0*I@h|K;joj1r~~RIEiPL8~<-?n(8hJnP-q_N&jU=e(~) zYF&~uEIwzBsI(rP*GtTmFIT-Uf9h-cNv)s4q4ZeMF!HF5C>mZ(_-BM<8HSGjI!?*L z(c1^tYdGE0$$gL-X$*@guhrYuELvW0`z}^eRi&Hqe98a9-gm=^Vh=}#+gc8q?k>2H zbMln9c>T_(+86ulV=n#_USHx)NB&6>8!m8YTlVM>V@YFn=~+g9MO^QYL4VfDFu*sD zVoxkc*db#u7R-bXk;?^D2kWBbUVf}tDyiNDj0_q+N`=1M{(A6s`duu!K;>meGd11%CLm@u8c25>kj|7Nd5kA6+hqJDiuy8Y|z zMx{H8+5XIC887ZF^XB!rz3m`yZdebuJ)Jz}-sjoe2t+yd1yG$?QhtukGnFX1kXUJK zGiT&Nc6k(PQ!(vA99QfO+oC$;qt+*>e zB4dSQnj|n=GOB?eby*#49M1UMl`w2;v|RUo80vBw`$dnnt323uAm!pG-JNksqa#s! zBvWF((9yeUI{1Sb$#^76AjEV(aTk@>7mzpg`94(i&UU=Tsgu8_I`h3qn%HsT&r>^Y z$zwdy9r*+?aef>X@1*A-g*j=1m zm9lQV_l55mS1Vmvo3|(Sl2YkgvUCx%YC9 z6rNkkTcF8vTPgd$Y0~n^XNT@9tl^Dd8?dp7Tneq2k6#gvOEUXJAs$XGnexT$Rzib% zH20gJ1mvBLgaT!--LQLVvu|5s>v6HenWI$42Tae2Se+VSXy=i|1P$<1h$>-RK4pC= za#HvCI;eKtxGaG7Q~uAQ_mAjbCjKnKRi`XH>YRRd`E%o>qZ9$d-0QGQHv8E**kv!3 zbL1db5@SP)rd0TK9csiV+J$efQ$0-fzkoZKbqYoqrkc)_>}GRm!P<<8E%;r7mY@BO z0Iz9e0NP(7%aL>KCBlOX8uy#2QWqh0Ar_6?_9l;#azToL;E<&`r_P-@Pa28zJG5J2 zw`(?x6N&CaCuVY>dAITE%E~gR;WUnkpxMoFgQrjFxrEin@~XD0`u3i@)t)(6kbMK{ zmA3tSAr8r#Rh`L|t2*)XH3y$Ot<&UJ*@?40c?$b5v_96Vp*>SLE(Hs2cu=*N*B&0wP@TVE18Hg?6g}&VQ7v)VI z{ti=2&O~Qj_bQi3W@A=@RH0<>&I$cp5l-p}k7{vJfx>k-`9S0+mq)dlT3TRT7=09} zAEpzF#!t)5#x74WeqMelV}PsEq{C9}>CU8N9*z{(yTxd;6UNS5cW(W33xD^m9xM9b zl2x(% zM7a>_k@Wlaz&c+Hb80BI4_$+tigUsko<-J)A8&DaQS zX5uOgjkL8ew3)j_yOidZQfhVSv^0zIwd7-;ziw_?B^Ew1|46P{dqy&LcVh_Nw2Dq? 
zbLLxGgb%ZYTVBbJh@`q{2cr=P+5YWf`o>SM4^P5aL4Er~*EgO$FU*z8G_#vFQ3bd7{_d@uJ1AIm(s2wEC2G zt&5fR7mjPmxwk~Djn{se2|qm3zL^kxWU9$h8S*HEJ{QIZwnP?xT0CR8Xvv-Nrc3d% zjp=f`?~_bL3|^+aR98@zfor{exoM(KMSt>9bUJ90aO|QlGgsv@;&+lh(_pDHgmmeH zS)9}zK4+la;`)p8@0_i9ObFu?pj&spF_@v;%sxOXcQ5mk9-pfElQ;!RL8s?9rF;$k zK*P0pApRbXw9QfeBiq^a=r(@#NOS+90sHL^$ls^+8d7&_N7A4;cm$Rfm4s|BWNbPp z6Bsya^r4c4a4N$H+?JHXK;>nT?y4`Rc+Be;+4wB9_a%Qv_1((ve@)R z<_==$&DDJfVQP1lgz1dcN!NcAa4D^&rhZFduW?mOs6=ydR-s&i5u=XuvA8g{hC>-z zp^uJspX#CIl1z6LRO&v}tvB6curjr8I8cYd^tmZjSsE%DS361CENCEb)fwt@2wlW;&a#z6@&3Hoz^FVc=VJq+LyO{e8$qGE95HM2YJBFuJ-GNF+pnNGCLKVLgg zy=JDANZYruHgb^by8NbB?eMNtl=zpfLx0o_w|8kzSf&dVSn}+(%ZtDNWYZFqRtz`| zQ!DzE9eo>3{XOXwM>Ge59O#RiR{j6&b*ZVpl(5S>|IT^~ds)%2(Hng9a5}D_mN?kb z5LDZyCny>;L0!Q1d_yunsOMazq7io}ilgVYq6t3*Km5Ns8Edwuz?3fR1VmF~3C9Ra zvacdSbfV3Xqs^C)$3-2iqO8M7Fi}$~t6##5?$19`aqasGqM23F8qIOsY^?N}bbt%c z&@{RZIoL59)lR%5zXKkxV>M?^taY62j}pnY%XZzfSBAe$F>yr?a%Ivf-84DknP_Ht z-T3IHl5aHO?x9@%xFGchK@RKqWGNwn5ZE(d2m8~wGHS{FD6(occWTvccg}Z-o$Tk} zmk`f+)9e4BaO@UiKe?y>r*kuT_UR*LA5K(V|5Q2CeQ&&Cor(2dE@2{50UaL{>*PFi zeD=c2yE1)hdbH>Y5%>D$h$}NKPih)^;~_65JI_R#k{UzQa%#Qbb(Ad#NHnKSxg}it zz!B2hypa$Y|7M@zHkYvwd=j;@db6iD#QQCN)sw=N4j#tx3c9=As{YY73HZ1Q0b?Ak zfc%a{FCB|=#iLhc1PMz+sYInu=mS;M7!V_enRft{|NZr2{7BRlq$)cZtc4MlW;Bew zglRH4VwzI6xQ|87)uz$Z2$4@lW)h$#Kz5`;#_RUlYJ_TY+F;wOGYPLMjCywWEX3_W z$s+>`3V(612VRIl-v3thk4^>Umq_Xf*`_s-37bJ*)NqsEnJ4N+71Qr`Bje3CuK>X< zSwQUHkMOT8rR`lJurH5>{lx@s10slj>9(mPL-*@H9tKenKr$HRs>dJ@yhcoi0rnt9 z?%*FpCt!4JO9MbPne<~MgIV@IbQ{Ri=Qgq+-1&n_(yQt>e_I?3! zjqxxP)}}pZPls2>>2BM#%mi{Ms4x8R{xUv0JaZ_)^!@{b5b~0z$3=|=rF@Gkr+e(8 zxx_7=50GCujC6kyk{0i;uV7|J_^IDCyXTLhjJ(%hTXb|csK$}8dpyk4m3e)7halB~ zo6dq6Vc(=K$=j)aVcgtZB$Y4I6&X=r@%)(M1QZ0O_qQc%YI%KbZ~4eRUyu=~&LY1y zGLPJ^1&6dSqP(K=tKya)gjWN(otQI`!pm30V&#^jV{e*%^4{&!dbEmTW6O{Ln}OsWtoVgvs=~2w$zBR9^w@rAsq@Jc}3OyBlgcoDi87 zlG>y|#mZo}?bv|W6=2;E%_;hBKg#pE@W|1ndx0FuiBdh(4&IHV4rwGs)wiGW%>~XR zNtY_iv}_-Yr-gW5jW_$kq(p4b?(k;M4XY0|-mMFh%Eq<1ZB--JKYTQGLwczK=i)1= zz0YjEXJhv{%h7K=y>(a}pHrGuxGz`s6=bZ*+aVFDxlc6|sxqa3I@^De`aX8JdS%_n zIv5|__Lwj1P1kYE0F>6|fotX$D!-H+4Gd|}hgs^C1?Hh@Ffe5EFT60z3nirPy`1UM zMjQ(XaPNBypZ{+qy_E6zSCj z@D6mHVCZJ>IxUl?Pg%>ohf7&IEi-uUo{Exe@)cWlwd;WyYfV1QN$d1f`torIJ3_)s zbq8C_-O?HQTy+p@`j&tb58~Cu@ z%+QWIl(V!UOpPjw5~GyBtBDwr8WT8cL#)kvbE%6xn=vQfEE_HKPvnn^B}PG^mBB*j z8uZD5R~38;DOLttF-giLh$nqiBO(xyoYYGy;8%LVPtSyI&aW0Oj|)59J{q%L8rXJj zX=>DlctVKq-~TbGD~q#cz}lQ_arim`G8SwmN(Y&!UResc{lkTv;6NJ+Inq1|Jag%7 zr~)rP=9-66%nY<3A-};5P8jqpe6e;`(5{;w>DFUNIj=fMOqUPyR$(;Lys7qymZ!$4 zZZQe$+jwrs{egfsCZEmK_qU4cdcTQ_ki!MQE0dN+{3U zfa`g&HNT4l;v}gv!8qXc;Zp`a@$^tV(FOYoy3rY8`khs? 
zYr<@39Ism8`;uwRw3a&DiAm>OH-xG5`c~F?ilH+PLnMQDV~8_coG;_Qj+^Zy8daR^xutc_{uj+l2y4__)^kYUQD+kL&J{zYJ zk+r+PFaN1swCaoX%@*egF~2KMzW)ec_i%v%J@QIe)i%d_Z(7&}w;XNM{0=XJ%x|R< z1g=0PsrRmAdv3R>RYNeN>fD}TB|$Eg^vy|KTaK!W8ciDtbQcX76Q6!CJU z0QxFM?^LSpr1i`;eu(vLx(C98+lI6kz5StV~#6pDsI78g&&#wq9XfQPB z2_I4~`C1i!H^Vh>KF;SybX!8DFMDD7?X4R(#5UZg7ORdf*xlbkdgD%sM{z({fG~W) zeh2lBU(Xu=#LT*sbtjp1*M^7dG;3V>Zq!&-`8PHm3{>AWScg-mz2(#+p(vkuK{1&< zFQ(E@`r|2O!6kR;_i0XHbvPj^;adYdh?wIHHuFn)^ouVN{nbO(j5QhCr?Cwgp(n~F z5;&9fi&C^B(M=Z0toG?#^lJ*NS9RsK_5(omKZ0@kvNbi2sEk142XG3oFibD;*Ndjp zKy`EPt!TdT$S+o(DfDCsOX;ab)6Z_DTUvjzZ}PpJT%V-feWQ6ah|v<_-KLi}fmlG4 zK`nArJ}}&s@6-oG+Q_1z=K#*{wquFFqKq4zv%K+csDD7P*04nb^>4e8mwv58(5di* zV+y_WQ@Ag;Uq4H4T{DSJeU~mDY0&@AOO~NVeaKKf%Cv{q?DtQkpzi1vtCz}cBqAdv zrzG`Atu9HzS8Zu7cO`mh@gsGblc#1KM-Cq!a*`sux)x=SH^h+PGL89F$d_YVpZo4$AK^&)pT)=Y>)fvu@I4x z9p%wu4-_I?81PxRz+TBc%1$sj^%T9E?wF1&MT&zpcwi z6TP-BghMqW8)jv|KG9D_D!=+UuG17bHgvOlPtYKWD`x#@@tKq>kK!jHCxFj)^MghI zy~TbSzufKSZJT_DOlRke?L z`hSMe_Fs)-2umCMxAp$U`!)7|$RaT|9r?xN-gB*#r&Q=coj*$Q9SFPtbi6!=)7j`- zjqd_N-;~aGLNwyX{0A6{_{9wm=sC+b%4W+wugaa-%M{TBRaDrj&lT5_a}O=#~48mZt!%=x(mYD z*1dn&_}YP@PlUfl)#d@G$E#>NfZ4kkD1N(?RMQva;4rx9KL-q3`1xH5saFYYT$>W7 z*Ec_YM1M3(trja5K$KUr~;}aiZl34gY>3NTrmDyo^6!&weWR6Lj44;(Xh-@8L<9E$H-p zoL#vjqcEGzm(-Y`#1^x7W*{v#@;0(GH>Q=Y=JxrPZ%$-v3oMdyRex{!w8d%U2$bA>1O{X(9WY}5_ghL&cc+xn(l{(HZ$g7w+05*f z)Dosww%mhS{_ijL;Pk$$(5in4{P4)PoJVy0tT<1<=O(1lcPt)@+gd6=pXw6Vp~@K2 zVygfI3}GW+B5}yjoWwK%QVSLX?na)+x9udjf4J%Ijf>uIeLJ-~xYO~jC za$em15^Qke(7JRIB?0Ew;c=A$WcGhOmstg8O}j$w5HWp|i^6!#M}Y7Z^!NU6Pcc@v zw%IzRu9*Vc;8kAuB$PM^a1CvZ{#jiJ9bgx&POtZM-(J)tT|XIcDZK}WZMGl1AF8t} zfO7dSF86>BM$SWd+V=&-whI}SCjpO9_J2CSA8R{=wHYR|N-K~Lv9McQQo_(#^Ig8k zz06GYxJYp)&qWkLG$k{OKPA`8wzdB}e4IOyGZq^gRYP^IID~s=5PpHQ*Z%nioAZMh zqXudhftFSU7%gy!5VNWbfxymbh;I&HWo5|~C$Jdso$Sk_bk2yOB!+z0A1W72ik;rjGu_ zNTIUG{XI;t8@QGNNDt35i=8I`)hqPBAPwPBBtXbDFjN{^wh4mYycyqTmVTt)L9*oge*yzg&R z?ojbG98A+K&oE%U8!9FFkhNF;2Sxl1udBx;z6LYWGibWIXzrlb%X)L}wtbG<&K3{& zfI)^Z>OVgg+s22UTk2yO^7ieEf|>ZE?dk-LsN5ev3$BNsMNCn(SO04LaItff-)dNk zedFSaxZ+P0$Fd(mKJ_@xRCU7WGz)spL%H@LO(X^`J927%dEm zVoR=+e+y#|xI7VkjlMr>t(r8vUHZwLuYS7?9LmSQ5#e{~--Eq@VMs_cK z-nrzih`c&*yI;xkdDfMbSKA&HbB@VQufI}GHnK2_(TNSdcJMN0+V>!_XnK8zeD)E? 
z`{OG?!^L%DXK`n7eBw6;XJF-Wo@edTVN|v~?B%Oe*Zfc}wj$LeqBE0Bn6Dpu%iu=Y zfZ=&EGsBldC#6rUUECiM*+t8a3bBg}3D3HMzv%RZnTGd7>Hh9ccDaXqn&~!A?CvX< zFrjA#_o&a!Sy*5$g>wJ?TDaY7b{eL$JqO#rzBt5rD(E^7AzRK#Lp|A zO6l6hL1Kr`(}+F1XD1G|4Bg>XOM3HeGNi&h2;bL2gNp8o=9!sC5AvOJ;#IBS6nRU* z6<((~+b$&-DxKFT8g&xK>?m*aoNES{%DsSP7zoh8ecjd2XLn>_Fv_WIzS@Sp1zSNA zN2G)DAsN#%qE&tNuG}BoTDQpQ)n<6r)|P(+CldHni>%cya|jZi9_z^3RjY|H31;L| z62tshzDG$d81Yp#VrjOQ`mnJ&?m}ZkmpovGJRhFj8-yqBM{|?>$}alTq~{3=I5Y~W zWO0mG0pQpq6KAc}6jh4hXHlHVIDHl^9G1C5t@}q~wO@DrghtX!(R&rE@j_F6^k}bw z&qTWLDE`xGQEM0VV;MEA#sZ>USEmb7Wv1onvJZs|PE`#zJ@1EQ0W22~wt|ZijA2kW zBX1a%A+$ozRfT0gx)!u-znq{#nXu~*W*qn8LVt{4$?3d1HYQE{+7LVEtG4LMihg?Z zVZHjzIVHkVmc#nlhdMhioo@;k8e5H+{QHqrF@;=iO521{upo&4xDbpV9nPGV7qT~N z<5#Ydw!El>N_$H0cu#s~yljS6>s8a)o-0*tqe}OQ=^Z_CmxjCqm!~tAPs|!`%S;aK ze8@w5%3@<>+k3CtVivB2_J3KsqB_k^nREI~kqHz`j|%AX+haU0X!d!TjE-JRD31sq z-n+>1*^Uu?FFz-y{CdTyQWf9i>$l@u`3z>dJ5Q*qKku?J6SXXb%c-RVDqnEV=Xp;% z*4yZsfq%q~&<7Op5`qv~7}ZQK7Y&0K7pva-obxGMGBh)mJNCry`O?MMm&`osi4_$3 zs~G35w=EOu@uwC?(t@M1qa?!$YC2x#RNRYvHEPSDLo3* zouz%Kl~o?GlH@B&tH#yXhFNfHzxFcV z=|Y*e6dGG^|1d+zgwve&Q^8yhZikjH>!kZ`tm+wm#c)2`FX9pe!-g7uxkUBw`a&z8 zd)f~exg29kZ@YA_5shDo!|uK1}F+ZsHDYDvSl#Eqx{`x zyDpmaQvXZ1{J-oSRghx6d&kzr- zY+qP`TG^$tA4?Xg`*=E>dJ)IoVh_=6h~4E*iLpdCII7S7+5y79EXbok~EQHAMKvl`n+6-q=G6$FZ~KTM`&+TFz02D5Y3kFDH0xjCZi ztkZ&hH=GcT#3_B`aeK43&q_|`1oU}*P#Vvi?XU5;TVf`Q@zs)Jyt^5eT2Qb)Z#Z3O zC!gwjz(+KMTAvM|^f?=q$b((+^N`YjC*owYBHp-?I$*E3#ng>H<2-l6>RZG?`#z^jVuaDg1AYx{FhNtjN1Win zti6jojqtGNRKhk~?Z_qZvKJC>M+UhEuV@At+_=>+lJmmDVMjBheA|FRYCpOhenM($ zy6hcS(p$(>p^&XB1p7jS_Lu41k3RM&1b=3CLvp^>$Vto?q+^?42UHw23c|#9Fu*vR zN-}tZtW&@2!*R@X0O^pQPhzH#ijnZXF`0IofOIHWK8hXGkiJq?Ww#Ea%p3jw22p!U zXtV$L<{?frdSCAGS#svKFF!SBkK1eArlLO8vin@!;L}1{8R`AL8()Yx2U%IEhvcl? z2~YBuW+xUi8hZAU!JRy%LWSGqh|G^@*ps69N2HKCGQLfhI=mCzX3#05o~mqJg>xwO z>T{7P!}`ii;FPQVludjp$@ZhyfLn_Uchc*u3rm_Xtj2iI(V4DK)kY4!uhrkx?ww1O z(6`gcd$j1cyq47)X2ARFRecnU$JgC_tk0r*lF$0xEg5+~tuvw%z%k-zcm5)O`K|76 zJxS+-7&l{?&Uoi##sI4?-GH+oN`;S|U zPp+QVkXcAJdBhSWHUI*5sjWmSE486^5xUh^?G~6rEl33Jn|9#ND;0P~ z1&$zuB6OvzuUuq2olvJ~tI|Xvz75X%_j|~!dsU!)u7Ay-Rug*1Bp(7nv4VQgE3ClA z+KN+)!HwZvoXdtb1Ke=YYs8K>v^*d!t6W|6X~yup=M8nGbH+#i@p^;!&>vMXdk_Zs zaLqZX^x)kYOuCf@lC&r9?ZFCEees>SNY{b{mNV&RD7LOw_!;K)J*$+0Fy;;|3e@zj zsaW+`OfbH}BZkTRgv!5f*ef_bc|rx+ciOu;Ut)+A1vL#)Kw>CiVHw3^+n5G1r6E`lycT*JIU`$4-3 z`)@CAH0r>k38m%Wl_}M!^_k^bd#T?qCusOuJ$pr4`AD`t;%M;bu-8|L!2{@{jLZi2 z-D))n2ju3QrC1IRGSewr4*&59#Cw0)lSm}Z~eS)eY?zl#e}rqQnl-FIEl*=LHK31mr)j%aHx2!YU193 zBaN)iy>rjZwx9U-#~M2r+VXpA7uDhR@b5Uu_qh9T$07czH=5(Z^u5Fm2B(OuRa(T{ zjU6T2RgrR?bUHV|GR912X(!+s7iN#SL@B4%xlRG&4PZyf(vEJ~Txh_iX+uQ*r;#ws zP?xt29XN?OpdNR@tZ*NS*11d5pmopEMaJ7;DT`_~WxO6*2Qw=L**Km${gQElNOwp* zPr4x~APU|7w~enCAt#J)1tr?zDv(Gq6DN{#j-kzDJqK;Evu%>P-xNHldu#^9Co zA7=CKByxrhaE#c$DO+a;XL(~$mP{PNwLr*})^x66Zh*emCV~yPd$ihXbG4zTP?ubo z60iqk?p0A3c3~TKkUW^nujh7t5HN>*2rfn8PmlNilY(>QFL$8^oOT3^)tAU$-pj0_Z%5@5lPR-z>c#}~=iQ>W#MYT#LQ!Wco&mu_dYxm~ z_}cFE#eSgD4%h=sRbHU$LN@uAPy>Jo&nZwaB}{hS3g+1V;ZcGJ4kQIIl}mOP^Y@GR zTaED1?VRWbH!d!xUB)EI%BBaCr?+vjqo-fLR15A>sayGoNifzK@}+>8p@QPt=0W4j zn&Zpz)*IvytvtJT#MkMmPcVG5*em6plHAG~%j`LyC+}vna+|0pu2`GfD&Ah?(F$U$ zhE|R zk6qE6`*ip5H}U?f@fWv(4n6Rg{l}neJq(rNfH0VB{+FfVz94_h%&AFr{KjrS%hLAV za}h+VNW)7Uk{_v1fo^r0GC!%PL_Ry+Qx<{@wc4o;C#Rq3`r_hY0>wva)VmjlUGqD{ zvX`A}RWZ}3SHz-;{A=`i)?bmb@fCr(M@MnAUsTljE1;oyk(-KWbyWA0bB!6&#xkDt$N9CLG75^T>+5%*8o9VS;_16PxxD%@ zJJ)*hjC8^$=dfk%j8MP%jm7XbzTR!zvz`Yf6_dnwsjxU6Bodc46z?p`E}g>cu@vxP z(W|?O|EOJait3VJ{hg)2#cy-wP?UVaP&`wM(+__~5c#%zFO=tX5ICfteMwchFnShW zi@t~*UhAT9D>f+hxA`%U0PqyBv 
z6~psP6kFG`ZqYQRPZp=gj`msmp56nkhEG5Hg6}{oZB*&us~CKjF1WN4n>IcZN(|e^6b$;-|^=gry&@ zU|7J}f>(|1{y-?+YSwq!i%hD3si5E2edC-&fc{`8{$G~@iH2GqA<xwB-}_>V;4P4|IG#W5NqEEkqK%TeFK9t}c zDyrL$J~qAB3T$Rvuv=?{#y0GX$#G7|{r8Xlx_p1Tx?IB@YP^A68h->cK$rnL0BTU% z_-Kz`e)y9|G@MUUDjz0G_22Un7ky^iCRr} zh-3c8n*(MDh%qdK@RPiRCB$@X)srM3@PVeDWc1%Ge*}?EsTQ}Jbfurc#D){~seV-r zfBUm3>@a<*f@DBeI;(t%GspU)T0O zeA68RR3x{?_j;>U^HpC}c71cQ!=BS1frHbxVusQcIovwDVy?e0I`>zfs-9~~IAjZmAyyN}$_V)3}@%z`x zPv;pwQM2!_N_J#kXC80VSI$}~#B(~o^z*1%Nq#<6_52Q2m5WDD5V1#{G^A1#^padK z(c8m*T(8$GT)$jUSw9l}gX|8{RHymfNO0Kf25057$@$8g&1YN_6K30jo65bkxr!Q( z=ojU(ycXCZt}b@9)V?H36016O-exUAsoCnng`jE4XaP+$#kX|0EL zTx|K|(eshBbMm#6cN!PR^iL?;e?wZ1#q27H%llAF{$F+sI`u-2=o%!DC%W_a&@l#z z>qg&XjLMJP+JoxI3B3D8(DQQUL_xKMn0a{|@^G9E5+|wKjH)xD*7ooaj&c5k7S8Tg zDUL#^OAGu{>V%|)wv8xE`=Y^$iEIWmcIIi(Z^P`mlTDOA);E&f@vQV%pM36IPZ?14 zMUk*JwJ8QmiX;HeI#}2^kX0bwlZ&ZfQDutIt?;FU$_wGd4s+5glLuut zC|^ z`RfaO2^#8)kN|x64lSqoCp$JZd}msxx)btD=Mu;Lw}DH6vMLEv5k#0!C)Brt>VCjikD2K4 zJ_Xy{c6DuioxbVU<0E8X;mP7?Nw@+EWB0n|yC#k^U6&1)o1$pWNvyur={h<1qx|qK zN)#$&_qj}|v|v`sU{blQoCAHig*;d-eKWk_fq-v6c--mSU%_+|1kVBK+3o)DqfZ}BnL@z_vI`T)7k$BU7k?+t?07cX>wtFH$X(1S@EhBaID3m`Gl%u) zDDk66P5L%~qd*6h1b*3HbS)S!hN>!9AXFIojwTJar*eT2?MpWBvpMTni^SQ}qYS6- zV^!Z?!<23M@uW~Z5R``H-HO5g`7;U>Y4>E)p#!uM0pf6^>(Er{<2R6`_*yG z?+qpeWfH+AV$sG8fAk3FY{_`~{f%d1Zm@~D;B8@{MKBP7ca~{T-{vgm`OfKc9dXPo zYM~upnQJ_zQ&X!c;7TbT#tu9;}nZa@Y>8SzfuI&tQtYv&<{ z{3>+yN?Ki)Z%!|`M4++#^UXL+xLW@G?MJ^{+4R#sz1J+uR*hasOM0a;1LT7sP;gSaQiH@ke6BW>e?WaUQ+$@j^*X7hMO8v zBmD9|srnA{LWx2i@+w_55)Oy^f>N!f&&Pr{Q76Kpu8nD*7tzIp)gP+=&Ky@z5`*KJ z9^ucxeLGC(nle&QFVx2JKfAAm(-$;SdCIOFXPL1;uj#UmG9?!t{ zJf5;h28Hr%AEC|q=lP>Bd+c$H=!oe~ud&mO5nIu5U`PRxj*GhUrzaMok9Sf8=ArEQ zj@U?+tsr?PjU_k>)~tcq+EXNZ*N-7v&N1HSAjbVL8uTGIb5bmrkaJ;;Ejn-=uN6$XIa7?&nooX;>C zzj5JMaBGtBUarAter+hfI&@?+?`8h`nbiCr9cbII9DnUUuLRX8UvB5Bp99M0cI@WJ z;yjeP8?ab^E-J+HHl${=Te$=efvSL=hWo5z1L)*L8A7=Xe8Lh$;)XtY-aLfXeo0Q1 zdxJPY<8tLd1nUO|9`%&yXn-}O{L>9?OkpT+l{kqbc1Llz89)h$sP-Z*!kAcrkqDswm;0CElkRdKX~r+ZPNc- zlp^bAH*esqY2VbYeqUw>6j26RerE_b_jj)Tr8#`Mt24*$X@R;>JQ56#=@l9KXqje)vb^(4E}PZpcm^(26wn zj#_~${pnY)Ik%b)UC2_8O14z2DgiBjLwBCS$yYL zprun#LDav^dG^pr`E-7vVE!STJ?0Qou7mvcEE;%=DFq0}s$4G33)5RV|6r=(hB#oj zkt3^1WA6RZJw`wO^X4VkWRs_tqf5%Luc;wUSsobfKj)oVoL#cK!PpoIEAHb^u;CYU zwIR_C9aTFc6CNe63Ejd61v(*U2wTk--$^{WqU262*~Z%8NU|H#BC<}%9?CupBRd(&nmr6+FekeiOqQ|C81uV_vwhC@ zoX_w3`{(ypy=@uO`_l zRuR0fB!i)Hlfx6nsE2K+S^G9grnKYY2#8lrKptbrxQE?n)D67@eH(G(o2Ut($DGI7Xo(<#+jeBvHD&>exB- zBa;-J;lhaTw*4D3PjG6UiD3rltx^n-TP}haTaWr|irm=dQvb1gT=SkZPi5Lu%kfq9 z#^C35ZaOAUf=r|z0eToJxvy(+$i^qxi`>P5fNdhVleCmvo~jirc%a)@?xvU(5PV0x z-_EZ3V)lFbmah!oJN_|Iup>^`mml8=*7h>EPxwCe;yr6--mVP6+|n?UKvdoa1XTXN zlyb}*WUKkhq*=0n^Q;~)Ihm#9X2iR6ZgwD9wxXar_0ql=TDwely1ZFRryv)9ktjisR+T$`V^j zM%>Y(X&`?*6q6z30pO>7lT6EGg|=s9 zIq5vye_ElalI#+Q*94yizZ|emK~>7a@>K(nZUFrh2zS2v_(k1};Ws5%SSx?x`;f=e zI}2FBxXpy^_n+0YA3|k~Vi@VIh(zbEsg1BkqXB^rBuRiz3YbtVEPz}39fbNsK>xkZ z_wgTYOcNVKm)}6oNdyhTKOjl{&t-rRl@rUT`D@u-l!VR)^S@^|+xbiu1q;gplgR-{ zsLV=;8B)B&AfvLWsi{5uJ#s@&BTWl!LIhUOI|eWjDdA^doaT2o>0gUkfx|f^?5WLQykdqi5w%_{u zwR6KdHQE~<2O!`^>44W4l!!}8rThd0L0oZ@7`kds_>J%COQ~7`oY6fj&jQ_=HfX_Q zy%NAqmm1rvg1!nOLoHwsUV%N=1?Rm{D}rX;?>2J8Q?b9{-l?p#bBZ9$4XKej>q|Oc*GAg+P{ZwC~x}-_6*2ClTX*W z6@=E`@^`?E%#OW|S}$MdPCf!w8^P#pK^h(W>mo z7BKL`>s&!VD%d+rY&rxK!G=_5}LEKR>75Pr?AT$++ET05bSPu?4er=NA5a760|By`+5o z{&7}U_`S7^7WUbJ=`XC1=coV2CI{U6Tc8kT`FwRtqTs+J5FNiiv>$RC>SHl$GK8e) zQ$Dyv8w+bMytODsRvX@=eXEwMJ7RI=KYs>uH~;JVvv?Gm{iANCv@23&*7#gnr5e90 ze4R0bjaB}CcuuVvu)vqv=OXSWvYm~;$$Vot&ve`Id?MQzf)Mi`_w>In#ckvXTTxMX`tC 
zUPwPk5&#VZq;`OO0M5_SZ9#w``NmawD{YhZGQ-rh*a-?&Z)~+z^4=W*FV){4@BTCB znTj0zRZYOK8PVlZI4X$%c?ioh9U~B*hr6a@S5k6R7mu!uChElW z8Z%7vH=#oJH8GVRu5iZUbB>+ZD!F&7UQ8mxEwD4ysfxHSQ~5Py)K64`rO4uT%EVL@ zaly{xp^{82z;AYDJW_NHuKe?7i79MV13Wqwg;OmSP(Xg1aBx&0o&xa~eR1(d;W+CT z7o>`5=#Hw1dFvJ+DFBj#Qa(iG>Pmy#R(Sk4Q^!@mau{!-Kzdv}!z;KfTdlavI&XUs zFTY~JZXkwOs~rZO5-zWSmO@8);rLc$7wcfeQh)7GodWNt$J9d0CXk3}dnVxvngg?k zr6usXiHmQZeyTeec7M9z;pe4;(Cr2#iDl};Iwc=}Y0eKdtxV+Nhc2}bQ>%;S&-eMv zx!AN_vZuarpZ2`3*n=291GE>(nJL|1y;QOU3rk{_fb!|e%w}*tH?wW z^@IDwbq$^G?VohKW55E3Jr;QDH-98aE>{kD^4ZDc%A&z(GJjmgs$Op)+uweiW7*q~{Hi2XF%>&$s1IjY0FtXiLE z=9I>v1QXA&@=RYqB>u!`oCE2(XRVgg)}fbcTHUM^!4l!)9|h1zz-JtDbxg-WW`5USX^bpbePm31qPnPmc zXa)H-ST_^F+Jw-SxHt|1X#r4G?E-)&XdSS8Ljl8dxL>nM=gF7@)h_F-(Sv{a%!Ddh zDMMVbGQ~~a?t}jA=&Z;UpH~V*uW7d-z?i`}R9#Q>{`v_gNJ@;scoYsWQU&!sRnMjO z@W*|+k&x=R{<55(YR*~HT=Fpir?rPYD@LQEBhj4BL&T_7RD01N7KfBA-1VI{aQ(y; z-qr)1De5ZA~eb(7}L1IPbIx8wwAb2z!lM53Q!YryUP4L{S>ak+GrYUhmy)t0X0 z;ECmOXwD-0yLW(dVW+a^+D@yH+GYjU>B)?=B;h^To~aYHxs{Kr6;`+~T5#VdZiY@9 z>*>VKT<|&bTcwPj5`ZT?iQG`jl@zS61MOQs?!L_o2PCe{lJGqzC@lz<>Bsc-KU_8K zZEpk*yX(1__-a{p<>F46VZ8=G&U7I)U!!~_`=Y97U)Hk@b+im`Jak=LT(6!>m^}f{3)so3c=`ltI&U)x)0>RxBT(KB3_gB&jg&I^RT^B9Zk||l zdx_7pv6GlF!NI11n#tN&J>VqW(N~&%S^^o9Y13$&vjgE)ZAbbwDtnbRx-W;U4}Eb7 zwv82%$ILIEX7gBz@axB`T^~LOeFsvtC1x{o@fb8aa~p4U2AgF)a!M@e8eZVjH8@GR z*gghl+eN}@w8K6m^~rhozET(!#u!9K2x4yNJ1LIXQD8cE9@>PNHnBqRj;keJ4sbfg z5FuTe;&rlld5?qLJ?Dx z^<}tIr75r0kiGZnsl2$42JTSBld9(ZT3B)&9_J$OoE|4{CnAhi@|{0H>Bn4%K*J7CVh?m2>IyeZFz= z3%z5>VkmltsdaA;@Xyd30Pe%MubdcNbuMWqRh^-N)719%ggnvff|!|1e`7~U(nAe% zSpR!pId8eb;h3CiYA3&4>Ok7uv5nQ$Marxb6yp=PHlVoi(W~}07b36nGTiwGmyKC& ztT5DVBTGB!#C|TZxA_aPllfABOtI&;4$ISQV<1)iOX&67N|IIpMDO0ItE6RRcmKir zs+alByL*0Wrwb_9tIAgz?#6xl8f&7Tr5x)voqjXI3^_8osXI(E*Ttssau;YoC)3wA!-1ErP z4*RlYHc@Swr8S_7Vcp>GCgMI!INc&nj1Hd+h-o&IDUA#&nbW6~pYEKLx#Mj;*e$ma z?B(r>{iIHU>p8$&UsC&LgnQimo%BbW#ynj+7(NnSRqgt=!{@VmB@$EJ`jQJr9f{u6 z_y>-W7`LM4_Rv1FFMhx;>-UhRzDb2(*~Du0LgqWqbkab7J421#X6n zKY$E<-#xza0`jtBVTW&>`4}QHt$LQM5d2 zRK;{Bx}{4MBTzPRjqil6v^d8xJ)RQGO*!}2ZF3erPYymbsp zF{!nztD{%awb3a$+tt3qNU(Rs8YzhS)EW@4oFlGsvZ|x=aVHh1)&JNDe>wI+f6MMt z=jCy`ypLT6z)LV%fV}q5MRYc^-Rmp*>~)_s?WeRpz9%0a>(Sz8bdUN>n|AspCg^P%Q|Ww~ViRP#f{b?L24@MeAc?(2_DfsHqsRX~^fxgPejB+9u z2E+iWMAdt7-;2~mA|Y$h*Y{(EUDHdum*t@Y%OMry21t*{q(zqFF}NPKj4DJyIw8`% zn%V{RZHEZM<0OkRdH_*^0vF5Q2^f>VXD7gUp4pna01}7^U;uKlpha(Axd%Mvs_Ohn zrD{_%m8JH1{*ETk*~T6dcctvP>PMPb1FFY(qTSR&Pw~D%+wYGTKN5!~mdQ$&6}G`1 zee(J>^QB$a^GVcz%v7>b%*N;sVrF$LN@a;2iR%fz))QJgnS-DEq}`@U4j?joNQ9nh z-x+VBdk?VN#eCn{(4&j;+hCf2bZAFcCbsHgh35Hgv?e38 z%qkZ2ibq;?LoE!eESHTKUL-5#a$xS$leoh!v>lULVl2VRInjKJj+BWH;igOY0{z4_ z4hFhi!NH$8!-2@iE1%uwT5qxsmso1{VS^UplvS-bchYt2hpI7z;!v7vR89S4oHaBd z-hq|Sa=zoKY>eShv#f0f(2dNFTqgT@Wbvz8wxxBS8PG}HaD~@A!Ovq==qo&+%rUGD zYFufyl^C=@?Q?r09N30dY8*6B1?1!*hLt-=PAh+;$pM}RAe}&S1yw&NJaYz+qp!nm zpLtFw5r}GnnCDL%PiC@Pe1+5Pr!NEA^=UT`si>@35qUWJdQW(cyg(-4s|?0vqq0q5 z{mz8sEN?E?`=j5;_stGxjEeIYs2y#_ z$X?P{Xz`EEgq}6ks;lR!Dc~96=NSg7rd}A+$IJD0{lXm~s&eOQg1mmXQEIEvS&6FD zef3dLQFFc8W*Lyavri%7M3k9z#6AG2ObOpe+bl{zU4JQasa>3a znq^{{f@&v)wVR-T0CyK4uJ@UY_pq%-ox19Lni3WUckf$rg}CYMNpi(_T}Jret%yY7 z;b;Zx?Rj|XNIjE0N*Pc07~U2zoP^I9p+LJVZP@+EGKxUd$gUiH5ap^BRYXO7-kpJRLMna0 zO}G-5)#b82CHd$Kxewf3lCj8JRwVfrPk$V@>-_qJQU^`n_?eK}Ln93KqdsS|LN4zW zK|k_cHkk(n*^fPHPe*=)bIyZ;Fw;WXc<}UH#ng_HvtwMOxjj)8Bc&TQ_7whuQ{KE5 zgQqqUOA;|>SA#$e=z}2#$o(7+URGWuFwRv4474|hr4TGxK1Ce+k>P#F4!-R z_RHAU0pyOu!X?y1nn}*Cn#fNn$;u>G!RU+Tn7r9jK5NN2x=C)4rk|YGW0uY#CG;}k z3mrWzIf{oY@;26gEyT_0I!FrMl;+7MAI~;)K-@^Qz<#EO*%5y1X|0KG%ZUd0<&i9V 
z;}8o6b$rFo>&3LnMY$eV8!+4bgI9|7Hw9n|yo)k+PSqfwax%S`Ocy8XSycSGLPWJu zA4kRW^xpXp*6$q@`217WiqGgh$~XUcq@2`mR|`@h)4k{z;{c12_OrMWR3>@NVadlV z>j(yI;gD`OTP~P4JjqY+&aP-YYY(c=eb>RuS-_=p-hQ_!FZRdo&HIXcm&K&0m5p}V z^eXXt_*dDc^Jyw1zUu4S_*DE7#*o5d7X!Qi2yXTa$iqNNsibNUlZ(U)0K7t zU&l%O+?pVzHhCla7*w3rztZ#b=BVPZoY^u*C)_j6Yw}&fc=xlo;2xI`)OcU^F?1uj|(EVeKo+Wr@agH*$w2NA0r)ak-BI}t@Xr3aYG63%R`7c zF`WGw#q5pc$Z#= zFjTZHu8tEjr3n)4LG`x)0C-D0bB3y32v$K+2>U1V<*cJj7FW$hK{5fG*oZ26H}{y-Ff!} zs3XC&Z$Iu!fH_=88yL=B<)jythP2z3sMsU1lY0=Jp5GB5vrWw6Ue*i;fNPPPK-F zgYVBfl#j+$Ch(0e4HxliPGOFo6tSLNITy{YS8o=!e_slw&oaH@?8lCxh%zks1p0mm zIbT0m^a}Bu1U$}*mZ=LBcF#4k?-QN)LO@ygt~jM2tM%cl=*ra>zMUPQB)-{BLl7wv z@Zteda=V_K^0!qV_-Yafca%|7>P2rM9HC1k@mPG5*lY%v^wyJf=^B*DdGFq2Myjb- z;A37;iJW?^4@Rm2{~=JDy%z*2n>S$L-8HDpUzkZG#xx3|!j!rk!WiR;M3X}8<1umI>tyb8P1#&^`&~n28Q*a3!&h%xrt3b!FTM3b zZ}`A(C|{vNfawjW&PDyAG^!RaQ=0EDvFJvm=QK zDEA?T0WW!Qro}Ek1|;3(h(HC;jw!|{-uvCiD<(JbJmok$JLZDmOcfB=cJ}cx1-3R? zwoDn$GcxTC;>K>HoSxn}#KOEiUEVj>kolkZf6#F$Xsy1Sn<|Q4A)+56cw zZofTu^m;*G0_O#0Bq+ia*kyerH27fmL5dqIynkdF!NOA;>|~s%;GYKCX@I{EA<*W$ zd7(=sH=$8LY&4u3`SGMT_UL6RijoZ6)Xqc(_ua{=YWP*ufYlK8F^E6_Uj{-l3IJCQ z0^%Mp&0^AAS(YpPqwJ1*qNv~GTMSx21K|OaYHjsq{(Svj$V661$qT=72f3P{oUodp zFo7MN5|TiR>KF7S&d!EXz2IO!pN{?d>oMd4Y*>#bXDkW?n;qDOAj zi&s(5k99Lea*FIEE^?jj&KN?EZgSN3c}JC`^}(ksO(EYXw zuXTa>OgFTZc!`wXbtXDCaT!H3Ap|Ca`60y24(%BaIYC`tM}=qiuLWdug!X#7y8No; zA2oKmaw&u$9=5%SGOeZYV%4pf24=O%42wtR)4l9SEql2G8;hN1r^x{ugERSu2n_0M zcd3?t#uEHyU1kO)_94W^_4w~ZH50d*y-dg>AFZze;N=g^v&+j1nF#L%m%{4k&huhpRr8d#|pxGjmf zH3SP@&ovG@)g>kQ;J#1pm=0rX;bOssnRO@EI_>U8m$2PhgUQh6m%}vp{i9Z^WCmmE zR?fQ$CRAtSpPN_cP8M{!T|4^L1)fVRX z7e^@4BU?tlcI7WtgK3{P1v})AopeT+?%FLc;Q9-6gx34m;t&C_P9JT=*Ob(eZXkRs z9S$H3+Ub;&MfoH4(NF5w6y76Y;Yk`Lso)Pcr2%%3y3kQKKVvqdbt>@w_P2 zBG~=&^&gk-Rhhf6XzhHKQ6{OwCQiwZsKOQ{y>qpnuZTdtfPZxL`xC4 z_d{iQQtybXh~OUiAl%7R9rMSJIUw?-p`}`^!Z!&WOJ1WL4yKqTN;4O<#BngoSI#!9 z)THoMb>Q|{&xsEKO#MX2S&Lgkmecm))r=<;3Z#0@A<1V$t8hGN>fpl25yWV}aHqs; z>M2+kAEVvh0e*vON>OE4x><8gHe@ zlUMdlDdLhnQo`&Evzp2HI6-jP_StBvI+NI^GEMp(wpmbfL#2?i)4#rsYE3min+db5 ze{hr)M6`_Y2&&HdgF}|W{QPNUdSX7<8T=&QhZFu$A&>7?*+4^%GAbt=g5?9E25E`I zI)&z^-O~Fa3jy(s?-ESXboYxRmgx`6)2g^sIqNB5D*Moi-pirI2F(!-1}-zZ3S9iaqS z6h;R2I(M3T$C0&TOPq#?uHG2>`Xn}w+$--=nMbY=6cDSr_C3V6ZPf7Y)K!Y%U4YAW z16&qpHJ&-X*4;HRid}i}fZt4+JNBoH;!${Zfzdm*{a{o^O&`-FAaS~rV{MxCE;sa0 zCQ4iPo`$d2o+`c*A%M@``s|RCM$Y>;--iuxkJ*83wu?gAn9|(*k|!*%$1Q{8psmi5 zlOJ*Mrjgen#f5IgpX02SPo>eoYt2K^bl0o8<%|@67g!VgUH1u*NoOQrP6Zo0w-lPb zG7L|e99pR-RZA}<5?_5tNA|Z)6^;+AJ-xED1G5A#Nc4iuUx=Bht_IaD(=`& zgEw2@OTf{*+Lv184u-{nVaAFRW;NOq?6h7>>w7x8dWEiy?R+gdq2dKx%q>*m1VvVm zH*#)DAenZWrQucnOsx<==!HN%vX!r3@*_NpwDtgaJ&UL1v@c9j5zl)su;_+8C$zvZ znFnf0o^iV4q+uyy9&PU4>xL^9oHFn~6HOHf`#ox6@}@@CtXKD(7?85z0BtC>moRxX~t z0YlCCI1PMAD&CCp_jd@?(NhiqeNRK5dB4)l!yOh7>d%NZ$ z{rc{7%>YYv#w)aX)&SLKPc&d&M|}^p5VREYHd-i|*I7s3y~Rx)L~4+~$$|8c`&<}= z(wQvmg9YQiPaYLsf>&JshimXyE9G@L$k{%!O%isPTAbf@mdXU_VeHuAaC(t+E59l36rHbGB zMIEGksi*Wc%-)~>z0f)h90LCFAvnPT8?Hu_V4}LFXBB^`(e9fzxNV($EF{$^C`Yw6 zNTt|l8TI9&X?hv6z)hb5&rleC0KY}TR0nd6t&)*!Df0dVdgApJC zS9aQd(;x(tjfZ~EdO@5AQ)PjP{*>7N=da_&8oaSoiwELOFIXo-V#wc8EpBy98+JL4 zlPMwk$0x~@Z66|kw)w^1l1cTI>b6w(66gTGi?;15n*Z3L7`)C`pg90!v`lRBaZAw$MSl-(&D!( zTggl(uhZdb-IB!sZV0obiYOzAGZPro;%1U2@_CC#G5pi&FW=k0n?Z2(#_Krz`b0x8 z)7}(cz&S5joMoZcVasr&UPUZ``BlHq5LFUWV1z_AUo5VDvCYOt040Vx7V-AiN z7kGiLm7kztMKu?+6ydvF;=oDR@o4hB1^@kCzKkyZk5AfUY}%L}EURT6$}dvgp^YFS zsU9rYdNro=M;4Dk6ws?mPL=E@RLIG|=@mR%ijNr^fdq)=x<4()-{=M_!XHgKF-T5; z`^5DG_Tw02>MhwGJ#}zo5!<#x1$YdpdTvGVz-pCmEEz~kYknxAeTc@%Qs7=qGo2is zYk-~h*H#Lk)uewhiIT0pvu$Z0m>W)tS|q%k46-LI4n|N$DaXU`mVs* 
zb5q}X9^5MiMTYYE3Il3(4csQC#kTKFaOwdcsvlrt^p-_7vt5*U+qMHRcmeJ6Y?swt zAjSm-lJ7NHAr9^EE5ulogh6CxB@{awDq$>0|AaIG!_Cj@Z8{|zfDH~@wpRg+DqB3y zLBb6b+xHB(B5v`P#?@=xPBKE0kQ$4tg|}PPjCHg2sR&@@WBK2D4SUw`PgsS@4*H9FG*~?*pazl>`6_$qrU7^FMF^THSLq0Q~xJp~9k~?{f z1$76-X}%eybQ$6s^no*F?X}HEihl%l4lGhWX7<=j+cxm@=D1F*DJ)lF1kWHaIs@m{ zlykCnOoqlIWlWh6-rKs_Gvte=LrWoRcbmz&1G$!oRI9=|mTULnJg`7PzX8 z8MXX$Al{nLF%_IMbclKRX$M4rf`a`^wy@t&PW?y!anEMkM*5u$G4v^099ggzv0?1m zqPvU|7en7o0cVazbq45!h;Esq^swp`0c(`6`3vhWV^HK7nt}|fs68%VbVYep;=X)z{afS_*zs-B{A)kat% zV2)}U26rBsiu*W5t(gX1Iz;Grdgr0}@OrtC^Ec1Qy>{!4#9s{C%eu5+CWuCPqM+N5 z$?QSp_W4EU1&fWsihFIMM)XeSmPld-Lh%yTtr6J&{?V_0w@jMK|MrX2h(`qb39q#x ze{1KrngMSvK06tm@U8Q(aeVZ?no zTp!c4%XHgTG+I{I4}o6SCJ*9p!qP8)9*NpdbPVl{`Eus4SWoedW^rPo1wEo-ae1_- zHivOj89VZoI0<|k{@7$JYQU35B1;jmwBavo)FnNQ+P5h`{rhM%EJ#SlS7LR9NMIEpkd)+6;hl)r_blb3^6!4EB zdu(ncx)FkH;41fba%kbd2t`!o9=v7OWuZ#jxUefGB&(b6M!^p43A)6^8n*Y@)t6QW zJvS|RwPF7eq#7pkq%*SRs%_mV^%C$)a2flJIRr{>WBjRWBXQtwKSoLbS~8TWlW+uk zJ|XdC$>JP&7@Z`OgAUSl>CxeP*tMa@+olmQ=Hp1j-Coc;C37Hita<-=PA>fsq_qYo zmo4~XckHTflOUu3^xvaNJ*w3u%kNy`=>Epp{*ayZndNq3VHX}UvO*_>wVtTRxf0K< zoq8MZZ}0?s?)Bc&^Nj_muLGs$8>M#K@Tnd{av%MT(D=8=J0cHFT=}SK+<1p_<%d<1 z{5AT_XH{cW3#n47E$}1;G%nvIdp2;iYc|w2Z_3!sW82y<@8I>))4t_O8>sYtEiqu+)~qMha#RSe;jxO zHL>Q0>TeU$J!5B>KBAS{Fx7QXSN~W_@RIAj1Dzs_DzVh-j!I2|;_3h+*d(2umYtUT zGarp=xZ8CQDj*Lv+jxo0V%+g~ckKF57&u3+ZJr~Cd|*e9O4{$fwR_ZZ{MESK9B%4W z{|sMIVyTZ+xK}f}z{+{QB5iB+6l5Vq%!q-UA~aU!PDs1AFi-L zj!L5B(O>lfK+U^xXfw40ht1!1ht+w|bEI|j8(_5=kE-xbT}QJUfDD@{o&fgz;Lrj4 zp{$`f_Sn&5S!BMV1XaBNi{)68H++7;`rTcB4eV>LMr;xDH$$DoI6^b#nCy-+s`7yI zX)c5vdw4NkgyMNXf2Hup;<2Co;nZp|cQk(5S+%?sA7}No8F8D`%EosbyYE(Mt!^JE=Es7;$uPPI<*;moE+U?V2 zU8L6aW3!F%Pj@zj387ra>f&cE8a#3ci2546r^7(dyNCT|MetS5rUdXQ`n`_i zz7a2^17&HJAIbRjpC7!G3dg(B7_Sy@H)D(Su01&8!U^(5jMu1zjNfzYAM{V zIF7hezqea-(3t&O0G~eORfv1Q-$(y>5tJgWpxJq=aed;K#l&q)++e!Ae>%3B>qE7% zikzF#xioa-1OX=~pmJIHqo~tF_Q&E*hnpcLQlzPYigZkWQ!LGxU!rloRi;`XL`5JZtYdzC*q9?1b zU4y;uiz{>J4nN`Qd?(N;!DT5p5JJJ*2%I|<@Xn)PhbHq9Hp3lDAZPdBHacZ#O=<}3 zNi3|gX{&j)DF)#=#6RlOHo)r$YBc6#4g>+vsDQt9bBT+FSE%bz{w{c1Ft4oP=rfFo z*Kiz>Wb8YHUtgUN%wbZ2O3NSjuZ>vQDXy5QSX2_gFb(AxAV&W-5a~X6qN-s)XNnw^ zkc_?V;N=!x>gh3d20Wv7?BWCvPfpGib{w&AXLjRk=oysZi8!7ll)tZq%Yc)v>`GVU zEIz^4{Bb;mR6m(E3*75myT8Y-x;~nmPru$4zwPa19dKh%w$OJufJeF{V=Rj5gHLG? 
zqg8)S>}Sl>1eK^Odv@=&OEyl!lov!&3$L^ruQ-hTv|Z)VhIOhI^)k{!1DGQyehpYU zX0OO}FksS}1T70z`n?l!m#s#Fd6{lSUVGjyUHr~Yj$)}8&%VOJ&0Mg-^ZrG+Uz3XM z+&NzWYWZUjLrg**!`zcr&G-bqMoon~RsbG!pJlvL2#tG0t35~55cEonHym;<3yxnN zvNz1xVZ?HK*)rXxC3Rm<_$M;jElb710>iFEchBy5e&$*3x0>cPl;F%G4aRkO;2t_H z)9bc``kXc{o2r`$3dEGHyWncPlZ-BKKQ%aIIj#-}bkB}!VLq0mN|;N4 znJ5IFgcW`$6OS@6bN%^cgcJt+uY$0SuedEnX|K7HtBr}ER({Qmt@>5Myn_>R2~{Kr zeI_sutY!q5f`|o8@LgLS>}S50VS9nm9Z<@%fCk*}bD>zCDRtSSw+e~v?=3)ie0ft= z{w(Fs8kglblKDV43tN^co=v_wXzOf$4?1C+6R|*ZoaL)9wgEJ1HVLcLU0a7eSi&jb zM)f=2;9r-Mmx2S1MZaJ_bL>ZjOBP<}XttDrp|YNT;N&LwJOXU?t1Y_&jN?VjpjeI_ zcBdBE6C9)z6_{Wb*_&PhMzh#v#yW&K2|G?Q)5r%tckX7=^9z7;bj0d#T%080<(kLc zBOUq%DKt6Y3S8WOmIyrSGH?RP0E54cY$tl!QO9srimrL4=GCJ)J(bguspHaySsd;d z(A#D4OzVUF)*rY8d^Y@Ef^!4pFzS64mgy%R->*Bwbur@k{qdjgJ!l!@HIof3M7+)1 zmC=$gU`@JBzWKe?EC2{@9o+NWpY`DDz55_dEInuZ>Pi!{Co%*t!5JhZ>FO))MOXZR ztI7c1W*G}Nw*tdZHns9}Fx8TMQ{ByS*euyEs^M`sw|3D$<8Gr-GfDiZosnQTM6ttB z3wKxefiG$DDL54|x{M{$C5h?c#HUOsK;DpaK3-QQ#d+gfoTQ7*Y_+*S>0UO7^F?9w z1@L$-#s1rmi^<#$>_*fsNp^h^;lD-H|B^0`PK)WOvQhZ<*_??!FmBJnkXC9=9GR^8 zvr|ccuJNMq3338O$jdzjMp6Rm)IZ+2^!*TFVPI3-d|VZM;hh)c7(W-F6WV}Q#p-K` zI7@Y0_Ph3r8w@b7ou1{QebYHj#0f#15M(5!usCie4*z!9OG zT@v8Z@~k15H$gd5aYKbTB?6PO(=>a_&v)=z1H>o z^Ln|w-F=PN`**-lfhD*d8>>8Cd^gWcD&G%Z_4w^KuFqE_sAhVaP1wsS&!K2SN-8&5 zA#otY928~7*Uc{6HYSB6fZO|M2@oBapp>~&xd*^i;7-tZnCgGEr} zo$qS<-lTz?K|&vIa=Ub?EVI{AMM5tAAXEyxgvW3EOMK(1)plIhG#kX0WJ#jy5Mk~Y zWOcz{Oy7fctdM(34}n=~S*Ur$?sp;A@fOm-vb3*8`{JhW$F^Hf z%??0@g7(F&NK9(R4Zwp`#Uzr(_}fzAI%EVfxux?sn;u%s+S&u=TaUQ^s$VPl`!<3z zOiOT^P}6<^z!5km{7Na1*6W-kwnb< zB#Ui#Q|eR@AF#(z8FKK!dwCa^?mA~8XF7dz4!itSpP`S5v-pxY znN))h0Z{~>`C#zWeo$7LhujQGfeG)rWI|wX={nA!O_>5r{7kTSpPNWOlbRx;K1YHbYP20+hgUi7!Qbieg*{aD7XhP!>Q;axx(WCMTKw4lOpy5;?>zau4Rp9oR^zVJ zC90H2dn~^PXZF?s37`7#+X1Oeu(T#f@2Mw;&kjBw3ui8$wxZ1>sP0_@21tA#x4*sK z$b-S`3OoX^h)fM&1X7`~5Pqsg@6=I^Qe271=mY-3qzngLIF?8U@>KhBOOY-w4eZ_n zs6_JoY1BaAafildVBNB6*QCHCDDA6khxyfmmut0cNe;M5{)H7cAz*~KuP?VkXT84F z&5;+|28`EBnJ2r;{I)%>5t$B+?yU2m${O?VNYpo8?g0f-XuLgumfk++g9({?c3}ccWbG=YuOiFTkpoTtbmi;@pK{Y1P6?+FUYk9>2JgPLELv%W zlJiq|;v6FHy*3vh$eN2P?H(PW#SB!f+TY(5NmWo}h4iQ|tX!ACIC(F5(qw!FeM0?b zzphr;BqUUG3>o9{Yo>436g=n=n&*+mn=E`i1m3?PDBO(VapwFqxd4o#m$ z8_Eo=Ow2k`E4so@A0W6dQgTS`8!B$U@ zY`^FK@KyS-HF9@Pb>(~y@#m-nYQu9@Zbd(;)Ym9ib5neM8Q@>P|57GqQ^5^=<_KnV z@&Gm2T0NXp+kGEz0sHUAGh^cmPQdbpfJ`zkPlSJ6(FQl3xJOhozuDa@zN| z{PI(fxB5r-O^Qa-kqRr=QH}gDfam=6SN)OdzA%RYo3hp5IFK|y8~!s{JFIO0D_0v@ zb$1d)11{}mx9dL>;Qz65HVyRQWB;rFseKLRNe|GuMH;cbW0md%pfGTZChPl0g>!;^ zW_=0V9*{*bC+b8od9!GXokwy^3{nK|w(5|z+Z~LV9NIo7p0bXz&3X6dcRIIO*8} zYAW%?ey*;Nu&`dHxE~B=T%QvY)B{}IScSJys0gix+{a<#naiIHM z>HW3mm%$H}1D2T^B7am<5_r8KnqIFpAQ*yivf8@?sY=kq6;h z^2<#z08+;WCZ!~?g&9s!jsHHNvvcmy9mTXi3BTUPOQa*?P7s*y|V@H%aSlg__@ ziOE4AP2DPLnP+yg^vaQH4dDAULMioN@-O4UV4WbGUMhb)@3m9oFWw8Tz;B{Al(&aG z(K;_{e?aKQa9Iq^^C*+SRpStc?cG=8%I(^tikzgU# z<()MRs+XOaDz||_-!uIaTcC90@!Ft( zA4{T^r~qzVX>xowF0DVZjvp!OvM>^f%>#Q}C_HJjWN|_e+m9~hWAY2Zd_xxO5BdnD z?~YaP{`dR;66>}OpfG@L|6bW|S+eu|Z4k$Q8`jG~Q~EKSo0OTMtD~}8+OS{X#X(?K zwr?{MfM2=AyV*C*QjnK)Xe|x)4&tm@YkMe51fvE`vbtS7lE#aNrO|{cODLc6=ul49~`pk z!G)ywqgsMeai@P@?|jsl?u@s6$FJQqw~4_#e2dPY1dzX=U_P2Vk~MY{hPtFod!$|z z)BO4EctwiTPHqufX|EMooxg&}T7fYkw@pAb)IO6XYwytxj?jGK#jmA+2i!`O>9 z^}nFkg13UZ02`wiNe+r}kSYcw*h7cF96Q3f$)81qv-GZj%kLGH?{0O!yGJtIa+`Z+ z`6YiA(aWRK8mmeE5tgl=T6=0CSNhbgm{o~zW4+$@U+XfEswh!we555U_h&F8EB~=& z{bxeC8(6p0X9?&^c-x8L-GRHL|PLrR93CbAU-F$uZ27^4l8L9z$x_|>`w&j;W zqnM|YI#?0`5HbIbCHrl||G^UuRkvMNK$^H=GNZw~N;A4#TQ~W_7O|LwA*sjdjT!gK z_(7OBfnu6ieKTA>_u7%tjT*-Pn?meEUn6w@h0>DV>Wv^W}9d9)g5UT5qu`M#iV1`9(WV5Q-^cWzNmwO 
z=DDd8{WAY_uK$l**k3#u3xTeYB~tx)L18=21hYC`dl9tpQJ9Pd6XB`X>AlY0tF6Y} zsLQU7b8ff2Q<*~Y=ML4oB@D&43%Rn2*5M6ix>Ye!r?m!rvzDNiP?F&t(U0tr$L z&m6_ad-6?4>1!EpQl;C5AUb({8n;fP3JSGe;`$;z;JOs9JEY4xcVM<0 z@Z4GFBdJjNN5p%7*^8u9d^LDkk^NmmaM!^$SK8>uP?MQAM4QfBaOKtFueRQ{$a9ZM zHWt>uOq?P&cDX+tP;{wPLJgwX=sK{mK}hv_@?fEn$1z)tnJc0|>^;|VM{wHOjyMcu zcNF{aLNHI%V|ipcIT*}9wD-&P)@&$-bDGV1*cH89G;ESatjWTpu|Sxn&9g=h!XNbx zON0Z_P)!Um6!1I6Z?beA@O6yc%PZ&!Pc^Up@RsHuX`P0aD10q0y^sK^QO{ej^>msK zFl8Z3mvaS5V#S`+W;i|@eB#c8$&R-j1wD|A?aoJFlmNN%@%`hmuZ<9qAdm?9#IMWlwAS!xyfc#Es>n3 z&gGij1DByx5C%k;Z~u}W)2_q65k|8-oA&h$HLL?pwR9z@9~*^sP7#L;w8l&-F$pC=GJdp1oD3eDmDVc6u~8g>4iJXAq_*p zZ`oK-TH}va1Mmi*UmP)T2e?@``9%T__@#*MlI!c;-XD(ixdcZ8JMAZ&S@3)te(+j= zUm+{<$Th%V;?P5L8OZMzj7c32vLnpq~UpL@L5S~FM{y7YqV!>-YCfhs#rJc^EmqF z6>;Hm?TO~Eg739HR$|q&>+ISA(wK&6y;@M7F}>PFoJz2rb+hAe!&KZV35BH}`co55 zYEH>jlh?s%DANI7HgRNKu}t#_jY@w1152LLGW0LeVu_gO2jdgCLW%cSSw z!TiGqMRzab$BXj{%A#CeC6mT*0S?u!_+yt@c4JKyS0yj%_eS7<-IHWN${qJK*ntO! zJ!cfthsf4Fff_NdYx=SwP|dD^3nkWUVdO?w1(?-J*eQim3huFlyjH6?na0@4Z&oj6=P1AQf^sBEAbmL! zE8@6EjLIY-5Xz4$;FY_W@3VIQlJcgc=nS?Lwbisc+T}q1g}w3XUj!4rs((i0XnRcW zqP8XL5s))iU^;M3(W=g|AowYQ^wJ3nkju`I=vLEM>LrM=8e z?fDziDbWh`w2=a-atalMNsQ4L`psR4_buA$?Tz$3I6O}-E0RJ?Ng)8)M_%c~q?npf z+Clj#7-I6hNDfE#*i&Wh_7#{yUje-~gxn~gcis_1@wIL>dM-CLbNet}j{$OsBwpBw zf2s$2$}Xz#I!et>23Mym=*>z~vuWSqczpnfm|+k0>>jVqEN|Q4DRSdHimpwZ0kZ~P zZ>O)KK0VSE20ubG9}*Ww%IW@6JByXLaQcMho@Si7yl-iX!CoG-lU#bYcZb$Hs}v*f zbsoR9?3zh6$J4Bo1b9@t`tb>&l9@~x9sUZ-$9(TlRJYa)G~9ux=yO3dkb$%Z!jFls zA;C(=UqLkE*K}flfd2#Fco~KU>^zVf-WY}YqDb>2{D}1WK|CJmPh=3a(!k%0V;$On z-#<$O-083!V@NHOZV&qm01PxqA@KniWT=(U%7mP6{zjDE zr2<61e>rLZNC8m@uti89i$4RWD60F;460L@I}?bC+6+a`9}8$_r7r<@x=}Mj->9;X z2p+u?1kBm3JJ8f`8Pphnd5n+=VYh`84>}gL?%0WmT3D_0DBDfVxzo+NF_2)TS5AOM zGynVlI9>BD5unjotxi~ja^|L2@!oNQ=z>z)^tS~iY=nJB*##B5cOeFHY~CTjbo=>T zevPhsG3W5nI6;57GVz{kmZY7Oyu9H`3>3A(MzMYL)I&Zal7z)4A+JIx9OJ?~U}4Rv%Rm!biI2K%_+nUfodQsn!5Yk5l=d2_R+<>jH8nFbyjh zAgC4Q)Ey%GFuYIgngbsVs}&~EZJ+jJP<}YmLO#|yosjEh`r$Hr)$NzhAeBC8Gkhwy z+tZ@Hpf?cJgxg=#v*%IwC!R7Bb%>ZZT35loNccE{YdIbj)61iq6AJIzl<65AzPy`r z`vz#`bdebO*2LZb(krFX3Oh_TjO&HB?UCafx93Ios!wPziKfZFXF;ff-;(dvDl_Nc z{WkY{6~jW=XWXjqA8c{n}Y^_N#&vG#E3>R#Lyd1g*MO9 zKyuK1C?Z=y-xYxGy9@T=l`2mD2xE6?Ew8i@Q_idJnTxQYgQgS2xZ#c5^AYOx`1FYo zdUl6xq|jn@V5b1$%C0D4e9Qz|3LlEg`jzSwQbt3q0J{PGOx1^8H-wpblm>d)ywjiZ zXx6|hJ`+={87!Ldf|*$3PrkjO@|XuC_4jiUCps;Lcx?Y9_n%KMxDBztl(iNyTMGw~ z#t)El?ijN)N~9pcH9Fry3Nh)>`QK5Tl%?*WGXc{fgHm6>P<>_0*w5FBJEg{=wMVt|s3FdW7L zJ1~i0*FaF+Wb!tc7&}jvc|qK=*MDu?5jbQpN>lxJK)>%%)U#?i4|@B9C%CE@AE6cc z^8+6PL9No6q%JMY+afUkG4OxL1?v0AR23911aS&x5CE28h1utczw%kTBeK=?* zuayPAU26hHAr70+QhEUP!8mX(O~Fq^x2riXUEru8V=z?h#4^X zP&eq%$_5(_F zVV+0Tc&0{sh%$(BH?WB_z;#AuPSloPIm9I5uHPgTK=g;}q(bjKbXL2g zP8}LEi0zY&i?nhR$6aVqSi&xmaEnE?Yh_1Nqw7&tgwYE(qJ>}?Z}wVm@|9*ksp;{| z-HDpEjm|0l?p*a6_m6q#ooxLSh*JgB4@i+Scm`##0klU6-BXZE{WV=O*MWr8mF3BZ zH}T^BJ^Cq_G2ywQ8)9dPccPH*Hi)8!@pGmMdEMAtIOYM z-gK5FDPBX;w^XBGndxPeFa0v(WT`~_n1{}MCNzYMX6FH3NayV!Uk^3;^v!G%3IDOv zmNwZyVP)&yn@{h1a;uUZl{gO4k2nXeyQBRLTsMm`o?Ye0Vqx|cpSDQLT(kyySicRl zt_(gn7%eRClig@Q>x;x~g7=DF{8UAc~#A8@ILD$a0?Ee zUaTdbkf6E`a@9Xre?n9y;UKy88!URh`k{P;hBY8s28V6E44AJ0pA1Rw62#hk2h|M# z--ZhE?B0wlXbFi+K%Sl?eN(mCCGP5Z5<|9(-p30UuJOt-_l+Qj9|TKf)&ByF{<=)e zu$i2YQh{&_@}y#pW97P*!EQqj7G2X#

+template <typename P>
+template <typename... Args>
+inline void btree_node<P>::emplace_value(const size_type i,
+                                         allocator_type *alloc,
+                                         Args &&... args) {
+  assert(i >= start());
+  assert(i <= finish());
+  // Shift old values to create space for new value and then construct it in
+  // place.
+  if (i < finish()) {
+    transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+                        alloc);
+  }
+  value_init(i, alloc, std::forward<Args>(args)...);
+  set_finish(finish() + 1);
+
+  if (is_internal() && finish() > i + 1) {
+    for (field_type j = finish(); j > i + 1; --j) {
+      set_child(j, child(j - 1));
+    }
+    clear_child(i + 1);
+  }
+}
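+// Example: emplace_value(1, alloc, v) on a leaf holding [a, b, c] shifts
+// b and c one slot right and constructs v in place, yielding [a, v, b, c];
+// on an internal node the child pointers to the right of the new value are
+// shifted the same way.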

+template <typename P>
+inline void btree_node<P>::remove_values(const field_type i,
+                                         const field_type to_erase,
+                                         allocator_type *alloc) {
+  // Transfer values after the removed range into their new places.
+  value_destroy_n(i, to_erase, alloc);
+  const field_type orig_finish = finish();
+  const field_type src_i = i + to_erase;
+  transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+  if (is_internal()) {
+    // Delete all children between begin and end.
+    for (int j = 0; j < to_erase; ++j) {
+      clear_and_delete(child(i + j + 1), alloc);
+    }
+    // Rotate children after end into new positions.
+    for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+      set_child(j - to_erase, child(j));
+      clear_child(j);
+    }
+  }
+  set_finish(orig_finish - to_erase);
+}
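+// Example: remove_values(1, 2, alloc) on a node holding [a, b, c, d]
+// destroys b and c and slides d into slot 1 to leave [a, d]; on internal
+// nodes it also recursively deletes the subtrees spanned by the erased
+// range before rotating the remaining children left.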

+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(right->count() >= count());
+  assert(to_move >= 1);
+  assert(to_move <= right->count());
+
+  // 1) Move the delimiting value in the parent to the left node.
+  transfer(finish(), position(), parent(), alloc);
+
+  // 2) Move the (to_move - 1) values from the right node to the left node.
+  transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
+
+  // 3) Move the new delimiting value to the parent from the right node.
+  parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
+
+  // 4) Shift the values in the right node to their correct positions.
+  right->transfer_n(right->count() - to_move, right->start(),
+                    right->start() + to_move, right, alloc);
+
+  if (is_internal()) {
+    // Move the child pointers from the right to the left node.
+    for (int i = 0; i < to_move; ++i) {
+      init_child(finish() + i + 1, right->child(i));
+    }
+    for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+      assert(i + to_move <= right->max_count());
+      right->init_child(i, right->child(i + to_move));
+      right->clear_child(i + to_move);
+    }
+  }
+
+  // Fixup `finish` on the left and right nodes.
+  set_finish(finish() + to_move);
+  right->set_finish(right->finish() - to_move);
+}
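+// Example: with to_move == 2, left = [a, b], right = [x, y, z, w], and
+// parent delimiter d, the four steps above yield left = [a, b, d, x],
+// new parent delimiter y, and right = [z, w].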

+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+                                            btree_node *right,
+                                            allocator_type *alloc) {
+  assert(parent() == right->parent());
+  assert(position() + 1 == right->position());
+  assert(count() >= right->count());
+  assert(to_move >= 1);
+  assert(to_move <= count());
+
+  // Values in the right node are shifted to the right to make room for the
+  // new to_move values. Then, the delimiting value in the parent and the
+  // other (to_move - 1) values in the left node are moved into the right node.
+  // Lastly, a new delimiting value is moved from the left node into the
+  // parent, and the remaining empty left node entries are destroyed.
+
+  // 1) Shift existing values in the right node to their correct positions.
+  right->transfer_n_backward(right->count(), right->start() + to_move,
+                             right->start(), right, alloc);
+
+  // 2) Move the delimiting value in the parent to the right node.
+  right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
+
+  // 3) Move the (to_move - 1) values from the left node to the right node.
+  right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+                    alloc);
+
+  // 4) Move the new delimiting value to the parent from the left node.
+  parent()->transfer(position(), finish() - to_move, this, alloc);
+
+  if (is_internal()) {
+    // Move the child pointers from the left to the right node.
+    for (int i = right->finish(); i >= right->start(); --i) {
+      right->init_child(i + to_move, right->child(i));
+      right->clear_child(i);
+    }
+    for (int i = 1; i <= to_move; ++i) {
+      right->init_child(i - 1, child(finish() - to_move + i));
+      clear_child(finish() - to_move + i);
+    }
+  }
+
+  // Fixup the counts on the left and right nodes.
+  set_finish(finish() - to_move);
+  right->set_finish(right->finish() + to_move);
+}
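+// Example: with to_move == 2, left = [a, b, c, d], right = [x, y], and
+// parent delimiter m, the four steps above yield left = [a, b],
+// new parent delimiter c, and right = [d, m, x, y].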

+template <typename P>
+void btree_node<P>::split(const int insert_position, btree_node *dest,
+                          allocator_type *alloc) {
+  assert(dest->count() == 0);
+  assert(max_count() == kNodeSlots);
+
+  // We bias the split based on the position being inserted. If we're
+  // inserting at the beginning of the left node then bias the split to put
+  // more values on the right node. If we're inserting at the end of the
+  // right node then bias the split to put more values on the left node.
+  if (insert_position == start()) {
+    dest->set_finish(dest->start() + finish() - 1);
+  } else if (insert_position == kNodeSlots) {
+    dest->set_finish(dest->start());
+  } else {
+    dest->set_finish(dest->start() + count() / 2);
+  }
+  set_finish(finish() - dest->count());
+  assert(count() >= 1);
+
+  // Move values from the left sibling to the right sibling.
+  dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
+
+  // The split key is the largest value in the left sibling.
+  --mutable_finish();
+  parent()->emplace_value(position(), alloc, finish_slot());
+  value_destroy(finish(), alloc);
+  parent()->init_child(position() + 1, dest);
+
+  if (is_internal()) {
+    for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+         ++i, ++j) {
+      assert(child(j) != nullptr);
+      dest->init_child(i, child(j));
+      clear_child(j);
+    }
+  }
+}
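+// Example of the bias: inserting at start() moves all but one value into
+// `dest` and promotes the remaining value to the parent, leaving this node
+// empty for the incoming value; inserting at kNodeSlots leaves `dest` empty
+// instead, which keeps nodes full under sequential ascending inserts.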

+template <typename P>
+void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
+  assert(parent() == src->parent());
+  assert(position() + 1 == src->position());
+
+  // Move the delimiting value to the left node.
+  value_init(finish(), alloc, parent()->slot(position()));
+
+  // Move the values from the right to the left node.
+  transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
+
+  if (is_internal()) {
+    // Move the child pointers from the right to the left node.
+    for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
+      init_child(j, src->child(i));
+      src->clear_child(i);
+    }
+  }
+
+  // Fixup `finish` on the src and dest nodes.
+  set_finish(start() + 1 + count() + src->count());
+  src->set_finish(src->start());
+
+  // Remove the value on the parent node and delete the src node.
+  parent()->remove_values(position(), /*to_erase=*/1, alloc);
+}
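+// Example: merging left = [a, b] with right = [x, y] under parent delimiter
+// d produces left = [a, b, d, x, y]; remove_values() then erases d from the
+// parent and frees the emptied right node.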

+template <typename P>
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+  if (node->is_leaf()) {
+    node->value_destroy_n(node->start(), node->count(), alloc);
+    deallocate(LeafSize(node->max_count()), node, alloc);
+    return;
+  }
+  if (node->count() == 0) {
+    deallocate(InternalSize(), node, alloc);
+    return;
+  }
+
+  // The parent of the root of the subtree we are deleting.
+  btree_node *delete_root_parent = node->parent();
+
+  // Navigate to the leftmost leaf under node, and then delete upwards.
+  while (node->is_internal()) node = node->start_child();
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+  // When generations are enabled, we delete the leftmost leaf last in case it's
+  // the parent of the root and we need to check whether it's a leaf before we
+  // can update the root's generation.
+  // TODO(ezb): if we change btree_node::is_root to check a bool inside the node
+  // instead of checking whether the parent is a leaf, we can remove this logic.
+  btree_node *leftmost_leaf = node;
+#endif
+  // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
+  // isn't guaranteed to be a valid `field_type`.
+  int pos = node->position();
+  btree_node *parent = node->parent();
+  for (;;) {
+    // In each iteration of the next loop, we delete one leaf node and go right.
+    assert(pos <= parent->finish());
+    do {
+      node = parent->child(pos);
+      if (node->is_internal()) {
+        // Navigate to the leftmost leaf under node.
+        while (node->is_internal()) node = node->start_child();
+        pos = node->position();
+        parent = node->parent();
+      }
+      node->value_destroy_n(node->start(), node->count(), alloc);
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+      if (leftmost_leaf != node)
+#endif
+        deallocate(LeafSize(node->max_count()), node, alloc);
+      ++pos;
+    } while (pos <= parent->finish());
+
+    // Once we've deleted all children of parent, delete parent and go up/right.
+    assert(pos > parent->finish());
+    do {
+      node = parent;
+      pos = node->position();
+      parent = node->parent();
+      node->value_destroy_n(node->start(), node->count(), alloc);
+      deallocate(InternalSize(), node, alloc);
+      if (parent == delete_root_parent) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+        deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc);
+#endif
+        return;
+      }
+      ++pos;
+    } while (pos > parent->finish());
+  }
+}
+
+////
+// btree_iterator methods
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::increment_slow() {
+  if (node_->is_leaf()) {
+    assert(position_ >= node_->finish());
+    btree_iterator save(*this);
+    while (position_ == node_->finish() && !node_->is_root()) {
+      assert(node_->parent()->child(node_->position()) == node_);
+      position_ = node_->position();
+      node_ = node_->parent();
+    }
+    // TODO(ezb): assert we aren't incrementing end() instead of handling.
+    if (position_ == node_->finish()) {
+      *this = save;
+    }
+  } else {
+    assert(position_ < node_->finish());
+    node_ = node_->child(position_ + 1);
+    while (node_->is_internal()) {
+      node_ = node_->start_child();
+    }
+    position_ = node_->start();
+  }
+}
+
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::decrement_slow() {
+  if (node_->is_leaf()) {
+    assert(position_ <= -1);
+    btree_iterator save(*this);
+    while (position_ < node_->start() && !node_->is_root()) {
+      assert(node_->parent()->child(node_->position()) == node_);
+      position_ = node_->position() - 1;
+      node_ = node_->parent();
+    }
+    // TODO(ezb): assert we aren't decrementing begin() instead of handling.
+    if (position_ < node_->start()) {
+      *this = save;
+    }
+  } else {
+    assert(position_ >= node_->start());
+    node_ = node_->child(position_);
+    while (node_->is_internal()) {
+      node_ = node_->child(node_->finish());
+    }
+    position_ = node_->finish() - 1;
+  }
+}
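+// The slow paths above handle steps that leave the current leaf: they climb
+// toward the root until a value exists in the step direction, or descend to
+// the nearest leaf value when stepping through an internal node. If the
+// climb reaches the root without finding one, the saved iterator is
+// restored, so incrementing end() or decrementing begin() leaves the
+// iterator unchanged.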

+////
+// btree methods
+template <typename P>
+template <typename Btree>
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
+  static_assert(std::is_same<btree, Btree>::value ||
+                    std::is_same<const btree, Btree>::value,
+                "Btree type must be same or const.");
+  assert(empty());
+
+  // We can avoid key comparisons because we know the order of the
+  // values is the same order we'll store them in.
+  auto iter = other.begin();
+  if (iter == other.end()) return;
+  insert_multi(iter.slot());
+  ++iter;
+  for (; iter != other.end(); ++iter) {
+    // If the btree is not empty, we can just insert the new value at the end
+    // of the tree.
+    internal_emplace(end(), iter.slot());
+  }
+}

+template <typename P>
+constexpr bool btree<P>::static_assert_validation() {
+  static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
+                "Key comparison must be nothrow copy constructible");
+  static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
+                "Allocator must be nothrow copy constructible");
+  static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+                "iterator not trivially copyable.");
+
+  // Note: We assert that kTargetValues, which is computed from
+  // Params::kTargetNodeSize, must fit the node_type::field_type.
+  static_assert(
+      kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
+      "target node size too large");
+
+  // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
+  static_assert(
+      compare_has_valid_result_type<key_compare, key_type>(),
+      "key comparison function must return absl::{weak,strong}_ordering or "
+      "bool.");
+
+  // Test the assumption made in setting kNodeSlotSpace.
+  static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
+                "node space assumption incorrect");
+
+  return true;
+}

+template <typename P>
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+    -> std::pair<iterator, bool> {
+  const SearchResult<iterator, is_key_compare_to::value> res =
+      internal_lower_bound(key);
+  const iterator lower = iterator(internal_end(res.value));
+  const bool equal = res.HasMatch()
+                         ? res.IsEq()
+                         : lower != end() && !compare_keys(key, lower.key());
+  return {lower, equal};
+}

+template <typename P>
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+  const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+  const iterator lower = lower_and_equal.first;
+  if (!lower_and_equal.second) {
+    return {lower, lower};
+  }
+
+  const iterator next = std::next(lower);
+  if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+    // The next iterator after lower must point to a key greater than `key`.
+    // Note: if this assert fails, then it may indicate that the comparator does
+    // not meet the equivalence requirements for Compare
+    // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+    assert(next == end() || compare_keys(key, next.key()));
+    return {lower, next};
+  }
+  // Try once more to avoid the call to upper_bound() if there's only one
+  // equivalent key. This should prevent all calls to upper_bound() in cases of
+  // unique-containers with heterogeneous comparators in which all comparison
+  // operators have the same equivalence classes.
+  if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+  // In this case, we need to call upper_bound() to avoid worst case O(N)
+  // behavior if we were to iterate over equal keys.
+  return {lower, upper_bound(key)};
+}
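+// Example: in a multiset holding [1, 2, 2, 2, 3], equal_range(2) sees that
+// the element after `lower` is also equivalent to 2, so it pays for one
+// upper_bound() call rather than iterating across all three copies.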

+template <typename P>
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
+    -> std::pair<iterator, bool> {
+  if (empty()) {
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
+  }
+
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  iterator iter = res.value;
+
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      // The key already exists in the tree, do nothing.
+      return {iter, false};
+    }
+  } else {
+    iterator last = internal_last(iter);
+    if (last.node_ && !compare_keys(key, last.key())) {
+      // The key already exists in the tree, do nothing.
+      return {last, false};
+    }
+  }
+  return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
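+// Example: insert_unique(5, ...) into [3, 5, 8] finds the existing 5 and
+// returns {iterator-to-5, false}; insert_unique(4, ...) emplaces 4 and
+// returns {iterator-to-4, true}.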

+template <typename P>
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
+                                         Args &&... args)
+    -> std::pair<iterator, bool> {
+  if (!empty()) {
+    if (position == end() || compare_keys(key, position.key())) {
+      if (position == begin() || compare_keys(std::prev(position).key(), key)) {
+        // prev.key() < key < position.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else if (compare_keys(position.key(), key)) {
+      ++position;
+      if (position == end() || compare_keys(key, position.key())) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return {internal_emplace(position, std::forward<Args>(args)...), true};
+      }
+    } else {
+      // position.key() == key
+      return {position, false};
+    }
+  }
+  return insert_unique(key, std::forward<Args>(args)...);
+}
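+// Example: with tree [3, 8] and the hint pointing at 8, inserting 5
+// satisfies prev.key() < 5 < position.key() and emplaces directly at the
+// hint, skipping the root-to-leaf search; a wrong hint falls back to a full
+// insert_unique() lookup.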

+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
+  for (; b != e; ++b) {
+    insert_hint_unique(end(), params_type::key(*b), *b);
+  }
+}

+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+  for (; b != e; ++b) {
+    // Use a node handle to manage a temp slot.
+    auto node_handle =
+        CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
+    slot_type *slot = CommonAccess::GetSlot(node_handle);
+    insert_hint_unique(end(), params_type::key(slot), slot);
+  }
+}
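+// This overload is used when the key cannot be extracted from *b directly:
+// each element is materialized once into the node-handle-owned slot, and the
+// slot is then moved into the tree (or discarded if the key is a duplicate).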

+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+  if (empty()) {
+    mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
+  }
+
+  iterator iter = internal_upper_bound(key);
+  if (iter.node_ == nullptr) {
+    iter = end();
+  }
+  return internal_emplace(iter, std::forward<ValueType>(v));
+}

+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+  if (!empty()) {
+    const key_type &key = params_type::key(v);
+    if (position == end() || !compare_keys(position.key(), key)) {
+      if (position == begin() ||
+          !compare_keys(key, std::prev(position).key())) {
+        // prev.key() <= key <= position.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    } else {
+      ++position;
+      if (position == end() || !compare_keys(position.key(), key)) {
+        // {original `position`}.key() < key < {current `position`}.key()
+        return internal_emplace(position, std::forward<ValueType>(v));
+      }
+    }
+  }
+  return insert_multi(std::forward<ValueType>(v));
+}

+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
+  for (; b != e; ++b) {
+    insert_hint_multi(end(), *b);
+  }
+}

+template <typename P>
+auto btree<P>::operator=(const btree &other) -> btree & {
+  if (this != &other) {
+    clear();
+
+    *mutable_key_comp() = other.key_comp();
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_copy_assignment::value) {
+      *mutable_allocator() = other.allocator();
+    }
+
+    copy_or_move_values_in_order(other);
+  }
+  return *this;
+}

+template <typename P>
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+  if (this != &other) {
+    clear();
+
+    using std::swap;
+    if (absl::allocator_traits<
+            allocator_type>::propagate_on_container_copy_assignment::value) {
+      swap(root_, other.root_);
+      // Note: `rightmost_` also contains the allocator and the key comparator.
+      swap(rightmost_, other.rightmost_);
+      swap(size_, other.size_);
+    } else {
+      if (allocator() == other.allocator()) {
+        swap(mutable_root(), other.mutable_root());
+        swap(*mutable_key_comp(), *other.mutable_key_comp());
+        swap(mutable_rightmost(), other.mutable_rightmost());
+        swap(size_, other.size_);
+      } else {
+        // We aren't allowed to propagate the allocator and the allocator is
+        // different so we can't take over its memory. We must move each element
+        // individually. We need both `other` and `this` to have `other`s key
+        // comparator while moving the values so we can't swap the key
+        // comparators.
+        *mutable_key_comp() = other.key_comp();
+        copy_or_move_values_in_order(other);
+      }
+    }
+  }
+  return *this;
+}
<P>
::erase(iterator iter) -> iterator { + iter.node_->value_destroy(iter.position_, mutable_allocator()); + iter.update_generation(); + + const bool internal_delete = iter.node_->is_internal(); + if (internal_delete) { + // Deletion of a value on an internal node. First, transfer the largest + // value from our left child here, then erase/rebalance from that position. + // We can get to the largest value from our left child by decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node_->is_leaf()); + internal_iter.node_->transfer(internal_iter.position_, iter.position_, + iter.node_, mutable_allocator()); + } else { + // Shift values after erased position in leaf. In the internal case, we + // don't need to do this because the leaf position is the end of the node. + const field_type transfer_from = iter.position_ + 1; + const field_type num_to_transfer = iter.node_->finish() - transfer_from; + iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from, + iter.node_, mutable_allocator()); + } + // Update node finish and container size. + iter.node_->set_finish(iter.node_->finish() - 1); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node_) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) { + ++res; + } + return res; +} + +template +auto btree
<P>
::rebalance_after_delete(iterator iter) -> iterator { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) { + if (iter.node_ == root()) { + try_shrink(); + if (empty()) { + return end(); + } + break; + } + if (iter.node_->count() >= kMinNodeValues) { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) { + res = iter; + first_iteration = false; + } + if (!merged) { + break; + } + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + } + res.update_generation(); + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position_ == res.node_->finish()) { + res.position_ = res.node_->finish() - 1; + ++res; + } + + return res; +} + +template +auto btree
<P>
::erase_range(iterator begin, iterator end) + -> std::pair { + difference_type count = std::distance(begin, end); + assert(count >= 0); + + if (count == 0) { + return {0, begin}; + } + + if (static_cast(count) == size_) { + clear(); + return {count, this->end()}; + } + + if (begin.node_ == end.node_) { + assert(end.position_ > begin.position_); + begin.node_->remove_values(begin.position_, end.position_ - begin.position_, + mutable_allocator()); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) { + if (begin.node_->is_leaf()) { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = + begin.node_->finish() - begin.position_; + const size_type to_erase = + (std::min)(remaining_to_erase, remaining_in_node); + begin.node_->remove_values(begin.position_, to_erase, + mutable_allocator()); + size_ -= to_erase; + begin = rebalance_after_delete(begin); + } else { + begin = erase(begin); + } + } + begin.update_generation(); + return {count, begin}; +} + +template +void btree
<P>
::clear() {
+  if (!empty()) {
+    node_type::clear_and_delete(root(), mutable_allocator());
+  }
+  mutable_root() = mutable_rightmost() = EmptyNode();
+  size_ = 0;
+}
+
+template <typename P>
+void btree
<P>
::swap(btree &other) {
+  using std::swap;
+  if (absl::allocator_traits<
+          allocator_type>::propagate_on_container_swap::value) {
+    // Note: `rightmost_` also contains the allocator and the key comparator.
+    swap(rightmost_, other.rightmost_);
+  } else {
+    // It's undefined behavior if the allocators are unequal here.
+    assert(allocator() == other.allocator());
+    swap(mutable_rightmost(), other.mutable_rightmost());
+    swap(*mutable_key_comp(), *other.mutable_key_comp());
+  }
+  swap(mutable_root(), other.mutable_root());
+  swap(size_, other.size_);
+}
+
+template <typename P>
+void btree
<P>
::verify() const {
+  assert(root() != nullptr);
+  assert(leftmost() != nullptr);
+  assert(rightmost() != nullptr);
+  assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+  assert(leftmost() == (++const_iterator(root(), -1)).node_);
+  assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
+  assert(leftmost()->is_leaf());
+  assert(rightmost()->is_leaf());
+}
+
+template <typename P>
+void btree
<P>
::rebalance_or_split(iterator *iter) { + node_type *&node = iter->node_; + int &insert_position = iter->position_; + assert(node->count() == node->max_count()); + assert(kNodeSlots == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type *parent = node->parent(); + if (node != root()) { + if (node->position() > parent->start()) { + // Try rebalancing with our left sibling. + node_type *left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + int to_move = (kNodeSlots - left->count()) / + (1 + (insert_position < static_cast(kNodeSlots))); + to_move = (std::max)(1, to_move); + + if (insert_position - to_move >= node->start() || + left->count() + to_move < static_cast(kNodeSlots)) { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = insert_position - to_move; + if (insert_position < node->start()) { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->finish()) { + // Try rebalancing with our right sibling. + node_type *right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + int to_move = (static_cast(kNodeSlots) - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(1, to_move); + + if (insert_position <= node->finish() - to_move || + right->count() + to_move < static_cast(kNodeSlots)) { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) { + iterator parent_iter(node->parent(), node->position()); + rebalance_or_split(&parent_iter); + } + } else { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(parent); + parent->set_generation(root()->generation()); + parent->init_child(parent->start(), root()); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(parent->start_child()->is_internal() || + parent->start_child() == rightmost()); + } + + // Split the node. + node_type *split_node; + if (node->is_leaf()) { + split_node = new_leaf_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost() == node) mutable_rightmost() = split_node; + } else { + split_node = new_internal_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->finish()) { + insert_position = insert_position - node->count() - 1; + node = split_node; + } +} + +template +void btree
<P>
::merge_nodes(node_type *left, node_type *right) {
+  left->merge(right, mutable_allocator());
+  if (rightmost() == right) mutable_rightmost() = left;
+}
+
+template <typename P>
+bool btree
<P>
::try_merge_or_rebalance(iterator *iter) { + node_type *parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) { + // Try merging with our left sibling. + node_type *left = parent->child(iter->node_->position() - 1); + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node_->count() <= kNodeSlots) { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; + return true; + } + } + if (iter->node_->position() < parent->finish()) { + // Try merging with our right sibling. + node_type *right = parent->child(iter->node_->position() + 1); + assert(right->max_count() == kNodeSlots); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) { + merge_nodes(iter->node_, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if (right->count() > kMinNodeValues && + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { + int to_move = (right->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, right->count() - 1); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node_->position() > parent->start()) { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type *left = parent->child(iter->node_->position() - 1); + if (left->count() > kMinNodeValues && + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) { + int to_move = (left->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, left->count() - 1); + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; + return false; + } + } + return false; +} + +template +void btree
<P>
::try_shrink() {
+  node_type *orig_root = root();
+  if (orig_root->count() > 0) {
+    return;
+  }
+  // Deleted the last item on the root node, shrink the height of the tree.
+  if (orig_root->is_leaf()) {
+    assert(size() == 0);
+    mutable_root() = mutable_rightmost() = EmptyNode();
+  } else {
+    node_type *child = orig_root->start_child();
+    child->make_root();
+    mutable_root() = child;
+  }
+  node_type::clear_and_delete(orig_root, mutable_allocator());
+}
+
+template <typename P>
+template <typename IterType>
+inline IterType btree
<P>
::internal_last(IterType iter) { + assert(iter.node_ != nullptr); + while (iter.position_ == iter.node_->finish()) { + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + if (iter.node_->is_leaf()) { + iter.node_ = nullptr; + break; + } + } + iter.update_generation(); + return iter; +} + +template +template +inline auto btree
<P>
::internal_emplace(iterator iter, Args &&... args) + -> iterator { + if (iter.node_->is_internal()) { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position_; + } + const field_type max_count = iter.node_->max_count(); + allocator_type *alloc = mutable_allocator(); + if (iter.node_->count() == max_count) { + // Make room in the leaf for the new item. + if (max_count < kNodeSlots) { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + assert(iter.node_ == root()); + iter.node_ = + new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); + // Transfer the values from the old root to the new root. + node_type *old_root = root(); + node_type *new_root = iter.node_; + new_root->transfer_n(old_root->count(), new_root->start(), + old_root->start(), old_root, alloc); + new_root->set_finish(old_root->finish()); + old_root->set_finish(old_root->start()); + new_root->set_generation(old_root->generation()); + node_type::clear_and_delete(old_root, alloc); + mutable_root() = mutable_rightmost() = new_root; + } else { + rebalance_or_split(&iter); + } + } + iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); + ++size_; + iter.update_generation(); + return iter; +} + +template +template +inline auto btree
<P>
::internal_locate(const K &key) const + -> SearchResult { + iterator iter(const_cast(root())); + for (;;) { + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (res.IsEq()) { + return {iter, MatchKind::kEq}; + } + // Note: in the non-key-compare-to case, we don't need to walk all the way + // down the tree if the keys are equal, but determining equality would + // require doing an extra comparison on each node on the way down, and we + // will need to go all the way to the leaf node in the expected case. + if (iter.node_->is_leaf()) { + break; + } + iter.node_ = iter.node_->child(iter.position_); + } + // Note: in the non-key-compare-to case, the key may actually be equivalent + // here (and the MatchKind::kNe is ignored). + return {iter, MatchKind::kNe}; +} + +template +template +auto btree
<P>
::internal_lower_bound(const K &key) const + -> SearchResult { + if (!params_type::template can_have_multiple_equivalent_keys()) { + SearchResult ret = internal_locate(key); + ret.value = internal_last(ret.value); + return ret; + } + iterator iter(const_cast(root())); + SearchResult res; + bool seen_eq = false; + for (;;) { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (iter.node_->is_leaf()) { + break; + } + seen_eq = seen_eq || res.IsEq(); + iter.node_ = iter.node_->child(iter.position_); + } + if (res.IsEq()) return {iter, MatchKind::kEq}; + return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; +} + +template +template +auto btree
<P>
::internal_upper_bound(const K &key) const -> iterator {
+  iterator iter(const_cast<node_type *>(root()));
+  for (;;) {
+    iter.position_ = iter.node_->upper_bound(key, key_comp());
+    if (iter.node_->is_leaf()) {
+      break;
+    }
+    iter.node_ = iter.node_->child(iter.position_);
+  }
+  return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree
<P>
::internal_find(const K &key) const -> iterator {
+  SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+  if (res.HasMatch()) {
+    if (res.IsEq()) {
+      return res.value;
+    }
+  } else {
+    const iterator iter = internal_last(res.value);
+    if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
+      return iter;
+    }
+  }
+  return {nullptr, 0};
+}
+
+template <typename P>
+int btree
<P>
::internal_verify(const node_type *node, const key_type *lo, + const key_type *hi) const { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) { + assert(!compare_keys(node->key(node->start()), *lo)); + } + if (hi) { + assert(!compare_keys(*hi, node->key(node->finish() - 1))); + } + for (int i = node->start() + 1; i < node->finish(); ++i) { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + int count = node->count(); + if (node->is_internal()) { + for (int i = node->start(); i <= node->finish(); ++i) { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify(node->child(i), + i == node->start() ? lo : &node->key(i - 1), + i == node->finish() ? hi : &node->key(i)); + } + } + return count; +} + +struct btree_access { + template + static auto erase_if(BtreeContainer &container, Pred pred) + -> typename BtreeContainer::size_type { + const auto initial_size = container.size(); + auto &tree = container.tree_; + auto *alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) { + if (!pred(*it)) { + ++it; + continue; + } + auto *node = it.node_; + if (node->is_internal()) { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. + int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) { + it.update_generation(); + if (pred(*it)) { + node->value_destroy(it.position_, alloc); + } else { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } +}; + +#undef ABSL_BTREE_ENABLE_GENERATIONS + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h new file mode 100644 index 0000000..fc2f740 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h @@ -0,0 +1,699 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A common base class for btree_set, btree_map, btree_multiset, and +// btree_multimap. +template +class btree_container { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = + typename KeyArg::template type< + K, typename Tree::key_type>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::original_key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + // Constructors/assignments. + btree_container() : tree_(key_compare(), allocator_type()) {} + explicit btree_container(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : tree_(comp, alloc) {} + explicit btree_container(const allocator_type &alloc) + : tree_(key_compare(), alloc) {} + + btree_container(const btree_container &other) + : btree_container(other, absl::allocator_traits:: + select_on_container_copy_construction( + other.get_allocator())) {} + btree_container(const btree_container &other, const allocator_type &alloc) + : tree_(other.tree_, alloc) {} + + btree_container(btree_container &&other) noexcept( + std::is_nothrow_move_constructible::value) = default; + btree_container(btree_container &&other, const allocator_type &alloc) + : tree_(std::move(other.tree_), alloc) {} + + btree_container &operator=(const btree_container &other) = default; + btree_container &operator=(btree_container &&other) noexcept( + std::is_nothrow_move_assignable::value) = default; + + // Iterator routines. 
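+  // A minimal usage sketch, assuming absl::btree_set<int>, which is built on
+  // this base:
+  //
+  //   absl::btree_set<int> s = {3, 1, 2};
+  //   for (int v : s) { /* visits 1, 2, 3 in key order */ }
+  //   for (auto it = s.rbegin(); it != s.rend(); ++it) { /* 3, 2, 1 */ }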
+ iterator begin() { return tree_.begin(); } + const_iterator begin() const { return tree_.begin(); } + const_iterator cbegin() const { return tree_.begin(); } + iterator end() { return tree_.end(); } + const_iterator end() const { return tree_.end(); } + const_iterator cend() const { return tree_.end(); } + reverse_iterator rbegin() { return tree_.rbegin(); } + const_reverse_iterator rbegin() const { return tree_.rbegin(); } + const_reverse_iterator crbegin() const { return tree_.rbegin(); } + reverse_iterator rend() { return tree_.rend(); } + const_reverse_iterator rend() const { return tree_.rend(); } + const_reverse_iterator crend() const { return tree_.rend(); } + + // Lookup routines. + template + size_type count(const key_arg &key) const { + auto equal_range = this->equal_range(key); + return std::distance(equal_range.first, equal_range.second); + } + template + iterator find(const key_arg &key) { + return tree_.find(key); + } + template + const_iterator find(const key_arg &key) const { + return tree_.find(key); + } + template + bool contains(const key_arg &key) const { + return find(key) != end(); + } + template + iterator lower_bound(const key_arg &key) { + return tree_.lower_bound(key); + } + template + const_iterator lower_bound(const key_arg &key) const { + return tree_.lower_bound(key); + } + template + iterator upper_bound(const key_arg &key) { + return tree_.upper_bound(key); + } + template + const_iterator upper_bound(const key_arg &key) const { + return tree_.upper_bound(key); + } + template + std::pair equal_range(const key_arg &key) { + return tree_.equal_range(key); + } + template + std::pair equal_range( + const key_arg &key) const { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } + iterator erase(iterator iter) { return tree_.erase(iter); } + iterator erase(const_iterator first, const_iterator last) { + return tree_.erase_range(iterator(first), iterator(last)).second; + } + template + size_type erase(const key_arg &key) { + auto equal_range = this->equal_range(key); + return tree_.erase_range(equal_range.first, equal_range.second).first; + } + + // Extract routines. + node_type extract(iterator position) { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + auto node = + CommonAccess::Construct(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) { + return extract(iterator(position)); + } + + // Utility routines. + ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } + void swap(btree_container &other) { tree_.swap(other.tree_); } + void verify() const { tree_.verify(); } + + // Size routines. 
+ size_type size() const { return tree_.size(); } + size_type max_size() const { return tree_.max_size(); } + bool empty() const { return tree_.empty(); } + + friend bool operator==(const btree_container &x, const btree_container &y) { + if (x.size() != y.size()) return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container &x, const btree_container &y) { + return !(x == y); + } + + friend bool operator<(const btree_container &x, const btree_container &y) { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container &x, const btree_container &y) { + return y < x; + } + + friend bool operator<=(const btree_container &x, const btree_container &y) { + return !(y < x); + } + + friend bool operator>=(const btree_container &x, const btree_container &y) { + return !(x < y); + } + + // The allocator used by the btree. + allocator_type get_allocator() const { return tree_.get_allocator(); } + + // The key comparator used by the btree. + key_compare key_comp() const { return key_compare(tree_.key_comp()); } + value_compare value_comp() const { return tree_.value_comp(); } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container &b) { + for (const auto &v : b) { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + friend struct btree_access; + Tree tree_; +}; + +// A common base class for btree_set and btree_map. +template +class btree_set_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() {} + + // Range constructors. + template + btree_set_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + template + btree_set_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_set_container(b, e, key_compare(), alloc) {} + + // Initializer list constructors. + btree_set_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_set_container(init.begin(), init.end(), comp, alloc) {} + btree_set_container(std::initializer_list init, + const allocator_type &alloc) + : btree_set_container(init.begin(), init.end(), alloc) {} + + // Insertion routines. 
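+  // A hedged sketch of these routines as exercised through a derived
+  // container, assuming absl::btree_set<std::string>:
+  //
+  //   absl::btree_set<std::string> s;
+  //   auto res = s.insert("a");   // res.second == true, newly inserted
+  //   auto dup = s.insert("a");   // dup.second == false, key already present
+  //   s.emplace("b");             // constructs the value in a temp slot
+  //   s.insert(s.end(), "c");     // hinted insert can skip a full search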
+ std::pair insert(const value_type &v) { + return this->tree_.insert_unique(params_type::key(v), v); + } + std::pair insert(value_type &&v) { + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + template + std::pair emplace(Args &&... args) { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); + return this->tree_.insert_unique(params_type::key(slot), slot); + } + iterator insert(const_iterator hint, const value_type &v) { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), v) + .first; + } + iterator insert(const_iterator hint, value_type &&v) { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) + .first; + } + template + iterator emplace_hint(const_iterator hint, Args &&... args) { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + auto *slot = CommonAccess::GetSlot(node); + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(slot), slot) + .first; + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_unique(b, e, 0); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); + } + insert_return_type insert(node_type &&node) { + if (!node) return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + if (res.second) CommonAccess::Destroy(&node); + return res.first; + } + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) + : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) { + if (insert(std::move(params_type::element(src_it.slot()))).second) { + src_it = src.erase(src_it); + } else { + ++src_it; + } + } + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// Base class for btree_map. 
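+// A rough sketch of the map-specific API declared below, assuming
+// absl::btree_map<std::string, int> is instantiated from this class:
+//
+//   absl::btree_map<std::string, int> m;
+//   m.insert_or_assign("a", 1);  // inserts a new entry
+//   m.insert_or_assign("a", 2);  // assigns over the existing mapped value
+//   m.try_emplace("b", 3);       // constructs the mapped value only if absent
+//   m["c"] += 1;                 // operator[] is implemented via try_emplace
+//   int v = m.at("a");           // throws std::out_of_range when missing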
+template +class btree_map_container : public btree_set_container { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + private: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() {} + + // Insertion routines. + // Note: the nullptr template arguments and extra `const M&` overloads allow + // for supporting bitfield arguments. + template + std::pair insert_or_assign(const key_arg &k, + const M &obj) { + return insert_or_assign_impl(k, obj); + } + template + std::pair insert_or_assign(key_arg &&k, const M &obj) { + return insert_or_assign_impl(std::forward(k), obj); + } + template + std::pair insert_or_assign(const key_arg &k, M &&obj) { + return insert_or_assign_impl(k, std::forward(obj)); + } + template + std::pair insert_or_assign(key_arg &&k, M &&obj) { + return insert_or_assign_impl(std::forward(k), std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, + const M &obj) { + return insert_or_assign_hint_impl(hint, k, obj); + } + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, const M &obj) { + return insert_or_assign_hint_impl(hint, std::forward(k), obj); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg &k, M &&obj) { + return insert_or_assign_hint_impl(hint, k, std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, key_arg &&k, M &&obj) { + return insert_or_assign_hint_impl(hint, std::forward(k), + std::forward(obj)); + } + + template ::value, int> = 0> + std::pair try_emplace(const key_arg &k, Args &&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + template ::value, int> = 0> + std::pair try_emplace(key_arg &&k, Args &&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, const key_arg &k, + Args &&... args) { + return try_emplace_hint_impl(hint, k, std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, key_arg &&k, Args &&... args) { + return try_emplace_hint_impl(hint, std::forward(k), + std::forward(args)...); + } + + template + mapped_type &operator[](const key_arg &k) { + return try_emplace(k).first->second; + } + template + mapped_type &operator[](key_arg &&k) { + return try_emplace(std::forward(k)).first->second; + } + + template + mapped_type &at(const key_arg &key) { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template + const mapped_type &at(const key_arg &key) const { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + + private: + // Note: when we call `std::forward(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. 
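+  //
+  // A sketch of that guarantee with a move-only mapped type, assuming
+  // absl::btree_map<int, std::unique_ptr<int>>:
+  //
+  //   absl::btree_map<int, std::unique_ptr<int>> m;
+  //   auto p = std::make_unique<int>(1);
+  //   m.insert_or_assign(0, std::move(p));  // absent: `obj` consumed by insert
+  //   auto q = std::make_unique<int>(2);
+  //   m.insert_or_assign(0, std::move(q));  // present: the insert leaves `q`
+  //                                         // intact; it is then move-assigned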
+ template + std::pair insert_or_assign_impl(K &&k, M &&obj) { + const std::pair ret = + this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret; + } + template + iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { + const std::pair ret = this->tree_.insert_hint_unique( + iterator(hint), k, std::forward(k), std::forward(obj)); + if (!ret.second) ret.first->second = std::forward(obj); + return ret.first; + } + + template + std::pair try_emplace_impl(K &&k, Args &&... args) { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + } + template + iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)) + .first; + } +}; + +// A common base class for btree_multiset and btree_multimap. +template +class btree_multiset_container : public btree_container { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() {} + + // Range constructors. + template + btree_multiset_container(InputIterator b, InputIterator e, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : super_type(comp, alloc) { + insert(b, e); + } + template + btree_multiset_container(InputIterator b, InputIterator e, + const allocator_type &alloc) + : btree_multiset_container(b, e, key_compare(), alloc) {} + + // Initializer list constructors. + btree_multiset_container(std::initializer_list init, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} + btree_multiset_container(std::initializer_list init, + const allocator_type &alloc) + : btree_multiset_container(init.begin(), init.end(), alloc) {} + + // Insertion routines. + iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } + iterator insert(value_type &&v) { + return this->tree_.insert_multi(std::move(v)); + } + iterator insert(const_iterator hint, const value_type &v) { + return this->tree_.insert_hint_multi(iterator(hint), v); + } + iterator insert(const_iterator hint, value_type &&v) { + return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); + } + template + void insert(InputIterator b, InputIterator e) { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args &&... 
args) { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_multi(CommonAccess::GetSlot(node)); + } + template + iterator emplace_hint(const_iterator hint, Args &&... args) { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), + std::forward(args)...); + return this->tree_.insert_hint_multi(iterator(hint), + CommonAccess::GetSlot(node)); + } + iterator insert(node_type &&node) { + if (!node) return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), + CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type &&node) { + if (!node) return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node)))); + CommonAccess::Destroy(&node); + return res; + } + + // Node extraction routines. + template + node_type extract(const key_arg &key) { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) + : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &src) { // NOLINT + for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { + insert(std::move(params_type::element(src_it.slot()))); + } + src.clear(); + } + + template < + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container &&src) { + merge(src); + } +}; + +// A base class for btree_multimap. +template +class btree_multimap_container : public btree_multiset_container { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() {} +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/common.h b/CAPI/cpp/grpc/include/absl/container/internal/common.h new file mode 100644 index 0000000..416d9aa --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/common.h @@ -0,0 +1,207 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_ + +#include +#include + +#include "absl/meta/type_traits.h" +#include "absl/types/optional.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct IsTransparent : std::false_type {}; +template +struct IsTransparent> + : std::true_type {}; + +template +struct KeyArg { + // Transparent. Forward `K`. + template + using type = K; +}; + +template <> +struct KeyArg { + // Not transparent. Always use `key_type`. + template + using type = key_type; +}; + +// The node_handle concept from C++17. +// We specialize node_handle for sets and maps. node_handle_base holds the +// common API of both. +template +class node_handle_base { + protected: + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() = default; + node_handle_base(node_handle_base&& other) noexcept { + *this = std::move(other); + } + ~node_handle_base() { destroy(); } + node_handle_base& operator=(node_handle_base&& other) noexcept { + destroy(); + if (!other.empty()) { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept { return !alloc_; } + explicit operator bool() const noexcept { return !empty(); } + allocator_type get_allocator() const { return *alloc_; } + + protected: + friend struct CommonAccess; + + struct transfer_tag_t {}; + node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) + : alloc_(a) { + PolicyTraits::transfer(alloc(), slot(), s); + } + + struct construct_tag_t {}; + template + node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) + : alloc_(a) { + PolicyTraits::construct(alloc(), slot(), std::forward(args)...); + } + + void destroy() { + if (!empty()) { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() { return std::addressof(*alloc_); } + + private: + absl::optional alloc_ = {}; + alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; +}; + +// For sets. +template +class node_handle : public node_handle_base { + using Base = node_handle_base; + + public: + using value_type = typename PolicyTraits::value_type; + + constexpr node_handle() {} + + value_type& value() const { return PolicyTraits::element(this->slot()); } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// For maps. +template +class node_handle> + : public node_handle_base { + using Base = node_handle_base; + using slot_type = typename PolicyTraits::slot_type; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() {} + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key. Otherwise, we provide const access. + auto key() const + -> decltype(PolicyTraits::mutable_key(std::declval())) { + return PolicyTraits::mutable_key(this->slot()); + } + + mapped_type& mapped() const { + return PolicyTraits::value(&PolicyTraits::element(this->slot())); + } + + private: + friend struct CommonAccess; + + using Base::Base; +}; + +// Provide access to non-public node-handle functions. 
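+// A hedged sketch of the pattern the btree containers use with this accessor
+// (compare btree_set_container::emplace in btree_container.h):
+//
+//   auto node = CommonAccess::Construct<node_type>(alloc, value);
+//   slot_type *slot = CommonAccess::GetSlot(node);
+//   tree.insert_unique(params_type::key(slot), slot);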
+struct CommonAccess { + template + static auto GetSlot(const Node& node) -> decltype(node.slot()) { + return node.slot(); + } + + template + static void Destroy(Node* node) { + node->destroy(); + } + + template + static void Reset(Node* node) { + node->reset(); + } + + template + static T Transfer(Args&&... args) { + return T(typename T::transfer_tag_t{}, std::forward(args)...); + } + + template + static T Construct(Args&&... args) { + return T(typename T::construct_tag_t{}, std::forward(args)...); + } +}; + +// Implement the insert_return_type<> concept of C++17. +template +struct InsertReturnType { + Iterator position; + bool inserted; + NodeType node; +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h new file mode 100644 index 0000000..5ebe164 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h @@ -0,0 +1,290 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Helper class to perform the Empty Base Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the optimization. If all types in Ts are empty +// classes, then CompressedTuple is itself an empty class. +// +// To access the members, use member get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo + +#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ +#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ + +#include +#include +#include +#include + +#include "absl/utility/utility.h" + +#if defined(_MSC_VER) && !defined(__NVCC__) +// We need to mark these classes with this declspec to ensure that +// CompressedTuple happens. +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) +#else +#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class CompressedTuple; + +namespace internal_compressed_tuple { + +template +struct Elem; +template +struct Elem, I> + : std::tuple_element> {}; +template +using ElemT = typename Elem::type; + +// Use the __is_final intrinsic if available. Where it's not available, classes +// declared with the 'final' specifier cannot be used as CompressedTuple +// elements. +// TODO(sbenza): Replace this with std::is_final in C++14. 
+template +constexpr bool IsFinal() { +#if defined(__clang__) || defined(__GNUC__) + return __is_final(T); +#else + return false; +#endif +} + +// We can't use EBCO on other CompressedTuples because that would mean that we +// derive from multiple Storage<> instantiations with the same I parameter, +// and potentially from multiple identical Storage<> instantiations. So anytime +// we use type inheritance rather than encapsulation, we mark +// CompressedTupleImpl, to make this easy to detect. +struct uses_inheritance {}; + +template +constexpr bool ShouldUseBase() { + return std::is_class::value && std::is_empty::value && !IsFinal() && + !std::is_base_of::value; +} + +// The storage class provides two specializations: +// - For empty classes, it stores T as a base class. +// - For everything else, it stores T as a member. +template ::type>()> +#else + bool UseBase = ShouldUseBase()> +#endif +struct Storage { + T value; + constexpr Storage() = default; + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : value(absl::forward(v)) {} + constexpr const T& get() const& { return value; } + T& get() & { return value; } + constexpr const T&& get() const&& { return absl::move(*this).value; } + T&& get() && { return std::move(*this).value; } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { + constexpr Storage() = default; + + template + explicit constexpr Storage(absl::in_place_t, V&& v) + : T(absl::forward(v)) {} + + constexpr const T& get() const& { return *this; } + T& get() & { return *this; } + constexpr const T&& get() const&& { return absl::move(*this); } + T&& get() && { return std::move(*this); } +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, ShouldAnyUseBase> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : uses_inheritance, + Storage::value>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, absl::index_sequence, false> + // We use the dummy identity function as above... + : Storage::value, false>... { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) + : Storage(absl::in_place, absl::forward(args))... {} + friend CompressedTuple; +}; + +std::false_type Or(std::initializer_list); +std::true_type Or(std::initializer_list); + +// MSVC requires this to be done separately rather than within the declaration +// of CompressedTuple below. 
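+// To make the storage selection above concrete, a sketch assuming
+// std::less<int> (empty and non-final, so it is stored as a base and
+// occupies no space):
+//
+//   using T = absl::container_internal::CompressedTuple<int, std::less<int>>;
+//   static_assert(sizeof(T) == sizeof(int), "empty comparator is compressed");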
+template +constexpr bool ShouldAnyUseBase() { + return decltype( + Or({std::integral_constant()>()...})){}; +} + +template +using TupleElementMoveConstructible = + typename std::conditional::value, + std::is_convertible, + std::is_constructible>::type; + +template +struct TupleMoveConstructible : std::false_type {}; + +template +struct TupleMoveConstructible, Vs...> + : std::integral_constant< + bool, absl::conjunction< + TupleElementMoveConstructible...>::value> {}; + +template +struct compressed_tuple_size; + +template +struct compressed_tuple_size> + : public std::integral_constant {}; + +template +struct TupleItemsMoveConstructible + : std::integral_constant< + bool, TupleMoveConstructible::value == + sizeof...(Vs), + T, Vs...>::value> {}; + +} // namespace internal_compressed_tuple + +// Helper class to perform the Empty Base Class Optimization. +// Ts can contain classes and non-classes, empty or not. For the ones that +// are empty classes, we perform the CompressedTuple. If all types in Ts are +// empty classes, then CompressedTuple is itself an empty class. (This +// does not apply when one or more of those empty classes is itself an empty +// CompressedTuple.) +// +// To access the members, use member .get() function. +// +// Eg: +// absl::container_internal::CompressedTuple value(7, t1, t2, +// t3); +// assert(value.get<0>() == 7); +// T1& t1 = value.get<1>(); +// const T2& t2 = value.get<2>(); +// ... +// +// https://en.cppreference.com/w/cpp/language/ebo +template +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple + : private internal_compressed_tuple::CompressedTupleImpl< + CompressedTuple, absl::index_sequence_for, + internal_compressed_tuple::ShouldAnyUseBase()> { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + template + using StorageT = internal_compressed_tuple::Storage, I>; + + public: + // There seems to be a bug in MSVC dealing in which using '=default' here will + // cause the compiler to ignore the body of other constructors. The work- + // around is to explicitly implement the default constructor. +#if defined(_MSC_VER) + constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {} +#else + constexpr CompressedTuple() = default; +#endif + explicit constexpr CompressedTuple(const Ts&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {} + + template )>>, + internal_compressed_tuple::TupleItemsMoveConstructible< + CompressedTuple, First, Vs...>>::value, + bool> = true> + explicit constexpr CompressedTuple(First&& first, Vs&&... base) + : CompressedTuple::CompressedTupleImpl(absl::in_place, + absl::forward(first), + absl::forward(base)...) {} + + template + ElemT& get() & { + return StorageT::get(); + } + + template + constexpr const ElemT& get() const& { + return StorageT::get(); + } + + template + ElemT&& get() && { + return std::move(*this).StorageT::get(); + } + + template + constexpr const ElemT&& get() const&& { + return absl::move(*this).StorageT::get(); + } +}; + +// Explicit specialization for a zero-element tuple +// (needed to avoid ambiguous overloads for the default constructor). 
+template <> +class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + +#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h new file mode 100644 index 0000000..00e9f6d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h @@ -0,0 +1,442 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ +#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct alignas(Alignment) AlignedType {}; + +// Allocates at least n bytes aligned to the specified alignment. +// Alignment must be a power of 2. It must be positive. +// +// Note that many allocators don't honor alignment requirements above certain +// threshold (usually either alignof(std::max_align_t) or alignof(void*)). +// Allocate() doesn't apply alignment corrections. If the underlying allocator +// returns insufficiently alignment pointer, that's what you are going to get. +template +void* Allocate(Alloc* alloc, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && + "allocator does not respect alignment"); + return p; +} + +// The pointer must have been previously obtained by calling +// Allocate(alloc, n). +template +void Deallocate(Alloc* alloc, void* p, size_t n) { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. 
+ A my_mem_alloc(*alloc); + AT::deallocate(my_mem_alloc, static_cast(p), + (n + sizeof(M) - 1) / sizeof(M)); +} + +namespace memory_internal { + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, + absl::index_sequence) { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))...); +} + +template +struct WithConstructedImplF { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args) const { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; +}; + +template +decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f) { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))...); +} + +template +auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { + return std::forward_as_tuple(std::get(std::forward(t))...); +} + +// Returns a tuple of references to the elements of the input tuple. T must be a +// tuple. +template +auto TupleRef(T&& t) -> decltype( + TupleRefImpl(std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>())) { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +template +decltype(std::declval()(std::declval(), std::piecewise_construct, + std::declval>(), std::declval())) +DecomposePairImpl(F&& f, std::pair, V> p) { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), + std::move(p.second)); +} + +} // namespace memory_internal + +// Constructs T into uninitialized storage pointed by `ptr` using the args +// specified in the tuple. +template +void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>()); +} + +// Constructs T using the args specified in the tuple and calls F with the +// constructed value. +template +decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f) { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f)); +} + +// Given arguments of an std::pair's consructor, PairArgs() returns a pair of +// tuples with references to the passed arguments. The tuples contain +// constructor arguments for the first and the second elements of the pair. +// +// The following two snippets are equivalent. +// +// 1. std::pair p(args...); +// +// 2. 
auto a = PairArgs(args...); +// std::pair p(std::piecewise_construct, +// std::move(a.first), std::move(a.second)); +inline std::pair, std::tuple<>> PairArgs() { return {}; } +template +std::pair, std::tuple> PairArgs(F&& f, S&& s) { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), + std::forward_as_tuple(std::forward(s))}; +} +template +std::pair, std::tuple> PairArgs( + const std::pair& p) { + return PairArgs(p.first, p.second); +} +template +std::pair, std::tuple> PairArgs(std::pair&& p) { + return PairArgs(std::forward(p.first), std::forward(p.second)); +} +template +auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s)))) { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), + memory_internal::TupleRef(std::forward(s))); +} + +// A helper function for implementing apply() in map policies. +template +auto DecomposePair(F&& f, Args&&... args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...))) { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...)); +} + +// A helper function for implementing apply() in set policies. +template +decltype(std::declval()(std::declval(), std::declval())) +DecomposeValue(F&& f, Arg&& arg) { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); +} + +// Helper functions for asan and msan. +inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_POISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_poison(m, s); +#endif + (void)m; + (void)s; +} + +inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(m, s); +#endif +#ifdef ABSL_HAVE_MEMORY_SANITIZER + __msan_unpoison(m, s); +#endif + (void)m; + (void)s; +} + +template +inline void SanitizerPoisonObject(const T* object) { + SanitizerPoisonMemoryRegion(object, sizeof(T)); +} + +template +inline void SanitizerUnpoisonObject(const T* object) { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); +} + +namespace memory_internal { + +// If Pair is a standard-layout type, OffsetOf::kFirst and +// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and +// offsetof(Pair, second) respectively. Otherwise they are -1. +// +// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout +// type, which is non-portable. +template +struct OffsetOf { + static constexpr size_t kFirst = static_cast(-1); + static constexpr size_t kSecond = static_cast(-1); +}; + +template +struct OffsetOf::type> { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); +}; + +template +struct IsLayoutCompatible { + private: + struct Pair { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() { + return std::is_standard_layout
<P>() && sizeof(P) == sizeof(Pair) &&
+           alignof(P) == alignof(Pair) &&
+           memory_internal::OffsetOf<P>::kFirst ==
+               memory_internal::OffsetOf<Pair>::kFirst &&
+           memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); +}; + +} // namespace memory_internal + +// The internal storage type for key-value containers like flat_hash_map. +// +// It is convenient for the value_type of a flat_hash_map to be +// pair; the "const K" prevents accidental modification of the key +// when dealing with the reference returned from find() and similar methods. +// However, this creates other problems; we want to be able to emplace(K, V) +// efficiently with move operations, and similarly be able to move a +// pair in insert(). +// +// The solution is this union, which aliases the const and non-const versions +// of the pair. This also allows flat_hash_map to work, even though +// that has the same efficiency issues with move in emplace() and insert() - +// but people do it anyway. +// +// If kMutableKeys is false, only the value member can be accessed. +// +// If kMutableKeys is true, key can be accessed through all slots while value +// and mutable_value must be accessed only via INITIALIZED slots. Slots are +// created and destroyed via mutable_value so that the key can be moved later. +// +// Accessing one of the union fields while the other is active is safe as +// long as they are layout-compatible, which is guaranteed by the definition of +// kMutableKeys. For C++11, the relevant section of the standard is +// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) +template +union map_slot_type { + map_slot_type() {} + ~map_slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + value_type value; + mutable_value_type mutable_value; + absl::remove_const_t key; +}; + +template +struct map_slot_policy { + using slot_type = map_slot_type; + using value_type = std::pair; + using mutable_value_type = std::pair; + + private: + static void emplace(slot_type* slot) { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = memory_internal::IsLayoutCompatible; + + public: + static value_type& element(slot_type* slot) { return slot->value; } + static const value_type& element(const slot_type* slot) { + return slot->value; + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + static K& mutable_key(slot_type* slot) { + // Still check for kMutableKeys so that we can avoid calling std::launder + // unless necessary because it can interfere with optimizations. + return kMutableKeys::value ? slot->key + : *std::launder(const_cast( + std::addressof(slot->value.first))); + } +#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) + static const K& mutable_key(slot_type* slot) { return key(slot); } +#endif + + static const K& key(const slot_type* slot) { + return kMutableKeys::value ? 
slot->key : slot->value.first; + } + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, + std::forward(args)...); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { + emplace(slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &slot->value, + std::move(other->value)); + } + } + + // Construct this slot by copying from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, + const slot_type* other) { + emplace(slot); + absl::allocator_traits::construct(*alloc, &slot->value, + other->value); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + if (kMutableKeys::value) { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } else { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + emplace(new_slot); + if (kMutableKeys::value) { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); + } else { + absl::allocator_traits::construct(*alloc, &new_slot->value, + std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h new file mode 100644 index 0000000..66068a5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h @@ -0,0 +1,122 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ +#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ + +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// This is a stateful allocator, but the state lives outside of the +// allocator (in whatever test is using the allocator). This is odd +// but helps in tests where the allocator is propagated into nested +// containers - that chain of allocators uses the same state and is +// thus easier to query for aggregate allocation information. 
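A sketch of the test pattern this comment describes, using the `CountingAllocator` defined just below; note that the int64_t counters live in the test, outside the allocator:

#include <cstdint>
#include <vector>
#include "absl/container/internal/counting_allocator.h"

void DemoCounting() {
  using absl::container_internal::CountingAllocator;
  int64_t bytes = 0;
  CountingAllocator<int> alloc(&bytes);
  std::vector<int, CountingAllocator<int>> v(alloc);
  v.reserve(100);  // `bytes` now reflects the vector's heap allocation
}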
+template +class CountingAllocator { + public: + using Allocator = std::allocator; + using AllocatorTraits = std::allocator_traits; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + + CountingAllocator() = default; + explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {} + CountingAllocator(int64_t* bytes_used, int64_t* instance_count) + : bytes_used_(bytes_used), instance_count_(instance_count) {} + + template + CountingAllocator(const CountingAllocator& x) + : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {} + + pointer allocate( + size_type n, + typename AllocatorTraits::const_void_pointer hint = nullptr) { + Allocator allocator; + pointer ptr = AllocatorTraits::allocate(allocator, n, hint); + if (bytes_used_ != nullptr) { + *bytes_used_ += n * sizeof(T); + } + return ptr; + } + + void deallocate(pointer p, size_type n) { + Allocator allocator; + AllocatorTraits::deallocate(allocator, p, n); + if (bytes_used_ != nullptr) { + *bytes_used_ -= n * sizeof(T); + } + } + + template + void construct(U* p, Args&&... args) { + Allocator allocator; + AllocatorTraits::construct(allocator, p, std::forward(args)...); + if (instance_count_ != nullptr) { + *instance_count_ += 1; + } + } + + template + void destroy(U* p) { + Allocator allocator; + // Ignore GCC warning bug. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wuse-after-free" +#endif + AllocatorTraits::destroy(allocator, p); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) +#pragma GCC diagnostic pop +#endif + if (instance_count_ != nullptr) { + *instance_count_ -= 1; + } + } + + template + class rebind { + public: + using other = CountingAllocator; + }; + + friend bool operator==(const CountingAllocator& a, + const CountingAllocator& b) { + return a.bytes_used_ == b.bytes_used_ && + a.instance_count_ == b.instance_count_; + } + + friend bool operator!=(const CountingAllocator& a, + const CountingAllocator& b) { + return !(a == b); + } + + int64_t* bytes_used_ = nullptr; + int64_t* instance_count_ = nullptr; +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h new file mode 100644 index 0000000..250e662 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h @@ -0,0 +1,163 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Define the default Hash and Eq functions for SwissTable containers. 
+// +// std::hash and std::equal_to are not appropriate hash and equal +// functions for SwissTable containers. There are two reasons for this. +// +// SwissTable containers are power of 2 sized containers: +// +// This means they use the lower bits of the hash value to find the slot for +// each entry. The typical hash function for integral types is the identity. +// This is a very weak hash function for SwissTable and any power of 2 sized +// hashtable implementation which will lead to excessive collisions. For +// SwissTable we use murmur3 style mixing to reduce collisions to a minimum. +// +// SwissTable containers support heterogeneous lookup: +// +// In order to make heterogeneous lookup work, hash and equal functions must be +// polymorphic. At the same time they have to satisfy the same requirements the +// C++ standard imposes on hash functions and equality operators. That is: +// +// if hash_default_eq(a, b) returns true for any a and b of type T, then +// hash_default_hash(a) must equal hash_default_hash(b) +// +// For SwissTable containers this requirement is relaxed to allow a and b of +// any and possibly different types. Note that like the standard the hash and +// equal functions are still bound to T. This is important because some type U +// can be hashed by/tested for equality differently depending on T. A notable +// example is `const char*`. `const char*` is treated as a c-style string when +// the hash function is hash but as a pointer when the hash +// function is hash. +// +#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/hash/hash.h" +#include "absl/strings/cord.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// The hash of an object of type T is computed by using absl::Hash. +template +struct HashEq { + using Hash = absl::Hash; + using Eq = std::equal_to; +}; + +struct StringHash { + using is_transparent = void; + + size_t operator()(absl::string_view v) const { + return absl::Hash{}(v); + } + size_t operator()(const absl::Cord& v) const { + return absl::Hash{}(v); + } +}; + +struct StringEq { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { + return lhs == rhs; + } +}; + +// Supports heterogeneous lookup for string-like elements. +struct StringHashEq { + using Hash = StringHash; + using Eq = StringEq; +}; + +template <> +struct HashEq : StringHashEq {}; +template <> +struct HashEq : StringHashEq {}; +template <> +struct HashEq : StringHashEq {}; + +// Supports heterogeneous lookup for pointers and smart pointers. 
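A sketch of what this heterogeneous support buys in practice, assuming the public absl::flat_hash_set header (which wires these defaults in); the pointer specialization itself follows below:

#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

// Probe a set of strings with a string_view: no temporary std::string.
bool ContainsName(const absl::flat_hash_set<std::string>& names,
                  absl::string_view name) {
  return names.find(name) != names.end();
}

// Probe a set of unique_ptrs with a raw pointer: Hash and Eq act on the
// underlying raw pointer.
bool Owns(const absl::flat_hash_set<std::unique_ptr<int>>& pool,
          const int* raw) {
  return pool.find(raw) != pool.end();
}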
+template +struct HashEq { + struct Hash { + using is_transparent = void; + template + size_t operator()(const U& ptr) const { + return absl::Hash{}(HashEq::ToPtr(ptr)); + } + }; + struct Eq { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + + private: + static const T* ToPtr(const T* ptr) { return ptr; } + template + static const T* ToPtr(const std::unique_ptr& ptr) { + return ptr.get(); + } + template + static const T* ToPtr(const std::shared_ptr& ptr) { + return ptr.get(); + } +}; + +template +struct HashEq> : HashEq {}; +template +struct HashEq> : HashEq {}; + +// This header's visibility is restricted. If you need to access the default +// hasher please use the container's ::hasher alias instead. +// +// Example: typename Hash = typename absl::flat_hash_map::hasher +template +using hash_default_hash = typename container_internal::HashEq::Hash; + +// This header's visibility is restricted. If you need to access the default +// key equal please use the container's ::key_equal alias instead. +// +// Example: typename Eq = typename absl::flat_hash_map::key_equal +template +using hash_default_eq = typename container_internal::HashEq::Eq; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h new file mode 100644 index 0000000..f1f555a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h @@ -0,0 +1,182 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Generates random values for testing. Specialized only for the few types we +// care about. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_internal { +namespace generator_internal { + +template +struct IsMap : std::false_type {}; + +template +struct IsMap> : std::true_type {}; + +} // namespace generator_internal + +std::mt19937_64* GetSharedRng(); + +enum Enum { + kEnumEmpty, + kEnumDeleted, +}; + +enum class EnumClass : uint64_t { + kEmpty, + kDeleted, +}; + +inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { + return o << static_cast(ec); +} + +template +struct Generator; + +template +struct Generator::value>::type> { + T operator()() const { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } +}; + +template <> +struct Generator { + Enum operator()() const { + std::uniform_int_distribution::type> + dist; + while (true) { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + EnumClass operator()() const { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } +}; + +template <> +struct Generator { + std::string operator()() const; +}; + +template <> +struct Generator { + absl::string_view operator()() const; +}; + +template <> +struct Generator { + NonStandardLayout operator()() const { + return NonStandardLayout(Generator()()); + } +}; + +template +struct Generator> { + std::pair operator()() const { + return std::pair(Generator::type>()(), + Generator::type>()()); + } +}; + +template +struct Generator> { + std::tuple operator()() const { + return std::tuple(Generator::type>()()...); + } +}; + +template +struct Generator> { + std::unique_ptr operator()() const { + return absl::make_unique(Generator()()); + } +}; + +template +struct Generator().key()), + decltype(std::declval().value())>> + : Generator().key())>::type, + typename std::decay().value())>::type>> {}; + +template +using GeneratedType = decltype( + std::declval::value, + typename Container::value_type, + typename Container::key_type>::type>&>()()); + +// Naive wrapper that performs a linear search of previous values. +// Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
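A sketch of driving these test-only generators, including the unique wrapper defined just below (reading its two template parameters as the value type and the cap is an assumption inferred from the assert; Generator<std::string> is declared here but defined in the matching test source):

#include <string>
#include "absl/container/internal/hash_generator_testing.h"

using absl::container_internal::hash_internal::Generator;
using absl::container_internal::hash_internal::UniqueGenerator;

std::string RandomString() { return Generator<std::string>()(); }

// Yields up to 100 pairwise-distinct values; quadratic overall, which the
// comment above deems acceptable for small caps.
int DistinctInt(UniqueGenerator<int, 100>& gen) { return gen(); }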
+template +struct UniqueGenerator { + Generator gen; + std::vector values; + + T operator()() { + assert(values.size() < kMaxValues); + for (;;) { + T value = gen(); + if (std::find(values.begin(), values.end(), value) == values.end()) { + values.push_back(value); + return value; + } + } + } +}; + +} // namespace hash_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h new file mode 100644 index 0000000..01c40d2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h @@ -0,0 +1,184 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Utilities to help tests verify that hash tables properly handle stateful +// allocators and hash functions. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/hash/hash.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hash_testing_internal { + +template +struct WithId { + WithId() : id_(next_id()) {} + WithId(const WithId& that) : id_(that.id_) {} + WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } + WithId& operator=(const WithId& that) { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const { return id_; } + + friend bool operator==(const WithId& a, const WithId& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } + + protected: + explicit WithId(size_t id) : id_(id) {} + + private: + size_t id_; + + template + static size_t next_id() { + // 0 is reserved for moved from state. 
+ static size_t gId = 1; + return gId++; + } +}; + +} // namespace hash_testing_internal + +struct NonStandardLayout { + NonStandardLayout() {} + explicit NonStandardLayout(std::string s) : value(std::move(s)) {} + virtual ~NonStandardLayout() {} + + friend bool operator==(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, + const NonStandardLayout& b) { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) { + return H::combine(std::move(h), v.value); + } + + std::string value; +}; + +struct StatefulTestingHash + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingHash> { + template + size_t operator()(const T& t) const { + return absl::Hash{}(t); + } +}; + +struct StatefulTestingEqual + : absl::container_internal::hash_testing_internal::WithId< + StatefulTestingEqual> { + template + bool operator()(const T& t, const U& u) const { + return t == u; + } +}; + +// It is expected that Alloc() == Alloc() for all allocators so we cannot use +// WithId base. We need to explicitly assign ids. +template +struct Alloc : std::allocator { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. + explicit Alloc(size_t id = 0) : id_(id) {} + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} + + template + struct rebind { + using other = Alloc; + }; + + size_t id() const { return id_; } + + friend bool operator==(const Alloc& a, const Alloc& b) { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } + + private: + size_t id_ = (std::numeric_limits::max)(); +}; + +template +auto items(const Map& m) -> std::vector< + std::pair> { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); + return res; +} + +template +auto keys(const Set& s) + -> std::vector::type> { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) res.emplace_back(v); + return res; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions +// where the unordered containers are missing certain constructors that +// take allocator arguments. This test is defined ad-hoc for the platforms +// we care about (notably Crosstool 17) because libstdcxx's useless +// versioning scheme precludes a more principled solution. +// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) +// "the unordered associative containers in and +// meet the allocator-aware container requirements;" +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ +( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 +#else +#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 +#endif + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h new file mode 100644 index 0000000..46c97b1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h @@ -0,0 +1,208 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ +#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ + +#include +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Defines how slots are initialized/destroyed/moved. +template +struct hash_policy_traits { + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + private: + struct ReturnKey { + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 + template ::value, int> = 0> + static key_type& Impl(Key&& k, int) { + return *std::launder( + const_cast(std::addressof(std::forward(k)))); + } +#endif + + template + static Key Impl(Key&& k, char) { + return std::forward(k); + } + + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + auto operator()(Key&& k, const Args&...) const + -> decltype(Impl(std::forward(k), 0)) { + return Impl(std::forward(k), 0); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type {}; + + template + struct ConstantIteratorsImpl> + : P::constant_iterators {}; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. + using constant_iterators = ConstantIteratorsImpl<>; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. 
+ // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static auto element(slot_type* slot) -> decltype(P::element(slot)) { + return P::element(slot); + } + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. + // Used for node handle manipulation. + template + static auto mutable_key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. 
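The two `transfer_impl` overloads below use a common C++11 dispatch idiom: a trailing `int`/`char` parameter, where the preferred `int` overload is only viable when the expression in its trailing decltype compiles. A standalone sketch of the same idiom, with illustrative names:

// Preferred when T::shrink_to_fit() is well-formed; otherwise removed by
// SFINAE so the char fallback is chosen.
template <class T>
auto MaybeShrink(T& t, int) -> decltype((void)t.shrink_to_fit()) {
  t.shrink_to_fit();
}

template <class T>
void MaybeShrink(T&, char) {}  // fallback: no-op

template <class T>
void Shrink(T& t) {
  MaybeShrink(t, 0);  // the literal 0 is int, so the decltype overload wins
}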
+ template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, + slot_type* old_slot, char) { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h new file mode 100644 index 0000000..19d5212 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h @@ -0,0 +1,110 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This library provides APIs to debug the probing behavior of hash tables. +// +// In general, the probing behavior is a black box for users and only the +// side effects can be measured in the form of performance differences. +// These APIs give a glimpse on the actual behavior of the probing algorithms in +// these hashtables given a specified hash function and a set of elements. +// +// The probe count distribution can be used to assess the quality of the hash +// function for that particular hash table. Note that a hash function that +// performs well in one hash table implementation does not necessarily performs +// well in a different one. +// +// This library supports std::unordered_{set,map}, dense_hash_{set,map} and +// absl::{flat,node,string}_hash_{set,map}. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ + +#include +#include +#include +#include + +#include "absl/container/internal/hashtable_debug_hooks.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Returns the number of probes required to lookup `key`. Returns 0 for a +// search with no collisions. Higher values mean more hash collisions occurred; +// however, the exact meaning of this number varies according to the container +// type. +template +size_t GetHashtableDebugNumProbes( + const C& c, const typename C::key_type& key) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::GetNumProbes(c, key); +} + +// Gets a histogram of the number of probes for each elements in the container. +// The sum of all the values in the vector is equal to container.size(). 
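Before the histogram helper below, a sketch of calling the probe-count API defined above; the default HashtableDebugAccess uses the standard bucket interface, so a plain std::unordered_set works:

#include <unordered_set>
#include "absl/container/internal/hashtable_debug.h"

size_t ProbesFor(const std::unordered_set<int>& s, int key) {
  return absl::container_internal::GetHashtableDebugNumProbes(s, key);
}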
+template +std::vector GetHashtableDebugNumProbesHistogram(const C& container) { + std::vector v; + for (auto it = container.begin(); it != container.end(); ++it) { + size_t num_probes = GetHashtableDebugNumProbes( + container, + absl::container_internal::hashtable_debug_internal::GetKey(*it, 0)); + v.resize((std::max)(v.size(), num_probes + 1)); + v[num_probes]++; + } + return v; +} + +struct HashtableDebugProbeSummary { + size_t total_elements; + size_t total_num_probes; + double mean; +}; + +// Gets a summary of the probe count distribution for the elements in the +// container. +template +HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) { + auto probes = GetHashtableDebugNumProbesHistogram(container); + HashtableDebugProbeSummary summary = {}; + for (size_t i = 0; i < probes.size(); ++i) { + summary.total_elements += probes[i]; + summary.total_num_probes += probes[i] * i; + } + summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; + return summary; +} + +// Returns the number of bytes requested from the allocator by the container +// and not freed. +template +size_t AllocatedByteSize(const C& c) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); +} + +// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` +// and `c.size()` is equal to `num_elements`. +template +size_t LowerBoundAllocatedByteSize(size_t num_elements) { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h new file mode 100644 index 0000000..3e9ea59 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h @@ -0,0 +1,85 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Provides the internal API for hashtable_debug.h. + +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ + +#include + +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace hashtable_debug_internal { + +// If it is a map, call get<0>(). +using std::get; +template +auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { + return get<0>(pair); +} + +// If it is not a map, return the value directly. +template +const typename T::key_type& GetKey(const typename T::key_type& key, char) { + return key; +} + +// Containers should specialize this to provide debug information for that +// container. +template +struct HashtableDebugAccess { + // Returns the number of probes required to find `key` in `c`. 
The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. + static size_t GetNumProbes(const Container& c, + const typename Container::key_type& key) { + if (!c.bucket_count()) return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { + if (it == e) return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; + } + } + + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); + + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. + // + // static size_t LowerBoundAllocatedByteSize(size_t num_elements); +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h new file mode 100644 index 0000000..d4016d8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h @@ -0,0 +1,299 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: hashtablez_sampler.h +// ----------------------------------------------------------------------------- +// +// This header file defines the API for a low level library to sample hashtables +// and collect runtime statistics about them. +// +// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which +// store information about a single sample. +// +// `Record*` methods store information into samples. +// `Sample()` and `Unsample()` make use of a single global sampler with +// properties controlled by the flags hashtablez_enabled, +// hashtablez_sample_rate, and hashtablez_max_samples. +// +// WARNING +// +// Using this sampling API may cause sampled Swiss tables to use the global +// allocator (operator `new`) in addition to any custom allocator. If you +// are using a table in an unusual circumstance where allocation or calling a +// linux syscall is unacceptable, this could interfere. +// +// This utility is internal-only. Use at your own risk. 
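A sketch of the process-wide knobs described above, using the setters declared later in this header (an internal API, so treat this as illustrative only):

#include "absl/container/internal/hashtablez_sampler.h"

void EnableSwissTableSampling() {
  namespace ci = absl::container_internal;
  ci::SetHashtablezEnabled(true);
  ci::SetHashtablezSampleParameter(1 << 10);  // roughly one table in 1024
  ci::SetHashtablezMaxSamples(500);           // soft cap on retained samples
}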
+ +#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ +#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/per_thread_tls.h" +#include "absl/base/optimization.h" +#include "absl/profiling/internal/sample_recorder.h" +#include "absl/synchronization/mutex.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// Stores information about a sampled hashtable. All mutations to this *must* +// be made through `Record*` functions below. All reads from this *must* only +// occur in the callback to `HashtablezSampler::Iterate`. +struct HashtablezInfo : public profiling_internal::Sample { + // Constructs the object but does not fill in any fields. + HashtablezInfo(); + ~HashtablezInfo(); + HashtablezInfo(const HashtablezInfo&) = delete; + HashtablezInfo& operator=(const HashtablezInfo&) = delete; + + // Puts the object into a clean state, fills in the logically `const` members, + // blocking for any readers that are currently sampling the object. + void PrepareForSampling(int64_t stride, size_t inline_element_size_value) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + + // These fields are mutated by the various Record* APIs and need to be + // thread-safe. + std::atomic capacity; + std::atomic size; + std::atomic num_erases; + std::atomic num_rehashes; + std::atomic max_probe_length; + std::atomic total_probe_length; + std::atomic hashes_bitwise_or; + std::atomic hashes_bitwise_and; + std::atomic hashes_bitwise_xor; + std::atomic max_reserve; + + // All of the fields below are set by `PrepareForSampling`, they must not be + // mutated in `Record*` functions. They are logically `const` in that sense. + // These are guarded by init_mu, but that is not externalized to clients, + // which can read them only during `SampleRecorder::Iterate` which will hold + // the lock. + static constexpr int kMaxStackDepth = 64; + absl::Time create_time; + int32_t depth; + void* stack[kMaxStackDepth]; + size_t inline_element_size; // How big is the slot? +}; + +inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { +#ifdef ABSL_INTERNAL_HAVE_SSE2 + total_probe_length /= 16; +#else + total_probe_length /= 8; +#endif + info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_rehashes.store( + 1 + info->num_rehashes.load(std::memory_order_relaxed), + std::memory_order_relaxed); +} + +inline void RecordReservationSlow(HashtablezInfo* info, + size_t target_capacity) { + info->max_reserve.store( + (std::max)(info->max_reserve.load(std::memory_order_relaxed), + target_capacity), + std::memory_order_relaxed); +} + +inline void RecordClearedReservationSlow(HashtablezInfo* info) { + info->max_reserve.store(0, std::memory_order_relaxed); +} + +inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, + size_t capacity) { + info->size.store(size, std::memory_order_relaxed); + info->capacity.store(capacity, std::memory_order_relaxed); + if (size == 0) { + // This is a clear, reset the total/num_erases too. 
+ info->total_probe_length.store(0, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); + } +} + +void RecordInsertSlow(HashtablezInfo* info, size_t hash, + size_t distance_from_desired); + +inline void RecordEraseSlow(HashtablezInfo* info) { + info->size.fetch_sub(1, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_erases.store( + 1 + info->num_erases.load(std::memory_order_relaxed), + std::memory_order_relaxed); +} + +struct SamplingState { + int64_t next_sample; + // When we make a sampling decision, we record that distance so we can weight + // each sample. + int64_t sample_stride; +}; + +HashtablezInfo* SampleSlow(SamplingState& next_sample, + size_t inline_element_size); +void UnsampleSlow(HashtablezInfo* info); + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +class HashtablezInfoHandle { + public: + explicit HashtablezInfoHandle() : info_(nullptr) {} + explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {} + ~HashtablezInfoHandle() { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + UnsampleSlow(info_); + } + + HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; + HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; + + HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept + : info_(absl::exchange(o.info_, nullptr)) {} + HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept { + if (ABSL_PREDICT_FALSE(info_ != nullptr)) { + UnsampleSlow(info_); + } + info_ = absl::exchange(o.info_, nullptr); + return *this; + } + + inline void RecordStorageChanged(size_t size, size_t capacity) { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordStorageChangedSlow(info_, size, capacity); + } + + inline void RecordRehash(size_t total_probe_length) { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordRehashSlow(info_, total_probe_length); + } + + inline void RecordReservation(size_t target_capacity) { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordReservationSlow(info_, target_capacity); + } + + inline void RecordClearedReservation() { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordClearedReservationSlow(info_); + } + + inline void RecordInsert(size_t hash, size_t distance_from_desired) { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordInsertSlow(info_, hash, distance_from_desired); + } + + inline void RecordErase() { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; + RecordEraseSlow(info_); + } + + friend inline void swap(HashtablezInfoHandle& lhs, + HashtablezInfoHandle& rhs) { + std::swap(lhs.info_, rhs.info_); + } + + private: + friend class HashtablezInfoHandlePeer; + HashtablezInfo* info_; +}; +#else +// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can +// be removed by the linker, in order to reduce the binary size. 
+class HashtablezInfoHandle { + public: + explicit HashtablezInfoHandle() = default; + explicit HashtablezInfoHandle(std::nullptr_t) {} + + inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} + inline void RecordRehash(size_t /*total_probe_length*/) {} + inline void RecordReservation(size_t /*target_capacity*/) {} + inline void RecordClearedReservation() {} + inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} + inline void RecordErase() {} + + friend inline void swap(HashtablezInfoHandle& /*lhs*/, + HashtablezInfoHandle& /*rhs*/) {} +}; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) +extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; +#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + +// Returns an RAII sampling handle that manages registration and unregistation +// with the global sampler. +inline HashtablezInfoHandle Sample( + size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { +#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) + if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { + return HashtablezInfoHandle(nullptr); + } + return HashtablezInfoHandle( + SampleSlow(global_next_sample, inline_element_size)); +#else + return HashtablezInfoHandle(nullptr); +#endif // !ABSL_PER_THREAD_TLS +} + +using HashtablezSampler = + ::absl::profiling_internal::SampleRecorder; + +// Returns a global Sampler. +HashtablezSampler& GlobalHashtablezSampler(); + +using HashtablezConfigListener = void (*)(); +void SetHashtablezConfigListener(HashtablezConfigListener l); + +// Enables or disables sampling for Swiss tables. +bool IsHashtablezEnabled(); +void SetHashtablezEnabled(bool enabled); +void SetHashtablezEnabledInternal(bool enabled); + +// Sets the rate at which Swiss tables will be sampled. +int32_t GetHashtablezSampleParameter(); +void SetHashtablezSampleParameter(int32_t rate); +void SetHashtablezSampleParameterInternal(int32_t rate); + +// Sets a soft max for the number of samples that will be kept. +int32_t GetHashtablezMaxSamples(); +void SetHashtablezMaxSamples(int32_t max); +void SetHashtablezMaxSamplesInternal(int32_t max); + +// Configuration override. +// This allows process-wide sampling without depending on order of +// initialization of static storage duration objects. +// The definition of this constant is weak, which allows us to inject a +// different value for it at link time. +extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h new file mode 100644 index 0000000..54c92a0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h @@ -0,0 +1,953 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ +#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/macros.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inlined_vector_internal { + +// GCC does not deal very well with the below code +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#endif + +template +using AllocatorTraits = std::allocator_traits; +template +using ValueType = typename AllocatorTraits::value_type; +template +using SizeType = typename AllocatorTraits::size_type; +template +using Pointer = typename AllocatorTraits::pointer; +template +using ConstPointer = typename AllocatorTraits::const_pointer; +template +using SizeType = typename AllocatorTraits::size_type; +template +using DifferenceType = typename AllocatorTraits::difference_type; +template +using Reference = ValueType&; +template +using ConstReference = const ValueType&; +template +using Iterator = Pointer; +template +using ConstIterator = ConstPointer; +template +using ReverseIterator = typename std::reverse_iterator>; +template +using ConstReverseIterator = typename std::reverse_iterator>; +template +using MoveIterator = typename std::move_iterator>; + +template +using IsAtLeastForwardIterator = std::is_convertible< + typename std::iterator_traits::iterator_category, + std::forward_iterator_tag>; + +template +using IsMemcpyOk = + absl::conjunction>>, + absl::is_trivially_copy_constructible>, + absl::is_trivially_copy_assignable>, + absl::is_trivially_destructible>>; + +template +struct TypeIdentity { + using type = T; +}; + +// Used for function arguments in template functions to prevent ADL by forcing +// callers to explicitly specify the template parameter. 
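+//
+// A minimal sketch of the idea (the `Fill` function below is hypothetical,
+// not part of this header): a parameter type nested behind `TypeIdentity` is
+// a non-deduced context, so the caller has to name the template argument.
+//
+//   template <typename T>
+//   void Fill(typename TypeIdentity<T>::type value);
+//
+//   Fill(42);       // error: `T` cannot be deduced from the argument
+//   Fill<int>(42);  // OK: `T` is supplied explicitly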
+template +using NoTypeDeduction = typename TypeIdentity::type; + +template >::value> +struct DestroyAdapter; + +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + for (SizeType i = destroy_size; i != 0;) { + --i; + AllocatorTraits::destroy(allocator, destroy_first + i); + } + } +}; + +template +struct DestroyAdapter { + static void DestroyElements(A& allocator, Pointer destroy_first, + SizeType destroy_size) { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } +}; + +template +struct Allocation { + Pointer data; + SizeType capacity; +}; + +template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> +struct MallocAdapter { + static Allocation Allocate(A& allocator, SizeType requested_capacity) { + return {AllocatorTraits::allocate(allocator, requested_capacity), + requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, + SizeType capacity) { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } +}; + +template +void ConstructElements(NoTypeDeduction& allocator, + Pointer construct_first, ValueAdapter& values, + SizeType construct_size) { + for (SizeType i = 0; i < construct_size; ++i) { + ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } + ABSL_INTERNAL_CATCH_ANY { + DestroyAdapter::DestroyElements(allocator, construct_first, i); + ABSL_INTERNAL_RETHROW; + } + } +} + +template +void AssignElements(Pointer assign_first, ValueAdapter& values, + SizeType assign_size) { + for (SizeType i = 0; i < assign_size; ++i) { + values.AssignNext(assign_first + i); + } +} + +template +struct StorageView { + Pointer data; + SizeType size; + SizeType capacity; +}; + +template +class IteratorValueAdapter { + public: + explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *it_); + ++it_; + } + + void AssignNext(Pointer assign_at) { + *assign_at = *it_; + ++it_; + } + + private: + Iterator it_; +}; + +template +class CopyValueAdapter { + public: + explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at, *ptr_); + } + + void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } + + private: + ConstPointer ptr_; +}; + +template +class DefaultValueAdapter { + public: + explicit DefaultValueAdapter() {} + + void ConstructNext(A& allocator, Pointer construct_at) { + AllocatorTraits::construct(allocator, construct_at); + } + + void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } +}; + +template +class AllocationTransaction { + public: + explicit AllocationTransaction(A& allocator) + : allocator_data_(allocator, nullptr), capacity_(0) {} + + ~AllocationTransaction() { + if (DidAllocate()) { + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); + } + } + + AllocationTransaction(const AllocationTransaction&) = delete; + void operator=(const AllocationTransaction&) = delete; + + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetCapacity() { return capacity_; } + + bool DidAllocate() { return GetData() != nullptr; } + + Pointer Allocate(SizeType requested_capacity) { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + GetCapacity() = 
result.capacity; + return result.data; + } + + ABSL_MUST_USE_RESULT Allocation Release() && { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: + void Reset() { + GetData() = nullptr; + GetCapacity() = 0; + } + + container_internal::CompressedTuple> allocator_data_; + SizeType capacity_; +}; + +template +class ConstructionTransaction { + public: + explicit ConstructionTransaction(A& allocator) + : allocator_data_(allocator, nullptr), size_(0) {} + + ~ConstructionTransaction() { + if (DidConstruct()) { + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); + } + } + + ConstructionTransaction(const ConstructionTransaction&) = delete; + void operator=(const ConstructionTransaction&) = delete; + + A& GetAllocator() { return allocator_data_.template get<0>(); } + Pointer& GetData() { return allocator_data_.template get<1>(); } + SizeType& GetSize() { return size_; } + + bool DidConstruct() { return GetData() != nullptr; } + template + void Construct(Pointer data, ValueAdapter& values, SizeType size) { + ConstructElements(GetAllocator(), data, values, size); + GetData() = data; + GetSize() = size; + } + void Commit() && { + GetData() = nullptr; + GetSize() = 0; + } + + private: + container_internal::CompressedTuple> allocator_data_; + SizeType size_; +}; + +template +class Storage { + public: + static SizeType NextCapacity(SizeType current_capacity) { + return current_capacity * 2; + } + + static SizeType ComputeCapacity(SizeType current_capacity, + SizeType requested_capacity) { + return (std::max)(NextCapacity(current_capacity), requested_capacity); + } + + // --------------------------------------------------------------------------- + // Storage Constructors and Destructor + // --------------------------------------------------------------------------- + + Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} + + explicit Storage(const A& allocator) + : metadata_(allocator, /* size and is_allocated */ 0u) {} + + ~Storage() { + if (GetSizeAndIsAllocated() == 0) { + // Empty and not allocated; nothing to do. + } else if (IsMemcpyOk::value) { + // No destructors need to be run; just deallocate if necessary. + DeallocateIfAllocated(); + } else { + DestroyContents(); + } + } + + // --------------------------------------------------------------------------- + // Storage Member Accessors + // --------------------------------------------------------------------------- + + SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } + + const SizeType& GetSizeAndIsAllocated() const { + return metadata_.template get<1>(); + } + + SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } + + bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } + + Pointer GetAllocatedData() { return data_.allocated.allocated_data; } + + ConstPointer GetAllocatedData() const { + return data_.allocated.allocated_data; + } + + Pointer GetInlinedData() { + return reinterpret_cast>( + std::addressof(data_.inlined.inlined_data[0])); + } + + ConstPointer GetInlinedData() const { + return reinterpret_cast>( + std::addressof(data_.inlined.inlined_data[0])); + } + + SizeType GetAllocatedCapacity() const { + return data_.allocated.allocated_capacity; + } + + SizeType GetInlinedCapacity() const { return static_cast>(N); } + + StorageView MakeStorageView() { + return GetIsAllocated() ? 
StorageView{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()} + : StorageView{GetInlinedData(), GetSize(), + GetInlinedCapacity()}; + } + + A& GetAllocator() { return metadata_.template get<0>(); } + + const A& GetAllocator() const { return metadata_.template get<0>(); } + + // --------------------------------------------------------------------------- + // Storage Member Mutators + // --------------------------------------------------------------------------- + + ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); + + template + void Initialize(ValueAdapter values, SizeType new_size); + + template + void Assign(ValueAdapter values, SizeType new_size); + + template + void Resize(ValueAdapter values, SizeType new_size); + + template + Iterator Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count); + + template + Reference EmplaceBack(Args&&... args); + + Iterator Erase(ConstIterator from, ConstIterator to); + + void Reserve(SizeType requested_capacity); + + void ShrinkToFit(); + + void Swap(Storage* other_storage_ptr); + + void SetIsAllocated() { + GetSizeAndIsAllocated() |= static_cast>(1); + } + + void UnsetIsAllocated() { + GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); + } + + void SetSize(SizeType size) { + GetSizeAndIsAllocated() = + (size << 1) | static_cast>(GetIsAllocated()); + } + + void SetAllocatedSize(SizeType size) { + GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); + } + + void SetInlinedSize(SizeType size) { + GetSizeAndIsAllocated() = size << static_cast>(1); + } + + void AddSize(SizeType count) { + GetSizeAndIsAllocated() += count << static_cast>(1); + } + + void SubtractSize(SizeType count) { + ABSL_HARDENING_ASSERT(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast>(1); + } + + void SetAllocation(Allocation allocation) { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; + } + + void MemcpyFrom(const Storage& other_storage) { + ABSL_HARDENING_ASSERT(IsMemcpyOk::value || + other_storage.GetIsAllocated()); + + GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); + data_ = other_storage.data_; + } + + void DeallocateIfAllocated() { + if (GetIsAllocated()) { + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), + GetAllocatedCapacity()); + } + } + + private: + ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); + + using Metadata = container_internal::CompressedTuple>; + + struct Allocated { + Pointer allocated_data; + SizeType allocated_capacity; + }; + + struct Inlined { + alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; + }; + + union Data { + Allocated allocated; + Inlined inlined; + }; + + template + ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); + + Metadata metadata_; + Data data_; +}; + +template +void Storage::DestroyContents() { + Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); + DeallocateIfAllocated(); +} + +template +void Storage::InitFrom(const Storage& other) { + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. + ConstPointer src; + Pointer dst; + if (!other.GetIsAllocated()) { + dst = GetInlinedData(); + src = other.GetInlinedData(); + } else { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. 
If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; + src = other.GetAllocatedData(); + } + if (IsMemcpyOk::value) { + std::memcpy(reinterpret_cast(dst), + reinterpret_cast(src), n * sizeof(ValueType)); + } else { + auto values = IteratorValueAdapter>(src); + ConstructElements(GetAllocator(), dst, values, n); + } + GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); +} + +template +template +auto Storage::Initialize(ValueAdapter values, SizeType new_size) + -> void { + // Only callable from constructors! + ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); + + Pointer construct_data; + if (new_size > GetInlinedCapacity()) { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); + SetIsAllocated(); + } else { + construct_data = GetInlinedData(); + } + + ConstructElements(GetAllocator(), construct_data, values, new_size); + + // Since the initial size was guaranteed to be `0` and the allocated bit is + // already correct for either case, *adding* `new_size` gives us the correct + // result faster than setting it directly. + AddSize(new_size); +} + +template +template +auto Storage::Assign(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocator()); + + absl::Span> assign_loop; + absl::Span> construct_loop; + absl::Span> destroy_loop; + + if (new_size > storage_view.capacity) { + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; + destroy_loop = {storage_view.data, storage_view.size}; + } else if (new_size > storage_view.size) { + assign_loop = {storage_view.data, storage_view.size}; + construct_loop = {storage_view.data + storage_view.size, + new_size - storage_view.size}; + } else { + assign_loop = {storage_view.data, new_size}; + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + AssignElements(assign_loop.data(), values, assign_loop.size()); + + ConstructElements(GetAllocator(), construct_loop.data(), values, + construct_loop.size()); + + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), + destroy_loop.size()); + + if (allocation_tx.DidAllocate()) { + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + SetSize(new_size); +} + +template +template +auto Storage::Resize(ValueAdapter values, SizeType new_size) + -> void { + StorageView storage_view = MakeStorageView(); + Pointer const base = storage_view.data; + const SizeType size = storage_view.size; + A& alloc = GetAllocator(); + if (new_size <= size) { + // Destroy extra old elements. 
+ DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); + } else if (new_size <= storage_view.capacity) { + // Construct new elements in place. + ConstructElements(alloc, base + size, values, new_size - size); + } else { + // Steps: + // a. Allocate new backing store. + // b. Construct new elements in new backing store. + // c. Move existing elements from old backing store to new backing store. + // d. Destroy all elements in old backing store. + // Use transactional wrappers for the first two steps so we can roll + // back if necessary due to exceptions. + AllocationTransaction allocation_tx(alloc); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + ConstructionTransaction construction_tx(alloc); + construction_tx.Construct(new_data + size, values, new_size - size); + + IteratorValueAdapter> move_values( + (MoveIterator(base))); + ConstructElements(alloc, new_data, move_values, size); + + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + SetSize(new_size); +} + +template +template +auto Storage::Insert(ConstIterator pos, ValueAdapter values, + SizeType insert_count) -> Iterator { + StorageView storage_view = MakeStorageView(); + + SizeType insert_index = + std::distance(ConstIterator(storage_view.data), pos); + SizeType insert_end_index = insert_index + insert_count; + SizeType new_size = storage_view.size + insert_count; + + if (new_size > storage_view.capacity) { + AllocationTransaction allocation_tx(GetAllocator()); + ConstructionTransaction construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + construction_tx.Construct(new_data + insert_index, values, insert_count); + + move_construction_tx.Construct(new_data, move_values, insert_index); + + ConstructElements(GetAllocator(), new_data + insert_end_index, + move_values, storage_view.size - insert_index); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + + SetAllocatedSize(new_size); + return Iterator(new_data + insert_index); + } else { + SizeType move_construction_destination_index = + (std::max)(insert_end_index, storage_view.size); + + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_construction_values( + MoveIterator(storage_view.data + + (move_construction_destination_index - insert_count))); + absl::Span> move_construction = { + storage_view.data + move_construction_destination_index, + new_size - move_construction_destination_index}; + + Pointer move_assignment_values = storage_view.data + insert_index; + absl::Span> move_assignment = { + storage_view.data + insert_end_index, + move_construction_destination_index - insert_end_index}; + + absl::Span> insert_assignment = {move_assignment_values, + move_construction.size()}; + + absl::Span> insert_construction = { + insert_assignment.data() + insert_assignment.size(), + insert_count - 
insert_assignment.size()}; + + move_construction_tx.Construct(move_construction.data(), + move_construction_values, + move_construction.size()); + + for (Pointer + destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); + ;) { + --destination; + --source; + if (destination < last_destination) break; + *destination = std::move(*source); + } + + AssignElements(insert_assignment.data(), values, + insert_assignment.size()); + + ConstructElements(GetAllocator(), insert_construction.data(), values, + insert_construction.size()); + + std::move(move_construction_tx).Commit(); + + AddSize(insert_count); + return Iterator(storage_view.data + insert_index); + } +} + +template +template +auto Storage::EmplaceBack(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + const SizeType n = storage_view.size; + if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { + // Fast path; new element fits. + Pointer last_ptr = storage_view.data + n; + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + AddSize(1); + return *last_ptr; + } + // TODO(b/173712035): Annotate with musttail attribute to prevent regression. + return EmplaceBackSlow(std::forward(args)...); +} + +template +template +auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { + StorageView storage_view = MakeStorageView(); + AllocationTransaction allocation_tx(GetAllocator()); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); + Pointer last_ptr = construct_data + storage_view.size; + + // Construct new element. + AllocatorTraits::construct(GetAllocator(), last_ptr, + std::forward(args)...); + // Move elements from old backing store to new backing store. + ABSL_INTERNAL_TRY { + ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + AllocatorTraits::destroy(GetAllocator(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + // Destroy elements in old backing store. 
+ DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + AddSize(1); + return *last_ptr; +} + +template +auto Storage::Erase(ConstIterator from, ConstIterator to) + -> Iterator { + StorageView storage_view = MakeStorageView(); + + SizeType erase_size = std::distance(from, to); + SizeType erase_index = + std::distance(ConstIterator(storage_view.data), from); + SizeType erase_end_index = erase_index + erase_size; + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data + erase_end_index)); + + AssignElements(storage_view.data + erase_index, move_values, + storage_view.size - erase_end_index); + + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), + erase_size); + + SubtractSize(erase_size); + return Iterator(storage_view.data + erase_index); +} + +template +auto Storage::Reserve(SizeType requested_capacity) -> void { + StorageView storage_view = MakeStorageView(); + + if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + SizeType new_requested_capacity = + ComputeCapacity(storage_view.capacity, requested_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); + + ConstructElements(GetAllocator(), new_data, move_values, + storage_view.size); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); +} + +template +auto Storage::ShrinkToFit() -> void { + // May only be called on allocated instances! + ABSL_HARDENING_ASSERT(GetIsAllocated()); + + StorageView storage_view{GetAllocatedData(), GetSize(), + GetAllocatedCapacity()}; + + if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data)); + + Pointer construct_data; + if (storage_view.size > GetInlinedCapacity()) { + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) { + // Already using the smallest available heap allocation. 
+ return; + } + } else { + construct_data = GetInlinedData(); + } + + ABSL_INTERNAL_TRY { + ConstructElements(GetAllocator(), construct_data, move_values, + storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY { + SetAllocation({storage_view.data, storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, + storage_view.size); + + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, + storage_view.capacity); + + if (allocation_tx.DidAllocate()) { + SetAllocation(std::move(allocation_tx).Release()); + } else { + UnsetIsAllocated(); + } +} + +template +auto Storage::Swap(Storage* other_storage_ptr) -> void { + using std::swap; + ABSL_HARDENING_ASSERT(this != other_storage_ptr); + + if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { + swap(data_.allocated, other_storage_ptr->data_.allocated); + } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { + Storage* small_ptr = this; + Storage* large_ptr = other_storage_ptr; + if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); + + for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { + swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); + } + + IteratorValueAdapter> move_values( + MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); + + ConstructElements(large_ptr->GetAllocator(), + small_ptr->GetInlinedData() + small_ptr->GetSize(), + move_values, + large_ptr->GetSize() - small_ptr->GetSize()); + + DestroyAdapter::DestroyElements( + large_ptr->GetAllocator(), + large_ptr->GetInlinedData() + small_ptr->GetSize(), + large_ptr->GetSize() - small_ptr->GetSize()); + } else { + Storage* allocated_ptr = this; + Storage* inlined_ptr = other_storage_ptr; + if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); + + StorageView allocated_storage_view{ + allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), + allocated_ptr->GetAllocatedCapacity()}; + + IteratorValueAdapter> move_values( + MoveIterator(inlined_ptr->GetInlinedData())); + + ABSL_INTERNAL_TRY { + ConstructElements(inlined_ptr->GetAllocator(), + allocated_ptr->GetInlinedData(), move_values, + inlined_ptr->GetSize()); + } + ABSL_INTERNAL_CATCH_ANY { + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), + inlined_ptr->GetInlinedData(), + inlined_ptr->GetSize()); + + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, + allocated_storage_view.capacity}); + } + + swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); + swap(GetAllocator(), other_storage_ptr->GetAllocator()); +} + +// End ignore "array-bounds" +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + +} // namespace inlined_vector_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/layout.h b/CAPI/cpp/grpc/include/absl/container/internal/layout.h new file mode 100644 index 0000000..a59a243 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/layout.h @@ -0,0 +1,743 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// MOTIVATION AND TUTORIAL +// +// If you want to put in a single heap allocation N doubles followed by M ints, +// it's easy if N and M are known at compile time. +// +// struct S { +// double a[N]; +// int b[M]; +// }; +// +// S* p = new S; +// +// But what if N and M are known only in run time? Class template Layout to the +// rescue! It's a portable generalization of the technique known as struct hack. +// +// // This object will tell us everything we need to know about the memory +// // layout of double[N] followed by int[M]. It's structurally identical to +// // size_t[2] that stores N and M. It's very cheap to create. +// const Layout layout(N, M); +// +// // Allocate enough memory for both arrays. `AllocSize()` tells us how much +// // memory is needed. We are free to use any allocation function we want as +// // long as it returns aligned memory. +// std::unique_ptr p(new unsigned char[layout.AllocSize()]); +// +// // Obtain the pointer to the array of doubles. +// // Equivalent to `reinterpret_cast(p.get())`. +// // +// // We could have written layout.Pointer<0>(p) instead. If all the types are +// // unique you can use either form, but if some types are repeated you must +// // use the index form. +// double* a = layout.Pointer(p.get()); +// +// // Obtain the pointer to the array of ints. +// // Equivalent to `reinterpret_cast(p.get() + N * 8)`. +// int* b = layout.Pointer(p); +// +// If we are unable to specify sizes of all fields, we can pass as many sizes as +// we can to `Partial()`. In return, it'll allow us to access the fields whose +// locations and sizes can be computed from the provided information. +// `Partial()` comes in handy when the array sizes are embedded into the +// allocation. +// +// // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. +// using L = Layout; +// +// unsigned char* Allocate(size_t n, size_t m) { +// const L layout(1, 1, n, m); +// unsigned char* p = new unsigned char[layout.AllocSize()]; +// *layout.Pointer<0>(p) = n; +// *layout.Pointer<1>(p) = m; +// return p; +// } +// +// void Use(unsigned char* p) { +// // First, extract N and M. +// // Specify that the first array has only one element. Using `prefix` we +// // can access the first two arrays but not more. +// constexpr auto prefix = L::Partial(1); +// size_t n = *prefix.Pointer<0>(p); +// size_t m = *prefix.Pointer<1>(p); +// +// // Now we can get pointers to the payload. +// const L layout(1, 1, n, m); +// double* a = layout.Pointer(p); +// int* b = layout.Pointer(p); +// } +// +// The layout we used above combines fixed-size with dynamically-sized fields. +// This is quite common. Layout is optimized for this use case and generates +// optimal code. All computations that can be performed at compile time are +// indeed performed at compile time. +// +// Efficiency tip: The order of fields matters. In `Layout` try to +// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no +// padding in between arrays. +// +// You can manually override the alignment of an array by wrapping the type in +// `Aligned`. 
`Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). `N` cannot be less than `alignof(T)`. +// +// `AllocSize()` and `Pointer()` are the most basic methods for dealing with +// memory layouts. Check out the reference or code below to discover more. +// +// EXAMPLE +// +// // Immutable move-only string with sizeof equal to sizeof(void*). The +// // string size and the characters are kept in the same heap allocation. +// class CompactString { +// public: +// CompactString(const char* s = "") { +// const size_t size = strlen(s); +// // size_t[1] followed by char[size + 1]. +// const L layout(1, size + 1); +// p_.reset(new unsigned char[layout.AllocSize()]); +// // If running under ASAN, mark the padding bytes, if any, to catch +// // memory errors. +// layout.PoisonPadding(p_.get()); +// // Store the size in the allocation. +// *layout.Pointer(p_.get()) = size; +// // Store the characters in the allocation. +// memcpy(layout.Pointer(p_.get()), s, size + 1); +// } +// +// size_t size() const { +// // Equivalent to reinterpret_cast(*p). +// return *L::Partial().Pointer(p_.get()); +// } +// +// const char* c_str() const { +// // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). +// // The argument in Partial(1) specifies that we have size_t[1] in front +// // of the characters. +// return L::Partial(1).Pointer(p_.get()); +// } +// +// private: +// // Our heap allocation contains a size_t followed by an array of chars. +// using L = Layout; +// std::unique_ptr p_; +// }; +// +// int main() { +// CompactString s = "hello"; +// assert(s.size() == 5); +// assert(strcmp(s.c_str(), "hello") == 0); +// } +// +// DOCUMENTATION +// +// The interface exported by this file consists of: +// - class `Layout<>` and its public members. +// - The public members of class `internal_layout::LayoutImpl<>`. That class +// isn't intended to be used directly, and its name and template parameter +// list are internal implementation details, but the class itself provides +// most of the functionality in this file. See comments on its members for +// detailed documentation. +// +// `Layout::Partial(count1,..., countm)` (where `m` <= `n`) returns a +// `LayoutImpl<>` object. `Layout layout(count1,..., countn)` +// creates a `Layout` object, which exposes the same functionality by inheriting +// from `LayoutImpl<>`. + +#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ +#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#include +#endif + +#if defined(__GXX_RTTI) +#define ABSL_INTERNAL_HAS_CXA_DEMANGLE +#endif + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// A type wrapper that instructs `Layout` to use the specific alignment for the +// array. `Layout<..., Aligned, ...>` has exactly the same API +// and behavior as `Layout<..., T, ...>` except that the first element of the +// array of `T` is aligned to `N` (the rest of the elements follow without +// padding). +// +// Requires: `N >= alignof(T)` and `N` is a power of 2. 
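+//
+// For example (an illustrative sketch): wrapping `char` in `Aligned` raises
+// the alignment of the whole layout, which `Alignment()` reports.
+//
+//   using L = Layout<Aligned<char, 16>, int32_t>;
+//   static_assert(L::Alignment() == 16, "first array is 16-byte aligned");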
+template +struct Aligned; + +namespace internal_layout { + +template +struct NotAligned {}; + +template +struct NotAligned> { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); +}; + +template +using IntToSize = size_t; + +template +using TypeToSize = size_t; + +template +struct Type : NotAligned { + using type = T; +}; + +template +struct Type> { + using type = T; +}; + +template +struct SizeOf : NotAligned, std::integral_constant {}; + +template +struct SizeOf> : std::integral_constant {}; + +// Note: workaround for https://gcc.gnu.org/PR88115 +template +struct AlignOf : NotAligned { + static constexpr size_t value = alignof(T); +}; + +template +struct AlignOf> { + static_assert(N % alignof(T) == 0, + "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; +}; + +// Does `Ts...` contain `T`? +template +using Contains = absl::disjunction...>; + +template +using CopyConst = + typename std::conditional::value, const To, To>::type; + +// Note: We're not qualifying this with absl:: because it doesn't compile under +// MSVC. +template +using SliceType = Span; + +// This namespace contains no types. It prevents functions defined in it from +// being found by ADL. +namespace adl_barrier { + +template +constexpr size_t Find(Needle, Needle, Ts...) { + static_assert(!Contains(), "Duplicate element type"); + return 0; +} + +template +constexpr size_t Find(Needle, T, Ts...) { + return adl_barrier::Find(Needle(), Ts()...) + 1; +} + +constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } + +// Returns `q * m` for the smallest `q` such that `q * m >= n`. +// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. +constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } + +constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } + +constexpr size_t Max(size_t a) { return a; } + +template +constexpr size_t Max(size_t a, size_t b, Ts... rest) { + return adl_barrier::Max(b < a ? a : b, rest...); +} + +template +std::string TypeName() { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) { // Demangling succeeded. + absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } else { +#if defined(__GXX_RTTI) || defined(_CPPRTTI) + absl::StrAppend(&out, "<", typeid(T).name(), ">"); +#endif + } + return out; +} + +} // namespace adl_barrier + +template +using EnableIf = typename std::enable_if::type; + +// Can `T` be a template argument of `Layout`? +template +using IsLegalElementType = std::integral_constant< + bool, !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + +template +class LayoutImpl; + +// Public base class of `Layout` and the result type of `Layout::Partial()`. +// +// `Elements...` contains all template arguments of `Layout` that created this +// instance. +// +// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments +// passed to `Layout::Partial()` or `Layout::Layout()`. +// +// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is +// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we +// can compute offsets). 
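+//
+// A worked instance of the offset arithmetic used below (illustrative): for
+// Layout<int32_t, double> constructed with sizes (3, 4), the doubles begin at
+// Align(0 + sizeof(int32_t) * 3, alignof(double)) = Align(12, 8) = 16, so
+// AllocSize() = 16 + sizeof(double) * 4 = 48 bytes, matching the examples in
+// the comments below.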
+template +class LayoutImpl, absl::index_sequence, + absl::index_sequence> { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + enum { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), + "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. + template + static constexpr size_t ElementIndex() { + static_assert(Contains, Type::type>...>(), + "Type not found"); + return adl_barrier::Find(Type(), + Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) + : size_{sizes...} {} + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const { + return 0; + } + + template = 0> + constexpr size_t Offset() const { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>::value * size_[N - 1], + ElementAlignment::value); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. + template + constexpr size_t Offset() const { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. 
+ constexpr std::array Sizes() const { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst>* Pointer(Char* p) const { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char"); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const { + return std::tuple>*...>( + Pointer(p)...); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. + template + SliceType>> Slice(Char* p) const { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. 
+ template + std::tuple::type>>...> + Slices(Char* p) const { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)...); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. + constexpr size_t AllocSize() const { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>::value * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) { + size_t start = + Offset() + SizeOf>::value * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } +#endif + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout::Partial(5, 3); + // assert(x.DebugString() == + // "@0(1)[5]; @8(4)[3]; @24(8)"); + // + // Each field is in the following format: @offset(sizeof)[size] ( + // may be missing depending on the target platform). For example, + // @8(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". + std::string DebugString() const { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf>::value...}; + const std::string types[] = { + adl_barrier::TypeName>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], + "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. + int last = static_cast(NumSizes) - 1; + if (NumTypes == NumSizes && last >= 0) { + absl::StrAppend(&res, "[", size_[last], "]"); + } + return res; + } + + private: + // Arguments of `Layout::Partial()` or `Layout::Layout()`. + size_t size_[NumSizes > 0 ? NumSizes : 1]; +}; + +template +using LayoutType = LayoutImpl< + std::tuple, absl::make_index_sequence, + absl::make_index_sequence>; + +} // namespace internal_layout + +// Descriptor of arrays of various types and sizes laid out in memory one after +// another. See the top of the file for documentation. +// +// Check out the public API of internal_layout::LayoutImpl above. 
The type is +// internal to the library but its methods are public, and they are inherited +// by `Layout`. +template +class Layout : public internal_layout::LayoutType { + public: + static_assert(sizeof...(Ts) > 0, "At least one field is required"); + static_assert( + absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)"); + + // The result type of `Partial()` with `NumSizes` arguments. + template + using PartialType = internal_layout::LayoutType; + + // `Layout` knows the element types of the arrays we want to lay out in + // memory but not the number of elements in each array. + // `Partial(size1, ..., sizeN)` allows us to specify the latter. The + // resulting immutable object can be used to obtain pointers to the + // individual arrays. + // + // It's allowed to pass fewer array sizes than the number of arrays. E.g., + // if all you need is to the offset of the second array, you only need to + // pass one argument -- the number of elements in the first array. + // + // // int[3] followed by 4 bytes of padding and an unknown number of + // // doubles. + // auto x = Layout::Partial(3); + // // doubles start at byte 16. + // assert(x.Offset<1>() == 16); + // + // If you know the number of elements in all arrays, you can still call + // `Partial()` but it's more convenient to use the constructor of `Layout`. + // + // Layout x(3, 5); + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + // + // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. + // Requires: all arguments are convertible to `size_t`. + template + static constexpr PartialType Partial(Sizes&&... sizes) { + static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); + return PartialType(absl::forward(sizes)...); + } + + // Creates a layout with the sizes of all arrays specified. If you know + // only the sizes of the first N arrays (where N can be zero), you can use + // `Partial()` defined above. The constructor is essentially equivalent to + // calling `Partial()` and passing in all array sizes; the constructor is + // provided as a convenient abbreviation. + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + constexpr explicit Layout(internal_layout::TypeToSize... sizes) + : internal_layout::LayoutType(sizes...) {} +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h new file mode 100644 index 0000000..baba574 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h @@ -0,0 +1,92 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Adapts a policy for nodes. 
+// +// The node policy should model: +// +// struct Policy { +// // Returns a new node allocated and constructed using the allocator, using +// // the specified arguments. +// template +// value_type* new_element(Alloc* alloc, Args&&... args) const; +// +// // Destroys and deallocates node using the allocator. +// template +// void delete_element(Alloc* alloc, value_type* node) const; +// }; +// +// It may also optionally define `value()` and `apply()`. For documentation on +// these, see hash_policy_traits.h. + +#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ +#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +struct node_slot_policy { + static_assert(std::is_lvalue_reference::value, ""); + + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { + *slot = Policy::new_element(alloc, std::forward(args)...); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) { + Policy::delete_element(alloc, *slot); + } + + template + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { + *new_slot = *old_slot; + } + + static size_t space_used(const slot_type* slot) { + if (slot == nullptr) return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } + + static Reference element(slot_type* slot) { return **slot; } + + template + static auto value(T* elem) -> decltype(P::value(elem)) { + return P::value(elem); + } + + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { + return P::apply(std::forward(ts)...); + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h new file mode 100644 index 0000000..c7df2ef --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h @@ -0,0 +1,198 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/base/internal/throw_delegate.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class raw_hash_map : public raw_hash_set { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. 
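+  //
+  // Illustrative usage through one of the public adapters built on this class
+  // (a sketch; `absl::flat_hash_map` derives from `raw_hash_map`):
+  //
+  //   absl::flat_hash_map<std::string, int> m;
+  //   m.insert_or_assign("a", 1);  // inserts a new mapping
+  //   m.insert_or_assign("a", 2);  // assigns over the existing value
+  //   m.try_emplace("b", 3);       // constructs only if "b" is absent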
+ template + using MappedReference = decltype(P::value( + std::addressof(std::declval()))); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()))); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + + // TODO(b/187807849): Evaluate whether to support reference mapped_type and + // remove this assertion if/when it is supported. + static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() {} + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. + // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { + return insert_or_assign(k, v).first; + } + + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + template ::value, int>::type = 0, + K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template ::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference
+  at(const key_arg<K>& key) {
+    auto it = this->find(key);
+    if (it == this->end()) {
+      base_internal::ThrowStdOutOfRange(
+          "absl::container_internal::raw_hash_map<>::at");
+    }
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedConstReference<P>
+  at(const key_arg<K>& key) const {
+    auto it = this->find(key);
+    if (it == this->end()) {
+      base_internal::ThrowStdOutOfRange(
+          "absl::container_internal::raw_hash_map<>::at");
+    }
+    return Policy::value(&*it);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P>
+  operator[](key_arg<K>&& key) {
+    return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+  }
+
+  template <class K = key_type, class P = Policy>
+  MappedReference<P>
operator[](const key_arg& key) { + return Policy::value(&*try_emplace(key).first); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, + std::forward_as_tuple(std::forward(k)), + std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h new file mode 100644 index 0000000..ea912f8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h @@ -0,0 +1,2365 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// An open-addressing +// hashtable with quadratic probing. +// +// This is a low level hashtable on top of which different interfaces can be +// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. +// +// The table interface is similar to that of std::unordered_set. Notable +// differences are that most member functions support heterogeneous keys when +// BOTH the hash and eq functions are marked as transparent. They do so by +// providing a typedef called `is_transparent`. +// +// When heterogeneous lookup is enabled, functions that take key_type act as if +// they have an overload set like: +// +// iterator find(const key_type& key); +// template +// iterator find(const K& key); +// +// size_type erase(const key_type& key); +// template +// size_type erase(const K& key); +// +// std::pair equal_range(const key_type& key); +// template +// std::pair equal_range(const K& key); +// +// When heterogeneous lookup is disabled, only the explicit `key_type` overloads +// exist. +// +// find() also supports passing the hash explicitly: +// +// iterator find(const key_type& key, size_t hash); +// template +// iterator find(const U& key, size_t hash); +// +// In addition the pointer to element and iterator stability guarantees are +// weaker: all iterators and pointers are invalidated after a new element is +// inserted. +// +// IMPLEMENTATION DETAILS +// +// # Table Layout +// +// A raw_hash_set's backing array consists of control bytes followed by slots +// that may or may not contain objects. +// +// The layout of the backing array, for `capacity` slots, is thus, as a +// pseudo-struct: +// +// struct BackingArray { +// // Control bytes for the "real" slots. +// ctrl_t ctrl[capacity]; +// // Always `ctrl_t::kSentinel`. 
This is used by iterators to find when to +// // stop and serves no other purpose. +// ctrl_t sentinel; +// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so +// // that if a probe sequence picks a value near the end of `ctrl`, +// // `Group` will have valid control bytes to look at. +// ctrl_t clones[kWidth - 1]; +// // The actual slot data. +// slot_type slots[capacity]; +// }; +// +// The length of this array is computed by `AllocSize()` below. +// +// Control bytes (`ctrl_t`) are bytes (collected into groups of a +// platform-specific size) that define the state of the corresponding slot in +// the slot array. Group manipulation is tightly optimized to be as efficient +// as possible: SSE and friends on x86, clever bit operations on other arches. +// +// Group 1 Group 2 Group 3 +// +---------------+---------------+---------------+ +// | | | | | | | | | | | | | | | | | | | | | | | | | +// +---------------+---------------+---------------+ +// +// Each control byte is either a special value for empty slots, deleted slots +// (sometimes called *tombstones*), and a special end-of-table marker used by +// iterators, or, if occupied, seven bits (H2) from the hash of the value in the +// corresponding slot. +// +// Storing control bytes in a separate array also has beneficial cache effects, +// since more logical slots will fit into a cache line. +// +// # Hashing +// +// We compute two separate hashes, `H1` and `H2`, from the hash of an object. +// `H1(hash(x))` is an index into `slots`, and essentially the starting point +// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out +// objects that cannot possibly be the one we are looking for. +// +// # Table operations. +// +// The key operations are `insert`, `find`, and `erase`. +// +// Since `insert` and `erase` are implemented in terms of `find`, we describe +// `find` first. To `find` a value `x`, we compute `hash(x)`. From +// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every +// group of slots in some interesting order. +// +// We now walk through these indices. At each index, we select the entire group +// starting with that index and extract potential candidates: occupied slots +// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the +// group, we stop and return an error. Each candidate slot `y` is compared with +// `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the +// next probe index. Tombstones effectively behave like full slots that never +// match the value we're looking for. +// +// The `H2` bits ensure when we compare a slot to an object with `==`, we are +// likely to have actually found the object. That is, the chance is low that +// `==` is called and returns `false`. Thus, when we search for an object, we +// are unlikely to call `==` many times. This likelyhood can be analyzed as +// follows (assuming that H2 is a random enough hash function). +// +// Let's assume that there are `k` "wrong" objects that must be examined in a +// probe sequence. For example, when doing a `find` on an object that is in the +// table, `k` is the number of objects between the start of the probe sequence +// and the final found object (not including the final found object). The +// expected number of objects with an H2 match is then `k/128`. 
Measurements +// and analysis indicate that even at high load factors, `k` is less than 32, +// meaning that the number of "false positive" comparisons we must perform is +// less than 1/8 per `find`. + +// `insert` is implemented in terms of `unchecked_insert`, which inserts a +// value presumed to not be in the table (violating this requirement will cause +// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert +// it, we construct a `probe_seq` once again, and use it to find the first +// group with an unoccupied (empty *or* deleted) slot. We place `x` into the +// first such slot in the group and mark it as full with `x`'s H2. +// +// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and +// perform a `find` to see if it's already present; if it is, we're done. If +// it's not, we may decide the table is getting overcrowded (i.e. the load +// factor is greater than 7/8 for big tables; `is_small()` tables use a max load +// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` +// each element of the table into the new array (we know that no insertion here +// will insert an already-present value), and discard the old backing array. At +// this point, we may `unchecked_insert` the value `x`. +// +// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which +// presents a viable, initialized slot pointee to the caller. +// +// `erase` is implemented in terms of `erase_at`, which takes an index to a +// slot. Given an offset, we simply create a tombstone and destroy its contents. +// If we can prove that the slot would not appear in a probe sequence, we can +// make the slot as empty, instead. We can prove this by observing that if a +// group has any empty slots, it has never been full (assuming we never create +// an empty slot in a group with no empties, which this heuristic guarantees we +// never do) and find would stop at this group anyways (since it does not probe +// beyond groups with empties). +// +// `erase` is `erase_at` composed with `find`: if we +// have a value `x`, we can perform a `find`, and then `erase_at` the resulting +// slot. +// +// To iterate, we simply traverse the array, skipping empty and deleted slots +// and stopping when we hit a `kSentinel`. 
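+//
+// A minimal sketch of that lookup loop (illustrative only; `hash_of`, `eq`
+// and `slots` stand in for the policy machinery, and the real find() is
+// defined further below):
+//
+//   size_t hash = hash_of(x);
+//   auto seq = probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+//   while (true) {
+//     Group g{ctrl + seq.offset()};
+//     for (uint32_t i : g.Match(H2(hash))) {
+//       if (eq(slots[seq.offset(i)], x)) return &slots[seq.offset(i)];
+//     }
+//     if (g.MaskEmpty()) return nullptr;  // hit an empty slot: not present
+//     seq.next();
+//   }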
+ +#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ +#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/endian.h" +#include "absl/base/internal/prefetch.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/common.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_policy_traits.h" +#include "absl/container/internal/hashtable_debug_hooks.h" +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/numeric/bits.h" +#include "absl/utility/utility.h" + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +void SwapAlloc(AllocType& lhs, AllocType& rhs, + std::true_type /* propagate_on_container_swap */) { + using std::swap; + swap(lhs, rhs); +} +template +void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, + std::false_type /* propagate_on_container_swap */) {} + +// The state for a probe sequence. +// +// Currently, the sequence is a triangular progression of the form +// +// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) +// +// The use of `Width` ensures that each probe step does not overlap groups; +// the sequence effectively outputs the addresses of *groups* (although not +// necessarily aligned to any boundary). The `Group` machinery allows us +// to check an entire group with minimal branching. +// +// Wrapping around at `mask + 1` is important, but not for the obvious reason. +// As described above, the first few entries of the control byte array +// are mirrored at the end of the array, which `Group` will find and use +// for selecting candidates. However, when those candidates' slots are +// actually inspected, there are no corresponding slots for the cloned bytes, +// so we need to make sure we've treated those offsets as "wrapping around". +// +// It turns out that this probe sequence visits every group exactly once if the +// number of groups is a power of two, since (i^2+i)/2 is a bijection in +// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing +template +class probe_seq { + public: + // Creates a new probe sequence using `hash` as the initial value of the + // sequence and `mask` (usually the capacity of the table) as the mask to + // apply to each value in the progression. + probe_seq(size_t hash, size_t mask) { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + + // The offset within the table, i.e., the value `p(i)` above. + size_t offset() const { return offset_; } + size_t offset(size_t i) const { return (offset_ + i) & mask_; } + + void next() { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index, a multiple of `Width`. + size_t index() const { return index_; } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; +}; + +template +struct RequireUsableKey { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), + std::declval()))>* + operator()(const PassedKey&, const Args&...) 
const; +}; + +template +struct IsDecomposable : std::false_type {}; + +template +struct IsDecomposable< + absl::void_t(), + std::declval()...))>, + Policy, Hash, Eq, Ts...> : std::true_type {}; + +// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. +template +constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); +} +template +constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { + return false; +} + +template +uint32_t TrailingZeros(T x) { + ABSL_ASSUME(x != 0); + return static_cast(countr_zero(x)); +} + +// An abstract bitmask, such as that emitted by a SIMD instruction. +// +// Specifically, this type implements a simple bitset whose representation is +// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number +// of abstract bits in the bitset, while `Shift` is the log-base-two of the +// width of an abstract bit in the representation. +// This mask provides operations for any number of real bits set in an abstract +// bit. To add iteration on top of that, implementation must guarantee no more +// than one real bit is set in an abstract bit. +template +class NonIterableBitMask { + public: + explicit NonIterableBitMask(T mask) : mask_(mask) {} + + explicit operator bool() const { return this->mask_ != 0; } + + // Returns the index of the lowest *abstract* bit set in `self`. + uint32_t LowestBitSet() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the index of the highest *abstract* bit set in `self`. + uint32_t HighestBitSet() const { + return static_cast((bit_width(mask_) - 1) >> Shift); + } + + // Return the number of trailing zero *abstract* bits. + uint32_t TrailingZeros() const { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Return the number of leading zero *abstract* bits. + uint32_t LeadingZeros() const { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; + } + + T mask_; +}; + +// Mask that can be iterable +// +// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just +// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When +// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as +// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. +// +// For example: +// for (int i : BitMask(0b101)) -> yields 0, 2 +// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 +template +class BitMask : public NonIterableBitMask { + using Base = NonIterableBitMask; + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + explicit BitMask(T mask) : Base(mask) {} + // BitMask is an iterator over the indices of its abstract bits. 
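+  //
+  // Advancing clears the lowest set abstract bit (`mask_ &= mask_ - 1`), so,
+  // for example (illustrative), BitMask<uint16_t, 16>(0b101) yields 0, then
+  // 2, and then compares equal to the empty end() mask.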
+ using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + BitMask& operator++() { + this->mask_ &= (this->mask_ - 1); + return *this; + } + + uint32_t operator*() const { return Base::LowestBitSet(); } + + BitMask begin() const { return *this; } + BitMask end() const { return BitMask(0); } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) { + return a.mask_ != b.mask_; + } +}; + +using h2_t = uint8_t; + +// The values here are selected for maximum performance. See the static asserts +// below for details. + +// A `ctrl_t` is a single control byte, which can have one of four +// states: empty, deleted, full (which has an associated seven-bit h2_t value) +// and the sentinel. They have the following bit patterns: +// +// empty: 1 0 0 0 0 0 0 0 +// deleted: 1 1 1 1 1 1 1 0 +// full: 0 h h h h h h h // h represents the hash bits. +// sentinel: 1 1 1 1 1 1 1 1 +// +// These values are specifically tuned for SSE-flavored SIMD. +// The static_asserts below detail the source of these choices. +// +// We use an enum class so that when strict aliasing is enabled, the compiler +// knows ctrl_t doesn't alias other types. +enum class ctrl_t : int8_t { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 +}; +static_assert( + (static_cast(ctrl_t::kEmpty) & + static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x80) != 0, + "Special markers need to have the MSB to make checking for them efficient"); +static_assert( + ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, + "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " + "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); +static_assert( + ctrl_t::kSentinel == static_cast(-1), + "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)"); +static_assert(ctrl_t::kEmpty == static_cast(-128), + "ctrl_t::kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); +static_assert( + (~static_cast(ctrl_t::kEmpty) & + ~static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x7F) != 0, + "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " + "shared by ctrl_t::kSentinel to make the scalar test for " + "MaskEmptyOrDeleted() efficient"); +static_assert(ctrl_t::kDeleted == static_cast(-2), + "ctrl_t::kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + +ABSL_DLL extern const ctrl_t kEmptyGroup[16]; + +// Returns a pointer to a control byte group that can be used by empty tables. +inline ctrl_t* EmptyGroup() { + // Const must be cast away here; no uses of this function will actually write + // to it, because it is only used for empty tables. + return const_cast(kEmptyGroup); +} + +// Mixes a randomly generated per-process seed with `hash` and `ctrl` to +// randomize insertion order within groups. +bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); + +// Returns a per-table, hash salt, which changes on resize. This gets mixed into +// H1 to randomize iteration order per-table. +// +// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure +// non-determinism of iteration order in most cases. 
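+// (Illustrative consequence: two tables holding the same elements will
+// usually lay them out, and therefore iterate them, in different orders,
+// which helps flush out accidental dependence on iteration order.)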
+inline size_t PerTableSalt(const ctrl_t* ctrl) { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. + return reinterpret_cast(ctrl) >> 12; +} +// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. +inline size_t H1(size_t hash, const ctrl_t* ctrl) { + return (hash >> 7) ^ PerTableSalt(ctrl); +} + +// Extracts the H2 portion of a hash: the 7 bits not used for H1. +// +// These are used as an occupied control byte. +inline h2_t H2(size_t hash) { return hash & 0x7F; } + +// Helpers for checking the state of a control byte. +inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } +inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } +inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } +inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +// Quick reference guide for intrinsics used below: +// +// * __m128i: An XMM (128-bit) word. +// +// * _mm_setzero_si128: Returns a zero vector. +// * _mm_set1_epi8: Returns a vector with the same i8 in each lane. +// +// * _mm_subs_epi8: Saturating-subtracts two i8 vectors. +// * _mm_and_si128: Ands two i128s together. +// * _mm_or_si128: Ors two i128s together. +// * _mm_andnot_si128: And-nots two i128s together. +// +// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, +// filling each lane with 0x00 or 0xff. +// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. +// +// * _mm_loadu_si128: Performs an unaligned load of an i128. +// * _mm_storeu_si128: Performs an unaligned store of an i128. +// +// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first +// argument if the corresponding lane of the second +// argument is positive, negative, or zero, respectively. +// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a +// bitmask consisting of those bits. +// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low +// four bits of each i8 lane in the second argument as +// indices. + +// https://github.com/abseil/abseil-cpp/issues/209 +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 +// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char +// Work around this by using the portable implementation of Group +// when using -funsigned-char under GCC. +inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); +} + +struct GroupSse2Impl { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const { + auto match = _mm_set1_epi8(hash); + return BitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + } + + // Returns a bitmask representing the positions of empty slots. + NonIterableBitMask MaskEmpty() const { +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + // This only works because ctrl_t::kEmpty is -128. 
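+    // Illustrative detail (not in the upstream comment): _mm_sign_epi8(c, c)
+    // negates each byte by its own sign, and two's-complement negation of
+    // -128 wraps back to -128. So only kEmpty lanes keep their MSB set, and
+    // _mm_movemask_epi8 collects exactly those lanes.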
+ return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); +#else + auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); +#endif + } + + // Returns a bitmask representing the positions of empty or deleted slots. + NonIterableBitMask MaskEmptyOrDeleted() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return NonIterableBitMask(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return TrailingZeros(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); +#ifdef ABSL_INTERNAL_HAVE_SSSE3 + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); +#else + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); +#endif + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } + + __m128i ctrl; +}; +#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 + +#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +struct GroupAArch64Impl { + static constexpr size_t kWidth = 8; + + explicit GroupAArch64Impl(const ctrl_t* pos) { + ctrl = vld1_u8(reinterpret_cast(pos)); + } + + BitMask Match(h2_t hash) const { + uint8x8_t dup = vdup_n_u8(hash); + auto mask = vceq_u8(ctrl, dup); + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask( + vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); + } + + NonIterableBitMask MaskEmpty() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8( + vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + NonIterableBitMask MaskEmptyOrDeleted() const { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( + vdup_n_s8(static_cast(ctrl_t::kSentinel)), + vreinterpret_s8_u8(ctrl))), + 0); + return NonIterableBitMask(mask); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, + // so we should be fine. 
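+    // Worked per-byte view (illustrative): bit 0 of (mask | ~(mask >> 7)) is
+    // bit0(c) | ~bit7(c), which is 0 for kEmpty (0x80) and kDeleted (0xFE)
+    // and 1 for kSentinel (0xFF) and full bytes (MSB clear), so the masked
+    // countr_zero below counts trailing empty-or-deleted bytes.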
+ constexpr uint64_t bits = 0x0101010101010101ULL; + return countr_zero((mask | ~(mask >> 7)) & bits) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = mask & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint8x8_t ctrl; +}; +#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN + +struct GroupPortableImpl { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) + : ctrl(little_endian::Load64(pos)) {} + + BitMask Match(h2_t hash) const { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). + // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + NonIterableBitMask MaskEmpty() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 6)) & + msbs); + } + + NonIterableBitMask MaskEmptyOrDeleted() const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 7)) & + msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const { + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + constexpr uint64_t bits = 0x0101010101010101ULL; + return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; +}; + +#ifdef ABSL_INTERNAL_HAVE_SSE2 +using Group = GroupSse2Impl; +#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) +using Group = GroupAArch64Impl; +#else +using Group = GroupPortableImpl; +#endif + +// Returns he number of "cloned control bytes". +// +// This is the number of control bytes that are present both at the beginning +// of the control byte array and at the end, such that we can create a +// `Group::kWidth`-width probe window starting from any control byte. +constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } + +template +class raw_hash_set; + +// Returns whether `n` is a valid capacity (i.e., number of slots). +// +// A valid capacity is a non-zero integer `2^m - 1`. 
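+// For example: 1, 3, 7, 15, 31, ... Then `capacity` doubles as the probe
+// mask, and `(n + 1) & n == 0` below is the usual power-of-two test.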
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } + +// Applies the following mapping to every byte in the control array: +// * kDeleted -> kEmpty +// * kEmpty -> kEmpty +// * _ -> kDeleted +// PRECONDITION: +// IsValidCapacity(capacity) +// ctrl[capacity] == ctrl_t::kSentinel +// ctrl[i] != ctrl_t::kSentinel for all i < capacity +void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); + +// Converts `n` into the next valid capacity, per `IsValidCapacity`. +inline size_t NormalizeCapacity(size_t n) { + return n ? ~size_t{} >> countl_zero(n) : 1; +} + +// General notes on capacity/growth methods below: +// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an +// average of two empty slots per group. +// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. +// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we +// never need to probe (the whole table fits in one group) so we don't need a +// load factor less than 1. + +// Given `capacity`, applies the load factor; i.e., it returns the maximum +// number of values we should put into the table before a resizing rehash. +inline size_t CapacityToGrowth(size_t capacity) { + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + if (Group::kWidth == 8 && capacity == 7) { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; +} + +// Given `growth`, "unapplies" the load factor to find how large the capacity +// should be to stay within the load factor. +// +// This might not be a valid capacity and `NormalizeCapacity()` should be +// called on this. +inline size_t GrowthToLowerboundCapacity(size_t growth) { + // `growth*8/7` + if (Group::kWidth == 8 && growth == 7) { + // x+(x-1)/7 does not work when x==7. + return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); +} + +template +size_t SelectBucketCountForIterRange(InputIter first, InputIter last, + size_t bucket_count) { + if (bucket_count != 0) { + return bucket_count; + } + using InputIterCategory = + typename std::iterator_traits::iterator_category; + if (std::is_base_of::value) { + return GrowthToLowerboundCapacity( + static_cast(std::distance(first, last))); + } + return 0; +} + +#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \ + ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) + +inline void AssertIsValid(ctrl_t* ctrl) { + ABSL_HARDENING_ASSERT( + (ctrl == nullptr || IsFull(*ctrl)) && + "Invalid operation on iterator. The element might have " + "been erased, the table might have rehashed, or this may " + "be an end() iterator."); +} + +struct FindInfo { + size_t offset; + size_t probe_length; +}; + +// Whether a table is "small". A small table fits entirely into a probing +// group, i.e., has a capacity < `Group::kWidth`. +// +// In small mode we are able to use the whole capacity. The extra control +// bytes give us at least one "empty" control byte to stop the iteration. +// This is important to make 1 a valid capacity. +// +// In small mode only the first `capacity` control bytes after the sentinel +// are valid. The rest contain dummy ctrl_t::kEmpty values that do not +// represent a real slot. This is important to take into account on +// `find_first_non_full()`, where we never try +// `ShouldInsertBackwards()` for small tables. +inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } + +// Begins a probing operation on `ctrl`, using `hash`. 
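+// For example (illustrative, with the 16-wide SSE2 Group and capacity 63),
+// the sequence visits offsets (h & 63), (h + 16) & 63, (h + 48) & 63,
+// (h + 96) & 63, ... where h = H1(hash, ctrl): each step advances by one
+// more Group width, per the triangular progression described above.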
+inline probe_seq probe(const ctrl_t* ctrl, size_t hash, + size_t capacity) { + return probe_seq(H1(hash, ctrl), capacity); +} + +// Probes an array of control bits using a probe sequence derived from `hash`, +// and returns the offset corresponding to the first deleted or empty slot. +// +// Behavior when the entire table is full is undefined. +// +// NOTE: this function must work with tables having both empty and deleted +// slots in the same group. Such tables appear during `erase()`. +template +inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, + size_t capacity) { + auto seq = probe(ctrl, hash, capacity); + while (true) { + Group g{ctrl + seq.offset()}; + auto mask = g.MaskEmptyOrDeleted(); + if (mask) { +#if !defined(NDEBUG) + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } +#endif + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() <= capacity && "full table!"); + } +} + +// Extern template for inline function keep possibility of inlining. +// When compiler decided to not inline, no symbols will be added to the +// corresponding translation unit. +extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); + +// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire +// array as marked as empty. +inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, + size_t slot_size) { + std::memset(ctrl, static_cast(ctrl_t::kEmpty), + capacity + 1 + NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; + SanitizerPoisonMemoryRegion(slot, slot_size * capacity); +} + +// Sets `ctrl[i]` to `h`. +// +// Unlike setting it directly, this function will perform bounds checks and +// mirror the value to the cloned tail if necessary. +inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, + const void* slot, size_t slot_size) { + assert(i < capacity); + + auto* slot_i = static_cast(slot) + i * slot_size; + if (IsFull(h)) { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } else { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } + + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; +} + +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. +inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, + const void* slot, size_t slot_size) { + SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); +} + +// Given the capacity of a table, computes the offset (from the start of the +// backing allocation) at which the slots begin. +inline size_t SlotOffset(size_t capacity, size_t slot_align) { + assert(IsValidCapacity(capacity)); + const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); + return (num_control_bytes + slot_align - 1) & (~slot_align + 1); +} + +// Given the capacity of a table, computes the total size of the backing +// array. +inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { + return SlotOffset(capacity, slot_align) + capacity * slot_size; +} + +// A SwissTable. +// +// Policy: a policy defines how to perform different operations on +// the slots of the hashtable (see hash_policy_traits.h for the full interface +// of policy). 
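+//
+// Roughly, per hash_policy_traits.h (a paraphrase, not the verbatim
+// contract), a policy supplies: slot_type, key_type, init_type,
+// construct(Alloc*, slot_type*, Args&&...), destroy(Alloc*, slot_type*),
+// transfer(Alloc*, slot_type* dst, slot_type* src), element(slot_type*)
+// returning value_type&, and a static apply() that decomposes arguments
+// into key and value parts.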
+// +// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The +// functor should accept a key and return size_t as hash. For best performance +// it is important that the hash function provides high entropy across all bits +// of the hash. +// +// Eq: a (possibly polymorphic) functor that compares two keys for equality. It +// should accept two (of possibly different type) keys and return a bool: true +// if they are equal, false if they are not. If two keys compare equal, then +// their hash values as defined by Hash MUST be equal. +// +// Allocator: an Allocator +// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which +// the storage of the hashtable will be allocated and the elements will be +// constructed and destroyed. +template +class raw_hash_set { + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! + using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + + private: + // Give an early error when key_type is not hashable/eq. + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using AllocTraits = absl::allocator_traits; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, + "Policy::element() must return a reference"); + + template + struct SameAsElementReference + : std::is_same::type>::type, + typename std::remove_cv< + typename std::remove_reference::type>::type> {}; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, + SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. 
+ template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, + "Allocators with custom pointer types are not supported"); + + class iterator { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() {} + + // PRECONDITION: not an end() iterator. + reference operator*() const { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, + "operator*() called on invalid iterator."); + return PolicyTraits::element(slot_); + } + + // PRECONDITION: not an end() iterator. + pointer operator->() const { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, + "operator-> called on invalid iterator."); + return &operator*(); + } + + // PRECONDITION: not an end() iterator. + iterator& operator++() { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, + "operator++ called on invalid iterator."); + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. + iterator operator++(int) { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) { + AssertIsValid(a.ctrl_); + AssertIsValid(b.ctrl_); + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_ASSUME(ctrl != nullptr); + } + + // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until + // they reach one. + // + // If a sentinel is reached, we null both of them out instead. + void skip_empty_or_deleted() { + while (IsEmptyOrDeleted(*ctrl_)) { + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; + } + + ctrl_t* ctrl_ = nullptr; + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. + union { + slot_type* slot_; + }; + }; + + class const_iterator { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() {} + // Implicit construction from iterator. 
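+    // (Mirrors the standard containers: it lets `iterator` values flow into
+    // const_iterator parameters, e.g. `const_iterator it = s.begin();`.)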
+ const_iterator(iterator i) : inner_(std::move(i)) {} + + reference operator*() const { return *inner_; } + pointer operator->() const { return inner_.operator->(); } + + const_iterator& operator++() { + ++inner_; + return *this; + } + const_iterator operator++(int) { return inner_++; } + + friend bool operator==(const const_iterator& a, const const_iterator& b) { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) + : inner_(const_cast(ctrl), const_cast(slot)) {} + + iterator inner_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value) {} + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), + const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : ctrl_(EmptyGroup()), + settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { + if (bucket_count) { + capacity_ = NormalizeCapacity(bucket_count); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, + const allocator_type& alloc) + : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) + : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} + + explicit raw_hash_set(const allocator_type& alloc) + : raw_hash_set(0, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), + hash, eq, alloc) { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) + : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. 
+ template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, + const hasher& hash = hasher(), const key_equal& eq = key_equal(), + const allocator_type& alloc = allocator_type()) + : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const hasher& hash, const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, size_t bucket_count, + const allocator_type& alloc) + : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(std::initializer_list init, + const allocator_type& alloc) + : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} + + raw_hash_set(const raw_hash_set& that) + : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( + that.alloc_ref())) {} + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) + : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { + reserve(that.size()); + // Because the table is guaranteed to be empty, we can do something faster + // than a full `insert`. + for (const auto& v : that) { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full(ctrl_, hash, capacity_); + SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, + sizeof(slot_type)); + emplace_at(target.offset, v); + infoz().RecordInsert(hash, target.probe_length); + } + size_ = that.size(); + growth_left() -= that.size(); + } + + raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value) + : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), + slots_(absl::exchange(that.slots_, nullptr)), + size_(absl::exchange(that.size_, 0)), + capacity_(absl::exchange(that.capacity_, 0)), + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. 
+ settings_(absl::exchange(that.growth_left(), 0), + absl::exchange(that.infoz(), HashtablezInfoHandle()), + that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) + : ctrl_(EmptyGroup()), + slots_(nullptr), + size_(0), + capacity_(0), + settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), + a) { + if (a == that.alloc_ref()) { + std::swap(ctrl_, that.ctrl_); + std::swap(slots_, that.slots_); + std::swap(size_, that.size_); + std::swap(capacity_, that.capacity_); + std::swap(growth_left(), that.growth_left()); + std::swap(infoz(), that.infoz()); + } else { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. This can be fixed if it ever becomes an issue. + for (auto& elem : that) insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) { + raw_hash_set tmp(that, + AllocTraits::propagate_on_container_copy_assignment::value + ? that.alloc_ref() + : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value) { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment()); + } + + ~raw_hash_set() { destroy_slots(); } + + iterator begin() { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() { return {}; } + + const_iterator begin() const { + return const_cast(this)->begin(); + } + const_iterator end() const { return {}; } + const_iterator cbegin() const { return begin(); } + const_iterator cend() const { return end(); } + + bool empty() const { return !size(); } + size_t size() const { return size_; } + size_t capacity() const { return capacity_; } + size_t max_size() const { return (std::numeric_limits::max)(); } + + ABSL_ATTRIBUTE_REINITIALIZES void clear() { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. + // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + if (capacity_ > 127) { + destroy_slots(); + + infoz().RecordClearedReservation(); + } else if (capacity_) { + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + size_ = 0; + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + reset_growth_left(); + } + assert(empty()); + infoz().RecordStorageChanged(0, capacity_); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. 
+ template = 0, class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + std::pair insert(T&& value) { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_map s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) { + return emplace(std::move(value)); + } + + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, + typename std::enable_if::value, int>::type = 0, + T* = nullptr> + iterator insert(const_iterator, T&& value) { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template < + class T, RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) { + for (; first != last; ++first) emplace(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) { + if (!node) return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem); + if (res.second) { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } else { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) { + auto res = insert(std::move(node)); + node = std::move(res.node); + return res.position; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template ::value, int>::type = 0> + std::pair emplace(Args&&... args) { + return PolicyTraits::apply(EmplaceDecomposable{*this}, + std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template ::value, int>::type = 0> + std::pair emplace(Args&&... 
args) { + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`. + // + // `f` must abide by several restrictions: + // - it MUST call `raw_hash_set::constructor` with arguments as if a + // `raw_hash_set::value_type` is constructed, + // - it MUST NOT access the container before the call to + // `raw_hash_set::constructor`, and + // - it MUST NOT erase the lazily emplaced element. + // Doing any of these is undefined behavior. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... args) const { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) { + auto res = find_or_prepare_insert(key); + if (res.second) { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) { + auto it = find(key); + if (it == end()) return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). The + // iterator is invalidated, so any increment should be done before calling + // erase. In order to erase while iterating across a map, use the following + // idiom (which also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // // `erase()` will invalidate `it`, so advance `it` first. + // auto copy_it = it++; + // if () { + // m.erase(copy_it); + // } + // } + void erase(const_iterator cit) { erase(cit.inner_); } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. 
+ void erase(iterator it) { + ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, + "erase() called on invalid iterator."); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + + iterator erase(const_iterator first, const_iterator last) { + while (first != last) { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e;) { + auto next = std::next(it); + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, + PolicyTraits::element(it.slot_)) + .second) { + src.erase_meta_only(it); + } + it = next; + } + } + + template + void merge(raw_hash_set&& src) { + merge(src); + } + + node_type extract(const_iterator position) { + ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, + "extract() called on invalid iterator."); + auto node = + CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template < + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) { + auto it = find(key); + return it == end() ? node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + IsNoThrowSwappable( + typename AllocTraits::propagate_on_container_swap{})) { + using std::swap; + swap(ctrl_, that.ctrl_); + swap(slots_, that.slots_); + swap(size_, that.size_); + swap(capacity_, that.capacity_); + swap(growth_left(), that.growth_left()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + swap(infoz(), that.infoz()); + SwapAlloc(alloc_ref(), that.alloc_ref(), + typename AllocTraits::propagate_on_container_swap{}); + } + + void rehash(size_t n) { + if (n == 0 && capacity_ == 0) return; + if (n == 0 && size_ == 0) { + destroy_slots(); + infoz().RecordStorageChanged(0, 0); + infoz().RecordClearedReservation(); + return; + } + + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity_) { + resize(m); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + void reserve(size_t n) { + if (n > size() + growth_left()) { + size_t m = GrowthToLowerboundCapacity(n); + resize(NormalizeCapacity(m)); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. 
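+  //
+  // Hypothetical usage sketch (`m`, `key`, and `Compute()` are assumed, not
+  // part of this header):
+  //
+  //   m.prefetch(key);           // start pulling key's probe window into cache
+  //   auto payload = Compute();  // overlap unrelated work with the prefetch
+  //   m[key] = payload;          // the actual probe now hits warm cache lines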
+ template + void prefetch(const key_arg& key) const { + (void)key; + // Avoid probing if we won't be able to prefetch the addresses received. +#ifdef ABSL_INTERNAL_HAVE_PREFETCH + prefetch_heap_block(); + auto seq = probe(ctrl_, hash_ref()(key), capacity_); + base_internal::PrefetchT0(ctrl_ + seq.offset()); + base_internal::PrefetchT0(slots_ + seq.offset()); +#endif // ABSL_INTERNAL_HAVE_PREFETCH + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, size_t hash) { + auto seq = probe(ctrl_, hash, capacity_); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i))))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); + seq.next(); + assert(seq.index() <= capacity_ && "full table!"); + } + } + template + iterator find(const key_arg& key) { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key) const { + auto it = find(key); + if (it != end()) return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const { return capacity_; } + float load_factor() const { + return capacity_ ? static_cast(size()) / capacity_ : 0.0; + } + float max_load_factor() const { return 1.0f; } + void max_load_factor(float) { + // Does nothing. + } + + hasher hash_function() const { return hash_ref(); } + key_equal key_eq() const { return eq_ref(); } + allocator_type get_allocator() const { return alloc_ref(); } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { + if (a.size() != b.size()) return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { + return !(a == b); + } + + template + friend typename std::enable_if::value, + H>::type + AbslHashValue(H h, const raw_hash_set& s) { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), + s.size()); + } + + friend void swap(raw_hash_set& a, + raw_hash_set& b) noexcept(noexcept(a.swap(b))) { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement { + template + const_iterator operator()(const K& key, Args&&...) const { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement { + template + size_t operator()(const K& key, Args&&...) 
const { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement { + template + bool operator()(const K2& lhs, Args&&...) const { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable { + template + std::pair operator()(const K& key, Args&&... args) const { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot { + template + std::pair operator()(const K& key, Args&&...) && { + auto res = s.find_or_prepare_insert(key); + if (res.second) { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } else if (do_destroy) { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + // Erases, but does not destroy, the value pointed to by `it`. + // + // This merely updates the pertinent control byte. This can be used in + // conjunction with Policy::transfer to move the object to another place. + void erase_meta_only(const_iterator it) { + assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); + --size_; + const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); + const size_t index_before = (index - Group::kWidth) & capacity_; + const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + bool was_never_full = + empty_before && empty_after && + static_cast(empty_after.TrailingZeros() + + empty_before.LeadingZeros()) < Group::kWidth; + + SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, + capacity_, ctrl_, slots_, sizeof(slot_type)); + growth_left() += was_never_full; + infoz().RecordErase(); + } + + // Allocates a backing array for `self` and initializes its control bytes. + // This reads `capacity_` and updates all other fields based on the result of + // the allocation. + // + // This does not free the currently held array; `capacity_` must be nonzero. + void initialize_slots() { + assert(capacity_); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or wont make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + // + // People are often sloppy with the exact type of their allocator (sometimes + // it has an extra const or is missing the pair, but rebinds made it work + // anyway). To avoid the ambiguity, we work off SlotAlloc which we have + // bound more carefully. 
+ if (std::is_same>::value && + slots_ == nullptr) { + infoz() = Sample(sizeof(slot_type)); + } + + char* mem = static_cast(Allocate( + &alloc_ref(), + AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)))); + ctrl_ = reinterpret_cast(mem); + slots_ = reinterpret_cast( + mem + SlotOffset(capacity_, alignof(slot_type))); + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + reset_growth_left(); + infoz().RecordStorageChanged(size_, capacity_); + } + + // Destroys all slots in the backing array, frees the backing array, and + // clears all top-level book-keeping data. + // + // This essentially implements `map = raw_hash_set();`. + void destroy_slots() { + if (!capacity_) return; + for (size_t i = 0; i != capacity_; ++i) { + if (IsFull(ctrl_[i])) { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + + // Unpoison before returning the memory to the allocator. + SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + Deallocate( + &alloc_ref(), ctrl_, + AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); + ctrl_ = EmptyGroup(); + slots_ = nullptr; + size_ = 0; + capacity_ = 0; + growth_left() = 0; + } + + void resize(size_t new_capacity) { + assert(IsValidCapacity(new_capacity)); + auto* old_ctrl = ctrl_; + auto* old_slots = slots_; + const size_t old_capacity = capacity_; + capacity_ = new_capacity; + initialize_slots(); + + size_t total_probe_length = 0; + for (size_t i = 0; i != old_capacity; ++i) { + if (IsFull(old_ctrl[i])) { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, + PolicyTraits::element(old_slots + i)); + auto target = find_first_non_full(ctrl_, hash, capacity_); + size_t new_i = target.offset; + total_probe_length += target.probe_length; + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); + } + } + if (old_capacity) { + SanitizerUnpoisonMemoryRegion(old_slots, + sizeof(slot_type) * old_capacity); + Deallocate( + &alloc_ref(), old_ctrl, + AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); + } + infoz().RecordRehash(total_probe_length); + } + + // Prunes control bytes to remove as many tombstones as possible. + // + // See the comment on `rehash_and_grow_if_necessary()`. 
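+  //
+  // Illustrative scenario (a sketch, not from the original comments): a
+  // steady-state loop such as
+  //
+  //   for (;;) { m.erase(some_key(m)); m.insert(make_value()); }
+  //
+  // keeps size() constant yet keeps turning empty control bytes into
+  // kDeleted tombstones, so probe sequences grow even though the table never
+  // fills; this pass reclaims those tombstones without reallocating.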
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { + assert(IsValidCapacity(capacity_)); + assert(!is_small(capacity_)); + // Algorithm: + // - mark all DELETED slots as EMPTY + // - mark all FULL slots as DELETED + // - for each slot marked as DELETED + // hash = Hash(element) + // target = find_first_non_full(hash) + // if target is in the same group + // mark slot as FULL + // else if target is EMPTY + // transfer element to target + // mark slot as EMPTY + // mark target as FULL + // else if target is DELETED + // swap current element with target element + // mark target as FULL + // repeat procedure for current slot with moved from element (target) + ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + size_t total_probe_length = 0; + slot_type* slot = reinterpret_cast(&raw); + for (size_t i = 0; i != capacity_; ++i) { + if (!IsDeleted(ctrl_[i])) continue; + const size_t hash = PolicyTraits::apply( + HashElement{hash_ref()}, PolicyTraits::element(slots_ + i)); + const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); + const size_t new_i = target.offset; + total_probe_length += target.probe_length; + + // Verify if the old and new i fall within the same group wrt the hash. + // If they do, we don't need to move the object as it falls already in the + // best probe we can. + const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); + const auto probe_index = [probe_offset, this](size_t pos) { + return ((pos - probe_offset) & capacity_) / Group::kWidth; + }; + + // Element doesn't move. + if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { + SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + continue; + } + if (IsEmpty(ctrl_[new_i])) { + // Transfer element to the empty spot. + // SetCtrl poisons/unpoisons the slots so we have to call it at the + // right time. + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); + SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); + } else { + assert(IsDeleted(ctrl_[new_i])); + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + // Until we are done rehashing, DELETED marks previously FULL slots. + // Swap i and new_i elements. + PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); + PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); + --i; // repeat + } + } + reset_growth_left(); + infoz().RecordRehash(total_probe_length); + } + + // Called whenever the table *might* need to conditionally grow. + // + // This function is an optimization opportunity to perform a rehash even when + // growth is unnecessary, because vacating tombstones is beneficial for + // performance in the long-run. + void rehash_and_grow_if_necessary() { + if (capacity_ == 0) { + resize(1); + } else if (capacity_ > Group::kWidth && + // Do these calcuations in 64-bit to avoid overflow. + size() * uint64_t{32} <= capacity_ * uint64_t{25}) { + // Squash DELETED without growing if there is enough capacity. + // + // Rehash in place if the current size is <= 25/32 of capacity_. + // Rationale for such a high factor: 1) drop_deletes_without_resize() is + // faster than resize, and 2) it takes quite a bit of work to add + // tombstones. 
In the worst case, seems to take approximately 4 + // insert/erase pairs to create a single tombstone and so if we are + // rehashing because of tombstones, we can afford to rehash-in-place as + // long as we are reclaiming at least 1/8 the capacity without doing more + // than 2X the work. (Where "work" is defined to be size() for rehashing + // or rehashing in place, and 1 for an insert or erase.) But rehashing in + // place is faster per operation than inserting or even doubling the size + // of the table, so we actually afford to reclaim even less space from a + // resize-in-place. The decision is to rehash in place if we can reclaim + // at about 1/8th of the usable capacity (specifically 3/28 of the + // capacity) which means that the total cost of rehashing will be a small + // fraction of the total work. + // + // Here is output of an experiment using the BM_CacheInSteadyState + // benchmark running the old case (where we rehash-in-place only if we can + // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place + // if we can recover 3/32*capacity_). + // + // Note that although in the worst-case number of rehashes jumped up from + // 15 to 190, but the number of operations per second is almost the same. + // + // Abridged output of running BM_CacheInSteadyState benchmark from + // raw_hash_set_benchmark. N is the number of insert/erase operations. + // + // | OLD (recover >= 7/16 | NEW (recover >= 3/32) + // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes + // 448 | 145284 0.44 18 | 140118 0.44 19 + // 493 | 152546 0.24 11 | 151417 0.48 28 + // 538 | 151439 0.26 11 | 151152 0.53 38 + // 583 | 151765 0.28 11 | 150572 0.57 50 + // 628 | 150241 0.31 11 | 150853 0.61 66 + // 672 | 149602 0.33 12 | 150110 0.66 90 + // 717 | 149998 0.35 12 | 149531 0.70 129 + // 762 | 149836 0.37 13 | 148559 0.74 190 + // 807 | 149736 0.39 14 | 151107 0.39 14 + // 852 | 150204 0.42 15 | 151019 0.42 15 + drop_deletes_without_resize(); + } else { + // Otherwise grow the container. + resize(capacity_ * 2 + 1); + } + } + + bool has_element(const value_type& elem) const { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); + auto seq = probe(ctrl_, hash, capacity_); + while (true) { + Group g{ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) { + if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == + elem)) + return true; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false; + seq.next(); + assert(seq.index() <= capacity_ && "full table!"); + } + return false; + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. + raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { + raw_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { + raw_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + + protected: + // Attempts to find `key` in the table; if it isn't found, returns a slot that + // the value can be inserted into, with the control byte already set to + // `key`'s H2. 
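+  //
+  // A sketch of the intended two-phase use (this mirrors EmplaceDecomposable
+  // and lazy_emplace above; `key` and `args` are assumed):
+  //
+  //   auto res = find_or_prepare_insert(key);
+  //   if (res.second) emplace_at(res.first, std::forward<Args>(args)...);
+  //   return iterator_at(res.first);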
+  template <class K>
+  std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+    prefetch_heap_block();
+    auto hash = hash_ref()(key);
+    auto seq = probe(ctrl_, hash, capacity_);
+    while (true) {
+      Group g{ctrl_ + seq.offset()};
+      for (uint32_t i : g.Match(H2(hash))) {
+        if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+                EqualElement<K>{key, eq_ref()},
+                PolicyTraits::element(slots_ + seq.offset(i)))))
+          return {seq.offset(i), false};
+      }
+      if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
+      seq.next();
+      assert(seq.index() <= capacity_ && "full table!");
+    }
+    return {prepare_insert(hash), true};
+  }
+
+  // Given the hash of a value not currently in the table, finds the next
+  // viable slot index to insert it at.
+  //
+  // REQUIRES: At least one non-full slot available.
+  size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+    auto target = find_first_non_full(ctrl_, hash, capacity_);
+    if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+                           !IsDeleted(ctrl_[target.offset]))) {
+      rehash_and_grow_if_necessary();
+      target = find_first_non_full(ctrl_, hash, capacity_);
+    }
+    ++size_;
+    growth_left() -= IsEmpty(ctrl_[target.offset]);
+    SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+            sizeof(slot_type));
+    infoz().RecordInsert(hash, target.probe_length);
+    return target.offset;
+  }
+
+  // Constructs the value in the space pointed by the iterator. This only
+  // works after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+  //
+  // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+  // k is the key decomposed from `forward<Args>(args)...`, and the bool
+  // returned by find_or_prepare_insert(k) was true.
+  // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+  template <class... Args>
+  void emplace_at(size_t i, Args&&... args) {
+    PolicyTraits::construct(&alloc_ref(), slots_ + i,
+                            std::forward<Args>(args)...);
+
+    assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+               iterator_at(i) &&
+           "constructed value does not match the lookup key");
+  }
+
+  iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+  const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+  friend struct RawHashSetTestOnlyAccess;
+
+  void reset_growth_left() {
+    growth_left() = CapacityToGrowth(capacity()) - size_;
+  }
+
+  // The number of slots we can still fill without needing to rehash.
+  //
+  // This is stored separately due to tombstones: we do not include tombstones
+  // in the growth capacity, because we'd like to rehash when the table is
+  // otherwise filled with tombstones: otherwise, probe sequences might get
+  // unacceptably long without triggering a rehash. Callers can also force a
+  // rehash via the standard `rehash(0)`, which will recompute this value as a
+  // side-effect.
+  //
+  // See `CapacityToGrowth()`.
+  size_t& growth_left() { return settings_.template get<0>(); }
+
+  // Prefetch the heap-allocated memory region to resolve potential TLB misses.
+  // This is intended to overlap with execution of calculating the hash for a
+  // key.
+ void prefetch_heap_block() const { + base_internal::PrefetchT2(ctrl_); + } + + HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } + + hasher& hash_ref() { return settings_.template get<2>(); } + const hasher& hash_ref() const { return settings_.template get<2>(); } + key_equal& eq_ref() { return settings_.template get<3>(); } + const key_equal& eq_ref() const { return settings_.template get<3>(); } + allocator_type& alloc_ref() { return settings_.template get<4>(); } + const allocator_type& alloc_ref() const { + return settings_.template get<4>(); + } + + // TODO(alkis): Investigate removing some of these fields: + // - ctrl/slots can be derived from each other + // - size can be moved into the slot array + + // The control bytes (and, also, a pointer to the base of the backing array). + // + // This contains `capacity_ + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + ctrl_t* ctrl_ = EmptyGroup(); + // The beginning of the slots, located at `SlotOffset()` bytes after + // `ctrl_`. May be null for empty tables. + slot_type* slots_ = nullptr; + + // The number of filled slots. + size_t size_ = 0; + + // The total number of available slots. + size_t capacity_ = 0; + absl::container_internal::CompressedTuple + settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, + allocator_type{}}; +}; + +// Erases all elements that satisfy the predicate `pred` from the container `c`. +template +typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c) { + const auto initial_size = c->size(); + for (auto it = c->begin(), last = c->end(); it != last;) { + if (pred(*it)) { + c->erase(it++); + } else { + ++it; + } + } + return initial_size - c->size(); +} + +namespace hashtable_debug_internal { +template +struct HashtableDebugAccess> { + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, + const typename Set::key_type& key) { + size_t num_probes = 0; + size_t hash = set.hash_ref()(key); + auto seq = probe(set.ctrl_, hash, set.capacity_); + while (true) { + container_internal::Group g{set.ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(container_internal::H2(hash))) { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slots_ + seq.offset(i)))) + return num_probes; + ++num_probes; + } + if (g.MaskEmpty()) return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) { + size_t capacity = c.capacity_; + if (capacity == 0) return 0; + size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * c.size(); + } else { + for (size_t i = 0; i != capacity; ++i) { + if (container_internal::IsFull(c.ctrl_[i])) { + m += Traits::space_used(c.slots_ + i); + } + } + } + return m; + } + + static size_t LowerBoundAllocatedByteSize(size_t size) { + size_t capacity = GrowthToLowerboundCapacity(size); + if (capacity == 0) return 0; + size_t m = + AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) { + m += per_slot * size; + } + return m; + } +}; + +} // namespace hashtable_debug_internal +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#undef ABSL_INTERNAL_ASSERT_IS_FULL + +#endif // 
ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h new file mode 100644 index 0000000..5ff6fd7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h @@ -0,0 +1,274 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ +#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ + +#include +#include + +#include "absl/types/compare.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace test_internal { + +// A type that counts number of occurrences of the type, the live occurrences of +// the type, as well as the number of copies, moves, swaps, and comparisons that +// have occurred on the type. This is used as a base class for the copyable, +// copyable+movable, and movable types below that are used in actual tests. Use +// InstanceTracker in tests to track the number of instances. +class BaseCountedInstance { + public: + explicit BaseCountedInstance(int x) : value_(x) { + ++num_instances_; + ++num_live_instances_; + } + BaseCountedInstance(const BaseCountedInstance& x) + : value_(x.value_), is_live_(x.is_live_) { + ++num_instances_; + if (is_live_) ++num_live_instances_; + ++num_copies_; + } + BaseCountedInstance(BaseCountedInstance&& x) + : value_(x.value_), is_live_(x.is_live_) { + x.is_live_ = false; + ++num_instances_; + ++num_moves_; + } + ~BaseCountedInstance() { + --num_instances_; + if (is_live_) --num_live_instances_; + } + + BaseCountedInstance& operator=(const BaseCountedInstance& x) { + value_ = x.value_; + if (is_live_) --num_live_instances_; + is_live_ = x.is_live_; + if (is_live_) ++num_live_instances_; + ++num_copies_; + return *this; + } + BaseCountedInstance& operator=(BaseCountedInstance&& x) { + value_ = x.value_; + if (is_live_) --num_live_instances_; + is_live_ = x.is_live_; + x.is_live_ = false; + ++num_moves_; + return *this; + } + + bool operator==(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ == x.value_; + } + + bool operator!=(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ != x.value_; + } + + bool operator<(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ < x.value_; + } + + bool operator>(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ > x.value_; + } + + bool operator<=(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ <= x.value_; + } + + bool operator>=(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ >= x.value_; + } + + absl::weak_ordering compare(const BaseCountedInstance& x) const { + ++num_comparisons_; + return value_ < x.value_ + ? absl::weak_ordering::less + : value_ == x.value_ ? 
absl::weak_ordering::equivalent + : absl::weak_ordering::greater; + } + + int value() const { + if (!is_live_) std::abort(); + return value_; + } + + friend std::ostream& operator<<(std::ostream& o, + const BaseCountedInstance& v) { + return o << "[value:" << v.value() << "]"; + } + + // Implementation of efficient swap() that counts swaps. + static void SwapImpl( + BaseCountedInstance& lhs, // NOLINT(runtime/references) + BaseCountedInstance& rhs) { // NOLINT(runtime/references) + using std::swap; + swap(lhs.value_, rhs.value_); + swap(lhs.is_live_, rhs.is_live_); + ++BaseCountedInstance::num_swaps_; + } + + private: + friend class InstanceTracker; + + int value_; + + // Indicates if the value is live, ie it hasn't been moved away from. + bool is_live_ = true; + + // Number of instances. + static int num_instances_; + + // Number of live instances (those that have not been moved away from.) + static int num_live_instances_; + + // Number of times that BaseCountedInstance objects were moved. + static int num_moves_; + + // Number of times that BaseCountedInstance objects were copied. + static int num_copies_; + + // Number of times that BaseCountedInstance objects were swapped. + static int num_swaps_; + + // Number of times that BaseCountedInstance objects were compared. + static int num_comparisons_; +}; + +// Helper to track the BaseCountedInstance instance counters. Expects that the +// number of instances and live_instances are the same when it is constructed +// and when it is destructed. +class InstanceTracker { + public: + InstanceTracker() + : start_instances_(BaseCountedInstance::num_instances_), + start_live_instances_(BaseCountedInstance::num_live_instances_) { + ResetCopiesMovesSwaps(); + } + ~InstanceTracker() { + if (instances() != 0) std::abort(); + if (live_instances() != 0) std::abort(); + } + + // Returns the number of BaseCountedInstance instances both containing valid + // values and those moved away from compared to when the InstanceTracker was + // constructed + int instances() const { + return BaseCountedInstance::num_instances_ - start_instances_; + } + + // Returns the number of live BaseCountedInstance instances compared to when + // the InstanceTracker was constructed + int live_instances() const { + return BaseCountedInstance::num_live_instances_ - start_live_instances_; + } + + // Returns the number of moves on BaseCountedInstance objects since + // construction or since the last call to ResetCopiesMovesSwaps(). + int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; } + + // Returns the number of copies on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int copies() const { + return BaseCountedInstance::num_copies_ - start_copies_; + } + + // Returns the number of swaps on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; } + + // Returns the number of comparisons on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int comparisons() const { + return BaseCountedInstance::num_comparisons_ - start_comparisons_; + } + + // Resets the base values for moves, copies, comparisons, and swaps to the + // current values, so that subsequent Get*() calls for moves, copies, + // comparisons, and swaps will compare to the situation at the point of this + // call. 
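+  //
+  // Illustrative use (a sketch using the instance types defined below):
+  //
+  //   InstanceTracker tracker;
+  //   CopyableMovableInstance a(1);
+  //   CopyableMovableInstance b(std::move(a));  // tracker.moves() == 1
+  //   tracker.ResetCopiesMovesSwaps();          // tracker.moves() == 0 again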
+  void ResetCopiesMovesSwaps() {
+    start_moves_ = BaseCountedInstance::num_moves_;
+    start_copies_ = BaseCountedInstance::num_copies_;
+    start_swaps_ = BaseCountedInstance::num_swaps_;
+    start_comparisons_ = BaseCountedInstance::num_comparisons_;
+  }
+
+ private:
+  int start_instances_;
+  int start_live_instances_;
+  int start_moves_;
+  int start_copies_;
+  int start_swaps_;
+  int start_comparisons_;
+};
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+  explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+  CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+  CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+  friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+  explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+  CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+  CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+  CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+      default;
+  CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+  friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+  explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+  MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+  MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+  friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+    BaseCountedInstance::SwapImpl(lhs, rhs);
+  }
+
+  static bool supports_move() { return true; }
+};
+
+}  // namespace test_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/CAPI/cpp/grpc/include/absl/container/internal/tracked.h b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h
new file mode 100644
index 0000000..29f5829
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
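+//
+// For example (a sketch, not part of the original header):
+//
+//   Tracked<int> a(7);
+//   Tracked<int> b = a;             // b.num_copies() == 1
+//   Tracked<int> c = std::move(b);  // c.num_moves() == 1 (counters shared)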
+template <class T>
+class Tracked {
+ public:
+  Tracked() {}
+  // NOLINTNEXTLINE(runtime/explicit)
+  Tracked(const T& val) : val_(val) {}
+  Tracked(const Tracked& that)
+      : val_(that.val_),
+        num_moves_(that.num_moves_),
+        num_copies_(that.num_copies_) {
+    ++(*num_copies_);
+  }
+  Tracked(Tracked&& that)
+      : val_(std::move(that.val_)),
+        num_moves_(std::move(that.num_moves_)),
+        num_copies_(std::move(that.num_copies_)) {
+    ++(*num_moves_);
+  }
+  Tracked& operator=(const Tracked& that) {
+    val_ = that.val_;
+    num_moves_ = that.num_moves_;
+    num_copies_ = that.num_copies_;
+    ++(*num_copies_);
+    return *this;
+  }
+  Tracked& operator=(Tracked&& that) {
+    val_ = std::move(that.val_);
+    num_moves_ = std::move(that.num_moves_);
+    num_copies_ = std::move(that.num_copies_);
+    ++(*num_moves_);
+    return *this;
+  }
+
+  const T& val() const { return val_; }
+
+  friend bool operator==(const Tracked& a, const Tracked& b) {
+    return a.val_ == b.val_;
+  }
+  friend bool operator!=(const Tracked& a, const Tracked& b) {
+    return !(a == b);
+  }
+
+  size_t num_copies() { return *num_copies_; }
+  size_t num_moves() { return *num_moves_; }
+
+ private:
+  T val_;
+  std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+  std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 0000000..7e84dc2
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,494 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class ConstructorTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(ConstructorTest); + +TYPED_TEST_P(ConstructorTest, NoArgs) { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); +} + +TYPED_TEST_P(ConstructorTest, BucketCount) { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHash) { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +template +struct is_std_unordered_map : std::false_type {}; + +template +struct is_std_unordered_map> : std::true_type {}; + +#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) +using has_cxx14_std_apis = std::true_type; +#else +using has_cxx14_std_apis = std::false_type; +#endif + +template +using expect_cxx14_apis = + absl::disjunction>, + has_cxx14_std_apis>; + +template +void BucketCountAllocTest(std::false_type) {} + +template +void BucketCountAllocTest(std::true_type) { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountAlloc) { + BucketCountAllocTest(expect_cxx14_apis()); +} + +template +void BucketCountHashAllocTest(std::false_type) {} + +template +void BucketCountHashAllocTest(std::true_type) { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) { + BucketCountHashAllocTest(expect_cxx14_apis()); +} + +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS +using has_alloc_std_constructors = std::true_type; +#else 
+using has_alloc_std_constructors = std::false_type; +#endif + +template +using expect_alloc_constructors = + absl::disjunction>, + has_alloc_std_constructors>; + +template +void AllocTest(std::false_type) {} + +template +void AllocTest(std::true_type) { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); +} + +TYPED_TEST_P(ConstructorTest, Alloc) { + AllocTest(expect_alloc_constructors()); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +template +void InputIteratorBucketAllocTest(std::false_type) {} + +template +void InputIteratorBucketAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { + InputIteratorBucketAllocTest(expect_cxx14_apis()); +} + +template +void InputIteratorBucketHashAllocTest(std::false_type) {} + +template +void InputIteratorBucketHashAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructor) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(gen()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +template +void CopyConstructorAllocTest(std::false_type) {} + +template +void CopyConstructorAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H 
hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(gen()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { + CopyConstructorAllocTest(expect_alloc_constructors()); +} + +// TODO(alkis): Test non-propagating allocators on copy constructors. + +TYPED_TEST_P(ConstructorTest, MoveConstructor) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +template +void MoveConstructorAllocTest(std::false_type) {} + +template +void MoveConstructorAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { + MoveConstructorAllocTest(expect_alloc_constructors()); +} + +// TODO(alkis): Test non-propagating allocators on move constructors. 
+ +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +template +void InitializerListBucketAllocTest(std::false_type) {} + +template +void InitializerListBucketAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { + InitializerListBucketAllocTest(expect_cxx14_apis()); +} + +template +void InitializerListBucketHashAllocTest(std::false_type) {} + +template +void InitializerListBucketHashAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); +} + +TYPED_TEST_P(ConstructorTest, Assignment) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); +} + +// TODO(alkis): Test [non-]propagating allocators on move/copy assignments +// (it depends on traits). 
+ +TYPED_TEST_P(ConstructorTest, MoveAssignment) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam t(m); + TypeParam n; + n = std::move(t); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam n({gen()}); + n = m; + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam t(m); + TypeParam n({gen()}); + n = std::move(t); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values); + m = *&m; // Avoid -Wself-assign + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); +} + +// We cannot test self move as standard states that it leaves standard +// containers in unspecified state (and in practice in causes memory-leak +// according to heap-checker!). + +REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, + BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, + InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, + InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, + MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, + InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment, + MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, + MoveAssignmentOverwritesExisting, + AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h new file mode 100644 index 0000000..3713cd9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h @@ -0,0 +1,117 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class LookupTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(LookupTest); + +TYPED_TEST_P(LookupTest, At) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + for (const auto& p : values) { + const auto& val = m.at(p.first); + EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first); + } +} + +TYPED_TEST_P(LookupTest, OperatorBracket) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) { + auto& val = m[p.first]; + EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first); + val = p.second; + } + for (const auto& p : values) + EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first); +} + +TYPED_TEST_P(LookupTest, Count) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) + EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first); +} + +TYPED_TEST_P(LookupTest, Find) { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_TRUE(m.end() == m.find(p.first)) + << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) { + auto it = m.find(p.first); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first); + EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first); + } +} + +TYPED_TEST_P(LookupTest, EqualRange) { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) { + auto r = m.equal_range(p.first); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& p : values) { + auto r = m.equal_range(p.first); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first); + } +} + +REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, + EqualRange); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // 
ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h new file mode 100644 index 0000000..7d48cdb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h @@ -0,0 +1,87 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ + +#include +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class MembersTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(MembersTest); + +template +void UseType() {} + +TYPED_TEST_P(MembersTest, Typedefs) { + EXPECT_TRUE((std::is_same, + typename TypeParam::value_type>())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval())), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + std::declval())), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, + typename TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, + typename TypeParam::const_pointer>())); +} + +TYPED_TEST_P(MembersTest, SimpleFunctions) { + EXPECT_GT(TypeParam().max_size(), 0); +} + +TYPED_TEST_P(MembersTest, BeginEnd) { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); +} + +REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h new file mode 100644 index 0000000..4d9ab30 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h @@ -0,0 +1,352 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ + +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class ModifiersTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(ModifiersTest); + +TYPED_TEST_P(ModifiersTest, Clear) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); +} + +TYPED_TEST_P(ModifiersTest, Insert) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.insert(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +} + +TYPED_TEST_P(ModifiersTest, InsertHint) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert(m.end(), val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.insert(it, val2); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); +} + +TYPED_TEST_P(ModifiersTest, InsertRange) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + T val2 = {val.first, hash_internal::Generator()()}; + m.insert(val2); + EXPECT_EQ(m.bucket_count(), original_capacity); +} + +TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { +#if !defined(__GLIBCXX__) + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, + hash_internal::Generator()); + std::vector values; + while (values.size() != 100) { + std::copy_n(base_values.begin(), 10, std::back_inserter(values)); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); +#endif +} + +TYPED_TEST_P(ModifiersTest, InsertOrAssign) { +#ifdef UNORDERED_MAP_CXX17 + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert_or_assign(k, val); + EXPECT_TRUE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val, 
get<1>(*p.first)); + V val2 = hash_internal::Generator()(); + p = m.insert_or_assign(k, val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val2, get<1>(*p.first)); +#endif +} + +TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) { +#ifdef UNORDERED_MAP_CXX17 + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert_or_assign(m.end(), k, val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val, get<1>(*it)); + V val2 = hash_internal::Generator()(); + it = m.insert_or_assign(it, k, val2); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val2, get<1>(*it)); +#endif +} + +TYPED_TEST_P(ModifiersTest, Emplace) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.emplace(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +} + +TYPED_TEST_P(ModifiersTest, EmplaceHint) { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.emplace_hint(it, val2); + EXPECT_EQ(val, *it); +} + +TYPED_TEST_P(ModifiersTest, TryEmplace) { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.try_emplace(val.first, val.second); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, val2.second); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +#endif +} + +TYPED_TEST_P(ModifiersTest, TryEmplaceHint) { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.try_emplace(m.end(), val.first, val.second); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.try_emplace(it, val2.first, val2.second); + EXPECT_EQ(val, *it); +#endif +} + +template +using IfNotVoid = typename std::enable_if::value, V>::type; + +// In openmap we chose not to return the iterator from erase because that's +// more expensive. As such we adapt erase to return an iterator here. +struct EraseFirst { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) 
const {
+    auto it = m->begin();
+    m->erase(it++);
+    return it;
+  }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+  using T = hash_internal::GeneratedType;
+  using std::get;
+  std::vector values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  auto& first = *m.begin();
+  std::vector values2;
+  for (const auto& val : values)
+    if (get<0>(val) != get<0>(first)) values2.push_back(val);
+  auto it = EraseFirst()(&m, 0);
+  ASSERT_TRUE(it != m.end());
+  EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+                                                             values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+  using T = hash_internal::GeneratedType;
+  std::vector values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  auto it = m.erase(m.begin(), m.end());
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+  EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+  using T = hash_internal::GeneratedType;
+  std::vector values;
+  std::generate_n(std::back_inserter(values), 10,
+                  hash_internal::Generator());
+  TypeParam m(values.begin(), values.end());
+  ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+  EXPECT_EQ(1, m.erase(values[0].first));
+  EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+  EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+                                                             values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+  using T = hash_internal::GeneratedType;
+  std::vector v1;
+  std::vector v2;
+  std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator());
+  std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator());
+  TypeParam m1(v1.begin(), v1.end());
+  TypeParam m2(v2.begin(), v2.end());
+  EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+  EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+  m1.swap(m2);
+  EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+  EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+                            InsertRange, InsertWithinCapacity,
+                            InsertRangeWithinCapacity, InsertOrAssign,
+                            InsertOrAssignHint, Emplace, EmplaceHint,
+                            TryEmplace, TryEmplaceHint, Erase, EraseRange,
+                            EraseKey, Swap);
+
+template
+struct is_unique_ptr : std::false_type {};
+
+template
+struct is_unique_ptr> : std::true_type {};
+
+template
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+  UniquePtrModifiersTest() {
+    static_assert(is_unique_ptr::value,
+                  "UniquePtrModifiersTest may only be called with a "
+                  "std::unique_ptr value type.");
+  }
+};
+
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) { +#ifdef UNORDERED_MAP_CXX17 + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.try_emplace(val.first, std::move(val.second)); + EXPECT_TRUE(p.second); + // A moved from std::unique_ptr is guaranteed to be nullptr. + EXPECT_EQ(val.second, nullptr); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, std::move(val2.second)); + EXPECT_FALSE(p.second); + EXPECT_NE(val2.second, nullptr); +#endif +} + +REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h new file mode 100644 index 0000000..af1116e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h @@ -0,0 +1,496 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ + +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class ConstructorTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(ConstructorTest); + +TYPED_TEST_P(ConstructorTest, NoArgs) { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); +} + +TYPED_TEST_P(ConstructorTest, BucketCount) { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHash) { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); 
+ TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + + const auto& cm = m; + EXPECT_EQ(cm.hash_function(), hasher); + EXPECT_EQ(cm.key_eq(), equal); + EXPECT_EQ(cm.get_allocator(), alloc); + EXPECT_TRUE(cm.empty()); + EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre()); + EXPECT_GE(cm.bucket_count(), 123); +} + +template +struct is_std_unordered_set : std::false_type {}; + +template +struct is_std_unordered_set> : std::true_type {}; + +#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) +using has_cxx14_std_apis = std::true_type; +#else +using has_cxx14_std_apis = std::false_type; +#endif + +template +using expect_cxx14_apis = + absl::disjunction>, + has_cxx14_std_apis>; + +template +void BucketCountAllocTest(std::false_type) {} + +template +void BucketCountAllocTest(std::true_type) { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountAlloc) { + BucketCountAllocTest(expect_cxx14_apis()); +} + +template +void BucketCountHashAllocTest(std::false_type) {} + +template +void BucketCountHashAllocTest(std::true_type) { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) { + BucketCountHashAllocTest(expect_cxx14_apis()); +} + +#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS +using has_alloc_std_constructors = std::true_type; +#else +using has_alloc_std_constructors = std::false_type; +#endif + +template +using expect_alloc_constructors = + absl::disjunction>, + has_alloc_std_constructors>; + +template +void AllocTest(std::false_type) {} + +template +void AllocTest(std::true_type) { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); +} + +TYPED_TEST_P(ConstructorTest, Alloc) { + AllocTest(expect_alloc_constructors()); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +template +void InputIteratorBucketAllocTest(std::false_type) {} + +template +void InputIteratorBucketAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + 
std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { + InputIteratorBucketAllocTest(expect_cxx14_apis()); +} + +template +void InputIteratorBucketHashAllocTest(std::false_type) {} + +template +void InputIteratorBucketHashAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructor) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + EXPECT_NE(TypeParam(0, hasher, equal, alloc), n); +} + +template +void CopyConstructorAllocTest(std::false_type) {} + +template +void CopyConstructorAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { + CopyConstructorAllocTest(expect_alloc_constructors()); +} + +// TODO(alkis): Test non-propagating allocators on copy constructors. 
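
For readers following along: these TYPED_TEST_P suites only define the test bodies; a separate test .cc chooses concrete container types and instantiates the suite, as in the minimal sketch below. The sketch is illustrative only and not part of this patch; the StatefulTestingHash / StatefulTestingEqual / Alloc names are my assumption of what hash_policy_testing.h provides (the suite compares hash_function(), key_eq() and get_allocator() and constructs allocators from an integer id, which plain std::hash / std::allocator would not support).

#include <unordered_set>

#include "gtest/gtest.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

// One concrete container type is enough to run the whole suite; more types
// can be appended to SetTypes to run the identical tests against them.
using SetTypes = ::testing::Types<
    std::unordered_set<int, hash_internal::StatefulTestingHash,
                       hash_internal::StatefulTestingEqual,
                       hash_internal::Alloc<int>>>;

INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ConstructorTest, SetTypes);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl
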
+ +TYPED_TEST_P(ConstructorTest, MoveConstructor) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +template +void MoveConstructorAllocTest(std::false_type) {} + +template +void MoveConstructorAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { + MoveConstructorAllocTest(expect_alloc_constructors()); +} + +// TODO(alkis): Test non-propagating allocators on move constructors. + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +template +void InitializerListBucketAllocTest(std::false_type) {} + +template +void InitializerListBucketAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { + InitializerListBucketAllocTest(expect_cxx14_apis()); +} + +template +void InitializerListBucketHashAllocTest(std::false_type) {} + +template +void InitializerListBucketHashAllocTest(std::true_type) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); +} + +TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); +} + +TYPED_TEST_P(ConstructorTest, CopyAssignment) { 
+ using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); +} + +// TODO(alkis): Test [non-]propagating allocators on move/copy assignments +// (it depends on traits). + +TYPED_TEST_P(ConstructorTest, MoveAssignment) { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam t(m); + TypeParam n; + n = std::move(t); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam n({gen()}); + n = m; + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam t(m); + TypeParam n({gen()}); + n = std::move(t); + EXPECT_EQ(m, n); +} + +TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values); + m = *&m; // Avoid -Wself-assign. 
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); +} + +REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, + BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, + InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, + InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, + MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, + InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment, + MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, + MoveAssignmentOverwritesExisting, + AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h new file mode 100644 index 0000000..b35f766 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h @@ -0,0 +1,91 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class LookupTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(LookupTest); + +TYPED_TEST_P(LookupTest, Count) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) + EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v); +} + +TYPED_TEST_P(LookupTest, Find) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) { + typename TypeParam::iterator it = m.find(v); + static_assert(std::is_same::value, + ""); + static_assert(std::is_same())>::value, + ""); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v); + EXPECT_EQ(v, *it) << ::testing::PrintToString(v); + } +} + +TYPED_TEST_P(LookupTest, EqualRange) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) { + auto r = m.equal_range(v); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& v : values) { + auto r = m.equal_range(v); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(v, *r.first); + } +} + +REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h new file mode 100644 index 0000000..4c5e104 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h @@ -0,0 +1,86 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ + +#include +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class MembersTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(MembersTest); + +template +void UseType() {} + +TYPED_TEST_P(MembersTest, Typedefs) { + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval())), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + std::declval())), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, + typename TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, + typename TypeParam::const_pointer>())); +} + +TYPED_TEST_P(MembersTest, SimpleFunctions) { + EXPECT_GT(TypeParam().max_size(), 0); +} + +TYPED_TEST_P(MembersTest, BeginEnd) { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); +} + +REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h new file mode 100644 index 0000000..d8864bb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h @@ -0,0 +1,221 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ +#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/container/internal/hash_generator_testing.h" +#include "absl/container/internal/hash_policy_testing.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +template +class ModifiersTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(ModifiersTest); + +TYPED_TEST_P(ModifiersTest, Clear) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); +} + +TYPED_TEST_P(ModifiersTest, Insert) { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.insert(val); + EXPECT_FALSE(p.second); +} + +TYPED_TEST_P(ModifiersTest, InsertHint) { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert(m.end(), val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + it = m.insert(it, val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); +} + +TYPED_TEST_P(ModifiersTest, InsertRange) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); +} + +TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); +} + +TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { +#if !defined(__GLIBCXX__) + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, + hash_internal::Generator()); + std::vector values; + while (values.size() != 100) { + values.insert(values.end(), base_values.begin(), base_values.end()); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); +#endif +} + +TYPED_TEST_P(ModifiersTest, Emplace) { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.emplace(val); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); +} + +TYPED_TEST_P(ModifiersTest, EmplaceHint) { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. 
+ auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + it = m.emplace_hint(it, val); + EXPECT_EQ(val, *it); +} + +template +using IfNotVoid = typename std::enable_if::value, V>::type; + +// In openmap we chose not to return the iterator from erase because that's +// more expensive. As such we adapt erase to return an iterator here. +struct EraseFirst { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) const { + auto it = m->begin(); + m->erase(it++); + return it; + } +}; + +TYPED_TEST_P(ModifiersTest, Erase) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + std::vector values2; + for (const auto& val : values) + if (val != *m.begin()) values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), + values2.end())); +} + +TYPED_TEST_P(ModifiersTest, EraseRange) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); +} + +TYPED_TEST_P(ModifiersTest, EraseKey) { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, + hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0])); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, + values.end())); +} + +TYPED_TEST_P(ModifiersTest, Swap) { + using T = hash_internal::GeneratedType; + std::vector v1; + std::vector v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1)); +} + +// TODO(alkis): Write tests for extract. +// TODO(alkis): Write tests for merge. + +REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, + InsertRange, InsertWithinCapacity, + InsertRangeWithinCapacity, Emplace, EmplaceHint, + Erase, EraseRange, EraseKey, Swap); + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_map.h b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h new file mode 100644 index 0000000..6868e63 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h @@ -0,0 +1,604 @@ +// Copyright 2018 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: node_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `node_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. However, if you need pointer stability and cannot store +// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a +// valid alternative. As well, if you are migrating your code from using +// `std::unordered_map`, a `node_hash_map` provides a more straightforward +// migration, because it guarantees pointer stability. Consider migrating to +// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map` +// upon further review. + +#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_ +#define ABSL_CONTAINER_NODE_HASH_MAP_H_ + +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +template +class NodeHashMapPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::node_hash_map +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_map` is an unordered associative container which +// has been optimized for both speed and memory footprint in most common use +// cases. Its interface is similar to that of `std::unordered_map` with +// the following notable differences: +// +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the map is provided a compatible heterogeneous +// hashing function and equality operator. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash map. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `node_hash_map` uses the `absl::Hash` hashing framework. 
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Using `absl::node_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
+//
+// Example:
+//
+//   // Create a node hash map of three strings (that map to strings)
+//   absl::node_hash_map ducks =
+//     {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+//   // Insert a new element into the node hash map
+//   ducks.insert({"d", "donald"});
+//
+//   // Force a rehash of the node hash map
+//   ducks.rehash(0);
+//
+//   // Find the element with the key "b"
+//   std::string search_key = "b";
+//   auto result = ducks.find(search_key);
+//   if (result != ducks.end()) {
+//     std::cout << "Result: " << result->second << std::endl;
+//   }
+template ,
+          class Eq = absl::container_internal::hash_default_eq,
+          class Alloc = std::allocator>>
+class node_hash_map
+    : public absl::container_internal::raw_hash_map<
+          absl::container_internal::NodeHashMapPolicy, Hash, Eq,
+          Alloc> {
+  using Base = typename node_hash_map::raw_hash_map;
+
+ public:
+  // Constructors and Assignment Operators
+  //
+  // A node_hash_map supports the same overload set as `std::unordered_map`
+  // for construction and assignment:
+  //
+  // * Default constructor
+  //
+  //   // No allocation for the table's elements is made.
+  //   absl::node_hash_map map1;
+  //
+  // * Initializer List constructor
+  //
+  //   absl::node_hash_map map2 =
+  //       {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+  //
+  // * Copy constructor
+  //
+  //   absl::node_hash_map map3(map2);
+  //
+  // * Copy assignment operator
+  //
+  //   // Hash functor and Comparator are copied as well
+  //   absl::node_hash_map map4;
+  //   map4 = map3;
+  //
+  // * Move constructor
+  //
+  //   // Move is guaranteed efficient
+  //   absl::node_hash_map map5(std::move(map4));
+  //
+  // * Move assignment operator
+  //
+  //   // May be efficient if allocators are compatible
+  //   absl::node_hash_map map6;
+  //   map6 = std::move(map5);
+  //
+  // * Range constructor
+  //
+  //   std::vector> v = {{1, "a"}, {2, "b"}};
+  //   absl::node_hash_map map7(v.begin(), v.end());
+  node_hash_map() {}
+  using Base::Base;
+
+  // node_hash_map::begin()
+  //
+  // Returns an iterator to the beginning of the `node_hash_map`.
+  using Base::begin;
+
+  // node_hash_map::cbegin()
+  //
+  // Returns a const iterator to the beginning of the `node_hash_map`.
+  using Base::cbegin;
+
+  // node_hash_map::cend()
+  //
+  // Returns a const iterator to the end of the `node_hash_map`.
+  using Base::cend;
+
+  // node_hash_map::end()
+  //
+  // Returns an iterator to the end of the `node_hash_map`.
+  using Base::end;
+
+  // node_hash_map::capacity()
+  //
+  // Returns the number of element slots (assigned, deleted, and empty)
+  // available within the `node_hash_map`.
+  //
+  // NOTE: this member function is particular to `absl::node_hash_map` and is
+  // not provided in the `std::unordered_map` API.
+  using Base::capacity;
+
+  // node_hash_map::empty()
+  //
+  // Returns whether or not the `node_hash_map` is empty.
+  using Base::empty;
+
+  // node_hash_map::max_size()
+  //
+  // Returns the largest theoretical possible number of elements within a
+  // `node_hash_map` under current memory constraints. This value can be thought
+  // of as the largest value of `std::distance(begin(), end())` for a
+  // `node_hash_map`.
+  using Base::max_size;
+
+  // node_hash_map::size()
+  //
+  // Returns the number of elements currently within the `node_hash_map`.
+  using Base::size;
+
+  // node_hash_map::clear()
+  //
+  // Removes all elements from the `node_hash_map`. Invalidates any references,
+  // pointers, or iterators referring to contained elements.
+  //
+  // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+  // the underlying buffer call `erase(begin(), end())`.
+  using Base::clear;
+
+  // node_hash_map::erase()
+  //
+  // Erases elements within the `node_hash_map`. Erasing does not trigger a
+  // rehash. Overloads are listed below.
+  //
+  // void erase(const_iterator pos):
+  //
+  //   Erases the element at `position` of the `node_hash_map`, returning
+  //   `void`.
+  //
+  //   NOTE: this return behavior is different than that of STL containers in
+  //   general and `std::unordered_map` in particular.
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  //   Erases the elements in the open interval [`first`, `last`), returning an
+  //   iterator pointing to `last`.
+  //
+  // size_type erase(const key_type& key):
+  //
+  //   Erases the element with the matching key, if it exists, returning the
+  //   number of elements erased (0 or 1).
+  using Base::erase;
+
+  // node_hash_map::insert()
+  //
+  // Inserts an element of the specified value into the `node_hash_map`,
+  // returning an iterator pointing to the newly inserted element, provided that
+  // an element with the given key does not already exist. If rehashing occurs
+  // due to the insertion, all iterators are invalidated. Overloads are listed
+  // below.
+  //
+  // std::pair insert(const init_type& value):
+  //
+  //   Inserts a value into the `node_hash_map`. Returns a pair consisting of an
+  //   iterator to the inserted element (or to the element that prevented the
+  //   insertion) and a `bool` denoting whether the insertion took place.
+  //
+  // std::pair insert(T&& value):
+  // std::pair insert(init_type&& value):
+  //
+  //   Inserts a moveable value into the `node_hash_map`. Returns a `std::pair`
+  //   consisting of an iterator to the inserted element (or to the element that
+  //   prevented the insertion) and a `bool` denoting whether the insertion took
+  //   place.
+  //
+  // iterator insert(const_iterator hint, const init_type& value):
+  // iterator insert(const_iterator hint, T&& value):
+  // iterator insert(const_iterator hint, init_type&& value):
+  //
+  //   Inserts a value, using the position of `hint` as a non-binding suggestion
+  //   for where to begin the insertion search. Returns an iterator to the
+  //   inserted element, or to the existing element that prevented the
+  //   insertion.
+  //
+  // void insert(InputIterator first, InputIterator last):
+  //
+  //   Inserts a range of values [`first`, `last`).
+  //
+  //   NOTE: Although the STL does not specify which element may be inserted if
+  //   multiple keys compare equivalently, for `node_hash_map` we guarantee the
+  //   first match is inserted.
+  //
+  // void insert(std::initializer_list ilist):
+  //
+  //   Inserts the elements within the initializer list `ilist`.
+ // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `node_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due to + // the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert_or_assign(const init_type& k, T&& obj): + // std::pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // node_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... 
args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // node_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `node_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // node_hash_map::merge() + // + // Extracts elements from a given `source` node hash map into this + // `node_hash_map`. If the destination `node_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // node_hash_map::swap(node_hash_map& other) + // + // Exchanges the contents of this `node_hash_map` with those of the `other` + // node hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `node_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the node hash map's hashing and key equivalence + // functions be Swappable, and are exchaged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // node_hash_map::rehash(count) + // + // Rehashes the `node_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + using Base::rehash; + + // node_hash_map::reserve(count) + // + // Sets the number of slots in the `node_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // node_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. 
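+ //
+ // // Illustrative sketch (not in the upstream header): unlike
+ // // operator[], at() never inserts; it throws std::out_of_range when
+ // // the key is absent.
+ // absl::node_hash_map<std::string, int> ages = {{"bob", 5}};
+ // int b = ages.at("bob"); // b == 5
+ // // ages.at("nobody"); // would throw std::out_of_range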
+ using Base::at; + + // node_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `node_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // node_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `node_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `node_hash_map`. + using Base::count; + + // node_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `node_hash_map`. + using Base::equal_range; + + // node_hash_map::find() + // + // Finds an element with the passed `key` within the `node_hash_map`. + using Base::find; + + // node_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `node_hash_map`, performing an `insert()` if the key does not already + // exist. If an insertion occurs and results in a rehashing of the container, + // all iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // node_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `node_hash_map`. + using Base::bucket_count; + + // node_hash_map::load_factor() + // + // Returns the current load factor of the `node_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // node_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `node_hash_map`. Overloads are + // listed below. + // + // float node_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_map`. + // + // void node_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_map::get_allocator() + // + // Returns the allocator function associated with this `node_hash_map`. + using Base::get_allocator; + + // node_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_map`. + using Base::hash_function; + + // node_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; +}; + +// erase_if(node_hash_map<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. 
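+//
+// // Illustrative sketch (not in the upstream header): erase every entry
+// // whose mapped value is negative; the predicate receives each element
+// // as a key/value pair.
+// absl::node_hash_map<std::string, int> m = {{"a", 1}, {"b", -2}};
+// auto removed = absl::erase_if(
+// m, [](const auto& kv) { return kv.second < 0; }); // removed == 1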
+template +typename node_hash_map::size_type erase_if( + node_hash_map& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); +} + +namespace container_internal { + +template +class NodeHashMapPolicy + : public absl::container_internal::node_slot_policy< + std::pair&, NodeHashMapPolicy> { + using value_type = std::pair; + + public: + using key_type = Key; + using mapped_type = Value; + using init_type = std::pair; + + template + static value_type* new_element(Allocator* alloc, Args&&... args) { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + value_type* res = + absl::allocator_traits::allocate(pair_alloc, 1); + absl::allocator_traits::construct(pair_alloc, res, + std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, value_type* pair) { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + absl::allocator_traits::destroy(pair_alloc, pair); + absl::allocator_traits::deallocate(pair_alloc, pair, 1); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposePair(std::forward(f), + std::forward(args)...); + } + + static size_t element_space_used(const value_type*) { + return sizeof(value_type); + } + + static Value& value(value_type* elem) { return elem->second; } + static const Value& value(const value_type* elem) { return elem->second; } +}; +} // namespace container_internal + +namespace container_algorithm_internal { + +// Specialization of trait in absl/algorithm/container.h +template +struct IsUnorderedContainer< + absl::node_hash_map> : std::true_type {}; + +} // namespace container_algorithm_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_set.h b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h new file mode 100644 index 0000000..f2cc70c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h @@ -0,0 +1,500 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: node_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `node_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. 
+// +// In most cases, your default choice for a hash table should be a map of type +// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need +// pointer stability, a `node_hash_set` should be your preferred choice. As +// well, if you are migrating your code from using `std::unordered_set`, a +// `node_hash_set` should be an easy migration. Consider migrating to +// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set` +// upon further review. + +#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_ +#define ABSL_CONTAINER_NODE_HASH_SET_H_ + +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/node_slot_policy.h" +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +template +struct NodeHashSetPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::node_hash_set +// ----------------------------------------------------------------------------- +// +// An `absl::node_hash_set` is an unordered associative container which +// has been optimized for both speed and memory footprint in most common use +// cases. Its interface is similar to that of `std::unordered_set` with the +// following notable differences: +// +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the set is provided a compatible heterogeneous +// hashing function and equality operator. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash set. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `node_hash_set` uses the `absl::Hash` hashing framework. +// All fundamental and Abseil types that support the `absl::Hash` framework have +// a compatible equality operator for comparing insertions into `node_hash_set`. +// If your type is not yet supported by the `absl::Hash` framework, see +// absl/hash/hash.h for information on extending Abseil hashing to user-defined +// types. +// +// Using `absl::node_hash_set` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// +// Example: +// +// // Create a node hash set of three strings +// absl::node_hash_set ducks = +// {"huey", "dewey", "louie"}; +// +// // Insert a new element into the node hash set +// ducks.insert("donald"); +// +// // Force a rehash of the node hash set +// ducks.rehash(0); +// +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" << std::endl; +// } +template , + class Eq = absl::container_internal::hash_default_eq, + class Alloc = std::allocator> +class node_hash_set + : public absl::container_internal::raw_hash_set< + absl::container_internal::NodeHashSetPolicy, Hash, Eq, Alloc> { + using Base = typename node_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A node_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. 
+ // absl::node_hash_set set1; + // + // * Initializer List constructor + // + // absl::node_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"}}; + // + // * Copy constructor + // + // absl::node_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::node_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::node_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::node_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::node_hash_set set7(v.begin(), v.end()); + node_hash_set() {} + using Base::Base; + + // node_hash_set::begin() + // + // Returns an iterator to the beginning of the `node_hash_set`. + using Base::begin; + + // node_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `node_hash_set`. + using Base::cbegin; + + // node_hash_set::cend() + // + // Returns a const iterator to the end of the `node_hash_set`. + using Base::cend; + + // node_hash_set::end() + // + // Returns an iterator to the end of the `node_hash_set`. + using Base::end; + + // node_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_set`. + // + // NOTE: this member function is particular to `absl::node_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // node_hash_set::empty() + // + // Returns whether or not the `node_hash_set` is empty. + using Base::empty; + + // node_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `node_hash_set`. + using Base::max_size; + + // node_hash_set::size() + // + // Returns the number of elements currently within the `node_hash_set`. + using Base::size; + + // node_hash_set::clear() + // + // Removes all elements from the `node_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_set::erase() + // + // Erases elements within the `node_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_set`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_set` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // node_hash_set::insert() + // + // Inserts an element of the specified value into the `node_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. 
If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `node_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `node_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `node_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. 
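+ //
+ // // Illustrative sketch (not in the upstream header): moving an element
+ // // between two sets through its node handle, without copying the
+ // // element itself.
+ // absl::node_hash_set<std::string> a = {"huey"};
+ // absl::node_hash_set<std::string> b;
+ // b.insert(a.extract("huey")); // "huey" now lives in b, not in a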
+ using Base::extract;
+
+ // node_hash_set::merge()
+ //
+ // Extracts elements from a given `source` node hash set into this
+ // `node_hash_set`. If the destination `node_hash_set` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // node_hash_set::swap(node_hash_set& other)
+ //
+ // Exchanges the contents of this `node_hash_set` with those of the `other`
+ // node hash set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `node_hash_set` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the node hash set's hashing and key equivalence
+ // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the set's allocator has
+ // `std::allocator_traits::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // node_hash_set::rehash(count)
+ //
+ // Rehashes the `node_hash_set`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+ // NOTE: unlike behavior in `std::unordered_set`, references are also
+ // invalidated upon a `rehash()`.
+ using Base::rehash;
+
+ // node_hash_set::reserve(count)
+ //
+ // Sets the number of slots in the `node_hash_set` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // node_hash_set::contains()
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `node_hash_set`, returning `true` if so or `false` otherwise.
+ using Base::contains;
+
+ // node_hash_set::count(const Key& key) const
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `node_hash_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `node_hash_set`.
+ using Base::count;
+
+ // node_hash_set::equal_range()
+ //
+ // Returns a closed range [first, last], defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `node_hash_set`.
+ using Base::equal_range;
+
+ // node_hash_set::find()
+ //
+ // Finds an element with the passed `key` within the `node_hash_set`.
+ using Base::find;
+
+ // node_hash_set::bucket_count()
+ //
+ // Returns the number of "buckets" within the `node_hash_set`. Note that
+ // because a node hash set contains all elements within its internal storage,
+ // this value simply equals the current capacity of the `node_hash_set`.
+ using Base::bucket_count;
+
+ // node_hash_set::load_factor()
+ //
+ // Returns the current load factor of the `node_hash_set` (the average number
+ // of slots occupied with a value within the hash set).
+ using Base::load_factor;
+
+ // node_hash_set::max_load_factor()
+ //
+ // Manages the maximum load factor of the `node_hash_set`. Overloads are
+ // listed below.
+ // + // float node_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_set`. + // + // void node_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_set::get_allocator() + // + // Returns the allocator function associated with this `node_hash_set`. + using Base::get_allocator; + + // node_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_set`. + using Base::hash_function; + + // node_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; +}; + +// erase_if(node_hash_set<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. +template +typename node_hash_set::size_type erase_if( + node_hash_set& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); +} + +namespace container_internal { + +template +struct NodeHashSetPolicy + : absl::container_internal::node_slot_policy> { + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static T* new_element(Allocator* alloc, Args&&... args) { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + T* res = absl::allocator_traits::allocate(value_alloc, 1); + absl::allocator_traits::construct(value_alloc, res, + std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, T* elem) { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + absl::allocator_traits::destroy(value_alloc, elem); + absl::allocator_traits::deallocate(value_alloc, elem, 1); + } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } + + static size_t element_space_used(const T*) { return sizeof(T); } +}; +} // namespace container_internal + +namespace container_algorithm_internal { + +// Specialization of trait in absl/algorithm/container.h +template +struct IsUnorderedContainer> + : std::true_type {}; + +} // namespace container_algorithm_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_NODE_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h new file mode 100644 index 0000000..500115c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h @@ -0,0 +1,121 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: failure_signal_handler.h
+// -----------------------------------------------------------------------------
+//
+// This file configures the Abseil *failure signal handler* to capture and dump
+// useful debugging information (such as a stacktrace) upon program failure.
+//
+// To use the failure signal handler, call `absl::InstallFailureSignalHandler()`
+// very early in your program, usually in the first few lines of main():
+//
+// int main(int argc, char** argv) {
+// // Initialize the symbolizer to get a human-readable stack trace
+// absl::InitializeSymbolizer(argv[0]);
+//
+// absl::FailureSignalHandlerOptions options;
+// absl::InstallFailureSignalHandler(options);
+// DoSomethingInteresting();
+// return 0;
+// }
+//
+// Any program that raises a fatal signal (such as `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP`) will call the
+// installed failure signal handler and provide debugging information to stderr.
+//
+// Note that you should *not* install the Abseil failure signal handler more
+// than once. You may, of course, have another (non-Abseil) failure signal
+// handler installed (which would be triggered if Abseil's failure signal
+// handler sets `call_previous_handler` to `true`).
+
+#ifndef ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+#define ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// FailureSignalHandlerOptions
+//
+// Struct for holding `absl::InstallFailureSignalHandler()` configuration
+// options.
+struct FailureSignalHandlerOptions {
+ // If true, try to symbolize the stacktrace emitted on failure, provided that
+ // you have initialized a symbolizer for that purpose. (See symbolize.h for
+ // more information.)
+ bool symbolize_stacktrace = true;
+
+ // If true, try to run signal handlers on an alternate stack (if supported on
+ // the given platform). An alternate stack is useful for program crashes due
+ // to a stack overflow; by running on an alternate stack, the signal handler
+ // may run even when normal stack space has been exhausted. The downside of
+ // using an alternate stack is that extra memory for the alternate stack needs
+ // to be pre-allocated.
+ bool use_alternate_stack = true;
+
+ // If positive, indicates the number of seconds after which the failure signal
+ // handler is invoked to abort the program. Setting such an alarm is useful in
+ // cases where the failure signal handler itself may become hung or
+ // deadlocked.
+ int alarm_on_failure_secs = 3;
+
+ // If true, call the previously registered signal handler for the signal that
+ // was received (if one was registered) after the existing signal handler
+ // runs. This mechanism can be used to chain signal handlers together.
+ //
+ // If false, the signal is raised to the default handler for that signal
+ // (which normally terminates the program).
+ //
+ // IMPORTANT: If true, the chained fatal signal handlers must not try to
+ // recover from the fatal signal. Instead, they should terminate the program
+ // via some mechanism, like raising the default handler for the signal, or by
+ // calling `_exit()`. Note that the failure signal handler may put parts of
+ // the Abseil library into a state from which they cannot recover.
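+ //
+ // // Illustrative sketch (not in the upstream header; `MyChainedHandler`
+ // // is a hypothetical name): a chained handler that terminates via the
+ // // default disposition instead of attempting to recover.
+ // void MyChainedHandler(int signo) {
+ // // ... async-signal-safe cleanup only (see <csignal>) ...
+ // signal(signo, SIG_DFL); // restore the default handler
+ // raise(signo); // re-raise to terminate
+ // }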
+ bool call_previous_handler = false;
+
+ // If non-null, indicates a pointer to a callback function that will be called
+ // upon failure, with a string argument containing failure data. This function
+ // may be used as a hook to write failure data to a secondary location, such
+ // as a log file. This function will also be called with null data, as a hint
+ // to flush any buffered data before the program may be terminated. Consider
+ // flushing any buffered data in all calls to this function.
+ //
+ // Since this function runs within a signal handler, it should be
+ // async-signal-safe if possible.
+ // See http://man7.org/linux/man-pages/man7/signal-safety.7.html
+ void (*writerfn)(const char*) = nullptr;
+};
+
+// InstallFailureSignalHandler()
+//
+// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`,
+// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP` (provided they exist
+// on the given platform). The failure signal handler dumps program failure data
+// useful for debugging in an unspecified format to stderr. This data may
+// include the program counter, a stacktrace, and register information on some
+// systems; do not rely on an exact format for the output, as it is subject to
+// change.
+void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options);
+
+namespace debugging_internal {
+const char* FailureSignalToString(int signo);
+} // namespace debugging_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h
new file mode 100644
index 0000000..4bbaf4d
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h
@@ -0,0 +1,32 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+#define ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Return whether the byte at *addr is readable, without faulting.
+// Saves and restores errno.
+bool AddressIsReadable(const void *addr);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h
new file mode 100644
index 0000000..c314d9b
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h
@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An async-signal-safe and thread-safe demangler for Itanium C++ ABI +// (aka G++ V3 ABI). +// +// The demangler is implemented to be used in async signal handlers to +// symbolize stack traces. We cannot use libstdc++'s +// abi::__cxa_demangle() in such signal handlers since it's not async +// signal safe (it uses malloc() internally). +// +// Note that this demangler doesn't support full demangling. More +// specifically, it doesn't print types of function parameters and +// types of template arguments. It just skips them. However, it's +// still very useful to extract basic information such as class, +// function, constructor, destructor, and operator names. +// +// See the implementation note in demangle.cc if you are interested. +// +// Example: +// +// | Mangled Name | The Demangler | abi::__cxa_demangle() +// |---------------|---------------|----------------------- +// | _Z1fv | f() | f() +// | _Z1fi | f() | f(int) +// | _Z3foo3bar | foo() | foo(bar) +// | _Z1fIiEvi | f<>() | void f(int) +// | _ZN1N1fE | N::f | N::f +// | _ZN3Foo3BarEv | Foo::Bar() | Foo::Bar() +// | _Zrm1XS_" | operator%() | operator%(X, X) +// | _ZN3FooC1Ev | Foo::Foo() | Foo::Foo() +// | _Z1fSs | f() | f(std::basic_string, +// | | | std::allocator >) +// +// See the unit test for more examples. +// +// Note: we might want to write demanglers for ABIs other than Itanium +// C++ ABI in the future. +// + +#ifndef ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ +#define ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// Demangle `mangled`. On success, return true and write the +// demangled symbol name to `out`. Otherwise, return false. +// `out` is modified even if demangling is unsuccessful. +bool Demangle(const char *mangled, char *out, int out_size); + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h new file mode 100644 index 0000000..113071a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h @@ -0,0 +1,139 @@ +/* + * Copyright 2017 The Abseil Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Allow dynamic symbol lookup for in-memory Elf images. + +#ifndef ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ +#define ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ + +// Including this will define the __GLIBC__ macro if glibc is being +// used. 
+#include + +#include "absl/base/config.h" + +// Maybe one day we can rewrite this file not to require the elf +// symbol extensions in glibc, but for right now we need them. +#ifdef ABSL_HAVE_ELF_MEM_IMAGE +#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set +#endif + +#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \ + !defined(__native_client__) && !defined(__asmjs__) && \ + !defined(__wasm__) && !defined(__HAIKU__) +#define ABSL_HAVE_ELF_MEM_IMAGE 1 +#endif + +#ifdef ABSL_HAVE_ELF_MEM_IMAGE + +#include // for ElfW + +#if defined(__FreeBSD__) && !defined(ElfW) +#define ElfW(x) __ElfN(x) +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// An in-memory ELF image (may not exist on disk). +class ElfMemImage { + private: + // Sentinel: there could never be an elf image at &kInvalidBaseSentinel. + static const int kInvalidBaseSentinel; + + public: + // Sentinel: there could never be an elf image at this address. + static constexpr const void *const kInvalidBase = + static_cast(&kInvalidBaseSentinel); + + // Information about a single vdso symbol. + // All pointers are into .dynsym, .dynstr, or .text of the VDSO. + // Do not free() them or modify through them. + struct SymbolInfo { + const char *name; // E.g. "__vdso_getcpu" + const char *version; // E.g. "LINUX_2.6", could be "" + // for unversioned symbol. + const void *address; // Relocated symbol address. + const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table. + }; + + // Supports iteration over all dynamic symbols. + class SymbolIterator { + public: + friend class ElfMemImage; + const SymbolInfo *operator->() const; + const SymbolInfo &operator*() const; + SymbolIterator& operator++(); + bool operator!=(const SymbolIterator &rhs) const; + bool operator==(const SymbolIterator &rhs) const; + private: + SymbolIterator(const void *const image, int index); + void Update(int incr); + SymbolInfo info_; + int index_; + const void *const image_; + }; + + + explicit ElfMemImage(const void *base); + void Init(const void *base); + bool IsPresent() const { return ehdr_ != nullptr; } + const ElfW(Phdr)* GetPhdr(int index) const; + const ElfW(Sym)* GetDynsym(int index) const; + const ElfW(Versym)* GetVersym(int index) const; + const ElfW(Verdef)* GetVerdef(int index) const; + const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const; + const char* GetDynstr(ElfW(Word) offset) const; + const void* GetSymAddr(const ElfW(Sym) *sym) const; + const char* GetVerstr(ElfW(Word) offset) const; + int GetNumSymbols() const; + + SymbolIterator begin() const; + SymbolIterator end() const; + + // Look up versioned dynamic symbol in the image. + // Returns false if image is not present, or doesn't contain given + // symbol/version/type combination. + // If info_out is non-null, additional details are filled in. + bool LookupSymbol(const char *name, const char *version, + int symbol_type, SymbolInfo *info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if image isn't present + // or doesn't have a symbol overlapping given address. + // If info_out is non-null, additional details are filled in. 
+ bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; + + private: + const ElfW(Ehdr) *ehdr_; + const ElfW(Sym) *dynsym_; + const ElfW(Versym) *versym_; + const ElfW(Verdef) *verdef_; + const ElfW(Word) *hash_; + const char *dynstr_; + size_t strsize_; + size_t verdefnum_; + ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD). +}; + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HAVE_ELF_MEM_IMAGE + +#endif // ABSL_DEBUGGING_INTERNAL_ELF_MEM_IMAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h new file mode 100644 index 0000000..190af87 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h @@ -0,0 +1,64 @@ +// +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ +#define ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// Type of function used for printing in stack trace dumping, etc. +// We avoid closures to keep things simple. +typedef void OutputWriter(const char*, void*); + +// RegisterDebugStackTraceHook() allows to register a single routine +// `hook` that is called each time DumpStackTrace() is called. +// `hook` may be called from a signal handler. +typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, + OutputWriter* writer, void* writer_arg); + +// Registration of SymbolizeUrlEmitter for use inside of a signal handler. +// This is inherently unsafe and must be signal safe code. +void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); +SymbolizeUrlEmitter GetDebugStackTraceHook(); + +// Returns the program counter from signal context, or nullptr if +// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of +// ucontext_t on non-POSIX systems. +void* GetProgramCounter(void* const vuc); + +// Uses `writer` to dump the program counter, stack trace, and stack +// frame sizes. +void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], + int frame_sizes[], int depth, + int min_dropped_frames, + bool symbolize_stacktrace, + OutputWriter* writer, void* writer_arg); + +// Dump current stack trace omitting the topmost `min_dropped_frames` stack +// frames. 
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg);
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h
new file mode 100644
index 0000000..f41b64c
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h
@@ -0,0 +1,50 @@
+//
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Helper function for measuring stack consumption of signal handlers.
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+#define ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_
+
+#include "absl/base/config.h"
+
+// The code in this module is not portable.
+// Use this feature test macro to detect its availability.
+#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
+#error ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION cannot be set directly
+#elif !defined(__APPLE__) && !defined(_WIN32) && \
+ (defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
+ defined(__aarch64__) || defined(__riscv))
+#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+
+// Returns the stack consumption in bytes for the code exercised by
+// signal_handler. To measure stack consumption, signal_handler is registered
+// as a signal handler, so the code that it exercises must be async-signal
+// safe. The argument of signal_handler is an implementation detail of signal
+// handlers and should be ignored by the code for signal_handler. Use global
+// variables to pass information between your test code and signal_handler.
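+//
+// // Illustrative sketch (not in the upstream header; `MyHandler` is a
+// // hypothetical name):
+// void MyHandler(int) { char buf[256]; /* touch the stack */ }
+// ...
+// int used = absl::debugging_internal::GetSignalHandlerStackConsumption(
+// MyHandler); // approximate stack bytes consumed by MyHandler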
+int GetSignalHandlerStackConsumption(void (*signal_handler)(int)); + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION + +#endif // ABSL_DEBUGGING_INTERNAL_STACK_CONSUMPTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc new file mode 100644 index 0000000..4f9db9d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc @@ -0,0 +1,204 @@ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ + +// Generate stack tracer for aarch64 + +#if defined(__linux__) +#include +#include +#include +#endif + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/internal/address_is_readable.h" +#include "absl/debugging/internal/vdso_support.h" // a no-op on non-elf or non-glibc systems +#include "absl/debugging/stacktrace.h" + +static const uintptr_t kUnknownFrameSize = 0; + +#if defined(__linux__) +// Returns the address of the VDSO __kernel_rt_sigreturn function, if present. +static const unsigned char* GetKernelRtSigreturnAddress() { + constexpr uintptr_t kImpossibleAddress = 1; + ABSL_CONST_INIT static std::atomic memoized{kImpossibleAddress}; + uintptr_t address = memoized.load(std::memory_order_relaxed); + if (address != kImpossibleAddress) { + return reinterpret_cast(address); + } + + address = reinterpret_cast(nullptr); + +#ifdef ABSL_HAVE_VDSO_SUPPORT + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; + auto lookup = [&](int type) { + return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.6.39", type, + &symbol_info); + }; + if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || + symbol_info.address == nullptr) { + // Unexpected: VDSO is present, yet the expected symbol is missing + // or null. + assert(false && "VDSO is present, but doesn't have expected symbol"); + } else { + if (reinterpret_cast(symbol_info.address) != + kImpossibleAddress) { + address = reinterpret_cast(symbol_info.address); + } else { + assert(false && "VDSO returned invalid address"); + } + } + } +#endif + + memoized.store(address, std::memory_order_relaxed); + return reinterpret_cast(address); +} +#endif // __linux__ + +// Compute the size of a stack frame in [low..high). We assume that +// low < high. Return size of kUnknownFrameSize. +template +static inline uintptr_t ComputeStackFrameSize(const T* low, + const T* high) { + const char* low_char_ptr = reinterpret_cast(low); + const char* high_char_ptr = reinterpret_cast(high); + return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize; +} + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return null if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. 
+static void **NextStackFrame(void **old_frame_pointer, const void *uc) {
+ void **new_frame_pointer = reinterpret_cast(*old_frame_pointer);
+ bool check_frame_size = true;
+
+#if defined(__linux__)
+ if (WITH_CONTEXT && uc != nullptr) {
+ // Check to see if next frame's return address is __kernel_rt_sigreturn.
+ if (old_frame_pointer[1] == GetKernelRtSigreturnAddress()) {
+ const ucontext_t *ucv = static_cast(uc);
+ // old_frame_pointer[0] is not suitable for unwinding, look at
+ // ucontext to discover frame pointer before signal.
+ void **const pre_signal_frame_pointer =
+ reinterpret_cast(ucv->uc_mcontext.regs[29]);
+
+ // Check that alleged frame pointer is actually readable. This is to
+ // prevent "double fault" in case we hit the first fault due to e.g.
+ // stack corruption.
+ if (!absl::debugging_internal::AddressIsReadable(
+ pre_signal_frame_pointer))
+ return nullptr;
+
+ // Alleged frame pointer is readable, use it for further unwinding.
+ new_frame_pointer = pre_signal_frame_pointer;
+
+ // Skip frame size check if we return from a signal. We may be using
+ // an alternate stack for signals.
+ check_frame_size = false;
+ }
+ }
+#endif
+
+ // aarch64 ABI requires stack pointer to be 16-byte-aligned.
+ if ((reinterpret_cast(new_frame_pointer) & 15) != 0)
+ return nullptr;
+
+ // Check frame size. In strict mode, we assume frames to be under
+ // 100,000 bytes. In non-strict mode, we relax the limit to 1MB.
+ if (check_frame_size) {
+ const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000;
+ const uintptr_t frame_size =
+ ComputeStackFrameSize(old_frame_pointer, new_frame_pointer);
+ if (frame_size == kUnknownFrameSize || frame_size > max_size)
+ return nullptr;
+ }
+
+ return new_frame_pointer;
+}
+
+template
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void *ucp, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **frame_pointer = reinterpret_cast(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ skip_count++; // Skip the frame for this function.
+ int n = 0;
+
+ // The frame pointer points to the low address of a frame. The first 64-bit
+ // word of a frame points to the next frame up the call chain, which normally
+ // is just after the high address of the current frame. The second word of
+ // a frame contains the return address of the caller. To find a pc value
+ // associated with the current frame, we need to go down a level in the call
+ // chain. So we remember the return address of the last frame seen. This
+ // does not work for the first stack frame, which belongs to UnwindImpl() but
+ // we skip the frame for UnwindImpl() anyway.
+ void* prev_return_address = nullptr;
+
+ while (frame_pointer && n < max_depth) {
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_frame_pointer = + NextStackFrame(frame_pointer, ucp); + + if (skip_count > 0) { + skip_count--; + } else { + result[n] = prev_return_address; + if (IS_STACK_FRAMES) { + sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); + } + n++; + } + prev_return_address = frame_pointer[1]; + frame_pointer = next_frame_pointer; + } + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. + const int kMaxUnwind = 200; + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + frame_pointer = + NextStackFrame(frame_pointer, ucp); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_AARCH64_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc new file mode 100644 index 0000000..102a2a1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc @@ -0,0 +1,139 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This is inspired by Craig Silverstein's PowerPC stacktrace code. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_ + +#include + +#include "absl/debugging/stacktrace.h" + +// WARNING: +// This only works if all your code is in either ARM or THUMB mode. With +// interworking, the frame pointer of the caller can either be in r11 (ARM +// mode) or r7 (THUMB mode). A callee only saves the frame pointer of its +// mode in a fixed location on its stack frame. If the caller is a different +// mode, there is no easy way to find the frame pointer. It can either be +// still in the designated register or saved on stack along with other callee +// saved registers. + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return nullptr if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +static void **NextStackFrame(void **old_sp) { + void **new_sp = (void**) old_sp[-1]; + + // Check that the transition from frame pointer old_sp to frame + // pointer new_sp isn't clearly bogus + if (STRICT_UNWINDING) { + // With the stack growing downwards, older stack frame must be + // at a greater address that the current one. + if (new_sp <= old_sp) return nullptr; + // Assume stack frames larger than 100,000 bytes are bogus. 
+ if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+ } else {
+ // In the non-strict mode, allow discontiguous stack frames.
+ // (alternate-signal-stacks for example).
+ if (new_sp == old_sp) return nullptr;
+ // And allow frames up to about 1MB.
+ if ((new_sp > old_sp)
+ && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+ }
+ if ((uintptr_t)new_sp & (sizeof(void *) - 1)) return nullptr;
+ return new_sp;
+}
+
+// This ensures that absl::GetStackTrace sets up the Link Register properly.
+#ifdef __GNUC__
+void StacktraceArmDummyFunction() __attribute__((noinline));
+void StacktraceArmDummyFunction() { __asm__ volatile(""); }
+#else
+# error StacktraceArmDummyFunction() needs to be ported to this platform.
+#endif
+
+template
+static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
+ const void * /* ucp */, int *min_dropped_frames) {
+#ifdef __GNUC__
+ void **sp = reinterpret_cast(__builtin_frame_address(0));
+#else
+# error reading stack pointer not yet supported on this platform.
+#endif
+
+ // On ARM, the return address is stored in the link register (r14).
+ // This is not saved on the stack frame of a leaf function. To
+ // simplify code that reads return addresses, we call a dummy
+ // function so that the return address of this function is also
+ // stored in the stack frame. This works at least for gcc.
+ StacktraceArmDummyFunction();
+
+ int n = 0;
+ while (sp && n < max_depth) {
+ // The absl::GetStackFrames routine is called when we are in some
+ // informational context (the failure signal handler for example).
+ // Use the non-strict unwinding rules to produce a stack trace
+ // that is as complete as possible (even if it contains a few bogus
+ // entries in some rare cases).
+ void **next_sp = NextStackFrame(sp);
+
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ result[n] = *sp;
+
+ if (IS_STACK_FRAMES) {
+ if (next_sp > sp) {
+ sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp;
+ } else {
+ // A frame-size of 0 is used to indicate unknown frame size.
+ sizes[n] = 0;
+ }
+ }
+ n++;
+ }
+ sp = next_sp;
+ }
+ if (min_dropped_frames != nullptr) {
+ // Implementation detail: we clamp the max of frames we are willing to
+ // count, so as not to spend too much time in the loop below.
+ const int kMaxUnwind = 200;
+ int num_dropped_frames = 0;
+ for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
+ sp = NextStackFrame(sp);
+ }
+ *min_dropped_frames = num_dropped_frames;
+ }
+ return n;
+}
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace debugging_internal {
+bool StackTraceWorksForTest() {
+ return false;
+}
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_ARM_INL_H_
diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h
new file mode 100644
index 0000000..3929b1b
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 The Abseil Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * Defines ABSL_STACKTRACE_INL_HEADER to the *-inl.h containing + * actual unwinder implementation. + * This header is "private" to stacktrace.cc. + * DO NOT include it into any other files. +*/ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ + +#include "absl/base/config.h" + +#if defined(ABSL_STACKTRACE_INL_HEADER) +#error ABSL_STACKTRACE_INL_HEADER cannot be directly set + +#elif defined(_WIN32) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_win32-inl.inc" + +#elif defined(__APPLE__) +#ifdef ABSL_HAVE_THREAD_LOCAL +// Thread local support required for UnwindImpl. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // defined(ABSL_HAVE_THREAD_LOCAL) + +// Emscripten stacktraces rely on JS. Do not use them in standalone mode. +#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_emscripten-inl.inc" + +#elif defined(__linux__) && !defined(__ANDROID__) + +#if defined(NO_FRAME_POINTER) && \ + (defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)) +// Note: The libunwind-based implementation is not available to open-source +// users. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_libunwind-inl.inc" +#define STACKTRACE_USES_LIBUNWIND 1 +#elif defined(NO_FRAME_POINTER) && defined(__has_include) +#if __has_include() +// Note: When using glibc this may require -funwind-tables to function properly. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // __has_include() +#elif defined(__i386__) || defined(__x86_64__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_x86-inl.inc" +#elif defined(__ppc__) || defined(__PPC__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_powerpc-inl.inc" +#elif defined(__aarch64__) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_aarch64-inl.inc" +#elif defined(__riscv) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_riscv-inl.inc" +#elif defined(__has_include) +#if __has_include() +// Note: When using glibc this may require -funwind-tables to function properly. +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_generic-inl.inc" +#endif // __has_include() +#endif // defined(__has_include) + +#endif // defined(__linux__) && !defined(__ANDROID__) + +// Fallback to the empty implementation. 
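+//
+// For illustration, the intended consumer (stacktrace.cc) uses this macro
+// roughly as follows -- a sketch, not a verbatim copy of that file:
+//
+//   #include "absl/debugging/internal/stacktrace_config.h"
+//   #ifdef ABSL_STACKTRACE_INL_HEADER
+//   #include ABSL_STACKTRACE_INL_HEADER
+//   #else
+//   #error Cannot calculate stack trace: will need to write for your environment
+//   #endif
+//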
+#if !defined(ABSL_STACKTRACE_INL_HEADER) +#define ABSL_STACKTRACE_INL_HEADER \ + "absl/debugging/internal/stacktrace_unimplemented-inl.inc" +#endif + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc new file mode 100644 index 0000000..0f44451 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc @@ -0,0 +1,110 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Portable implementation - just use glibc +// +// Note: The glibc implementation may cause a call to malloc. +// This can cause a deadlock in HeapProfiler. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ + +#include + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/stacktrace.h" + +extern "C" { +uintptr_t emscripten_stack_snapshot(); +uint32_t emscripten_stack_unwind_buffer(uintptr_t pc, void *buffer, + uint32_t depth); +} + +// Sometimes, we can try to get a stack trace from within a stack +// trace, which can cause a self-deadlock. +// Protect against such reentrant call by failing to get a stack trace. +// +// We use __thread here because the code here is extremely low level -- it is +// called while collecting stack traces from within malloc and mmap, and thus +// can not call anything which might call malloc or mmap itself. +static __thread int recursive = 0; + +// The stack trace function might be invoked very early in the program's +// execution (e.g. from the very first malloc). +// As such, we suppress usage of backtrace during this early stage of execution. +static std::atomic disable_stacktraces(true); // Disabled until healthy. +// Waiting until static initializers run seems to be late enough. +// This file is included into stacktrace.cc so this will only run once. +ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() { + // Check if we can even create stacktraces. If not, bail early and leave + // disable_stacktraces set as-is. + // clang-format off + if (!EM_ASM_INT({ return (typeof wasmOffsetConverter !== 'undefined'); })) { + return 0; + } + // clang-format on + disable_stacktraces.store(false, std::memory_order_relaxed); + return 0; +}(); + +template +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { + return 0; + } + ++recursive; + + static_cast(ucp); // Unused. 
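+
+  // A worked example of the bookkeeping below (illustrative numbers, not
+  // taken from a real run): if the unwinder captures size = 10 frames and
+  // the caller passed skip_count = 2 with max_depth = 5, then
+  // result_count = 5 (frames 2..6 land in result[0..4]) and
+  // *min_dropped_frames = 10 - 2 - 5 = 3.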
+ constexpr int kStackLength = 64; + void *stack[kStackLength]; + + int size; + uintptr_t pc = emscripten_stack_snapshot(); + size = emscripten_stack_unwind_buffer(pc, stack, kStackLength); + + int result_count = size - skip_count; + if (result_count < 0) result_count = 0; + if (result_count > max_depth) result_count = max_depth; + for (int i = 0; i < result_count; i++) result[i] = stack[i + skip_count]; + + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. + memset(sizes, 0, sizeof(*sizes) * result_count); + } + if (min_dropped_frames != nullptr) { + if (size - skip_count - max_depth > 0) { + *min_dropped_frames = size - skip_count - max_depth; + } else { + *min_dropped_frames = 0; + } + } + + --recursive; + + return result_count; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { return true; } +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_EMSCRIPTEN_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc new file mode 100644 index 0000000..b2792a1 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc @@ -0,0 +1,108 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Portable implementation - just use glibc +// +// Note: The glibc implementation may cause a call to malloc. +// This can cause a deadlock in HeapProfiler. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ + +#include +#include +#include + +#include "absl/debugging/stacktrace.h" +#include "absl/base/attributes.h" + +// Sometimes, we can try to get a stack trace from within a stack +// trace, because we don't block signals inside this code (which would be too +// expensive: the two extra system calls per stack trace do matter here). +// That can cause a self-deadlock. +// Protect against such reentrant call by failing to get a stack trace. +// +// We use __thread here because the code here is extremely low level -- it is +// called while collecting stack traces from within malloc and mmap, and thus +// can not call anything which might call malloc or mmap itself. +static __thread int recursive = 0; + +// The stack trace function might be invoked very early in the program's +// execution (e.g. from the very first malloc if using tcmalloc). Also, the +// glibc implementation itself will trigger malloc the first time it is called. +// As such, we suppress usage of backtrace during this early stage of execution. +static std::atomic disable_stacktraces(true); // Disabled until healthy. +// Waiting until static initializers run seems to be late enough. +// This file is included into stacktrace.cc so this will only run once. 
+ABSL_ATTRIBUTE_UNUSED static int stacktraces_enabler = []() { + void* unused_stack[1]; + // Force the first backtrace to happen early to get the one-time shared lib + // loading (allocation) out of the way. After the first call it is much safer + // to use backtrace from a signal handler if we crash somewhere later. + backtrace(unused_stack, 1); + disable_stacktraces.store(false, std::memory_order_relaxed); + return 0; +}(); + +template +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) { + return 0; + } + ++recursive; + + static_cast(ucp); // Unused. + static const int kStackLength = 64; + void * stack[kStackLength]; + int size; + + size = backtrace(stack, kStackLength); + skip_count++; // we want to skip the current frame as well + int result_count = size - skip_count; + if (result_count < 0) + result_count = 0; + if (result_count > max_depth) + result_count = max_depth; + for (int i = 0; i < result_count; i++) + result[i] = stack[i + skip_count]; + + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. + memset(sizes, 0, sizeof(*sizes) * result_count); + } + if (min_dropped_frames != nullptr) { + if (size - skip_count - max_depth > 0) { + *min_dropped_frames = size - skip_count - max_depth; + } else { + *min_dropped_frames = 0; + } + } + + --recursive; + + return result_count; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc new file mode 100644 index 0000000..085cef6 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc @@ -0,0 +1,258 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Produce stack trace. I'm guessing (hoping!) the code is much like +// for x86. For apple machines, at least, it seems to be; see +// https://developer.apple.com/documentation/mac/runtimehtml/RTArch-59.html +// https://www.linux-foundation.org/spec/ELF/ppc64/PPC-elf64abi-1.9.html#STACK +// Linux has similar code: http://patchwork.ozlabs.org/linuxppc/patch?id=8882 + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ + +#if defined(__linux__) +#include // for PT_NIP. 
+#include <ucontext.h>   // for ucontext_t
+#endif
+
+#include <cassert>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+
+#include "absl/base/attributes.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+
+// Given a stack pointer, return the saved link register value.
+// Note that this is the link register for a callee.
+static inline void *StacktracePowerPCGetLR(void **sp) {
+  // PowerPC has 3 main ABIs, which say where in the stack the
+  // Link Register is.  For DARWIN and AIX (used by apple and
+  // linux ppc64), it's in sp[2].  For SYSV (used by linux ppc),
+  // it's in sp[1].
+#if defined(_CALL_AIX) || defined(_CALL_DARWIN)
+  return *(sp+2);
+#elif defined(_CALL_SYSV)
+  return *(sp+1);
+#elif defined(__APPLE__) || defined(__FreeBSD__) || \
+    (defined(__linux__) && defined(__PPC64__))
+  // This check is in case the compiler doesn't define _CALL_AIX/etc.
+  return *(sp+2);
+#elif defined(__linux)
+  // This check is in case the compiler doesn't define _CALL_SYSV.
+  return *(sp+1);
+#else
+#error Need to specify the PPC ABI for your architecture.
+#endif
+}
+
+// Given a pointer to a stack frame, locate and return the calling
+// stackframe, or return null if no stackframe can be found. Perform sanity
+// checks (the strictness of which is controlled by the boolean parameter
+// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
+template <bool STRICT_UNWINDING, bool IS_WITH_CONTEXT>
+ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
+ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
+static void **NextStackFrame(void **old_sp, const void *uc) {
+  void **new_sp = (void **) *old_sp;
+  enum { kStackAlignment = 16 };
+
+  // Check that the transition from frame pointer old_sp to frame
+  // pointer new_sp isn't clearly bogus
+  if (STRICT_UNWINDING) {
+    // With the stack growing downwards, older stack frame must be
+    // at a greater address than the current one.
+    if (new_sp <= old_sp) return nullptr;
+    // Assume stack frames larger than 100,000 bytes are bogus.
+    if ((uintptr_t)new_sp - (uintptr_t)old_sp > 100000) return nullptr;
+  } else {
+    // In the non-strict mode, allow discontiguous stack frames.
+    // (alternate-signal-stacks for example).
+    if (new_sp == old_sp) return nullptr;
+    // And allow frames up to about 1MB.
+    if ((new_sp > old_sp)
+        && ((uintptr_t)new_sp - (uintptr_t)old_sp > 1000000)) return nullptr;
+  }
+  if ((uintptr_t)new_sp % kStackAlignment != 0) return nullptr;
+
+#if defined(__linux__)
+  enum StackTraceKernelSymbolStatus {
+      kNotInitialized = 0, kAddressValid, kAddressInvalid };
+
+  if (IS_WITH_CONTEXT && uc != nullptr) {
+    static StackTraceKernelSymbolStatus kernel_symbol_status =
+        kNotInitialized;  // Sentinel: not computed yet.
+    // Initialize with sentinel value: __kernel_rt_sigtramp_rt64 can not
+    // possibly be there.
+    static const unsigned char *kernel_sigtramp_rt64_address = nullptr;
+    if (kernel_symbol_status == kNotInitialized) {
+      absl::debugging_internal::VDSOSupport vdso;
+      if (vdso.IsPresent()) {
+        absl::debugging_internal::VDSOSupport::SymbolInfo
+            sigtramp_rt64_symbol_info;
+        if (!vdso.LookupSymbol(
+                "__kernel_sigtramp_rt64", "LINUX_2.6.15",
+                absl::debugging_internal::VDSOSupport::kVDSOSymbolType,
+                &sigtramp_rt64_symbol_info) ||
+            sigtramp_rt64_symbol_info.address == nullptr) {
+          // Unexpected: VDSO is present, yet the expected symbol is missing
+          // or null.
+ assert(false && "VDSO is present, but doesn't have expected symbol"); + kernel_symbol_status = kAddressInvalid; + } else { + kernel_sigtramp_rt64_address = + reinterpret_cast( + sigtramp_rt64_symbol_info.address); + kernel_symbol_status = kAddressValid; + } + } else { + kernel_symbol_status = kAddressInvalid; + } + } + + if (new_sp != nullptr && + kernel_symbol_status == kAddressValid && + StacktracePowerPCGetLR(new_sp) == kernel_sigtramp_rt64_address) { + const ucontext_t* signal_context = + reinterpret_cast(uc); + void **const sp_before_signal = +#if defined(__PPC64__) + reinterpret_cast(signal_context->uc_mcontext.gp_regs[PT_R1]); +#else + reinterpret_cast( + signal_context->uc_mcontext.uc_regs->gregs[PT_R1]); +#endif + // Check that alleged sp before signal is nonnull and is reasonably + // aligned. + if (sp_before_signal != nullptr && + ((uintptr_t)sp_before_signal % kStackAlignment) == 0) { + // Check that alleged stack pointer is actually readable. This is to + // prevent a "double fault" in case we hit the first fault due to e.g. + // a stack corruption. + if (absl::debugging_internal::AddressIsReadable(sp_before_signal)) { + // Alleged stack pointer is readable, use it for further unwinding. + new_sp = sp_before_signal; + } + } + } + } +#endif + + return new_sp; +} + +// This ensures that absl::GetStackTrace sets up the Link Register properly. +ABSL_ATTRIBUTE_NOINLINE static void AbslStacktracePowerPCDummyFunction() { + ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + void **sp; + // Apple macOS uses an old version of gnu as -- both Darwin 7.9.0 (Panther) + // and Darwin 8.8.1 (Tiger) use as 1.38. This means we have to use a + // different asm syntax. I don't know quite the best way to discriminate + // systems using the old as from the new one; I've gone with __APPLE__. +#ifdef __APPLE__ + __asm__ volatile ("mr %0,r1" : "=r" (sp)); +#else + __asm__ volatile ("mr %0,1" : "=r" (sp)); +#endif + + // On PowerPC, the "Link Register" or "Link Record" (LR), is a stack + // entry that holds the return address of the subroutine call (what + // instruction we run after our function finishes). This is the + // same as the stack-pointer of our parent routine, which is what we + // want here. While the compiler will always(?) set up LR for + // subroutine calls, it may not for leaf functions (such as this one). + // This routine forces the compiler (at least gcc) to push it anyway. + AbslStacktracePowerPCDummyFunction(); + + // The LR save area is used by the callee, so the top entry is bogus. + skip_count++; + + int n = 0; + + // Unlike ABIs of X86 and ARM, PowerPC ABIs say that return address (in + // the link register) of a function call is stored in the caller's stack + // frame instead of the callee's. When we look for the return address + // associated with a stack frame, we need to make sure that there is a + // caller frame before it. So we call NextStackFrame before entering the + // loop below and check next_sp instead of sp for loop termination. + // The outermost frame is set up by runtimes and it does not have a + // caller frame, so it is skipped. + + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). 
+ // Use the non-strict unwinding rules to produce a stack trace + // that is as complete as possible (even if it contains a few + // bogus entries in some rare cases). + void **next_sp = NextStackFrame(sp, ucp); + + while (next_sp && n < max_depth) { + if (skip_count > 0) { + skip_count--; + } else { + result[n] = StacktracePowerPCGetLR(sp); + if (IS_STACK_FRAMES) { + if (next_sp > sp) { + sizes[n] = (uintptr_t)next_sp - (uintptr_t)sp; + } else { + // A frame-size of 0 is used to indicate unknown frame size. + sizes[n] = 0; + } + } + n++; + } + + sp = next_sp; + next_sp = NextStackFrame(sp, ucp); + } + + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. + const int kMaxUnwind = 1000; + int num_dropped_frames = 0; + for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + next_sp = NextStackFrame(next_sp, ucp); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_POWERPC_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc new file mode 100644 index 0000000..7123b71 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc @@ -0,0 +1,236 @@ +// Copyright 2021 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_RISCV_INL_H_ + +// Generate stack trace for riscv + +#include + +#include "absl/base/config.h" +#if defined(__linux__) +#include +#include +#include +#endif + +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/internal/address_is_readable.h" +#include "absl/debugging/internal/vdso_support.h" +#include "absl/debugging/stacktrace.h" + +static const uintptr_t kUnknownFrameSize = 0; + +#if defined(__linux__) +// Returns the address of the VDSO __kernel_rt_sigreturn function, if present. +static const unsigned char *GetKernelRtSigreturnAddress() { + constexpr uintptr_t kImpossibleAddress = 0; + ABSL_CONST_INIT static std::atomic memoized(kImpossibleAddress); + uintptr_t address = memoized.load(std::memory_order_relaxed); + if (address != kImpossibleAddress) { + return reinterpret_cast(address); + } + + address = reinterpret_cast(nullptr); + +#if ABSL_HAVE_VDSO_SUPPORT + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info; + // Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10. 
+ auto lookup = [&](int type) { + return vdso.LookupSymbol("__vdso_rt_sigreturn", "LINUX_4.15", type, + &symbol_info); + }; + if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) || + symbol_info.address == nullptr) { + // Unexpected: VDSO is present, yet the expected symbol is missing or + // null. + assert(false && "VDSO is present, but doesn't have expected symbol"); + } else { + if (reinterpret_cast(symbol_info.address) != + kImpossibleAddress) { + address = reinterpret_cast(symbol_info.address); + } else { + assert(false && "VDSO returned invalid address"); + } + } + } +#endif + + memoized.store(address, std::memory_order_relaxed); + return reinterpret_cast(address); +} +#endif // __linux__ + +// Compute the size of a stack frame in [low..high). We assume that low < high. +// Return size of kUnknownFrameSize. +template +static inline uintptr_t ComputeStackFrameSize(const T *low, const T *high) { + const char *low_char_ptr = reinterpret_cast(low); + const char *high_char_ptr = reinterpret_cast(high); + return low < high ? high_char_ptr - low_char_ptr : kUnknownFrameSize; +} + +// Given a pointer to a stack frame, locate and return the calling stackframe, +// or return null if no stackframe can be found. Perform sanity checks (the +// strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void ** NextStackFrame(void **old_frame_pointer, const void *uc) { + // . + // . + // . + // +-> +----------------+ + // | | return address | + // | | previous fp | + // | | ... | + // | +----------------+ <-+ + // | | return address | | + // +---|- previous fp | | + // | ... | | + // $fp ->|----------------+ | + // | return address | | + // | previous fp -|---+ + // $sp ->| ... | + // +----------------+ + void **new_frame_pointer = reinterpret_cast(old_frame_pointer[-2]); + bool check_frame_size = true; + +#if defined(__linux__) + if (WITH_CONTEXT && uc != nullptr) { + // Check to see if next frame's return address is __kernel_rt_sigreturn. + if (old_frame_pointer[-1] == GetKernelRtSigreturnAddress()) { + const ucontext_t *ucv = static_cast(uc); + // old_frame_pointer is not suitable for unwinding, look at ucontext to + // discover frame pointer before signal. + // + // RISCV ELF psABI has the frame pointer at x8/fp/s0. + // -- RISCV psABI Table 18.2 + void **const pre_signal_frame_pointer = + reinterpret_cast(ucv->uc_mcontext.__gregs[8]); + + // Check the alleged frame pointer is actually readable. This is to + // prevent "double fault" in case we hit the first fault due to stack + // corruption. + if (!absl::debugging_internal::AddressIsReadable( + pre_signal_frame_pointer)) + return nullptr; + + // Alleged frame pointer is readable, use it for further unwinding. + new_frame_pointer = pre_signal_frame_pointer; + + // Skip frame size check if we return from a signal. We may be using an + // alterate stack for signals. + check_frame_size = false; + } + } +#endif + + // The RISCV ELF psABI mandates that the stack pointer is always 16-byte + // aligned. + // FIXME(abdulras) this doesn't hold for ILP32E which only mandates a 4-byte + // alignment. + if ((reinterpret_cast(new_frame_pointer) & 15) != 0) + return nullptr; + + // Check frame size. In strict mode, we assume frames to be under 100,000 + // bytes. In non-strict mode, we relax the limit to 1MB. 
+ if (check_frame_size) { + const uintptr_t max_size = STRICT_UNWINDING ? 100000 : 1000000; + const uintptr_t frame_size = + ComputeStackFrameSize(old_frame_pointer, new_frame_pointer); + if (frame_size == kUnknownFrameSize || frame_size > max_size) + return nullptr; + } + + return new_frame_pointer; +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + // The `frame_pointer` that is computed here points to the top of the frame. + // The two words preceding the address are the return address and the previous + // frame pointer. +#if defined(__GNUC__) + void **frame_pointer = reinterpret_cast(__builtin_frame_address(0)); +#else +#error reading stack pointer not yet supported on this platform +#endif + + int n = 0; + void *return_address = nullptr; + while (frame_pointer && n < max_depth) { + return_address = frame_pointer[-1]; + + // The absl::GetStackFrames routine is called when we are in some + // informational context (the failure signal handler for example). Use the + // non-strict unwinding rules to produce a stack trace that is as complete + // as possible (even if it contains a few bogus entries in some rare cases). + void **next_frame_pointer = + NextStackFrame(frame_pointer, ucp); + + if (skip_count > 0) { + skip_count--; + } else { + result[n] = return_address; + if (IS_STACK_FRAMES) { + sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer); + } + n++; + } + + frame_pointer = next_frame_pointer; + } + + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. 
+ const int kMaxUnwind = 200; + int num_dropped_frames = 0; + for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + frame_pointer = + NextStackFrame(frame_pointer, ucp); + } + *min_dropped_frames = num_dropped_frames; + } + + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { return true; } +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc new file mode 100644 index 0000000..5b8fb19 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc @@ -0,0 +1,24 @@ +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ + +template +static int UnwindImpl(void** /* result */, int* /* sizes */, + int /* max_depth */, int /* skip_count */, + const void* /* ucp */, int *min_dropped_frames) { + if (min_dropped_frames != nullptr) { + *min_dropped_frames = 0; + } + return 0; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return false; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_UNIMPLEMENTED_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc new file mode 100644 index 0000000..1c666c8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc @@ -0,0 +1,93 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Produces a stack trace for Windows. Normally, one could use +// stacktrace_x86-inl.h or stacktrace_x86_64-inl.h -- and indeed, that +// should work for binaries compiled using MSVC in "debug" mode. +// However, in "release" mode, Windows uses frame-pointer +// optimization, which makes getting a stack trace very difficult. +// +// There are several approaches one can take. One is to use Windows +// intrinsics like StackWalk64. These can work, but have restrictions +// on how successful they can be. Another attempt is to write a +// version of stacktrace_x86-inl.h that has heuristic support for +// dealing with FPO, similar to what WinDbg does (see +// http://www.nynaeve.net/?p=97). There are (non-working) examples of +// these approaches, complete with TODOs, in stacktrace_win32-inl.h#1 +// +// The solution we've ended up doing is to call the undocumented +// windows function RtlCaptureStackBackTrace, which probably doesn't +// work with FPO but at least is fast, and doesn't require a symbol +// server. 
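+//
+// Stripped of the bookkeeping below, the capture reduces to a single call of
+// roughly this shape (a sketch of the call made in UnwindImpl):
+//
+//   void* frames[64];
+//   USHORT captured = RtlCaptureStackBackTrace_fn(
+//       skip_count + 2,   // the extra 2 drops this unwinder and its caller
+//       64, frames, nullptr);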
+// +// This code is inspired by a patch from David Vitek: +// https://code.google.com/p/google-perftools/issues/detail?id=83 + +#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ +#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ + +#include // for GetProcAddress and GetModuleHandle +#include + +typedef USHORT NTAPI RtlCaptureStackBackTrace_Function( + IN ULONG frames_to_skip, + IN ULONG frames_to_capture, + OUT PVOID *backtrace, + OUT PULONG backtrace_hash); + +// It is not possible to load RtlCaptureStackBackTrace at static init time in +// UWP. CaptureStackBackTrace is the public version of RtlCaptureStackBackTrace +#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ + !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = + &::CaptureStackBackTrace; +#else +// Load the function we need at static init time, where we don't have +// to worry about someone else holding the loader's lock. +static RtlCaptureStackBackTrace_Function* const RtlCaptureStackBackTrace_fn = + (RtlCaptureStackBackTrace_Function*)GetProcAddress( + GetModuleHandleA("ntdll.dll"), "RtlCaptureStackBackTrace"); +#endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP + +template +static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count, + const void*, int* min_dropped_frames) { + int n = 0; + if (!RtlCaptureStackBackTrace_fn) { + // can't find a stacktrace with no function to call + } else { + n = (int)RtlCaptureStackBackTrace_fn(skip_count + 2, max_depth, result, 0); + } + if (IS_STACK_FRAMES) { + // No implementation for finding out the stack frame sizes yet. + memset(sizes, 0, sizeof(*sizes) * n); + } + if (min_dropped_frames != nullptr) { + // Not implemented. + *min_dropped_frames = 0; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return false; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_WIN32_INL_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc new file mode 100644 index 0000000..1b5d823 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc @@ -0,0 +1,369 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
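+//
+// Produce stack trace
+//
+// At its core, this file implements classic frame-pointer chasing. With every
+// sanity check, VDSO special case and signal-context fixup below stripped
+// away, the underlying idea is just the following sketch (simplified, not the
+// actual implementation):
+//
+//   void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));
+//   while (fp != nullptr && n < max_depth) {
+//     result[n++] = fp[1];                     // saved return address
+//     fp = reinterpret_cast<void **>(fp[0]);   // saved caller frame pointer
+//   }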
+
+#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
+
+#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
+#include <ucontext.h>  // for ucontext_t
+#endif
+
+#if !defined(_WIN32)
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/address_is_readable.h"
+#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
+#include "absl/debugging/stacktrace.h"
+
+#include "absl/base/internal/raw_logging.h"
+
+using absl::debugging_internal::AddressIsReadable;
+
+#if defined(__linux__) && defined(__i386__)
+// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
+// preceding "syscall" or "sysenter".
+// If __kernel_vsyscall uses frame pointer, answer 0.
+//
+// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
+// to analyze before giving up. Up to kMaxBytes+1 bytes of
+// instructions could be accessed.
+//
+// Here are known __kernel_vsyscall instruction sequences:
+//
+// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
+// Used on Intel.
+//  0xffffe400 <__kernel_vsyscall+0>:  push  %ecx
+//  0xffffe401 <__kernel_vsyscall+1>:  push  %edx
+//  0xffffe402 <__kernel_vsyscall+2>:  push  %ebp
+//  0xffffe403 <__kernel_vsyscall+3>:  mov   %esp,%ebp
+//  0xffffe405 <__kernel_vsyscall+5>:  sysenter
+//
+// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
+// Used on AMD.
+//  0xffffe400 <__kernel_vsyscall+0>:  push  %ebp
+//  0xffffe401 <__kernel_vsyscall+1>:  mov   %ecx,%ebp
+//  0xffffe403 <__kernel_vsyscall+3>:  syscall
+//
+// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S)
+//  0xffffe400 <__kernel_vsyscall+0>:  int $0x80
+//  0xffffe401 <__kernel_vsyscall+1>:  ret
+//
+static const int kMaxBytes = 10;
+
+// We use assert()s instead of DCHECK()s -- this is too low level
+// for DCHECK().
+
+static int CountPushInstructions(const unsigned char *const addr) {
+  int result = 0;
+  for (int i = 0; i < kMaxBytes; ++i) {
+    if (addr[i] == 0x89) {
+      // "mov reg,reg"
+      if (addr[i + 1] == 0xE5) {
+        // Found "mov %esp,%ebp".
+        return 0;
+      }
+      ++i;  // Skip register encoding byte.
+    } else if (addr[i] == 0x0F &&
+               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
+      // Found "sysenter" or "syscall".
+      return result;
+    } else if ((addr[i] & 0xF0) == 0x50) {
+      // Found "push %reg".
+      ++result;
+    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
+      // Found "int $0x80"
+      assert(result == 0);
+      return 0;
+    } else {
+      // Unexpected instruction.
+      assert(false && "unexpected instruction in __kernel_vsyscall");
+      return 0;
+    }
+  }
+  // Unexpected: didn't find SYSENTER or SYSCALL in
+  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
+  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
+  return 0;
+}
+#endif
+
+// Assume stack frames larger than 100,000 bytes are bogus.
+static const int kMaxFrameBytes = 100000;
+
+// Returns the stack frame pointer from signal context, 0 if unknown.
+// vuc is a ucontext_t *. We use void* to avoid the use
+// of ucontext_t on non-POSIX systems.
+static uintptr_t GetFP(const void *vuc) {
+#if !defined(__linux__)
+  static_cast<void>(vuc);  // Avoid an unused argument compiler warning.
+#else + if (vuc != nullptr) { + auto *uc = reinterpret_cast(vuc); +#if defined(__i386__) + const auto bp = uc->uc_mcontext.gregs[REG_EBP]; + const auto sp = uc->uc_mcontext.gregs[REG_ESP]; +#elif defined(__x86_64__) + const auto bp = uc->uc_mcontext.gregs[REG_RBP]; + const auto sp = uc->uc_mcontext.gregs[REG_RSP]; +#else + const uintptr_t bp = 0; + const uintptr_t sp = 0; +#endif + // Sanity-check that the base pointer is valid. It's possible that some + // code in the process is compiled with --copt=-fomit-frame-pointer or + // --copt=-momit-leaf-frame-pointer. + // + // TODO(bcmills): -momit-leaf-frame-pointer is currently the default + // behavior when building with clang. Talk to the C++ toolchain team about + // fixing that. + if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp; + + // If bp isn't a plausible frame pointer, return the stack pointer instead. + // If we're lucky, it points to the start of a stack frame; otherwise, we'll + // get one frame of garbage in the stack trace and fail the sanity check on + // the next iteration. + return sp; + } +#endif + return 0; +} + +// Given a pointer to a stack frame, locate and return the calling +// stackframe, or return null if no stackframe can be found. Perform sanity +// checks (the strictness of which is controlled by the boolean parameter +// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned. +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +static void **NextStackFrame(void **old_fp, const void *uc, + size_t stack_low, size_t stack_high) { + void **new_fp = (void **)*old_fp; + +#if defined(__linux__) && defined(__i386__) + if (WITH_CONTEXT && uc != nullptr) { + // How many "push %reg" instructions are there at __kernel_vsyscall? + // This is constant for a given kernel and processor, so compute + // it only once. + static int num_push_instructions = -1; // Sentinel: not computed yet. + // Initialize with sentinel value: __kernel_rt_sigreturn can not possibly + // be there. + static const unsigned char *kernel_rt_sigreturn_address = nullptr; + static const unsigned char *kernel_vsyscall_address = nullptr; + if (num_push_instructions == -1) { +#ifdef ABSL_HAVE_VDSO_SUPPORT + absl::debugging_internal::VDSOSupport vdso; + if (vdso.IsPresent()) { + absl::debugging_internal::VDSOSupport::SymbolInfo + rt_sigreturn_symbol_info; + absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info; + if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC, + &rt_sigreturn_symbol_info) || + !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC, + &vsyscall_symbol_info) || + rt_sigreturn_symbol_info.address == nullptr || + vsyscall_symbol_info.address == nullptr) { + // Unexpected: 32-bit VDSO is present, yet one of the expected + // symbols is missing or null. 
+ assert(false && "VDSO is present, but doesn't have expected symbols"); + num_push_instructions = 0; + } else { + kernel_rt_sigreturn_address = + reinterpret_cast( + rt_sigreturn_symbol_info.address); + kernel_vsyscall_address = + reinterpret_cast( + vsyscall_symbol_info.address); + num_push_instructions = + CountPushInstructions(kernel_vsyscall_address); + } + } else { + num_push_instructions = 0; + } +#else // ABSL_HAVE_VDSO_SUPPORT + num_push_instructions = 0; +#endif // ABSL_HAVE_VDSO_SUPPORT + } + if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr && + old_fp[1] == kernel_rt_sigreturn_address) { + const ucontext_t *ucv = static_cast(uc); + // This kernel does not use frame pointer in its VDSO code, + // and so %ebp is not suitable for unwinding. + void **const reg_ebp = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_EBP]); + const unsigned char *const reg_eip = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_EIP]); + if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip && + reg_eip - kernel_vsyscall_address < kMaxBytes) { + // We "stepped up" to __kernel_vsyscall, but %ebp is not usable. + // Restore from 'ucv' instead. + void **const reg_esp = + reinterpret_cast(ucv->uc_mcontext.gregs[REG_ESP]); + // Check that alleged %esp is not null and is reasonably aligned. + if (reg_esp && + ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) { + // Check that alleged %esp is actually readable. This is to prevent + // "double fault" in case we hit the first fault due to e.g. stack + // corruption. + void *const reg_esp2 = reg_esp[num_push_instructions - 1]; + if (AddressIsReadable(reg_esp2)) { + // Alleged %esp is readable, use it for further unwinding. + new_fp = reinterpret_cast(reg_esp2); + } + } + } + } + } +#endif + + const uintptr_t old_fp_u = reinterpret_cast(old_fp); + const uintptr_t new_fp_u = reinterpret_cast(new_fp); + + // Check that the transition from frame pointer old_fp to frame + // pointer new_fp isn't clearly bogus. Skip the checks if new_fp + // matches the signal context, so that we don't skip out early when + // using an alternate signal stack. + // + // TODO(bcmills): The GetFP call should be completely unnecessary when + // ENABLE_COMBINED_UNWINDER is set (because we should be back in the thread's + // stack by this point), but it is empirically still needed (e.g. when the + // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some + // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what + // it's supposed to. + if (STRICT_UNWINDING && + (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) { + // With the stack growing downwards, older stack frame must be + // at a greater address that the current one. + if (new_fp_u <= old_fp_u) return nullptr; + if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr; + + if (stack_low < old_fp_u && old_fp_u <= stack_high) { + // Old BP was in the expected stack region... + if (!(stack_low < new_fp_u && new_fp_u <= stack_high)) { + // ... but new BP is outside of expected stack region. + // It is most likely bogus. + return nullptr; + } + } else { + // We may be here if we are executing in a co-routine with a + // separate stack. We can't do safety checks in this case. + } + } else { + if (new_fp == nullptr) return nullptr; // skip AddressIsReadable() below + // In the non-strict mode, allow discontiguous stack frames. + // (alternate-signal-stacks for example). 
+ if (new_fp == old_fp) return nullptr; + } + + if (new_fp_u & (sizeof(void *) - 1)) return nullptr; +#ifdef __i386__ + // On 32-bit machines, the stack pointer can be very close to + // 0xffffffff, so we explicitly check for a pointer into the + // last two pages in the address space + if (new_fp_u >= 0xffffe000) return nullptr; +#endif +#if !defined(_WIN32) + if (!STRICT_UNWINDING) { + // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test + // on AMD-based machines with VDSO-enabled kernels. + // Make an extra sanity check to insure new_fp is readable. + // Note: NextStackFrame() is only called while the program + // is already on its last leg, so it's ok to be slow here. + + if (!AddressIsReadable(new_fp)) { + return nullptr; + } + } +#endif + return new_fp; +} + +template +ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack. +ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack. +ABSL_ATTRIBUTE_NOINLINE +static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count, + const void *ucp, int *min_dropped_frames) { + int n = 0; + void **fp = reinterpret_cast(__builtin_frame_address(0)); + + size_t stack_low = getpagesize(); // Assume that the first page is not stack. + size_t stack_high = std::numeric_limits::max() - sizeof(void *); + + while (fp && n < max_depth) { + if (*(fp + 1) == reinterpret_cast(0)) { + // In 64-bit code, we often see a frame that + // points to itself and has a return address of 0. + break; + } + void **next_fp = NextStackFrame( + fp, ucp, stack_low, stack_high); + if (skip_count > 0) { + skip_count--; + } else { + result[n] = *(fp + 1); + if (IS_STACK_FRAMES) { + if (next_fp > fp) { + sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp; + } else { + // A frame-size of 0 is used to indicate unknown frame size. + sizes[n] = 0; + } + } + n++; + } + fp = next_fp; + } + if (min_dropped_frames != nullptr) { + // Implementation detail: we clamp the max of frames we are willing to + // count, so as not to spend too much time in the loop below. + const int kMaxUnwind = 1000; + int num_dropped_frames = 0; + for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) { + if (skip_count > 0) { + skip_count--; + } else { + num_dropped_frames++; + } + fp = NextStackFrame(fp, ucp, stack_low, + stack_high); + } + *min_dropped_frames = num_dropped_frames; + } + return n; +} + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { +bool StackTraceWorksForTest() { + return true; +} +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h new file mode 100644 index 0000000..27d5e65 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h @@ -0,0 +1,153 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This file contains internal parts of the Abseil symbolizer. +// Do not depend on the anything in this file, it may change at anytime. + +#ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ +#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ + +#ifdef __cplusplus + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set +#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \ + && !defined(__asmjs__) && !defined(__wasm__) +#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1 + +#include +#include // For ElfW() macro. +#include +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// Iterates over all sections, invoking callback on each with the section name +// and the section header. +// +// Returns true on success; otherwise returns false in case of errors. +// +// This is not async-signal-safe. +bool ForEachSection(int fd, + const std::function& callback); + +// Gets the section header for the given name, if it exists. Returns true on +// success. Otherwise, returns false. +bool GetSectionHeaderByName(int fd, const char *name, size_t name_len, + ElfW(Shdr) *out); + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE + +#ifdef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE cannot be directly set +#elif defined(__APPLE__) +#define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1 +#endif + +#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE +#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set +#elif defined(__EMSCRIPTEN__) +#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +struct SymbolDecoratorArgs { + // The program counter we are getting symbolic name for. + const void *pc; + // 0 for main executable, load address for shared libraries. + ptrdiff_t relocation; + // Read-only file descriptor for ELF image covering "pc", + // or -1 if no such ELF image exists in /proc/self/maps. + int fd; + // Output buffer, size. + // Note: the buffer may not be empty -- default symbolizer may have already + // produced some output, and earlier decorators may have adorned it in + // some way. You are free to replace or augment the contents (within the + // symbol_buf_size limit). + char *const symbol_buf; + size_t symbol_buf_size; + // Temporary scratch space, size. + // Use that space in preference to allocating your own stack buffer to + // conserve stack. + char *const tmp_buf; + size_t tmp_buf_size; + // User-provided argument + void* arg; +}; +using SymbolDecorator = void (*)(const SymbolDecoratorArgs *); + +// Installs a function-pointer as a decorator. Returns a value less than zero +// if the system cannot install the decorator. Otherwise, returns a unique +// identifier corresponding to the decorator. This identifier can be used to +// uninstall the decorator - See RemoveSymbolDecorator() below. +int InstallSymbolDecorator(SymbolDecorator decorator, void* arg); + +// Removes a previously installed function-pointer decorator. Parameter "ticket" +// is the return-value from calling InstallSymbolDecorator(). +bool RemoveSymbolDecorator(int ticket); + +// Remove all installed decorators. Returns true if successful, false if +// symbolization is currently in progress. 
+bool RemoveAllSymbolDecorators(void); + +// Registers an address range to a file mapping. +// +// Preconditions: +// start <= end +// filename != nullptr +// +// Returns true if the file was successfully registered. +bool RegisterFileMappingHint(const void* start, const void* end, + uint64_t offset, const char* filename); + +// Looks up the file mapping registered by RegisterFileMappingHint for an +// address range. If there is one, the file name is stored in *filename and +// *start and *end are modified to reflect the registered mapping. Returns +// whether any hint was found. +bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, + const char** filename); + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // __cplusplus + +#include + +#ifdef __cplusplus +extern "C" +#endif // __cplusplus + + bool + AbslInternalGetFileMappingHint(const void** start, const void** end, + uint64_t* offset, const char** filename); + +#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h new file mode 100644 index 0000000..6562c6c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h @@ -0,0 +1,158 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Allow dynamic symbol lookup in the kernel VDSO page. +// +// VDSO stands for "Virtual Dynamic Shared Object" -- a page of +// executable code, which looks like a shared library, but doesn't +// necessarily exist anywhere on disk, and which gets mmap()ed into +// every process by kernels which support VDSO, such as 2.6.x for 32-bit +// executables, and 2.6.24 and above for 64-bit executables. +// +// More details could be found here: +// http://www.trilithium.com/johan/2005/08/linux-gate/ +// +// VDSOSupport -- a class representing kernel VDSO (if present). +// +// Example usage: +// VDSOSupport vdso; +// VDSOSupport::SymbolInfo info; +// typedef (*FN)(unsigned *, void *, void *); +// FN fn = nullptr; +// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) { +// fn = reinterpret_cast(info.address); +// } + +#ifndef ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_ +#define ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/debugging/internal/elf_mem_image.h" + +#ifdef ABSL_HAVE_ELF_MEM_IMAGE + +#ifdef ABSL_HAVE_VDSO_SUPPORT +#error ABSL_HAVE_VDSO_SUPPORT cannot be directly set +#else +#define ABSL_HAVE_VDSO_SUPPORT 1 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// NOTE: this class may be used from within tcmalloc, and can not +// use any memory allocation routines. 
+class VDSOSupport { + public: + VDSOSupport(); + + typedef ElfMemImage::SymbolInfo SymbolInfo; + typedef ElfMemImage::SymbolIterator SymbolIterator; + + // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE + // depending on how the kernel is built. The kernel is normally built with + // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a + // compile-time constant. +#ifdef __powerpc64__ + enum { kVDSOSymbolType = STT_NOTYPE }; +#else + enum { kVDSOSymbolType = STT_FUNC }; +#endif + + // Answers whether we have a vdso at all. + bool IsPresent() const { return image_.IsPresent(); } + + // Allow to iterate over all VDSO symbols. + SymbolIterator begin() const { return image_.begin(); } + SymbolIterator end() const { return image_.end(); } + + // Look up versioned dynamic symbol in the kernel VDSO. + // Returns false if VDSO is not present, or doesn't contain given + // symbol/version/type combination. + // If info_out != nullptr, additional details are filled in. + bool LookupSymbol(const char *name, const char *version, + int symbol_type, SymbolInfo *info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if VDSO isn't present + // or doesn't have a symbol overlapping given address. + // If info_out != nullptr, additional details are filled in. + bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; + + // Used only for testing. Replace real VDSO base with a mock. + // Returns previous value of vdso_base_. After you are done testing, + // you are expected to call SetBase() with previous value, in order to + // reset state to the way it was. + const void *SetBase(const void *s); + + // Computes vdso_base_ and returns it. Should be called as early as + // possible; before any thread creation, chroot or setuid. + static const void *Init(); + + private: + // image_ represents VDSO ELF image in memory. + // image_.ehdr_ == nullptr implies there is no VDSO. + ElfMemImage image_; + + // Cached value of auxv AT_SYSINFO_EHDR, computed once. + // This is a tri-state: + // kInvalidBase => value hasn't been determined yet. + // 0 => there is no VDSO. + // else => vma of VDSO Elf{32,64}_Ehdr. + // + // When testing with mock VDSO, low bit is set. + // The low bit is always available because vdso_base_ is + // page-aligned. + static std::atomic vdso_base_; + + // NOLINT on 'long' because these routines mimic kernel api. + // The 'cache' parameter may be used by some versions of the kernel, + // and should be nullptr or point to a static buffer containing at + // least two 'long's. + static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'. + void *unused); + static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'. + void *unused); + typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'. + void *unused); + + // This function pointer may point to InitAndGetCPU, + // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization. + ABSL_CONST_INIT static std::atomic getcpu_fn_; + + friend int GetCPU(void); // Needs access to getcpu_fn_. + + VDSOSupport(const VDSOSupport&) = delete; + VDSOSupport& operator=(const VDSOSupport&) = delete; +}; + +// Same as sched_getcpu() on later glibc versions. +// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present, +// otherwise use syscall(SYS_getcpu,...). +// May return -1 with errno == ENOSYS if the kernel doesn't +// support SYS_getcpu. 
+int GetCPU();
+
+} // namespace debugging_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HAVE_ELF_MEM_IMAGE
+
+#endif // ABSL_DEBUGGING_INTERNAL_VDSO_SUPPORT_H_
diff --git a/CAPI/cpp/grpc/include/absl/debugging/leak_check.h b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h
new file mode 100644
index 0000000..eff162f
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h
@@ -0,0 +1,150 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: leak_check.h
+// -----------------------------------------------------------------------------
+//
+// This file contains functions that affect leak checking behavior within
+// targets built with the LeakSanitizer (LSan), a memory leak detector that is
+// integrated within the AddressSanitizer (ASan) as an additional component, or
+// which can be used standalone. LSan and ASan are included (or can be provided)
+// as additional components for most compilers such as Clang, gcc and MSVC.
+// Note: this leak checking API is not yet supported in MSVC.
+// Leak checking is enabled by default in all ASan builds.
+//
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer
+// is enabled. To use the mode, simply pass `-fsanitize=address` to both the
+// compiler and linker. An example Bazel command could be
+//
+//   $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ...
+//
+// GCC and Clang also support a standalone LeakSanitizer mode (a mode which does
+// not also use AddressSanitizer). To use the mode, simply pass
+// `-fsanitize=leak` to both the compiler and linker. Since GCC does not
+// currently provide a way of detecting this mode at compile-time, GCC users
+// must also pass -DLEAK_SANITIZER to the compiler. An example Bazel command
+// could be
+//
+//   $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak
+//     --linkopt=-fsanitize=leak ...
+//
+// -----------------------------------------------------------------------------
+#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
+#define ABSL_DEBUGGING_LEAK_CHECK_H_
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// HaveLeakSanitizer()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target.
+bool HaveLeakSanitizer();
+
+// LeakCheckerIsActive()
+//
+// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
+// currently built into this target and is turned on.
+bool LeakCheckerIsActive();
+
+// DoIgnoreLeak()
+//
+// Implements `IgnoreLeak()` below. This function should usually
+// not be called directly; calling `IgnoreLeak()` is preferred.
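+//
+// For example (a minimal sketch; grounded in the IgnoreLeak() contract below,
+// which lets the pointer land anywhere inside the allocation):
+//
+//   char* buf = new char[1024];
+//   absl::IgnoreLeak(buf + 512);  // ignores the whole 1024-byte allocation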
+void DoIgnoreLeak(const void* ptr); + +// IgnoreLeak() +// +// Instruct the leak sanitizer to ignore leak warnings on the object referenced +// by the passed pointer, as well as all heap objects transitively referenced +// by it. The passed object pointer can point to either the beginning of the +// object or anywhere within it. +// +// Example: +// +// static T* obj = IgnoreLeak(new T(...)); +// +// If the passed `ptr` does not point to an actively allocated object at the +// time `IgnoreLeak()` is called, the call is a no-op; if it is actively +// allocated, leak sanitizer will assume this object is referenced even if +// there is no actual reference in user memory. +// +template +T* IgnoreLeak(T* ptr) { + DoIgnoreLeak(ptr); + return ptr; +} + +// FindAndReportLeaks() +// +// If any leaks are detected, prints a leak report and returns true. This +// function may be called repeatedly, and does not affect end-of-process leak +// checking. +// +// Example: +// if (FindAndReportLeaks()) { +// ... diagnostic already printed. Exit with failure code. +// exit(1) +// } +bool FindAndReportLeaks(); + +// LeakCheckDisabler +// +// This helper class indicates that any heap allocations done in the code block +// covered by the scoped object, which should be allocated on the stack, will +// not be reported as leaks. Leak check disabling will occur within the code +// block and any nested function calls within the code block. +// +// Example: +// +// void Foo() { +// LeakCheckDisabler disabler; +// ... code that allocates objects whose leaks should be ignored ... +// } +// +// REQUIRES: Destructor runs in same thread as constructor +class LeakCheckDisabler { + public: + LeakCheckDisabler(); + LeakCheckDisabler(const LeakCheckDisabler&) = delete; + LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete; + ~LeakCheckDisabler(); +}; + +// RegisterLivePointers() +// +// Registers `ptr[0,size-1]` as pointers to memory that is still actively being +// referenced and for which leak checking should be ignored. This function is +// useful if you store pointers in mapped memory, for memory ranges that we know +// are correct but for which normal analysis would flag as leaked code. +void RegisterLivePointers(const void* ptr, size_t size); + +// UnRegisterLivePointers() +// +// Deregisters the pointers previously marked as active in +// `RegisterLivePointers()`, enabling leak checking of those pointers. +void UnRegisterLivePointers(const void* ptr, size_t size); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_LEAK_CHECK_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h new file mode 100644 index 0000000..0ec0ffd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h @@ -0,0 +1,231 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: stacktrace.h +// ----------------------------------------------------------------------------- +// +// This file contains routines to extract the current stack trace and associated +// stack frames. These functions are thread-safe and async-signal-safe. +// +// Note that stack trace functionality is platform dependent and requires +// additional support from the compiler/build system in most cases. (That is, +// this functionality generally only works on platforms/builds that have been +// specifically configured to support it.) +// +// Note: stack traces in Abseil that do not utilize a symbolizer will result in +// frames consisting of function addresses rather than human-readable function +// names. (See symbolize.h for information on symbolizing these values.) + +#ifndef ABSL_DEBUGGING_STACKTRACE_H_ +#define ABSL_DEBUGGING_STACKTRACE_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// GetStackFrames() +// +// Records program counter values for up to `max_depth` frames, skipping the +// most recent `skip_count` stack frames, stores their corresponding values +// and sizes in `results` and `sizes` buffers, and returns the number of frames +// stored. (Note that the frame generated for the `absl::GetStackFrames()` +// routine itself is also skipped.) +// +// Example: +// +// main() { foo(); } +// foo() { bar(); } +// bar() { +// void* result[10]; +// int sizes[10]; +// int depth = absl::GetStackFrames(result, sizes, 10, 1); +// } +// +// The current stack frame would consist of three function calls: `bar()`, +// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets +// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently +// invoked function call. It will therefore return 2 and fill `result` with +// program counters within the following functions: +// +// result[0] foo() +// result[1] main() +// +// (Note: in practice, a few more entries after `main()` may be added to account +// for startup processes.) +// +// Corresponding stack frame sizes will also be recorded: +// +// sizes[0] 16 +// sizes[1] 16 +// +// (Stack frame sizes of `16` above are just for illustration purposes.) +// +// Stack frame sizes of 0 or less indicate that those frame sizes couldn't +// be identified. +// +// This routine may return fewer stack frame entries than are +// available. Also note that `result` and `sizes` must both be non-null. +extern int GetStackFrames(void** result, int* sizes, int max_depth, + int skip_count); + +// GetStackFramesWithContext() +// +// Records program counter values obtained from a signal handler. Records +// program counter values for up to `max_depth` frames, skipping the most recent +// `skip_count` stack frames, stores their corresponding values and sizes in +// `results` and `sizes` buffers, and returns the number of frames stored. (Note +// that the frame generated for the `absl::GetStackFramesWithContext()` routine +// itself is also skipped.) +// +// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value +// passed to a signal handler registered via the `sa_sigaction` field of a +// `sigaction` struct. (See +// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may +// help a stack unwinder to provide a better stack trace under certain +// conditions. `uc` may safely be null. 
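+//
+// For example (a minimal sketch, assuming a POSIX `sa_sigaction` handler):
+//
+//   void Handler(int signo, siginfo_t* info, void* uc) {
+//     void* pcs[32];
+//     int sizes[32];
+//     int depth = absl::GetStackFramesWithContext(pcs, sizes, 32, 0, uc,
+//                                                 nullptr);
+//     // pcs[0 .. depth-1] hold program counters of the interrupted stack.
+//   }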
+// +// The `min_dropped_frames` output parameter, if non-null, points to the +// location to note any dropped stack frames, if any, due to buffer limitations +// or other reasons. (This value will be set to `0` if no frames were dropped.) +// The number of total stack frames is guaranteed to be >= skip_count + +// max_depth + *min_dropped_frames. +extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth, + int skip_count, const void* uc, + int* min_dropped_frames); + +// GetStackTrace() +// +// Records program counter values for up to `max_depth` frames, skipping the +// most recent `skip_count` stack frames, stores their corresponding values +// in `results`, and returns the number of frames +// stored. Note that this function is similar to `absl::GetStackFrames()` +// except that it returns the stack trace only, and not stack frame sizes. +// +// Example: +// +// main() { foo(); } +// foo() { bar(); } +// bar() { +// void* result[10]; +// int depth = absl::GetStackTrace(result, 10, 1); +// } +// +// This produces: +// +// result[0] foo +// result[1] main +// .... ... +// +// `result` must not be null. +extern int GetStackTrace(void** result, int max_depth, int skip_count); + +// GetStackTraceWithContext() +// +// Records program counter values obtained from a signal handler. Records +// program counter values for up to `max_depth` frames, skipping the most recent +// `skip_count` stack frames, stores their corresponding values in `results`, +// and returns the number of frames stored. (Note that the frame generated for +// the `absl::GetStackFramesWithContext()` routine itself is also skipped.) +// +// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value +// passed to a signal handler registered via the `sa_sigaction` field of a +// `sigaction` struct. (See +// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may +// help a stack unwinder to provide a better stack trace under certain +// conditions. `uc` may safely be null. +// +// The `min_dropped_frames` output parameter, if non-null, points to the +// location to note any dropped stack frames, if any, due to buffer limitations +// or other reasons. (This value will be set to `0` if no frames were dropped.) +// The number of total stack frames is guaranteed to be >= skip_count + +// max_depth + *min_dropped_frames. +extern int GetStackTraceWithContext(void** result, int max_depth, + int skip_count, const void* uc, + int* min_dropped_frames); + +// SetStackUnwinder() +// +// Provides a custom function for unwinding stack frames that will be used in +// place of the default stack unwinder when invoking the static +// GetStack{Frames,Trace}{,WithContext}() functions above. +// +// The arguments passed to the unwinder function will match the +// arguments passed to `absl::GetStackFramesWithContext()` except that sizes +// will be non-null iff the caller is interested in frame sizes. +// +// If unwinder is set to null, we revert to the default stack-tracing behavior. +// +// ***************************************************************************** +// WARNING +// ***************************************************************************** +// +// absl::SetStackUnwinder is not suitable for general purpose use. It is +// provided for custom runtimes. +// Some things to watch out for when calling `absl::SetStackUnwinder()`: +// +// (a) The unwinder may be called from within signal handlers and +// therefore must be async-signal-safe. 
+// +// (b) Even after a custom stack unwinder has been unregistered, other +// threads may still be in the process of using that unwinder. +// Therefore do not clean up any state that may be needed by an old +// unwinder. +// ***************************************************************************** +extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes, + int max_depth, int skip_count, + const void* uc, + int* min_dropped_frames)); + +// DefaultStackUnwinder() +// +// Records program counter values of up to `max_depth` frames, skipping the most +// recent `skip_count` stack frames, and stores their corresponding values in +// `pcs`. (Note that the frame generated for this call itself is also skipped.) +// This function acts as a generic stack-unwinder; prefer usage of the more +// specific `GetStack{Trace,Frames}{,WithContext}()` functions above. +// +// If you have set your own stack unwinder (with the `SetStackUnwinder()` +// function above, you can still get the default stack unwinder by calling +// `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder +// and use the default one instead. +// +// Because this function is generic, only `pcs` is guaranteed to be non-null +// upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all +// be null when called. +// +// The semantics are the same as the corresponding `GetStack*()` function in the +// case where `absl::SetStackUnwinder()` was never called. Equivalents are: +// +// null sizes | non-nullptr sizes +// |==========================================================| +// null uc | GetStackTrace() | GetStackFrames() | +// non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() | +// |==========================================================| +extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth, + int skip_count, const void* uc, + int* min_dropped_frames); + +namespace debugging_internal { +// Returns true for platforms which are expected to have functioning stack trace +// implementations. Intended to be used for tests which want to exclude +// verification of logic known to be broken because stack traces are not +// working. +extern bool StackTraceWorksForTest(); +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_STACKTRACE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h new file mode 100644 index 0000000..43d93a8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h @@ -0,0 +1,99 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: symbolize.h +// ----------------------------------------------------------------------------- +// +// This file configures the Abseil symbolizer for use in converting instruction +// pointer addresses (program counters) into human-readable names (function +// calls, etc.) within Abseil code. +// +// The symbolizer may be invoked from several sources: +// +// * Implicitly, through the installation of an Abseil failure signal handler. +// (See failure_signal_handler.h for more information.) +// * By calling `Symbolize()` directly on a program counter you obtain through +// `absl::GetStackTrace()` or `absl::GetStackFrames()`. (See stacktrace.h +// for more information. +// * By calling `Symbolize()` directly on a program counter you obtain through +// other means (which would be platform-dependent). +// +// In all of the above cases, the symbolizer must first be initialized before +// any program counter values can be symbolized. If you are installing a failure +// signal handler, initialize the symbolizer before you do so. +// +// Example: +// +// int main(int argc, char** argv) { +// // Initialize the Symbolizer before installing the failure signal handler +// absl::InitializeSymbolizer(argv[0]); +// +// // Now you may install the failure signal handler +// absl::FailureSignalHandlerOptions options; +// absl::InstallFailureSignalHandler(options); +// +// // Start running your main program +// ... +// return 0; +// } +// +#ifndef ABSL_DEBUGGING_SYMBOLIZE_H_ +#define ABSL_DEBUGGING_SYMBOLIZE_H_ + +#include "absl/debugging/internal/symbolize.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// InitializeSymbolizer() +// +// Initializes the program counter symbolizer, given the path of the program +// (typically obtained through `main()`s `argv[0]`). The Abseil symbolizer +// allows you to read program counters (instruction pointer values) using their +// human-readable names within output such as stack traces. +// +// Example: +// +// int main(int argc, char *argv[]) { +// absl::InitializeSymbolizer(argv[0]); +// // Now you can use the symbolizer +// } +void InitializeSymbolizer(const char* argv0); +// +// Symbolize() +// +// Symbolizes a program counter (instruction pointer value) `pc` and, on +// success, writes the name to `out`. The symbol name is demangled, if possible. +// Note that the symbolized name may be truncated and will be NUL-terminated. +// Demangling is supported for symbols generated by GCC 3.x or newer). Returns +// `false` on failure. +// +// Example: +// +// // Print a program counter and its symbol name. +// static void DumpPCAndSymbol(void *pc) { +// char tmp[1024]; +// const char *symbol = "(unknown)"; +// if (absl::Symbolize(pc, tmp, sizeof(tmp))) { +// symbol = tmp; +// } +// absl::PrintF("%p %s\n", pc, symbol); +// } +bool Symbolize(const void *pc, char *out, int out_size); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_DEBUGGING_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc new file mode 100644 index 0000000..443ce9e --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc @@ -0,0 +1,101 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cxxabi.h>
+#include <execinfo.h>
+
+#include <algorithm>
+#include <cstring>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeSymbolizer(const char*) {}
+
+namespace debugging_internal {
+namespace {
+
+static std::string GetSymbolString(absl::string_view backtrace_line) {
+  // Example Backtrace lines:
+  // 0 libimaging_shared.dylib 0x018c152a
+  // _ZNSt11_Deque_baseIN3nik7mediadb4PageESaIS2_EE17_M_initialize_mapEm + 3478
+  //
+  // or
+  // 0 libimaging_shared.dylib 0x0000000001895c39
+  // _ZN3nik4util19register_shared_ptrINS_3gpu7TextureEEEvPKvS5_ + 39
+  //
+  // or
+  // 0 mysterious_app 0x0124000120120009 main + 17
+  auto address_pos = backtrace_line.find(" 0x");
+  if (address_pos == absl::string_view::npos) return std::string();
+  absl::string_view symbol_view = backtrace_line.substr(address_pos + 1);
+
+  auto space_pos = symbol_view.find(" ");
+  if (space_pos == absl::string_view::npos) return std::string();
+  symbol_view = symbol_view.substr(space_pos + 1); // to mangled symbol
+
+  auto plus_pos = symbol_view.find(" + ");
+  if (plus_pos == absl::string_view::npos) return std::string();
+  symbol_view = symbol_view.substr(0, plus_pos); // strip remaining
+
+  return std::string(symbol_view);
+}
+
+} // namespace
+} // namespace debugging_internal
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+  if (out_size <= 0 || pc == nullptr) {
+    out = nullptr;
+    return false;
+  }
+
+  // This allocates a char* array.
+  char** frame_strings = backtrace_symbols(const_cast<void**>(&pc), 1);
+
+  if (frame_strings == nullptr) return false;
+
+  std::string symbol = debugging_internal::GetSymbolString(frame_strings[0]);
+  free(frame_strings);
+
+  char tmp_buf[1024];
+  if (debugging_internal::Demangle(symbol.c_str(), tmp_buf, sizeof(tmp_buf))) {
+    size_t len = strlen(tmp_buf);
+    if (len + 1 <= static_cast<size_t>(out_size)) { // +1 for '\0'
+      assert(len < sizeof(tmp_buf));
+      memmove(out, tmp_buf, len + 1);
+    }
+  } else {
+    strncpy(out, symbol.c_str(), out_size);
+  }
+
+  if (out[out_size - 1] != '\0') {
+    // strncpy() does not '\0' terminate when it truncates.
+    static constexpr char kEllipsis[] = "...";
+    int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+    out[out_size - 1] = '\0';
+  }
+
+  return true;
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc
new file mode 100644
index 0000000..9bfdd91
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc
@@ -0,0 +1,1613 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This library provides Symbolize() function that symbolizes program
+// counters to their corresponding symbol names on linux platforms.
+// This library has a minimal implementation of an ELF symbol table
+// reader (i.e. it doesn't depend on libelf, etc.).
+//
+// The algorithm used in Symbolize() is as follows.
+//
+//   1. Go through a list of maps in /proc/self/maps and find the map
+//   containing the program counter.
+//
+//   2. Open the mapped file and find a regular symbol table inside.
+//   Iterate over symbols in the symbol table and look for the symbol
+//   containing the program counter. If such a symbol is found,
+//   obtain the symbol name, and demangle the symbol if possible.
+//   If the symbol isn't found in the regular symbol table (binary is
+//   stripped), try the same thing with a dynamic symbol table.
+//
+// Note that Symbolize() is originally implemented to be used in
+// signal handlers, hence it doesn't use malloc() and other unsafe
+// operations. It should be both thread-safe and async-signal-safe.
+//
+// Implementation note:
+//
+// We don't use heaps but only use stacks. We want to reduce the
+// stack consumption so that the symbolizer can run on small stacks.
+//
+// Here are some numbers collected with GCC 4.1.0 on x86:
+// - sizeof(Elf32_Sym)  = 16
+// - sizeof(Elf32_Shdr) = 40
+// - sizeof(Elf64_Sym)  = 24
+// - sizeof(Elf64_Shdr) = 64
+//
+// This implementation is intended to be async-signal-safe but uses some
+// functions which are not guaranteed to be so, such as memchr() and
+// memmove(). We assume they are async-signal-safe.
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <fcntl.h>
+#include <link.h> // For ElfW() macro.
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cerrno>
+#include <cinttypes>
+#include <climits>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "absl/base/casts.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_alloc.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/port.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/debugging/internal/vdso_support.h"
+#include "absl/strings/string_view.h"
+
+#if defined(__FreeBSD__) && !defined(ElfW)
+#define ElfW(x) __ElfN(x)
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Value of argv[0]. Used by MaybeInitializeObjFile().
+static char *argv0_value = nullptr;
+
+void InitializeSymbolizer(const char *argv0) {
+#ifdef ABSL_HAVE_VDSO_SUPPORT
+  // We need to make sure VDSOSupport::Init() is called before any setuid or
+  // chroot calls, so InitializeSymbolizer() should be called very early in the
+  // life of a program.
+  absl::debugging_internal::VDSOSupport::Init();
+#endif
+  if (argv0_value != nullptr) {
+    free(argv0_value);
+    argv0_value = nullptr;
+  }
+  if (argv0 != nullptr && argv0[0] != '\0') {
+    argv0_value = strdup(argv0);
+  }
+}
+
+namespace debugging_internal {
+namespace {
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn)  \
+  do {               \
+  } while ((fn) < 0 && errno == EINTR)
+
+// On Linux, ELF_ST_* are defined in <elf.h>. 
To make this portable
+// we define our own ELF_ST_BIND and ELF_ST_TYPE if not available.
+#ifndef ELF_ST_BIND
+#define ELF_ST_BIND(info) (((unsigned char)(info)) >> 4)
+#endif
+
+#ifndef ELF_ST_TYPE
+#define ELF_ST_TYPE(info) (((unsigned char)(info)) & 0xF)
+#endif
+
+// Some platforms use a special .opd section to store function pointers.
+const char kOpdSectionName[] = ".opd";
+
+#if (defined(__powerpc__) && !(_CALL_ELF > 1)) || defined(__ia64)
+// Use opd section for function descriptors on these platforms, the function
+// address is the first word of the descriptor.
+enum { kPlatformUsesOPDSections = 1 };
+#else // not PPC or IA64
+enum { kPlatformUsesOPDSections = 0 };
+#endif
+
+// This works for PowerPC & IA64 only. A function descriptor consists of two
+// pointers and the first one is the function's entry.
+const size_t kFunctionDescriptorSize = sizeof(void *) * 2;
+
+const int kMaxDecorators = 10; // Seems like a reasonable upper limit.
+
+struct InstalledSymbolDecorator {
+  SymbolDecorator fn;
+  void *arg;
+  int ticket;
+};
+
+int g_num_decorators;
+InstalledSymbolDecorator g_decorators[kMaxDecorators];
+
+struct FileMappingHint {
+  const void *start;
+  const void *end;
+  uint64_t offset;
+  const char *filename;
+};
+
+// Protects g_decorators.
+// We are using SpinLock and not a Mutex here, because we may be called
+// from inside Mutex::Lock itself, and it prohibits recursive calls.
+// This happens in e.g. base/stacktrace_syscall_unittest.
+// Moreover, we are using only TryLock(): if the decorator list
+// is being modified (is busy), we skip all decorators, and possibly
+// lose some info. Sorry, that's the best we could do.
+ABSL_CONST_INIT absl::base_internal::SpinLock g_decorators_mu(
+    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
+
+const int kMaxFileMappingHints = 8;
+int g_num_file_mapping_hints;
+FileMappingHint g_file_mapping_hints[kMaxFileMappingHints];
+// Protects g_file_mapping_hints.
+ABSL_CONST_INIT absl::base_internal::SpinLock g_file_mapping_mu(
+    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
+
+// Async-signal-safe function to zero a buffer.
+// memset() is not guaranteed to be async-signal-safe.
+static void SafeMemZero(void* p, size_t size) {
+  unsigned char *c = static_cast<unsigned char *>(p);
+  while (size--) {
+    *c++ = 0;
+  }
+}
+
+struct ObjFile {
+  ObjFile()
+      : filename(nullptr),
+        start_addr(nullptr),
+        end_addr(nullptr),
+        offset(0),
+        fd(-1),
+        elf_type(-1) {
+    SafeMemZero(&elf_header, sizeof(elf_header));
+    SafeMemZero(&phdr[0], sizeof(phdr));
+  }
+
+  char *filename;
+  const void *start_addr;
+  const void *end_addr;
+  uint64_t offset;
+
+  // The following fields are initialized on the first access to the
+  // object file.
+  int fd;
+  int elf_type;
+  ElfW(Ehdr) elf_header;
+
+  // PT_LOAD program header describing executable code.
+  // Normally we expect just one, but SWIFT binaries have two.
+  std::array<ElfW(Phdr), 2> phdr;
+};
+
+// Build 4-way associative cache for symbols. Within each cache line, symbols
+// are replaced in LRU order.
+enum {
+  ASSOCIATIVITY = 4,
+};
+struct SymbolCacheLine {
+  const void *pc[ASSOCIATIVITY];
+  char *name[ASSOCIATIVITY];
+
+  // age[i] is incremented when a line is accessed. It is reset to zero when
+  // the i'th entry is read.
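+  //
+  // For example (a sketch of the intended aging discipline; the real helpers
+  // appear later in this file):
+  //
+  //   for (uint32_t &age : line->age) ++age;  // everything ages on access
+  //   line->age[i] = 0;  // the entry just read becomes the youngest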
+ uint32_t age[ASSOCIATIVITY]; +}; + +// --------------------------------------------------------------- +// An async-signal-safe arena for LowLevelAlloc +static std::atomic g_sig_safe_arena; + +static base_internal::LowLevelAlloc::Arena *SigSafeArena() { + return g_sig_safe_arena.load(std::memory_order_acquire); +} + +static void InitSigSafeArena() { + if (SigSafeArena() == nullptr) { + base_internal::LowLevelAlloc::Arena *new_arena = + base_internal::LowLevelAlloc::NewArena( + base_internal::LowLevelAlloc::kAsyncSignalSafe); + base_internal::LowLevelAlloc::Arena *old_value = nullptr; + if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena, + std::memory_order_release, + std::memory_order_relaxed)) { + // We lost a race to allocate an arena; deallocate. + base_internal::LowLevelAlloc::DeleteArena(new_arena); + } + } +} + +// --------------------------------------------------------------- +// An AddrMap is a vector of ObjFile, using SigSafeArena() for allocation. + +class AddrMap { + public: + AddrMap() : size_(0), allocated_(0), obj_(nullptr) {} + ~AddrMap() { base_internal::LowLevelAlloc::Free(obj_); } + int Size() const { return size_; } + ObjFile *At(int i) { return &obj_[i]; } + ObjFile *Add(); + void Clear(); + + private: + int size_; // count of valid elements (<= allocated_) + int allocated_; // count of allocated elements + ObjFile *obj_; // array of allocated_ elements + AddrMap(const AddrMap &) = delete; + AddrMap &operator=(const AddrMap &) = delete; +}; + +void AddrMap::Clear() { + for (int i = 0; i != size_; i++) { + At(i)->~ObjFile(); + } + size_ = 0; +} + +ObjFile *AddrMap::Add() { + if (size_ == allocated_) { + int new_allocated = allocated_ * 2 + 50; + ObjFile *new_obj_ = + static_cast(base_internal::LowLevelAlloc::AllocWithArena( + new_allocated * sizeof(*new_obj_), SigSafeArena())); + if (obj_) { + memcpy(new_obj_, obj_, allocated_ * sizeof(*new_obj_)); + base_internal::LowLevelAlloc::Free(obj_); + } + obj_ = new_obj_; + allocated_ = new_allocated; + } + return new (&obj_[size_++]) ObjFile; +} + +// --------------------------------------------------------------- + +enum FindSymbolResult { SYMBOL_NOT_FOUND = 1, SYMBOL_TRUNCATED, SYMBOL_FOUND }; + +class Symbolizer { + public: + Symbolizer(); + ~Symbolizer(); + const char *GetSymbol(const void *const pc); + + private: + char *CopyString(const char *s) { + int len = strlen(s); + char *dst = static_cast( + base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + ABSL_RAW_CHECK(dst != nullptr, "out of memory"); + memcpy(dst, s, len + 1); + return dst; + } + ObjFile *FindObjFile(const void *const start, + size_t size) ABSL_ATTRIBUTE_NOINLINE; + static bool RegisterObjFile(const char *filename, + const void *const start_addr, + const void *const end_addr, uint64_t offset, + void *arg); + SymbolCacheLine *GetCacheLine(const void *const pc); + const char *FindSymbolInCache(const void *const pc); + const char *InsertSymbolInCache(const void *const pc, const char *name); + void AgeSymbols(SymbolCacheLine *line); + void ClearAddrMap(); + FindSymbolResult GetSymbolFromObjectFile(const ObjFile &obj, + const void *const pc, + const ptrdiff_t relocation, + char *out, int out_size, + char *tmp_buf, int tmp_buf_size); + const char *GetUncachedSymbol(const void *pc); + + enum { + SYMBOL_BUF_SIZE = 3072, + TMP_BUF_SIZE = 1024, + SYMBOL_CACHE_LINES = 128, + }; + + AddrMap addr_map_; + + bool ok_; + bool addr_map_read_; + + char symbol_buf_[SYMBOL_BUF_SIZE]; + + // tmp_buf_ will be used to store arrays of 
ElfW(Shdr) and ElfW(Sym) + // so we ensure that tmp_buf_ is properly aligned to store either. + alignas(16) char tmp_buf_[TMP_BUF_SIZE]; + static_assert(alignof(ElfW(Shdr)) <= 16, + "alignment of tmp buf too small for Shdr"); + static_assert(alignof(ElfW(Sym)) <= 16, + "alignment of tmp buf too small for Sym"); + + SymbolCacheLine symbol_cache_[SYMBOL_CACHE_LINES]; +}; + +static std::atomic g_cached_symbolizer; + +} // namespace + +static int SymbolizerSize() { +#if defined(__wasm__) || defined(__asmjs__) + int pagesize = getpagesize(); +#else + int pagesize = sysconf(_SC_PAGESIZE); +#endif + return ((sizeof(Symbolizer) - 1) / pagesize + 1) * pagesize; +} + +// Return (and set null) g_cached_symbolized_state if it is not null. +// Otherwise return a new symbolizer. +static Symbolizer *AllocateSymbolizer() { + InitSigSafeArena(); + Symbolizer *symbolizer = + g_cached_symbolizer.exchange(nullptr, std::memory_order_acquire); + if (symbolizer != nullptr) { + return symbolizer; + } + return new (base_internal::LowLevelAlloc::AllocWithArena( + SymbolizerSize(), SigSafeArena())) Symbolizer(); +} + +// Set g_cached_symbolize_state to s if it is null, otherwise +// delete s. +static void FreeSymbolizer(Symbolizer *s) { + Symbolizer *old_cached_symbolizer = nullptr; + if (!g_cached_symbolizer.compare_exchange_strong(old_cached_symbolizer, s, + std::memory_order_release, + std::memory_order_relaxed)) { + s->~Symbolizer(); + base_internal::LowLevelAlloc::Free(s); + } +} + +Symbolizer::Symbolizer() : ok_(true), addr_map_read_(false) { + for (SymbolCacheLine &symbol_cache_line : symbol_cache_) { + for (size_t j = 0; j < ABSL_ARRAYSIZE(symbol_cache_line.name); ++j) { + symbol_cache_line.pc[j] = nullptr; + symbol_cache_line.name[j] = nullptr; + symbol_cache_line.age[j] = 0; + } + } +} + +Symbolizer::~Symbolizer() { + for (SymbolCacheLine &symbol_cache_line : symbol_cache_) { + for (char *s : symbol_cache_line.name) { + base_internal::LowLevelAlloc::Free(s); + } + } + ClearAddrMap(); +} + +// We don't use assert() since it's not guaranteed to be +// async-signal-safe. Instead we define a minimal assertion +// macro. So far, we don't need pretty printing for __FILE__, etc. +#define SAFE_ASSERT(expr) ((expr) ? static_cast(0) : abort()) + +// Read up to "count" bytes from file descriptor "fd" into the buffer +// starting at "buf" while handling short reads and EINTR. On +// success, return the number of bytes read. Otherwise, return -1. +static ssize_t ReadPersistent(int fd, void *buf, size_t count) { + SAFE_ASSERT(fd >= 0); + SAFE_ASSERT(count <= SSIZE_MAX); + char *buf0 = reinterpret_cast(buf); + size_t num_bytes = 0; + while (num_bytes < count) { + ssize_t len; + NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes)); + if (len < 0) { // There was an error other than EINTR. + ABSL_RAW_LOG(WARNING, "read failed: errno=%d", errno); + return -1; + } + if (len == 0) { // Reached EOF. + break; + } + num_bytes += len; + } + SAFE_ASSERT(num_bytes <= count); + return static_cast(num_bytes); +} + +// Read up to "count" bytes from "offset" in the file pointed by file +// descriptor "fd" into the buffer starting at "buf". On success, +// return the number of bytes read. Otherwise, return -1. 
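+// For example (a minimal sketch; equivalently, see ReadFromOffsetExact()
+// below): reading an ELF header from the start of an already-opened file
+// descriptor:
+//
+//   ElfW(Ehdr) ehdr;
+//   ssize_t n = ReadFromOffset(fd, &ehdr, sizeof(ehdr), 0);
+//   bool ok = (n == static_cast<ssize_t>(sizeof(ehdr)));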
+static ssize_t ReadFromOffset(const int fd, void *buf, const size_t count, + const off_t offset) { + off_t off = lseek(fd, offset, SEEK_SET); + if (off == (off_t)-1) { + ABSL_RAW_LOG(WARNING, "lseek(%d, %ju, SEEK_SET) failed: errno=%d", fd, + static_cast(offset), errno); + return -1; + } + return ReadPersistent(fd, buf, count); +} + +// Try reading exactly "count" bytes from "offset" bytes in a file +// pointed by "fd" into the buffer starting at "buf" while handling +// short reads and EINTR. On success, return true. Otherwise, return +// false. +static bool ReadFromOffsetExact(const int fd, void *buf, const size_t count, + const off_t offset) { + ssize_t len = ReadFromOffset(fd, buf, count, offset); + return len >= 0 && static_cast(len) == count; +} + +// Returns elf_header.e_type if the file pointed by fd is an ELF binary. +static int FileGetElfType(const int fd) { + ElfW(Ehdr) elf_header; + if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) { + return -1; + } + if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) { + return -1; + } + return elf_header.e_type; +} + +// Read the section headers in the given ELF binary, and if a section +// of the specified type is found, set the output to this section header +// and return true. Otherwise, return false. +// To keep stack consumption low, we would like this function to not get +// inlined. +static ABSL_ATTRIBUTE_NOINLINE bool GetSectionHeaderByType( + const int fd, ElfW(Half) sh_num, const off_t sh_offset, ElfW(Word) type, + ElfW(Shdr) * out, char *tmp_buf, int tmp_buf_size) { + ElfW(Shdr) *buf = reinterpret_cast(tmp_buf); + const int buf_entries = tmp_buf_size / sizeof(buf[0]); + const int buf_bytes = buf_entries * sizeof(buf[0]); + + for (int i = 0; i < sh_num;) { + const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]); + const ssize_t num_bytes_to_read = + (buf_bytes > num_bytes_left) ? num_bytes_left : buf_bytes; + const off_t offset = sh_offset + i * sizeof(buf[0]); + const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read, offset); + if (len % sizeof(buf[0]) != 0) { + ABSL_RAW_LOG( + WARNING, + "Reading %zd bytes from offset %ju returned %zd which is not a " + "multiple of %zu.", + num_bytes_to_read, static_cast(offset), len, + sizeof(buf[0])); + return false; + } + const ssize_t num_headers_in_buf = len / sizeof(buf[0]); + SAFE_ASSERT(num_headers_in_buf <= buf_entries); + for (int j = 0; j < num_headers_in_buf; ++j) { + if (buf[j].sh_type == type) { + *out = buf[j]; + return true; + } + } + i += num_headers_in_buf; + } + return false; +} + +// There is no particular reason to limit section name to 63 characters, +// but there has (as yet) been no need for anything longer either. 
+const int kMaxSectionNameLen = 64;
+
+bool ForEachSection(int fd,
+                    const std::function<bool(absl::string_view name,
+                                             const ElfW(Shdr) &)> &callback) {
+  ElfW(Ehdr) elf_header;
+  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+    return false;
+  }
+
+  ElfW(Shdr) shstrtab;
+  off_t shstrtab_offset =
+      (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+  if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+    return false;
+  }
+
+  for (int i = 0; i < elf_header.e_shnum; ++i) {
+    ElfW(Shdr) out;
+    off_t section_header_offset =
+        (elf_header.e_shoff + elf_header.e_shentsize * i);
+    if (!ReadFromOffsetExact(fd, &out, sizeof(out), section_header_offset)) {
+      return false;
+    }
+    off_t name_offset = shstrtab.sh_offset + out.sh_name;
+    char header_name[kMaxSectionNameLen];
+    ssize_t n_read =
+        ReadFromOffset(fd, &header_name, kMaxSectionNameLen, name_offset);
+    if (n_read == -1) {
+      return false;
+    } else if (n_read > kMaxSectionNameLen) {
+      // Long read?
+      return false;
+    }
+
+    absl::string_view name(header_name, strnlen(header_name, n_read));
+    if (!callback(name, out)) {
+      break;
+    }
+  }
+  return true;
+}
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
+                            ElfW(Shdr) * out) {
+  char header_name[kMaxSectionNameLen];
+  if (sizeof(header_name) < name_len) {
+    ABSL_RAW_LOG(WARNING,
+                 "Section name '%s' is too long (%zu); "
+                 "section will not be found (even if present).",
+                 name, name_len);
+    // No point in even trying.
+    return false;
+  }
+
+  ElfW(Ehdr) elf_header;
+  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+    return false;
+  }
+
+  ElfW(Shdr) shstrtab;
+  off_t shstrtab_offset =
+      (elf_header.e_shoff + elf_header.e_shentsize * elf_header.e_shstrndx);
+  if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+    return false;
+  }
+
+  for (int i = 0; i < elf_header.e_shnum; ++i) {
+    off_t section_header_offset =
+        (elf_header.e_shoff + elf_header.e_shentsize * i);
+    if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+      return false;
+    }
+    off_t name_offset = shstrtab.sh_offset + out->sh_name;
+    ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+    if (n_read < 0) {
+      return false;
+    } else if (static_cast<size_t>(n_read) != name_len) {
+      // Short read -- name could be at end of file.
+      continue;
+    }
+    if (memcmp(header_name, name, name_len) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Compare two symbols that map to the same address.
+// Return true if we should pick symbol1.
+static bool ShouldPickFirstSymbol(const ElfW(Sym) & symbol1,
+                                  const ElfW(Sym) & symbol2) {
+  // If one of the symbols is weak and the other is not, pick the one
+  // that is not a weak symbol.
+  char bind1 = ELF_ST_BIND(symbol1.st_info);
+  char bind2 = ELF_ST_BIND(symbol2.st_info);
+  if (bind1 == STB_WEAK && bind2 != STB_WEAK) return false;
+  if (bind2 == STB_WEAK && bind1 != STB_WEAK) return true;
+
+  // If one of the symbols has zero size and the other does not, pick the
+  // one that has non-zero size.
+  if (symbol1.st_size != 0 && symbol2.st_size == 0) {
+    return true;
+  }
+  if (symbol1.st_size == 0 && symbol2.st_size != 0) {
+    return false;
+  }
+
+  // If one of the symbols has no type and the other does, pick the
+  // one that has a type.
+  char type1 = ELF_ST_TYPE(symbol1.st_info);
+  char type2 = ELF_ST_TYPE(symbol2.st_info);
+  if (type1 != STT_NOTYPE && type2 == STT_NOTYPE) {
+    return true;
+  }
+  if (type1 == STT_NOTYPE && type2 != STT_NOTYPE) {
+    return false;
+  }
+
+  // Pick the first one, if we still cannot decide.
+  return true;
+}
+
+// Return true if an address is inside a section.
+static bool InSection(const void *address, const ElfW(Shdr) * section) {
+  const char *start = reinterpret_cast<const char *>(section->sh_addr);
+  size_t size = static_cast<size_t>(section->sh_size);
+  return start <= address && address < (start + size);
+}
+
+static const char *ComputeOffset(const char *base, ptrdiff_t offset) {
+  // Note: cast to uintptr_t to avoid undefined behavior when base evaluates to
+  // zero and offset is non-zero.
+  return reinterpret_cast<const char *>(
+      reinterpret_cast<uintptr_t>(base) + offset);
+}
+
+// Read a symbol table and look for the symbol containing the
+// pc. Iterate over symbols in a symbol table and look for the symbol
+// containing "pc". If the symbol is found, and its name fits in
+// out_size, the name is written into out and SYMBOL_FOUND is returned.
+// If the name does not fit, a truncated name is written into out,
+// and SYMBOL_TRUNCATED is returned. Out is NUL-terminated.
+// If the symbol is not found, SYMBOL_NOT_FOUND is returned.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static ABSL_ATTRIBUTE_NOINLINE FindSymbolResult FindSymbol(
+    const void *const pc, const int fd, char *out, int out_size,
+    ptrdiff_t relocation, const ElfW(Shdr) * strtab, const ElfW(Shdr) * symtab,
+    const ElfW(Shdr) * opd, char *tmp_buf, int tmp_buf_size) {
+  if (symtab == nullptr) {
+    return SYMBOL_NOT_FOUND;
+  }
+
+  // Read multiple symbols at once to save read() calls.
+  ElfW(Sym) *buf = reinterpret_cast<ElfW(Sym) *>(tmp_buf);
+  const int buf_entries = tmp_buf_size / sizeof(buf[0]);
+
+  const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+  // On platforms using an .opd section (PowerPC & IA64), a function symbol
+  // has the address of a function descriptor, which contains the real
+  // starting address. However, we do not always want to use the real
+  // starting address because we sometimes want to symbolize a function
+  // pointer into the .opd section, e.g. FindSymbol(&foo,...).
+  const bool pc_in_opd =
+      kPlatformUsesOPDSections && opd != nullptr && InSection(pc, opd);
+  const bool deref_function_descriptor_pointer =
+      kPlatformUsesOPDSections && opd != nullptr && !pc_in_opd;
+
+  ElfW(Sym) best_match;
+  SafeMemZero(&best_match, sizeof(best_match));
+  bool found_match = false;
+  for (int i = 0; i < num_symbols;) {
+    off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+    const int num_remaining_symbols = num_symbols - i;
+    const int entries_in_chunk = std::min(num_remaining_symbols, buf_entries);
+    const int bytes_in_chunk = entries_in_chunk * sizeof(buf[0]);
+    const ssize_t len = ReadFromOffset(fd, buf, bytes_in_chunk, offset);
+    SAFE_ASSERT(len % sizeof(buf[0]) == 0);
+    const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+    SAFE_ASSERT(num_symbols_in_buf <= entries_in_chunk);
+    for (int j = 0; j < num_symbols_in_buf; ++j) {
+      const ElfW(Sym) &symbol = buf[j];
+
+      // For a DSO, a symbol address is relocated by the loading address.
+      // We keep the original address for opd redirection below.
+ const char *const original_start_address = + reinterpret_cast(symbol.st_value); + const char *start_address = + ComputeOffset(original_start_address, relocation); + +#ifdef __arm__ + // ARM functions are always aligned to multiples of two bytes; the + // lowest-order bit in start_address is ignored by the CPU and indicates + // whether the function contains ARM (0) or Thumb (1) code. We don't care + // about what encoding is being used; we just want the real start address + // of the function. + start_address = reinterpret_cast( + reinterpret_cast(start_address) & ~1); +#endif + + if (deref_function_descriptor_pointer && + InSection(original_start_address, opd)) { + // The opd section is mapped into memory. Just dereference + // start_address to get the first double word, which points to the + // function entry. + start_address = *reinterpret_cast(start_address); + } + + // If pc is inside the .opd section, it points to a function descriptor. + const size_t size = pc_in_opd ? kFunctionDescriptorSize : symbol.st_size; + const void *const end_address = ComputeOffset(start_address, size); + if (symbol.st_value != 0 && // Skip null value symbols. + symbol.st_shndx != 0 && // Skip undefined symbols. +#ifdef STT_TLS + ELF_ST_TYPE(symbol.st_info) != STT_TLS && // Skip thread-local data. +#endif // STT_TLS + ((start_address <= pc && pc < end_address) || + (start_address == pc && pc == end_address))) { + if (!found_match || ShouldPickFirstSymbol(symbol, best_match)) { + found_match = true; + best_match = symbol; + } + } + } + i += num_symbols_in_buf; + } + + if (found_match) { + const size_t off = strtab->sh_offset + best_match.st_name; + const ssize_t n_read = ReadFromOffset(fd, out, out_size, off); + if (n_read <= 0) { + // This should never happen. + ABSL_RAW_LOG(WARNING, + "Unable to read from fd %d at offset %zu: n_read = %zd", fd, + off, n_read); + return SYMBOL_NOT_FOUND; + } + ABSL_RAW_CHECK(n_read <= out_size, "ReadFromOffset read too much data."); + + // strtab->sh_offset points into .strtab-like section that contains + // NUL-terminated strings: '\0foo\0barbaz\0...". + // + // sh_offset+st_name points to the start of symbol name, but we don't know + // how long the symbol is, so we try to read as much as we have space for, + // and usually over-read (i.e. there is a NUL somewhere before n_read). + if (memchr(out, '\0', n_read) == nullptr) { + // Either out_size was too small (n_read == out_size and no NUL), or + // we tried to read past the EOF (n_read < out_size) and .strtab is + // corrupt (missing terminating NUL; should never happen for valid ELF). + out[n_read - 1] = '\0'; + return SYMBOL_TRUNCATED; + } + return SYMBOL_FOUND; + } + + return SYMBOL_NOT_FOUND; +} + +// Get the symbol name of "pc" from the file pointed by "fd". Process +// both regular and dynamic symbol tables if necessary. +// See FindSymbol() comment for description of return value. +FindSymbolResult Symbolizer::GetSymbolFromObjectFile( + const ObjFile &obj, const void *const pc, const ptrdiff_t relocation, + char *out, int out_size, char *tmp_buf, int tmp_buf_size) { + ElfW(Shdr) symtab; + ElfW(Shdr) strtab; + ElfW(Shdr) opd; + ElfW(Shdr) *opd_ptr = nullptr; + + // On platforms using an .opd sections for function descriptor, read + // the section header. The .opd section is in data segment and should be + // loaded but we check that it is mapped just to be extra careful. 
+ if (kPlatformUsesOPDSections) { + if (GetSectionHeaderByName(obj.fd, kOpdSectionName, + sizeof(kOpdSectionName) - 1, &opd) && + FindObjFile(reinterpret_cast(opd.sh_addr) + relocation, + opd.sh_size) != nullptr) { + opd_ptr = &opd; + } else { + return SYMBOL_NOT_FOUND; + } + } + + // Consult a regular symbol table, then fall back to the dynamic symbol table. + for (const auto symbol_table_type : {SHT_SYMTAB, SHT_DYNSYM}) { + if (!GetSectionHeaderByType(obj.fd, obj.elf_header.e_shnum, + obj.elf_header.e_shoff, symbol_table_type, + &symtab, tmp_buf, tmp_buf_size)) { + continue; + } + if (!ReadFromOffsetExact( + obj.fd, &strtab, sizeof(strtab), + obj.elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) { + continue; + } + const FindSymbolResult rc = + FindSymbol(pc, obj.fd, out, out_size, relocation, &strtab, &symtab, + opd_ptr, tmp_buf, tmp_buf_size); + if (rc != SYMBOL_NOT_FOUND) { + return rc; + } + } + + return SYMBOL_NOT_FOUND; +} + +namespace { +// Thin wrapper around a file descriptor so that the file descriptor +// gets closed for sure. +class FileDescriptor { + public: + explicit FileDescriptor(int fd) : fd_(fd) {} + FileDescriptor(const FileDescriptor &) = delete; + FileDescriptor &operator=(const FileDescriptor &) = delete; + + ~FileDescriptor() { + if (fd_ >= 0) { + NO_INTR(close(fd_)); + } + } + + int get() const { return fd_; } + + private: + const int fd_; +}; + +// Helper class for reading lines from file. +// +// Note: we don't use ProcMapsIterator since the object is big (it has +// a 5k array member) and uses async-unsafe functions such as sscanf() +// and snprintf(). +class LineReader { + public: + explicit LineReader(int fd, char *buf, int buf_len) + : fd_(fd), + buf_len_(buf_len), + buf_(buf), + bol_(buf), + eol_(buf), + eod_(buf) {} + + LineReader(const LineReader &) = delete; + LineReader &operator=(const LineReader &) = delete; + + // Read '\n'-terminated line from file. On success, modify "bol" + // and "eol", then return true. Otherwise, return false. + // + // Note: if the last line doesn't end with '\n', the line will be + // dropped. It's an intentional behavior to make the code simple. + bool ReadLine(const char **bol, const char **eol) { + if (BufferIsEmpty()) { // First time. + const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_); + if (num_bytes <= 0) { // EOF or error. + return false; + } + eod_ = buf_ + num_bytes; + bol_ = buf_; + } else { + bol_ = eol_ + 1; // Advance to the next line in the buffer. + SAFE_ASSERT(bol_ <= eod_); // "bol_" can point to "eod_". + if (!HasCompleteLine()) { + const int incomplete_line_length = eod_ - bol_; + // Move the trailing incomplete line to the beginning. + memmove(buf_, bol_, incomplete_line_length); + // Read text from file and append it. + char *const append_pos = buf_ + incomplete_line_length; + const int capacity_left = buf_len_ - incomplete_line_length; + const ssize_t num_bytes = + ReadPersistent(fd_, append_pos, capacity_left); + if (num_bytes <= 0) { // EOF or error. + return false; + } + eod_ = append_pos + num_bytes; + bol_ = buf_; + } + } + eol_ = FindLineFeed(); + if (eol_ == nullptr) { // '\n' not found. Malformed line. + return false; + } + *eol_ = '\0'; // Replace '\n' with '\0'. 
+ + *bol = bol_; + *eol = eol_; + return true; + } + + private: + char *FindLineFeed() const { + return reinterpret_cast(memchr(bol_, '\n', eod_ - bol_)); + } + + bool BufferIsEmpty() const { return buf_ == eod_; } + + bool HasCompleteLine() const { + return !BufferIsEmpty() && FindLineFeed() != nullptr; + } + + const int fd_; + const int buf_len_; + char *const buf_; + char *bol_; + char *eol_; + const char *eod_; // End of data in "buf_". +}; +} // namespace + +// Place the hex number read from "start" into "*hex". The pointer to +// the first non-hex character or "end" is returned. +static const char *GetHex(const char *start, const char *end, + uint64_t *const value) { + uint64_t hex = 0; + const char *p; + for (p = start; p < end; ++p) { + int ch = *p; + if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')) { + hex = (hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9); + } else { // Encountered the first non-hex character. + break; + } + } + SAFE_ASSERT(p <= end); + *value = hex; + return p; +} + +static const char *GetHex(const char *start, const char *end, + const void **const addr) { + uint64_t hex = 0; + const char *p = GetHex(start, end, &hex); + *addr = reinterpret_cast(hex); + return p; +} + +// Normally we are only interested in "r?x" maps. +// On the PowerPC, function pointers point to descriptors in the .opd +// section. The descriptors themselves are not executable code, so +// we need to relax the check below to "r??". +static bool ShouldUseMapping(const char *const flags) { + return flags[0] == 'r' && (kPlatformUsesOPDSections || flags[2] == 'x'); +} + +// Read /proc/self/maps and run "callback" for each mmapped file found. If +// "callback" returns false, stop scanning and return true. Else continue +// scanning /proc/self/maps. Return true if no parse error is found. +static ABSL_ATTRIBUTE_NOINLINE bool ReadAddrMap( + bool (*callback)(const char *filename, const void *const start_addr, + const void *const end_addr, uint64_t offset, void *arg), + void *arg, void *tmp_buf, int tmp_buf_size) { + // Use /proc/self/task//maps instead of /proc/self/maps. The latter + // requires kernel to stop all threads, and is significantly slower when there + // are 1000s of threads. + char maps_path[80]; + snprintf(maps_path, sizeof(maps_path), "/proc/self/task/%d/maps", getpid()); + + int maps_fd; + NO_INTR(maps_fd = open(maps_path, O_RDONLY)); + FileDescriptor wrapped_maps_fd(maps_fd); + if (wrapped_maps_fd.get() < 0) { + ABSL_RAW_LOG(WARNING, "%s: errno=%d", maps_path, errno); + return false; + } + + // Iterate over maps and look for the map containing the pc. Then + // look into the symbol tables inside. + LineReader reader(wrapped_maps_fd.get(), static_cast(tmp_buf), + tmp_buf_size); + while (true) { + const char *cursor; + const char *eol; + if (!reader.ReadLine(&cursor, &eol)) { // EOF or malformed line. + break; + } + + const char *line = cursor; + const void *start_address; + // Start parsing line in /proc/self/maps. Here is an example: + // + // 08048000-0804c000 r-xp 00000000 08:01 2142121 /bin/cat + // + // We want start address (08048000), end address (0804c000), flags + // (r-xp) and file name (/bin/cat). + + // Read start address. + cursor = GetHex(cursor, eol, &start_address); + if (cursor == eol || *cursor != '-') { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line); + return false; + } + ++cursor; // Skip '-'. + + // Read end address. 
+ const void *end_address; + cursor = GetHex(cursor, eol, &end_address); + if (cursor == eol || *cursor != ' ') { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps line: %s", line); + return false; + } + ++cursor; // Skip ' '. + + // Read flags. Skip flags until we encounter a space or eol. + const char *const flags_start = cursor; + while (cursor < eol && *cursor != ' ') { + ++cursor; + } + // We expect at least four letters for flags (ex. "r-xp"). + if (cursor == eol || cursor < flags_start + 4) { + ABSL_RAW_LOG(WARNING, "Corrupt /proc/self/maps: %s", line); + return false; + } + + // Check flags. + if (!ShouldUseMapping(flags_start)) { + continue; // We skip this map. + } + ++cursor; // Skip ' '. + + // Read file offset. + uint64_t offset; + cursor = GetHex(cursor, eol, &offset); + ++cursor; // Skip ' '. + + // Skip to file name. "cursor" now points to dev. We need to skip at least + // two spaces for dev and inode. + int num_spaces = 0; + while (cursor < eol) { + if (*cursor == ' ') { + ++num_spaces; + } else if (num_spaces >= 2) { + // The first non-space character after skipping two spaces + // is the beginning of the file name. + break; + } + ++cursor; + } + + // Check whether this entry corresponds to our hint table for the true + // filename. + bool hinted = + GetFileMappingHint(&start_address, &end_address, &offset, &cursor); + if (!hinted && (cursor == eol || cursor[0] == '[')) { + // not an object file, typically [vdso] or [vsyscall] + continue; + } + if (!callback(cursor, start_address, end_address, offset, arg)) break; + } + return true; +} + +// Find the objfile mapped in address region containing [addr, addr + len). +ObjFile *Symbolizer::FindObjFile(const void *const addr, size_t len) { + for (int i = 0; i < 2; ++i) { + if (!ok_) return nullptr; + + // Read /proc/self/maps if necessary + if (!addr_map_read_) { + addr_map_read_ = true; + if (!ReadAddrMap(RegisterObjFile, this, tmp_buf_, TMP_BUF_SIZE)) { + ok_ = false; + return nullptr; + } + } + + int lo = 0; + int hi = addr_map_.Size(); + while (lo < hi) { + int mid = (lo + hi) / 2; + if (addr < addr_map_.At(mid)->end_addr) { + hi = mid; + } else { + lo = mid + 1; + } + } + if (lo != addr_map_.Size()) { + ObjFile *obj = addr_map_.At(lo); + SAFE_ASSERT(obj->end_addr > addr); + if (addr >= obj->start_addr && + reinterpret_cast(addr) + len <= obj->end_addr) + return obj; + } + + // The address mapping may have changed since it was last read. Retry. + ClearAddrMap(); + } + return nullptr; +} + +void Symbolizer::ClearAddrMap() { + for (int i = 0; i != addr_map_.Size(); i++) { + ObjFile *o = addr_map_.At(i); + base_internal::LowLevelAlloc::Free(o->filename); + if (o->fd >= 0) { + NO_INTR(close(o->fd)); + } + } + addr_map_.Clear(); + addr_map_read_ = false; +} + +// Callback for ReadAddrMap to register objfiles in an in-memory table. +bool Symbolizer::RegisterObjFile(const char *filename, + const void *const start_addr, + const void *const end_addr, uint64_t offset, + void *arg) { + Symbolizer *impl = static_cast(arg); + + // Files are supposed to be added in the increasing address order. Make + // sure that's the case. 
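+  // Illustrative summary of the cases checked below (addresses hypothetical),
+  // comparing the new entry against "old", the last entry registered so far:
+  //   new end < old end                          -> out of order; keep old
+  //   same end address (e.g. [vdso] twice)       -> duplicate; keep old
+  //   new start == old end, same file and a
+  //   matching file offset                       -> contiguous; merge into old
+  //   otherwise                                  -> append a fresh ObjFile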
+ int addr_map_size = impl->addr_map_.Size(); + if (addr_map_size != 0) { + ObjFile *old = impl->addr_map_.At(addr_map_size - 1); + if (old->end_addr > end_addr) { + ABSL_RAW_LOG(ERROR, + "Unsorted addr map entry: 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR + ": %s", + reinterpret_cast(end_addr), filename, + reinterpret_cast(old->end_addr), old->filename); + return true; + } else if (old->end_addr == end_addr) { + // The same entry appears twice. This sometimes happens for [vdso]. + if (old->start_addr != start_addr || + strcmp(old->filename, filename) != 0) { + ABSL_RAW_LOG(ERROR, + "Duplicate addr 0x%" PRIxPTR ": %s <-> 0x%" PRIxPTR ": %s", + reinterpret_cast(end_addr), filename, + reinterpret_cast(old->end_addr), old->filename); + } + return true; + } else if (old->end_addr == start_addr && + reinterpret_cast(old->start_addr) - old->offset == + reinterpret_cast(start_addr) - offset && + strcmp(old->filename, filename) == 0) { + // Two contiguous map entries that span a contiguous region of the file, + // perhaps because some part of the file was mlock()ed. Combine them. + old->end_addr = end_addr; + return true; + } + } + ObjFile *obj = impl->addr_map_.Add(); + obj->filename = impl->CopyString(filename); + obj->start_addr = start_addr; + obj->end_addr = end_addr; + obj->offset = offset; + obj->elf_type = -1; // filled on demand + obj->fd = -1; // opened on demand + return true; +} + +// This function wraps the Demangle function to provide an interface +// where the input symbol is demangled in-place. +// To keep stack consumption low, we would like this function to not +// get inlined. +static ABSL_ATTRIBUTE_NOINLINE void DemangleInplace(char *out, int out_size, + char *tmp_buf, + int tmp_buf_size) { + if (Demangle(out, tmp_buf, tmp_buf_size)) { + // Demangling succeeded. Copy to out if the space allows. + int len = strlen(tmp_buf); + if (len + 1 <= out_size) { // +1 for '\0'. + SAFE_ASSERT(len < tmp_buf_size); + memmove(out, tmp_buf, len + 1); + } + } +} + +SymbolCacheLine *Symbolizer::GetCacheLine(const void *const pc) { + uintptr_t pc0 = reinterpret_cast(pc); + pc0 >>= 3; // drop the low 3 bits + + // Shuffle bits. 
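+  // The xor-fold below mixes higher PC bits into the low bits so that nearby
+  // code addresses spread across different cache lines. Worked example
+  // (hypothetical pc): after dropping the low 3 bits, pc0 is xored with
+  // pc0 >> 6, pc0 >> 12 and pc0 >> 18 before the modulo picks one of
+  // SYMBOL_CACHE_LINES buckets.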
+ pc0 ^= (pc0 >> 6) ^ (pc0 >> 12) ^ (pc0 >> 18); + return &symbol_cache_[pc0 % SYMBOL_CACHE_LINES]; +} + +void Symbolizer::AgeSymbols(SymbolCacheLine *line) { + for (uint32_t &age : line->age) { + ++age; + } +} + +const char *Symbolizer::FindSymbolInCache(const void *const pc) { + if (pc == nullptr) return nullptr; + + SymbolCacheLine *line = GetCacheLine(pc); + for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) { + if (line->pc[i] == pc) { + AgeSymbols(line); + line->age[i] = 0; + return line->name[i]; + } + } + return nullptr; +} + +const char *Symbolizer::InsertSymbolInCache(const void *const pc, + const char *name) { + SAFE_ASSERT(pc != nullptr); + + SymbolCacheLine *line = GetCacheLine(pc); + uint32_t max_age = 0; + int oldest_index = -1; + for (size_t i = 0; i < ABSL_ARRAYSIZE(line->pc); ++i) { + if (line->pc[i] == nullptr) { + AgeSymbols(line); + line->pc[i] = pc; + line->name[i] = CopyString(name); + line->age[i] = 0; + return line->name[i]; + } + if (line->age[i] >= max_age) { + max_age = line->age[i]; + oldest_index = i; + } + } + + AgeSymbols(line); + ABSL_RAW_CHECK(oldest_index >= 0, "Corrupt cache"); + base_internal::LowLevelAlloc::Free(line->name[oldest_index]); + line->pc[oldest_index] = pc; + line->name[oldest_index] = CopyString(name); + line->age[oldest_index] = 0; + return line->name[oldest_index]; +} + +static void MaybeOpenFdFromSelfExe(ObjFile *obj) { + if (memcmp(obj->start_addr, ELFMAG, SELFMAG) != 0) { + return; + } + int fd = open("/proc/self/exe", O_RDONLY); + if (fd == -1) { + return; + } + // Verify that contents of /proc/self/exe matches in-memory image of + // the binary. This can fail if the "deleted" binary is in fact not + // the main executable, or for binaries that have the first PT_LOAD + // segment smaller than 4K. We do it in four steps so that the + // buffer is smaller and we don't consume too much stack space. + const char *mem = reinterpret_cast(obj->start_addr); + for (int i = 0; i < 4; ++i) { + char buf[1024]; + ssize_t n = read(fd, buf, sizeof(buf)); + if (n != sizeof(buf) || memcmp(buf, mem, sizeof(buf)) != 0) { + close(fd); + return; + } + mem += sizeof(buf); + } + obj->fd = fd; +} + +static bool MaybeInitializeObjFile(ObjFile *obj) { + if (obj->fd < 0) { + obj->fd = open(obj->filename, O_RDONLY); + + if (obj->fd < 0) { + // Getting /proc/self/exe here means that we were hinted. + if (strcmp(obj->filename, "/proc/self/exe") == 0) { + // /proc/self/exe may be inaccessible (due to setuid, etc.), so try + // accessing the binary via argv0. 
+ if (argv0_value != nullptr) { + obj->fd = open(argv0_value, O_RDONLY); + } + } else { + MaybeOpenFdFromSelfExe(obj); + } + } + + if (obj->fd < 0) { + ABSL_RAW_LOG(WARNING, "%s: open failed: errno=%d", obj->filename, errno); + return false; + } + obj->elf_type = FileGetElfType(obj->fd); + if (obj->elf_type < 0) { + ABSL_RAW_LOG(WARNING, "%s: wrong elf type: %d", obj->filename, + obj->elf_type); + return false; + } + + if (!ReadFromOffsetExact(obj->fd, &obj->elf_header, sizeof(obj->elf_header), + 0)) { + ABSL_RAW_LOG(WARNING, "%s: failed to read elf header", obj->filename); + return false; + } + const int phnum = obj->elf_header.e_phnum; + const int phentsize = obj->elf_header.e_phentsize; + size_t phoff = obj->elf_header.e_phoff; + size_t num_executable_load_segments = 0; + for (int j = 0; j < phnum; j++) { + ElfW(Phdr) phdr; + if (!ReadFromOffsetExact(obj->fd, &phdr, sizeof(phdr), phoff)) { + ABSL_RAW_LOG(WARNING, "%s: failed to read program header %d", + obj->filename, j); + return false; + } + phoff += phentsize; + constexpr int rx = PF_X | PF_R; + if (phdr.p_type != PT_LOAD || (phdr.p_flags & rx) != rx) { + // Not a LOAD segment, or not executable code. + continue; + } + if (num_executable_load_segments < obj->phdr.size()) { + memcpy(&obj->phdr[num_executable_load_segments++], &phdr, sizeof(phdr)); + } else { + ABSL_RAW_LOG(WARNING, "%s: too many executable LOAD segments", + obj->filename); + break; + } + } + if (num_executable_load_segments == 0) { + // This object has no "r-x" LOAD segments. That's unexpected. + ABSL_RAW_LOG(WARNING, "%s: no executable LOAD segments", obj->filename); + return false; + } + } + return true; +} + +// The implementation of our symbolization routine. If it +// successfully finds the symbol containing "pc" and obtains the +// symbol name, returns pointer to that symbol. Otherwise, returns nullptr. +// If any symbol decorators have been installed via InstallSymbolDecorator(), +// they are called here as well. +// To keep stack consumption low, we would like this function to not +// get inlined. +const char *Symbolizer::GetUncachedSymbol(const void *pc) { + ObjFile *const obj = FindObjFile(pc, 1); + ptrdiff_t relocation = 0; + int fd = -1; + if (obj != nullptr) { + if (MaybeInitializeObjFile(obj)) { + const size_t start_addr = reinterpret_cast(obj->start_addr); + if (obj->elf_type == ET_DYN && start_addr >= obj->offset) { + // This object was relocated. + // + // For obj->offset > 0, adjust the relocation since a mapping at offset + // X in the file will have a start address of [true relocation]+X. + relocation = start_addr - obj->offset; + + // Note: some binaries have multiple "rx" LOAD segments. We must + // find the right one. + ElfW(Phdr) *phdr = nullptr; + for (size_t j = 0; j < obj->phdr.size(); j++) { + ElfW(Phdr) &p = obj->phdr[j]; + if (p.p_type != PT_LOAD) { + // We only expect PT_LOADs. This must be PT_NULL that we didn't + // write over (i.e. we exhausted all interesting PT_LOADs). + ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type"); + break; + } + if (pc < reinterpret_cast(start_addr + p.p_memsz)) { + phdr = &p; + break; + } + } + if (phdr == nullptr) { + // That's unexpected. Hope for the best. + ABSL_RAW_LOG( + WARNING, + "%s: unable to find LOAD segment for pc: %p, start_addr: %zx", + obj->filename, pc, start_addr); + } else { + // Adjust relocation in case phdr.p_vaddr != 0. + // This happens for binaries linked with `lld --rosegment`, and for + // binaries linked with BFD `ld -z separate-code`. 
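+        // Worked example (numbers hypothetical): a segment linked with
+        // p_vaddr = 0x200000 and p_offset = 0x1000, mapped at start_addr
+        // with file offset 0x1000, needs
+        //   relocation = (start_addr - offset) - (p_vaddr - p_offset)
+        // so that st_value + relocation lands inside the mapped code.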
+ relocation -= phdr->p_vaddr - phdr->p_offset; + } + } + + fd = obj->fd; + if (GetSymbolFromObjectFile(*obj, pc, relocation, symbol_buf_, + sizeof(symbol_buf_), tmp_buf_, + sizeof(tmp_buf_)) == SYMBOL_FOUND) { + // Only try to demangle the symbol name if it fit into symbol_buf_. + DemangleInplace(symbol_buf_, sizeof(symbol_buf_), tmp_buf_, + sizeof(tmp_buf_)); + } + } + } else { +#if ABSL_HAVE_VDSO_SUPPORT + VDSOSupport vdso; + if (vdso.IsPresent()) { + VDSOSupport::SymbolInfo symbol_info; + if (vdso.LookupSymbolByAddress(pc, &symbol_info)) { + // All VDSO symbols are known to be short. + size_t len = strlen(symbol_info.name); + ABSL_RAW_CHECK(len + 1 < sizeof(symbol_buf_), + "VDSO symbol unexpectedly long"); + memcpy(symbol_buf_, symbol_info.name, len + 1); + } + } +#endif + } + + if (g_decorators_mu.TryLock()) { + if (g_num_decorators > 0) { + SymbolDecoratorArgs decorator_args = { + pc, relocation, fd, symbol_buf_, sizeof(symbol_buf_), + tmp_buf_, sizeof(tmp_buf_), nullptr}; + for (int i = 0; i < g_num_decorators; ++i) { + decorator_args.arg = g_decorators[i].arg; + g_decorators[i].fn(&decorator_args); + } + } + g_decorators_mu.Unlock(); + } + if (symbol_buf_[0] == '\0') { + return nullptr; + } + symbol_buf_[sizeof(symbol_buf_) - 1] = '\0'; // Paranoia. + return InsertSymbolInCache(pc, symbol_buf_); +} + +const char *Symbolizer::GetSymbol(const void *pc) { + const char *entry = FindSymbolInCache(pc); + if (entry != nullptr) { + return entry; + } + symbol_buf_[0] = '\0'; + +#ifdef __hppa__ + { + // In some contexts (e.g., return addresses), PA-RISC uses the lowest two + // bits of the address to indicate the privilege level. Clear those bits + // before trying to symbolize. + const auto pc_bits = reinterpret_cast(pc); + const auto address = pc_bits & ~0x3; + entry = GetUncachedSymbol(reinterpret_cast(address)); + if (entry != nullptr) { + return entry; + } + + // In some contexts, PA-RISC also uses bit 1 of the address to indicate that + // this is a cross-DSO function pointer. Such function pointers actually + // point to a procedure label, a struct whose first 32-bit (pointer) element + // actually points to the function text. With no symbol found for this + // address so far, try interpreting it as a cross-DSO function pointer and + // see how that goes. + if (pc_bits & 0x2) { + return GetUncachedSymbol(*reinterpret_cast(address)); + } + + return nullptr; + } +#else + return GetUncachedSymbol(pc); +#endif +} + +bool RemoveAllSymbolDecorators(void) { + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. + return false; + } + g_num_decorators = 0; + g_decorators_mu.Unlock(); + return true; +} + +bool RemoveSymbolDecorator(int ticket) { + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. + return false; + } + for (int i = 0; i < g_num_decorators; ++i) { + if (g_decorators[i].ticket == ticket) { + while (i < g_num_decorators - 1) { + g_decorators[i] = g_decorators[i + 1]; + ++i; + } + g_num_decorators = i; + break; + } + } + g_decorators_mu.Unlock(); + return true; // Decorator is known to be removed. +} + +int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) { + static int ticket = 0; + + if (!g_decorators_mu.TryLock()) { + // Someone else is using decorators. Get out. 
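+    // Return-value contract, as implemented below: >= 0 is a ticket that can
+    // later be passed to RemoveSymbolDecorator(), -1 means the decorator
+    // table is full, and -2 reports the lock contention detected here.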
+ return -2; + } + int ret = ticket; + if (g_num_decorators >= kMaxDecorators) { + ret = -1; + } else { + g_decorators[g_num_decorators] = {decorator, arg, ticket++}; + ++g_num_decorators; + } + g_decorators_mu.Unlock(); + return ret; +} + +bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset, + const char *filename) { + SAFE_ASSERT(start <= end); + SAFE_ASSERT(filename != nullptr); + + InitSigSafeArena(); + + if (!g_file_mapping_mu.TryLock()) { + return false; + } + + bool ret = true; + if (g_num_file_mapping_hints >= kMaxFileMappingHints) { + ret = false; + } else { + // TODO(ckennelly): Move this into a string copy routine. + int len = strlen(filename); + char *dst = static_cast( + base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + ABSL_RAW_CHECK(dst != nullptr, "out of memory"); + memcpy(dst, filename, len + 1); + + auto &hint = g_file_mapping_hints[g_num_file_mapping_hints++]; + hint.start = start; + hint.end = end; + hint.offset = offset; + hint.filename = dst; + } + + g_file_mapping_mu.Unlock(); + return ret; +} + +bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset, + const char **filename) { + if (!g_file_mapping_mu.TryLock()) { + return false; + } + bool found = false; + for (int i = 0; i < g_num_file_mapping_hints; i++) { + if (g_file_mapping_hints[i].start <= *start && + *end <= g_file_mapping_hints[i].end) { + // We assume that the start_address for the mapping is the base + // address of the ELF section, but when [start_address,end_address) is + // not strictly equal to [hint.start, hint.end), that assumption is + // invalid. + // + // This uses the hint's start address (even though hint.start is not + // necessarily equal to start_address) to ensure the correct + // relocation is computed later. + *start = g_file_mapping_hints[i].start; + *end = g_file_mapping_hints[i].end; + *offset = g_file_mapping_hints[i].offset; + *filename = g_file_mapping_hints[i].filename; + found = true; + break; + } + } + g_file_mapping_mu.Unlock(); + return found; +} + +} // namespace debugging_internal + +bool Symbolize(const void *pc, char *out, int out_size) { + // Symbolization is very slow under tsan. + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); + SAFE_ASSERT(out_size >= 0); + debugging_internal::Symbolizer *s = debugging_internal::AllocateSymbolizer(); + const char *name = s->GetSymbol(pc); + bool ok = false; + if (name != nullptr && out_size > 0) { + strncpy(out, name, out_size); + ok = true; + if (out[out_size - 1] != '\0') { + // strncpy() does not '\0' terminate when it truncates. Do so, with + // trailing ellipsis. 
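+      // Worked example (values hypothetical): with out_size = 8 and a longer
+      // symbol "abcdefghij", the copy below turns out into "abcd...\0" -
+      // four name bytes, the ellipsis, and the terminator.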
+      static constexpr char kEllipsis[] = "...";
+      int ellipsis_size =
+          std::min(implicit_cast<int>(strlen(kEllipsis)), out_size - 1);
+      memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+      out[out_size - 1] = '\0';
+    }
+  }
+  debugging_internal::FreeSymbolizer(s);
+  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
+  return ok;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+extern "C" bool AbslInternalGetFileMappingHint(const void **start,
+                                               const void **end,
+                                               uint64_t *offset,
+                                               const char **filename) {
+  return absl::debugging_internal::GetFileMappingHint(start, end, offset,
+                                                      filename);
+}
diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc
new file mode 100644
index 0000000..c226c45
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc
@@ -0,0 +1,72 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cxxabi.h>
+#include <emscripten.h>
+
+#include <algorithm>
+#include <cstring>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/debugging/internal/demangle.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
+
+extern "C" {
+const char* emscripten_pc_get_function(const void* pc);
+}
+
+// clang-format off
+EM_JS(bool, HaveOffsetConverter, (),
+      { return typeof wasmOffsetConverter !== 'undefined'; });
+// clang-format on
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+void InitializeSymbolizer(const char*) {
+  if (!HaveOffsetConverter()) {
+    ABSL_RAW_LOG(INFO,
+                 "Symbolization unavailable. Rebuild with -sWASM=1 "
+                 "and -sUSE_OFFSET_CONVERTER=1.");
+  }
+}
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+  // Check if we have the offset converter necessary for pc_get_function.
+  // Without it, the program will abort().
+  if (!HaveOffsetConverter()) {
+    return false;
+  }
+  const char* func_name = emscripten_pc_get_function(pc);
+  if (func_name == nullptr) {
+    return false;
+  }
+
+  strncpy(out, func_name, out_size);
+
+  if (out[out_size - 1] != '\0') {
+    // strncpy() does not '\0' terminate when it truncates.
+    static constexpr char kEllipsis[] = "...";
+    int ellipsis_size = std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+    out[out_size - 1] = '\0';
+  }
+
+  return true;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc
new file mode 100644
index 0000000..db24456
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc
@@ -0,0 +1,40 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace debugging_internal {
+
+int InstallSymbolDecorator(SymbolDecorator, void*) { return -1; }
+bool RemoveSymbolDecorator(int) { return false; }
+bool RemoveAllSymbolDecorators(void) { return false; }
+bool RegisterFileMappingHint(const void *, const void *, uint64_t, const char *) {
+  return false;
+}
+bool GetFileMappingHint(const void **, const void **, uint64_t *, const char **) {
+  return false;
+}
+
+}  // namespace debugging_internal
+
+void InitializeSymbolizer(const char*) {}
+bool Symbolize(const void *, char *, int) { return false; }
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc b/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc
new file mode 100644
index 0000000..c3df46f
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc
@@ -0,0 +1,81 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See "Retrieving Symbol Information by Address":
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680578(v=vs.85).aspx
+
+#include <windows.h>
+
+// MSVC header dbghelp.h has a warning for an ignored typedef.
+#pragma warning(push)
+#pragma warning(disable:4091)
+#include <dbghelp.h>
+#pragma warning(pop)
+
+#pragma comment(lib, "dbghelp.lib")
+
+#include <algorithm>
+#include <cstring>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+static HANDLE process = NULL;
+
+void InitializeSymbolizer(const char*) {
+  if (process != nullptr) {
+    return;
+  }
+  process = GetCurrentProcess();
+
+  // Symbols are not loaded until a reference is made requiring the
+  // symbols be loaded. This is the fastest, most efficient way to use
+  // the symbol handler.
+  SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME);
+  if (!SymInitialize(process, nullptr, true)) {
+    // GetLastError() returns a Win32 DWORD, but we assign to
+    // unsigned long long to simplify the ABSL_RAW_LOG case below. The uniform
+    // initialization guarantees this is not a narrowing conversion.
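+    // For instance (sketch only), `unsigned long long e{some_dword};`
+    // compiles because DWORD widens to unsigned long long, whereas
+    // brace-initializing a narrower type from a wider value is rejected at
+    // compile time. (`some_dword` is a hypothetical variable.)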
+    const unsigned long long error{GetLastError()};  // NOLINT(runtime/int)
+    ABSL_RAW_LOG(FATAL, "SymInitialize() failed: %llu", error);
+  }
+}
+
+bool Symbolize(const void* pc, char* out, int out_size) {
+  if (out_size <= 0) {
+    return false;
+  }
+  alignas(SYMBOL_INFO) char buf[sizeof(SYMBOL_INFO) + MAX_SYM_NAME];
+  SYMBOL_INFO* symbol = reinterpret_cast<SYMBOL_INFO*>(buf);
+  symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+  symbol->MaxNameLen = MAX_SYM_NAME;
+  if (!SymFromAddr(process, reinterpret_cast<DWORD64>(pc), nullptr, symbol)) {
+    return false;
+  }
+  strncpy(out, symbol->Name, out_size);
+  if (out[out_size - 1] != '\0') {
+    // strncpy() does not '\0' terminate when it truncates.
+    static constexpr char kEllipsis[] = "...";
+    int ellipsis_size =
+        std::min<int>(sizeof(kEllipsis) - 1, out_size - 1);
+    memcpy(out + out_size - ellipsis_size - 1, kEllipsis, ellipsis_size);
+    out[out_size - 1] = '\0';
+  }
+  return true;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
diff --git a/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h
new file mode 100644
index 0000000..f2fa089
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h
@@ -0,0 +1,200 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: commandlineflag.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the `CommandLineFlag`, which acts as a type-erased
+// handle for accessing metadata about the Abseil Flag in question.
+//
+// Because an actual Abseil flag is of an unspecified type, you should not
+// manipulate or interact directly with objects of that type. Instead, use the
+// CommandLineFlag type as an intermediary.
+#ifndef ABSL_FLAGS_COMMANDLINEFLAG_H_
+#define ABSL_FLAGS_COMMANDLINEFLAG_H_
+
+#include <memory>
+#include <string>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/fast_type_id.h"
+#include "absl/flags/internal/commandlineflag.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace flags_internal {
+class PrivateHandleAccessor;
+}  // namespace flags_internal
+
+// CommandLineFlag
+//
+// This type acts as a type-erased handle for an instance of an Abseil Flag and
+// holds reflection information pertaining to that flag. Use CommandLineFlag to
+// access a flag's name, location, help string etc.
+//
+// To obtain an absl::CommandLineFlag, invoke `absl::FindCommandLineFlag()`
+// passing it the flag name string.
+//
+// Example:
+//
+//   // Obtain reflection handle for a flag named "flagname".
+//   const absl::CommandLineFlag* my_flag_data =
+//        absl::FindCommandLineFlag("flagname");
+//
+//   // Now you can get flag info from that reflection handle.
+//   std::string flag_location = my_flag_data->Filename();
+//   ...
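+//
+//   // A further hedged sketch in the same spirit (the flag's value type is
+//   // assumed to be int here):
+//   if (my_flag_data != nullptr && my_flag_data->IsOfType<int>()) {
+//     absl::optional<int> value = my_flag_data->TryGet<int>();
+//   }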
+class CommandLineFlag {
+ public:
+  constexpr CommandLineFlag() = default;
+
+  // Not copyable/assignable.
+  CommandLineFlag(const CommandLineFlag&) = delete;
+  CommandLineFlag& operator=(const CommandLineFlag&) = delete;
+
+  // absl::CommandLineFlag::IsOfType()
+  //
+  // Return true iff flag has type T.
+  template <typename T>
+  inline bool IsOfType() const {
+    return TypeId() == base_internal::FastTypeId<T>();
+  }
+
+  // absl::CommandLineFlag::TryGet()
+  //
+  // Attempts to retrieve the flag value. Returns value on success,
+  // absl::nullopt otherwise.
+  template <typename T>
+  absl::optional<T> TryGet() const {
+    if (IsRetired() || !IsOfType<T>()) {
+      return absl::nullopt;
+    }
+
+    // Implementation notes:
+    //
+    // We are wrapping a union around the value of `T` to serve three purposes:
+    //
+    // 1. `U.value` has correct size and alignment for a value of type `T`
+    // 2. The `U.value` constructor is not invoked since U's constructor does
+    //    not do it explicitly.
+    // 3. The `U.value` destructor is invoked since U's destructor does it
+    //    explicitly. This makes `U` a kind of RAII wrapper around non default
+    //    constructible value of T, which is destructed when we leave the
+    //    scope. We do need to destroy U.value, which is constructed by
+    //    CommandLineFlag::Read even though we left it in a moved-from state
+    //    after std::move.
+    //
+    // All of this serves to avoid requiring `T` being default constructible.
+    union U {
+      T value;
+      U() {}
+      ~U() { value.~T(); }
+    };
+    U u;
+
+    Read(&u.value);
+    // allow retired flags to be "read", so we can report invalid access.
+    if (IsRetired()) {
+      return absl::nullopt;
+    }
+    return std::move(u.value);
+  }
+
+  // absl::CommandLineFlag::Name()
+  //
+  // Returns name of this flag.
+  virtual absl::string_view Name() const = 0;
+
+  // absl::CommandLineFlag::Filename()
+  //
+  // Returns name of the file where this flag is defined.
+  virtual std::string Filename() const = 0;
+
+  // absl::CommandLineFlag::Help()
+  //
+  // Returns help message associated with this flag.
+  virtual std::string Help() const = 0;
+
+  // absl::CommandLineFlag::IsRetired()
+  //
+  // Returns true iff this object corresponds to a retired flag.
+  virtual bool IsRetired() const;
+
+  // absl::CommandLineFlag::DefaultValue()
+  //
+  // Returns the default value for this flag.
+  virtual std::string DefaultValue() const = 0;
+
+  // absl::CommandLineFlag::CurrentValue()
+  //
+  // Returns the current value for this flag.
+  virtual std::string CurrentValue() const = 0;
+
+  // absl::CommandLineFlag::ParseFrom()
+  //
+  // Sets the value of the flag based on specified string `value`. If the flag
+  // was successfully set to the new value, it returns true. Otherwise, sets
+  // `error` to indicate the error, leaves the flag unchanged, and returns
+  // false.
+  bool ParseFrom(absl::string_view value, std::string* error);
+
+ protected:
+  ~CommandLineFlag() = default;
+
+ private:
+  friend class flags_internal::PrivateHandleAccessor;
+
+  // Sets the value of the flag based on specified string `value`. If the flag
+  // was successfully set to the new value, it returns true. Otherwise, sets
+  // `error` to indicate the error, leaves the flag unchanged, and returns
+  // false. There are three ways to set the flag's value:
+  //  * Update the current flag value
+  //  * Update the flag's default value
+  //  * Update the current flag value if it was never set before
+  // The mode is selected based on `set_mode` parameter.
+  virtual bool ParseFrom(absl::string_view value,
+                         flags_internal::FlagSettingMode set_mode,
+                         flags_internal::ValueSource source,
+                         std::string& error) = 0;
+
+  // Returns id of the flag's value type.
+  virtual flags_internal::FlagFastTypeId TypeId() const = 0;
+
+  // Interface to save flag to some persistent state. Returns current flag
+  // state or nullptr if flag does not support saving and restoring a state.
+  virtual std::unique_ptr<flags_internal::FlagStateInterface> SaveState() = 0;
+
+  // Copy-constructs a new value of the flag's type in the memory referenced
+  // by dst, based on the current flag's value.
+  virtual void Read(void* dst) const = 0;
+
+  // To be deleted. Used to return true if flag's current value originated
+  // from the command line.
+  virtual bool IsSpecifiedOnCommandLine() const = 0;
+
+  // Validates the supplied value using the validator or the ParseFlag
+  // routine.
+  virtual bool ValidateInputValue(absl::string_view value) const = 0;
+
+  // Checks that the flag's default value can be converted to a string and
+  // back to the flag's value type.
+  virtual void CheckDefaultValueParsingRoundtrip() const = 0;
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_COMMANDLINEFLAG_H_
diff --git a/CAPI/cpp/grpc/include/absl/flags/config.h b/CAPI/cpp/grpc/include/absl/flags/config.h
new file mode 100644
index 0000000..14c4235
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/flags/config.h
@@ -0,0 +1,68 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_CONFIG_H_
+#define ABSL_FLAGS_CONFIG_H_
+
+// Determine if we should strip string literals from the Flag objects.
+// By default we strip string literals on mobile platforms.
+#if !defined(ABSL_FLAGS_STRIP_NAMES)
+
+#if defined(__ANDROID__)
+#define ABSL_FLAGS_STRIP_NAMES 1
+
+#elif defined(__APPLE__)
+#include <TargetConditionals.h>
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#define ABSL_FLAGS_STRIP_NAMES 1
+#elif defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED
+#define ABSL_FLAGS_STRIP_NAMES 1
+#endif  // TARGET_OS_*
+#endif
+
+#endif  // !defined(ABSL_FLAGS_STRIP_NAMES)
+
+#if !defined(ABSL_FLAGS_STRIP_NAMES)
+// If ABSL_FLAGS_STRIP_NAMES wasn't set on the command line or above,
+// the default is not to strip.
+#define ABSL_FLAGS_STRIP_NAMES 0
+#endif
+
+#if !defined(ABSL_FLAGS_STRIP_HELP)
+// By default, if we strip names, we also strip help.
+#define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES
+#endif
+
+// These macros represent the "source of truth" for the list of supported
+// built-in types.
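+//
+// For illustration, a hypothetical consumer macro
+//
+//   #define AS_FIELD(T, name) T name##_field;
+//
+// expanded as ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(AS_FIELD) declares one field
+// per supported built-in type: `bool bool_field; short short_field;
+// unsigned short unsigned_short_field;` and so on.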
+#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(bool, bool) \ + A(short, short) \ + A(unsigned short, unsigned_short) \ + A(int, int) \ + A(unsigned int, unsigned_int) \ + A(long, long) \ + A(unsigned long, unsigned_long) \ + A(long long, long_long) \ + A(unsigned long long, unsigned_long_long) \ + A(double, double) \ + A(float, float) + +#define ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(A) \ + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(std::string, std_string) \ + A(std::vector, std_vector_of_string) + +#endif // ABSL_FLAGS_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/declare.h b/CAPI/cpp/grpc/include/absl/flags/declare.h new file mode 100644 index 0000000..d1437bb --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/declare.h @@ -0,0 +1,73 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: declare.h +// ----------------------------------------------------------------------------- +// +// This file defines the ABSL_DECLARE_FLAG macro, allowing you to declare an +// `absl::Flag` for use within a translation unit. You should place this +// declaration within the header file associated with the .cc file that defines +// and owns the `Flag`. + +#ifndef ABSL_FLAGS_DECLARE_H_ +#define ABSL_FLAGS_DECLARE_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// absl::Flag represents a flag of type 'T' created by ABSL_FLAG. +template +class Flag; + +} // namespace flags_internal + +// Flag +// +// Forward declaration of the `absl::Flag` type for use in defining the macro. +#if defined(_MSC_VER) && !defined(__clang__) +template +class Flag; +#else +template +using Flag = flags_internal::Flag; +#endif + +ABSL_NAMESPACE_END +} // namespace absl + +// ABSL_DECLARE_FLAG() +// +// This macro is a convenience for declaring use of an `absl::Flag` within a +// translation unit. This macro should be used within a header file to +// declare usage of the flag within any .cc file including that header file. +// +// The ABSL_DECLARE_FLAG(type, name) macro expands to: +// +// extern absl::Flag FLAGS_name; +#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name) + +// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its +// arguments. Clients must use ABSL_DECLARE_FLAG instead. +#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ + extern absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ {} \ + /* second redeclaration is to allow applying attributes */ \ + extern absl::Flag FLAGS_##name + +#endif // ABSL_FLAGS_DECLARE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/flag.h b/CAPI/cpp/grpc/include/absl/flags/flag.h new file mode 100644 index 0000000..b7f94be --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/flag.h @@ -0,0 +1,310 @@ +// +// Copyright 2019 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flag.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::Flag` type for holding command-line +// flag data, and abstractions to create, get and set such flag data. +// +// It is important to note that this type is **unspecified** (an implementation +// detail) and you do not construct or manipulate actual `absl::Flag` +// instances. Instead, you define and declare flags using the +// `ABSL_FLAG()` and `ABSL_DECLARE_FLAG()` macros, and get and set flag values +// using the `absl::GetFlag()` and `absl::SetFlag()` functions. + +#ifndef ABSL_FLAGS_FLAG_H_ +#define ABSL_FLAGS_FLAG_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" +#include "absl/flags/config.h" +#include "absl/flags/internal/flag.h" +#include "absl/flags/internal/registry.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Flag +// +// An `absl::Flag` holds a command-line flag value, providing a runtime +// parameter to a binary. Such flags should be defined in the global namespace +// and (preferably) in the module containing the binary's `main()` function. +// +// You should not construct and cannot use the `absl::Flag` type directly; +// instead, you should declare flags using the `ABSL_DECLARE_FLAG()` macro +// within a header file, and define your flag using `ABSL_FLAG()` within your +// header's associated `.cc` file. Such flags will be named `FLAGS_name`. +// +// Example: +// +// .h file +// +// // Declares usage of a flag named "FLAGS_count" +// ABSL_DECLARE_FLAG(int, count); +// +// .cc file +// +// // Defines a flag named "FLAGS_count" with a default `int` value of 0. +// ABSL_FLAG(int, count, 0, "Count of items to process"); +// +// No public methods of `absl::Flag` are part of the Abseil Flags API. +// +// For type support of Abseil Flags, see the marshalling.h header file, which +// discusses supported standard types, optional flags, and additional Abseil +// type support. +#if !defined(_MSC_VER) || defined(__clang__) +template +using Flag = flags_internal::Flag; +#else +#include "absl/flags/internal/flag_msvc.inc" +#endif + +// GetFlag() +// +// Returns the value (of type `T`) of an `absl::Flag` instance, by value. Do +// not construct an `absl::Flag` directly and call `absl::GetFlag()`; +// instead, refer to flag's constructed variable name (e.g. `FLAGS_name`). +// Because this function returns by value and not by reference, it is +// thread-safe, but note that the operation may be expensive; as a result, avoid +// `absl::GetFlag()` within any tight loops. 
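+// For instance (sketch only; the loop body is hypothetical), hoist the read
+// out of the loop:
+//
+//   const int limit = absl::GetFlag(FLAGS_count);  // one synchronized read
+//   for (int i = 0; i < limit; ++i) { /* ... */ }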
+// +// Example: +// +// // FLAGS_count is a Flag of type `int` +// int my_count = absl::GetFlag(FLAGS_count); +// +// // FLAGS_firstname is a Flag of type `std::string` +// std::string first_name = absl::GetFlag(FLAGS_firstname); +template +ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag) { + return flags_internal::FlagImplPeer::InvokeGet(flag); +} + +// SetFlag() +// +// Sets the value of an `absl::Flag` to the value `v`. Do not construct an +// `absl::Flag` directly and call `absl::SetFlag()`; instead, use the +// flag's variable name (e.g. `FLAGS_name`). This function is +// thread-safe, but is potentially expensive. Avoid setting flags in general, +// but especially within performance-critical code. +template +void SetFlag(absl::Flag* flag, const T& v) { + flags_internal::FlagImplPeer::InvokeSet(*flag, v); +} + +// Overload of `SetFlag()` to allow callers to pass in a value that is +// convertible to `T`. E.g., use this overload to pass a "const char*" when `T` +// is `std::string`. +template +void SetFlag(absl::Flag* flag, const V& v) { + T value(v); + flags_internal::FlagImplPeer::InvokeSet(*flag, value); +} + +// GetFlagReflectionHandle() +// +// Returns the reflection handle corresponding to specified Abseil Flag +// instance. Use this handle to access flag's reflection information, like name, +// location, default value etc. +// +// Example: +// +// std::string = absl::GetFlagReflectionHandle(FLAGS_count).DefaultValue(); + +template +const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f) { + return flags_internal::FlagImplPeer::InvokeReflect(f); +} + +ABSL_NAMESPACE_END +} // namespace absl + + +// ABSL_FLAG() +// +// This macro defines an `absl::Flag` instance of a specified type `T`: +// +// ABSL_FLAG(T, name, default_value, help); +// +// where: +// +// * `T` is a supported flag type (see the list of types in `marshalling.h`), +// * `name` designates the name of the flag (as a global variable +// `FLAGS_name`), +// * `default_value` is an expression holding the default value for this flag +// (which must be implicitly convertible to `T`), +// * `help` is the help text, which can also be an expression. +// +// This macro expands to a flag named 'FLAGS_name' of type 'T': +// +// absl::Flag FLAGS_name = ...; +// +// Note that all such instances are created as global variables. +// +// For `ABSL_FLAG()` values that you wish to expose to other translation units, +// it is recommended to define those flags within the `.cc` file associated with +// the header where the flag is declared. +// +// Note: do not construct objects of type `absl::Flag` directly. Only use the +// `ABSL_FLAG()` macro for such construction. +#define ABSL_FLAG(Type, name, default_value, help) \ + ABSL_FLAG_IMPL(Type, name, default_value, help) + +// ABSL_FLAG().OnUpdate() +// +// Defines a flag of type `T` with a callback attached: +// +// ABSL_FLAG(T, name, default_value, help).OnUpdate(callback); +// +// `callback` should be convertible to `void (*)()`. +// +// After any setting of the flag value, the callback will be called at least +// once. A rapid sequence of changes may be merged together into the same +// callback. No concurrent calls to the callback will be made for the same +// flag. Callbacks are allowed to read the current value of the flag but must +// not mutate that flag. 
+// +// The update mechanism guarantees "eventual consistency"; if the callback +// derives an auxiliary data structure from the flag value, it is guaranteed +// that eventually the flag value and the derived data structure will be +// consistent. +// +// Note: ABSL_FLAG.OnUpdate() does not have a public definition. Hence, this +// comment serves as its API documentation. + +// ----------------------------------------------------------------------------- +// Implementation details below this section +// ----------------------------------------------------------------------------- + +// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_NAMES +#if !defined(_MSC_VER) || defined(__clang__) +#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag +#define ABSL_FLAG_IMPL_HELP_ARG(name) \ + absl::flags_internal::HelpArg( \ + FLAGS_help_storage_##name) +#define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) \ + absl::flags_internal::DefaultArg(0) +#else +#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag.GetImpl() +#define ABSL_FLAG_IMPL_HELP_ARG(name) &AbslFlagHelpGenFor##name::NonConst +#define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) &AbslFlagDefaultGenFor##name::Gen +#endif + +#if ABSL_FLAGS_STRIP_NAMES +#define ABSL_FLAG_IMPL_FLAGNAME(txt) "" +#define ABSL_FLAG_IMPL_FILENAME() "" +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ + nullptr) +#else +#define ABSL_FLAG_IMPL_FLAGNAME(txt) txt +#define ABSL_FLAG_IMPL_FILENAME() __FILE__ +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ + __FILE__) +#endif + +// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP + +#if ABSL_FLAGS_STRIP_HELP +#define ABSL_FLAG_IMPL_FLAGHELP(txt) absl::flags_internal::kStrippedFlagHelp +#else +#define ABSL_FLAG_IMPL_FLAGHELP(txt) txt +#endif + +// AbslFlagHelpGenFor##name is used to encapsulate both immediate (method Const) +// and lazy (method NonConst) evaluation of help message expression. We choose +// between the two via the call to HelpArg in absl::Flag instantiation below. +// If help message expression is constexpr evaluable compiler will optimize +// away this whole struct. +// TODO(rogeeff): place these generated structs into local namespace and apply +// ABSL_INTERNAL_UNIQUE_SHORT_NAME. +// TODO(rogeeff): Apply __attribute__((nodebug)) to FLAGS_help_storage_##name +#define ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, txt) \ + struct AbslFlagHelpGenFor##name { \ + /* The expression is run in the caller as part of the */ \ + /* default value argument. That keeps temporaries alive */ \ + /* long enough for NonConst to work correctly. */ \ + static constexpr absl::string_view Value( \ + absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ + return absl_flag_help; \ + } \ + static std::string NonConst() { return std::string(Value()); } \ + }; \ + constexpr auto FLAGS_help_storage_##name ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + ABSL_ATTRIBUTE_SECTION_VARIABLE(flags_help_cold) = \ + absl::flags_internal::HelpStringAsArray( \ + 0); + +#define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + struct AbslFlagDefaultGenFor##name { \ + Type value = absl::flags_internal::InitDefaultValue(default_value); \ + static void Gen(void* absl_flag_default_loc) { \ + new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ + } \ + }; + +// ABSL_FLAG_IMPL +// +// Note: Name of registrar object is not arbitrary. 
It is used to "grab" +// global name for FLAGS_no symbol, thus preventing the possibility +// of defining two flags with names foo and nofoo. +#define ABSL_FLAG_IMPL(Type, name, default_value, help) \ + extern ::absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ {} \ + ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ + ABSL_CONST_INIT absl::Flag FLAGS_##name{ \ + ABSL_FLAG_IMPL_FLAGNAME(#name), ABSL_FLAG_IMPL_FILENAME(), \ + ABSL_FLAG_IMPL_HELP_ARG(name), ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name)}; \ + extern absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name; \ + absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name = \ + ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name) + +// ABSL_RETIRED_FLAG +// +// Designates the flag (which is usually pre-existing) as "retired." A retired +// flag is a flag that is now unused by the program, but may still be passed on +// the command line, usually by production scripts. A retired flag is ignored +// and code can't access it at runtime. +// +// This macro registers a retired flag with given name and type, with a name +// identical to the name of the original flag you are retiring. The retired +// flag's type can change over time, so that you can retire code to support a +// custom flag type. +// +// This macro has the same signature as `ABSL_FLAG`. To retire a flag, simply +// replace an `ABSL_FLAG` definition with `ABSL_RETIRED_FLAG`, leaving the +// arguments unchanged (unless of course you actually want to retire the flag +// type at this time as well). +// +// `default_value` is only used as a double check on the type. `explanation` is +// unused. +// TODO(rogeeff): replace RETIRED_FLAGS with FLAGS once forward declarations of +// retired flags are cleaned up. +#define ABSL_RETIRED_FLAG(type, name, default_value, explanation) \ + static absl::flags_internal::RetiredFlag RETIRED_FLAGS_##name; \ + ABSL_ATTRIBUTE_UNUSED static const auto RETIRED_FLAGS_REG_##name = \ + (RETIRED_FLAGS_##name.Retire(#name), \ + ::absl::flags_internal::FlagRegistrarEmpty{}) + +#endif // ABSL_FLAGS_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h new file mode 100644 index 0000000..ebfe81b --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h @@ -0,0 +1,68 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ +#define ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ + +#include "absl/base/config.h" +#include "absl/base/internal/fast_type_id.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// An alias for flag fast type id. This value identifies the flag value type +// similarly to typeid(T), without relying on RTTI being available. In most +// cases this id is enough to uniquely identify the flag's value type. 
In a few
+// cases we'll have to resort to the actual RTTI implementation if it is
+// available.
+using FlagFastTypeId = absl::base_internal::FastTypeIdType;
+
+// Options that control SetCommandLineOptionWithMode.
+enum FlagSettingMode {
+  // update the flag's value unconditionally (can call this multiple times).
+  SET_FLAGS_VALUE,
+  // update the flag's value, but *only if* it has not yet been updated
+  // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef".
+  SET_FLAG_IF_DEFAULT,
+  // set the flag's default value to this. If the flag has not been updated
+  // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef")
+  // change the flag's current value to the new default value as well.
+  SET_FLAGS_DEFAULT
+};
+
+// Options that control ParseFrom: Source of a value.
+enum ValueSource {
+  // Flag is being set by value specified on a command line.
+  kCommandLine,
+  // Flag is being set by value specified in the code.
+  kProgrammaticChange,
+};
+
+// Handle to FlagState objects. A specific flag state object can restore the
+// flag that produced it (via CommandLineFlag::SaveState()) to the saved state.
+class FlagStateInterface {
+ public:
+  virtual ~FlagStateInterface();
+
+  // Restores the flag that originated this object to the saved state.
+  virtual void Restore() const = 0;
+};
+
+}  // namespace flags_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_
diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/flag.h b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h
new file mode 100644
index 0000000..6154638
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h
@@ -0,0 +1,800 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_INTERNAL_FLAG_H_
+#define ABSL_FLAGS_INTERNAL_FLAG_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <cstring>
+#include <memory>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <typeinfo>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/flags/commandlineflag.h"
+#include "absl/flags/config.h"
+#include "absl/flags/internal/commandlineflag.h"
+#include "absl/flags/internal/registry.h"
+#include "absl/flags/internal/sequence_lock.h"
+#include "absl/flags/marshalling.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Forward declaration of absl::Flag<T> public API.
+namespace flags_internal {
+template <typename T>
+class Flag;
+}  // namespace flags_internal
+
+#if defined(_MSC_VER) && !defined(__clang__)
+template <typename T>
+class Flag;
+#else
+template <typename T>
+using Flag = flags_internal::Flag<T>;
+#endif
+
+template <typename T>
+ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag<T>& flag);
+
+template <typename T>
+void SetFlag(absl::Flag<T>* flag, const T& v);
+
+template <typename T, typename V>
+void SetFlag(absl::Flag<T>* flag, const V& v);
+
+template <typename U>
+const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag<U>& f);
+
+///////////////////////////////////////////////////////////////////////////////
+// Flag value type operations, e.g. parsing, copying, etc., are provided
+// by functions specific to that type with a signature matching FlagOpFn.
+
+namespace flags_internal {
+
+enum class FlagOp {
+  kAlloc,
+  kDelete,
+  kCopy,
+  kCopyConstruct,
+  kSizeof,
+  kFastTypeId,
+  kRuntimeTypeId,
+  kParse,
+  kUnparse,
+  kValueOffset,
+};
+using FlagOpFn = void* (*)(FlagOp, const void*, void*, void*);
+
+// Forward declaration for Flag value specific operations.
+template <typename T>
+void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3);
+
+// Allocate aligned memory for a flag value.
+inline void* Alloc(FlagOpFn op) {
+  return op(FlagOp::kAlloc, nullptr, nullptr, nullptr);
+}
+// Deletes memory interpreting obj as flag value type pointer.
+inline void Delete(FlagOpFn op, void* obj) {
+  op(FlagOp::kDelete, nullptr, obj, nullptr);
+}
+// Copies src to dst interpreting as flag value type pointers.
+inline void Copy(FlagOpFn op, const void* src, void* dst) {
+  op(FlagOp::kCopy, src, dst, nullptr);
+}
+// Construct a copy of flag value in a location pointed by dst
+// based on src - pointer to the flag's value.
+inline void CopyConstruct(FlagOpFn op, const void* src, void* dst) {
+  op(FlagOp::kCopyConstruct, src, dst, nullptr);
+}
+// Makes a copy of flag value pointed by obj.
+inline void* Clone(FlagOpFn op, const void* obj) {
+  void* res = flags_internal::Alloc(op);
+  flags_internal::CopyConstruct(op, obj, res);
+  return res;
+}
+// Returns true if parsing of input text is successful.
+inline bool Parse(FlagOpFn op, absl::string_view text, void* dst,
+                  std::string* error) {
+  return op(FlagOp::kParse, &text, dst, error) != nullptr;
+}
+// Returns string representing supplied value.
+inline std::string Unparse(FlagOpFn op, const void* val) {
+  std::string result;
+  op(FlagOp::kUnparse, val, &result, nullptr);
+  return result;
+}
+// Returns size of flag value type.
+inline size_t Sizeof(FlagOpFn op) {
+  // This sequence of casts reverses the sequence from
+  // `flags_internal::FlagOps()`
+  return static_cast<size_t>(reinterpret_cast<intptr_t>(
+      op(FlagOp::kSizeof, nullptr, nullptr, nullptr)));
+}
+// Returns fast type id corresponding to the value type.
+inline FlagFastTypeId FastTypeId(FlagOpFn op) {
+  return reinterpret_cast<FlagFastTypeId>(
+      op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr));
+}
+// Returns the runtime type info corresponding to the value type.
+inline const std::type_info* RuntimeTypeId(FlagOpFn op) {
+  return reinterpret_cast<const std::type_info*>(
+      op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr));
+}
+// Returns offset of the field value_ from the field impl_ inside of
+// absl::Flag<T> data. Given a FlagImpl pointer p you can get the
+// location of the corresponding value as:
+// reinterpret_cast<const char*>(p) + ValueOffset().
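+// (Worked illustration, value hypothetical: if FlagOps<T> answers
+// FlagOp::kValueOffset with 16, then for a FlagImpl pointer p the flag's
+// value lives at reinterpret_cast<const char*>(p) + 16. The same
+// pointer-encoding trick backs Sizeof() and FastTypeId() above.)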
+inline ptrdiff_t ValueOffset(FlagOpFn op) {
+  // This sequence of casts reverses the sequence from
+  // `flags_internal::FlagOps()`.
+  return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(
+      op(FlagOp::kValueOffset, nullptr, nullptr, nullptr)));
+}
+
+// Returns an address of RTTI's typeid(T).
+template <typename T>
+inline const std::type_info* GenRuntimeTypeId() {
+#ifdef ABSL_INTERNAL_HAS_RTTI
+  return &typeid(T);
+#else
+  return nullptr;
+#endif
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Flag help auxiliary structs.
+
+// This is the help argument for absl::Flag, encapsulating the string literal
+// pointer or a pointer to a function generating it, as well as an enum
+// discriminating between the two cases.
+using HelpGenFunc = std::string (*)();
+
+template <size_t N>
+struct FixedCharArray {
+  char value[N];
+
+  template <size_t... I>
+  static constexpr FixedCharArray<N> FromLiteralString(
+      absl::string_view str, absl::index_sequence<I...>) {
+    return (void)str, FixedCharArray<N>({{str[I]..., '\0'}});
+  }
+};
+
+template <typename Gen, size_t N = Gen::Value().size()>
+constexpr FixedCharArray<N + 1> HelpStringAsArray(int) {
+  return FixedCharArray<N + 1>::FromLiteralString(
+      Gen::Value(), absl::make_index_sequence<N>{});
+}
+
+template <typename Gen>
+constexpr std::false_type HelpStringAsArray(char) {
+  return std::false_type{};
+}
+
+union FlagHelpMsg {
+  constexpr explicit FlagHelpMsg(const char* help_msg) : literal(help_msg) {}
+  constexpr explicit FlagHelpMsg(HelpGenFunc help_gen) : gen_func(help_gen) {}
+
+  const char* literal;
+  HelpGenFunc gen_func;
+};
+
+enum class FlagHelpKind : uint8_t { kLiteral = 0, kGenFunc = 1 };
+
+struct FlagHelpArg {
+  FlagHelpMsg source;
+  FlagHelpKind kind;
+};
+
+extern const char kStrippedFlagHelp[];
+
+// These two HelpArg overloads allow us to select at compile time one of two
+// ways to pass the Help argument to absl::Flag. We'll be passing
+// AbslFlagHelpGenFor##name as Gen and integer 0 as a single argument to prefer
+// the first overload if possible. If the help message is evaluatable in a
+// constexpr context, we'll be able to make a FixedCharArray out of it and
+// choose the first overload. In this case the help message expression is
+// immediately evaluated and is used to construct the absl::Flag. No additional
+// code is generated by ABSL_FLAG. Otherwise SFINAE kicks in and the first
+// overload is dropped from consideration, in which case the second overload
+// will be used. The second overload does not attempt to evaluate the help
+// message expression immediately; instead it delays the evaluation by
+// returning the function pointer (&T::NonConst) generating the help message
+// when necessary. This is evaluatable in a constexpr context, but the cost is
+// an extra function being generated in the ABSL_FLAG code.
+template <typename Gen, size_t N>
+constexpr FlagHelpArg HelpArg(const FixedCharArray<N>& value) {
+  return {FlagHelpMsg(value.value), FlagHelpKind::kLiteral};
+}
+
+template <typename Gen>
+constexpr FlagHelpArg HelpArg(std::false_type) {
+  return {FlagHelpMsg(&Gen::NonConst), FlagHelpKind::kGenFunc};
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Flag default value auxiliary structs.
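+//
+// For orientation (editor's note; the examples are illustrative): for
+// ABSL_FLAG(int, foo, 42, "...") the default fits in one word, so it is
+// stored in-line as FlagDefaultKind::kOneWord; for
+// ABSL_FLAG(std::string, bar, "hi", "...") the default is produced lazily by
+// a generated function, i.e. FlagDefaultKind::kGenFunc.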
+ +// Signature for the function generating the initial flag value (usually +// based on default value supplied in flag's definition) +using FlagDfltGenFunc = void (*)(void*); + +union FlagDefaultSrc { + constexpr explicit FlagDefaultSrc(FlagDfltGenFunc gen_func_arg) + : gen_func(gen_func_arg) {} + +#define ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE(T, name) \ + T name##_value; \ + constexpr explicit FlagDefaultSrc(T value) : name##_value(value) {} // NOLINT + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE) +#undef ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE + + void* dynamic_value; + FlagDfltGenFunc gen_func; +}; + +enum class FlagDefaultKind : uint8_t { + kDynamicValue = 0, + kGenFunc = 1, + kOneWord = 2 // for default values UP to one word in size +}; + +struct FlagDefaultArg { + FlagDefaultSrc source; + FlagDefaultKind kind; +}; + +// This struct and corresponding overload to InitDefaultValue are used to +// facilitate usage of {} as default value in ABSL_FLAG macro. +// TODO(rogeeff): Fix handling types with explicit constructors. +struct EmptyBraces {}; + +template +constexpr T InitDefaultValue(T t) { + return t; +} + +template +constexpr T InitDefaultValue(EmptyBraces) { + return T{}; +} + +template ::value, int>::type = + ((void)GenT{}, 0)> +constexpr FlagDefaultArg DefaultArg(int) { + return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; +} + +template +constexpr FlagDefaultArg DefaultArg(char) { + return {FlagDefaultSrc(&GenT::Gen), FlagDefaultKind::kGenFunc}; +} + +/////////////////////////////////////////////////////////////////////////////// +// Flag current value auxiliary structs. + +constexpr int64_t UninitializedFlagValue() { + return static_cast(0xababababababababll); +} + +template +using FlagUseValueAndInitBitStorage = std::integral_constant< + bool, absl::type_traits_internal::is_trivially_copyable::value && + std::is_default_constructible::value && (sizeof(T) < 8)>; + +template +using FlagUseOneWordStorage = std::integral_constant< + bool, absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) <= 8)>; + +template +using FlagUseSequenceLockStorage = std::integral_constant< + bool, absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) > 8)>; + +enum class FlagValueStorageKind : uint8_t { + kValueAndInitBit = 0, + kOneWordAtomic = 1, + kSequenceLocked = 2, + kAlignedBuffer = 3, +}; + +template +static constexpr FlagValueStorageKind StorageKind() { + return FlagUseValueAndInitBitStorage::value + ? FlagValueStorageKind::kValueAndInitBit + : FlagUseOneWordStorage::value + ? FlagValueStorageKind::kOneWordAtomic + : FlagUseSequenceLockStorage::value + ? FlagValueStorageKind::kSequenceLocked + : FlagValueStorageKind::kAlignedBuffer; +} + +struct FlagOneWordValue { + constexpr explicit FlagOneWordValue(int64_t v) : value(v) {} + std::atomic value; +}; + +template +struct alignas(8) FlagValueAndInitBit { + T value; + // Use an int instead of a bool to guarantee that a non-zero value has + // a bit set. 
+ uint8_t init; +}; + +template ()> +struct FlagValue; + +template +struct FlagValue : FlagOneWordValue { + constexpr FlagValue() : FlagOneWordValue(0) {} + bool Get(const SequenceLock&, T& dst) const { + int64_t storage = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(storage == 0)) { + return false; + } + dst = absl::bit_cast>(storage).value; + return true; + } +}; + +template +struct FlagValue : FlagOneWordValue { + constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {} + bool Get(const SequenceLock&, T& dst) const { + int64_t one_word_val = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) { + return false; + } + std::memcpy(&dst, static_cast(&one_word_val), sizeof(T)); + return true; + } +}; + +template +struct FlagValue { + bool Get(const SequenceLock& lock, T& dst) const { + return lock.TryRead(&dst, value_words, sizeof(T)); + } + + static constexpr int kNumWords = + flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t); + + alignas(T) alignas( + std::atomic) std::atomic value_words[kNumWords]; +}; + +template +struct FlagValue { + bool Get(const SequenceLock&, T&) const { return false; } + + alignas(T) char value[sizeof(T)]; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Flag callback auxiliary structs. + +// Signature for the mutation callback used by watched Flags +// The callback is noexcept. +// TODO(rogeeff): add noexcept after C++17 support is added. +using FlagCallbackFunc = void (*)(); + +struct FlagCallback { + FlagCallbackFunc func; + absl::Mutex guard; // Guard for concurrent callback invocations. +}; + +/////////////////////////////////////////////////////////////////////////////// +// Flag implementation, which does not depend on flag value type. +// The class encapsulates the Flag's data and access to it. + +struct DynValueDeleter { + explicit DynValueDeleter(FlagOpFn op_arg = nullptr); + void operator()(void* ptr) const; + + FlagOpFn op; +}; + +class FlagState; + +class FlagImpl final : public CommandLineFlag { + public: + constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, + FlagHelpArg help, FlagValueStorageKind value_kind, + FlagDefaultArg default_arg) + : name_(name), + filename_(filename), + op_(op), + help_(help.source), + help_source_kind_(static_cast(help.kind)), + value_storage_kind_(static_cast(value_kind)), + def_kind_(static_cast(default_arg.kind)), + modified_(false), + on_command_line_(false), + callback_(nullptr), + default_value_(default_arg.source), + data_guard_{} {} + + // Constant access methods + int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + *value = ReadOneBool(); + } + template () == + FlagValueStorageKind::kOneWordAtomic, + int> = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + int64_t v = ReadOneWord(); + std::memcpy(value, static_cast(&v), sizeof(T)); + } + template () == + FlagValueStorageKind::kValueAndInitBit, + int>::type = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + *value = absl::bit_cast>(ReadOneWord()).value; + } + + // Mutating access methods + void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Interfaces to operate on callbacks. 
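+  // For example (editor's sketch; `NotifyFooWatcher` is illustrative): a
+  // callback installed at flag definition time via
+  //   ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher);
+  // reaches SetCallback() below through FlagRegistrar::OnUpdate and is then
+  // invoked, serialized by FlagCallback::guard, after every mutation.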
+  void SetCallback(const FlagCallbackFunc mutation_callback)
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+  void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+
+  // Used in read/write operations to validate that source/target has the
+  // correct type. For example, if a flag is declared as absl::Flag<int>
+  // FLAGS_foo, a call to absl::GetFlag(FLAGS_foo) validates that the type of
+  // FLAGS_foo is indeed int. To do that we pass the "assumed" type id (which
+  // is deduced from type int) as an argument `type_id`, which in turn is
+  // validated against the type id stored in the flag object by the flag
+  // definition statement.
+  void AssertValidType(FlagFastTypeId type_id,
+                       const std::type_info* (*gen_rtti)()) const;
+
+ private:
+  template <typename T>
+  friend class Flag;
+  friend class FlagState;
+
+  // Ensures that `data_guard_` is initialized and returns it.
+  absl::Mutex* DataGuard() const
+      ABSL_LOCK_RETURNED(reinterpret_cast<absl::Mutex*>(data_guard_));
+  // Returns a heap-allocated value of type T initialized with the default
+  // value.
+  std::unique_ptr<void, DynValueDeleter> MakeInitValue() const
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+  // Flag initialization called via absl::call_once.
+  void Init();
+
+  // Offset value access methods. One per storage kind. These methods do not
+  // respect const correctness, so be very careful using them.
+
+  // This is a shared helper routine which encapsulates most of the magic.
+  // Since it is only used inside the three routines below, which are defined
+  // in flag.cc, we can define it in that file as well.
+  template <typename StorageT>
+  StorageT* OffsetValue() const;
+  // This is an accessor for a value stored in an aligned buffer storage
+  // used for non-trivially-copyable data types.
+  // Returns a mutable pointer to the start of a buffer.
+  void* AlignedBufferValue() const;
+
+  // The same as above, but used for sequencelock-protected storage.
+  std::atomic<uint64_t>* AtomicBufferValue() const;
+
+  // This is an accessor for a value stored as one word atomic. Returns a
+  // mutable reference to an atomic value.
+  std::atomic<int64_t>& OneWordValue() const;
+
+  // Attempts to parse the supplied `value` string. If parsing is successful,
+  // returns the new value. Otherwise returns nullptr.
+  std::unique_ptr<void, DynValueDeleter> TryParse(absl::string_view value,
+                                                  std::string& err) const
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+  // Stores the flag value based on the pointer to the source.
+  void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+
+  // Copies the flag data, protected by `seq_lock_`, into `dst`.
+  //
+  // REQUIRES: ValueStorageKind() == kSequenceLocked.
+  void ReadSequenceLockedData(void* dst) const
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
+  FlagHelpKind HelpSourceKind() const {
+    return static_cast<FlagHelpKind>(help_source_kind_);
+  }
+  FlagValueStorageKind ValueStorageKind() const {
+    return static_cast<FlagValueStorageKind>(value_storage_kind_);
+  }
+  FlagDefaultKind DefaultKind() const
+      ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) {
+    return static_cast<FlagDefaultKind>(def_kind_);
+  }
+
+  // CommandLineFlag interface implementation
+  absl::string_view Name() const override;
+  std::string Filename() const override;
+  std::string Help() const override;
+  FlagFastTypeId TypeId() const override;
+  bool IsSpecifiedOnCommandLine() const override
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+  std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard());
+  std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard());
+  bool ValidateInputValue(absl::string_view value) const override
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+  void CheckDefaultValueParsingRoundtrip() const override
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
+  int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard());
+
+  // Interfaces to save and restore flags to/from persistent state.
+  // Returns the current flag state or nullptr if the flag does not support
+  // saving and restoring a state.
+  std::unique_ptr<FlagStateInterface> SaveState() override
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
+  // Restores the flag state to the supplied state object. If there is
+  // nothing to restore, returns false. Otherwise returns true.
+  bool RestoreState(const FlagState& flag_state)
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
+  bool ParseFrom(absl::string_view value, FlagSettingMode set_mode,
+                 ValueSource source, std::string& error) override
+      ABSL_LOCKS_EXCLUDED(*DataGuard());
+
+  // Immutable flag's state.
+
+  // The flag's name, passed to ABSL_FLAG as the second arg.
+  const char* const name_;
+  // The file name where ABSL_FLAG resides.
+  const char* const filename_;
+  // Type-specific operations "vtable".
+  const FlagOpFn op_;
+  // Help message literal or function to generate it.
+  const FlagHelpMsg help_;
+  // Indicates if the help message was supplied as a literal or generator func.
+  const uint8_t help_source_kind_ : 1;
+  // Kind of storage this flag is using for the flag's value.
+  const uint8_t value_storage_kind_ : 2;
+
+  uint8_t : 0;  // The bytes containing the const bitfields must not be
+                // shared with bytes containing the mutable bitfields.
+
+  // Mutable flag's state (guarded by `data_guard_`).
+
+  // def_kind_ is not guarded by DataGuard() since it is accessed in Init
+  // without locks.
+  uint8_t def_kind_ : 2;
+  // Has this flag's value been modified?
+  bool modified_ : 1 ABSL_GUARDED_BY(*DataGuard());
+  // Has this flag been specified on the command line?
+  bool on_command_line_ : 1 ABSL_GUARDED_BY(*DataGuard());
+
+  // Unique tag for absl::call_once call to initialize this flag.
+  absl::once_flag init_control_;
+
+  // Sequence lock / mutation counter.
+  flags_internal::SequenceLock seq_lock_;
+
+  // Optional flag's callback and absl::Mutex to guard the invocations.
+  FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard());
+  // Either a pointer to the function generating the default value based on the
+  // value specified in ABSL_FLAG or a pointer to the dynamically set default
+  // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish
+  // these two cases.
+  FlagDefaultSrc default_value_;
+
+  // This is reserved space for an absl::Mutex to guard flag data. It will be
+  // initialized in FlagImpl::Init via placement new.
+  // We can't use "absl::Mutex data_guard_", since this class is not literal.
+  // We do not want to use "absl::Mutex* data_guard_", since this would require
+  // a heap allocation during initialization, which both slows program startup
+  // and can fail. Using reserved space + placement new allows us to avoid both
+  // problems.
+  alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)];
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// The Flag object parameterized by the flag's value type. This class
+// implements the flag reflection handle interface.
+
+template <typename T>
+class Flag {
+ public:
+  constexpr Flag(const char* name, const char* filename, FlagHelpArg help,
+                 const FlagDefaultArg default_arg)
+      : impl_(name, filename, &FlagOps<T>, help,
+              flags_internal::StorageKind<T>(), default_arg),
+        value_() {}
+
+  // CommandLineFlag interface
+  absl::string_view Name() const { return impl_.Name(); }
+  std::string Filename() const { return impl_.Filename(); }
+  std::string Help() const { return impl_.Help(); }
+  // Do not use. To be removed.
+  bool IsSpecifiedOnCommandLine() const {
+    return impl_.IsSpecifiedOnCommandLine();
+  }
+  std::string DefaultValue() const { return impl_.DefaultValue(); }
+  std::string CurrentValue() const { return impl_.CurrentValue(); }
+
+ private:
+  template <typename, bool>
+  friend class FlagRegistrar;
+  friend class FlagImplPeer;
+
+  T Get() const {
+    // See implementation notes in CommandLineFlag::Get().
+    union U {
+      T value;
+      U() {}
+      ~U() { value.~T(); }
+    };
+    U u;
+
+#if !defined(NDEBUG)
+    impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+#endif
+
+    if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) {
+      impl_.Read(&u.value);
+    }
+    return std::move(u.value);
+  }
+  void Set(const T& v) {
+    impl_.AssertValidType(base_internal::FastTypeId<T>(), &GenRuntimeTypeId<T>);
+    impl_.Write(&v);
+  }
+
+  // Access to the reflection.
+  const CommandLineFlag& Reflect() const { return impl_; }
+
+  // Flag's data
+  // The implementation depends on the value_ field being placed exactly after
+  // the impl_ field, so that impl_ can figure out the offset to the value and
+  // access it.
+  FlagImpl impl_;
+  FlagValue<T> value_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Trampoline for friend access
+
+class FlagImplPeer {
+ public:
+  template <typename T, typename FlagType>
+  static T InvokeGet(const FlagType& flag) {
+    return flag.Get();
+  }
+  template <typename FlagType, typename T>
+  static void InvokeSet(FlagType& flag, const T& v) {
+    flag.Set(v);
+  }
+  template <typename FlagType>
+  static const CommandLineFlag& InvokeReflect(const FlagType& f) {
+    return f.Reflect();
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation of Flag value specific operations routine.
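+//
+// For orientation (editor's sketch; assumes `op == &FlagOps<T>` and `src`
+// points at a live T): the type-erased helpers declared earlier all funnel
+// into this one function:
+//
+//   void* copy = flags_internal::Clone(op, src);  // kAlloc + kCopyConstruct
+//   std::string err;
+//   if (flags_internal::Parse(op, "42", copy, &err)) {        // kParse
+//     std::string text = flags_internal::Unparse(op, copy);   // kUnparse
+//   }
+//   flags_internal::Delete(op, copy);             // kDelete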
+template +void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { + switch (op) { + case FlagOp::kAlloc: { + std::allocator alloc; + return std::allocator_traits>::allocate(alloc, 1); + } + case FlagOp::kDelete: { + T* p = static_cast(v2); + p->~T(); + std::allocator alloc; + std::allocator_traits>::deallocate(alloc, p, 1); + return nullptr; + } + case FlagOp::kCopy: + *static_cast(v2) = *static_cast(v1); + return nullptr; + case FlagOp::kCopyConstruct: + new (v2) T(*static_cast(v1)); + return nullptr; + case FlagOp::kSizeof: + return reinterpret_cast(static_cast(sizeof(T))); + case FlagOp::kFastTypeId: + return const_cast(base_internal::FastTypeId()); + case FlagOp::kRuntimeTypeId: + return const_cast(GenRuntimeTypeId()); + case FlagOp::kParse: { + // Initialize the temporary instance of type T based on current value in + // destination (which is going to be flag's default value). + T temp(*static_cast(v2)); + if (!absl::ParseFlag(*static_cast(v1), &temp, + static_cast(v3))) { + return nullptr; + } + *static_cast(v2) = std::move(temp); + return v2; + } + case FlagOp::kUnparse: + *static_cast(v2) = + absl::UnparseFlag(*static_cast(v1)); + return nullptr; + case FlagOp::kValueOffset: { + // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the + // offset of the data. + size_t round_to = alignof(FlagValue); + size_t offset = + (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; + return reinterpret_cast(offset); + } + } + return nullptr; +} + +/////////////////////////////////////////////////////////////////////////////// +// This class facilitates Flag object registration and tail expression-based +// flag definition, for example: +// ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher); +struct FlagRegistrarEmpty {}; +template +class FlagRegistrar { + public: + explicit FlagRegistrar(Flag& flag, const char* filename) : flag_(flag) { + if (do_register) + flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); + } + + FlagRegistrar OnUpdate(FlagCallbackFunc cb) && { + flag_.impl_.SetCallback(cb); + return *this; + } + + // Make the registrar "die" gracefully as an empty struct on a line where + // registration happens. Registrar objects are intended to live only as + // temporary. + operator FlagRegistrarEmpty() const { return {}; } // NOLINT + + private: + Flag& flag_; // Flag being registered (not owned). +}; + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc b/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc new file mode 100644 index 0000000..c31bd27 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc @@ -0,0 +1,116 @@ +// +// Copyright 2021 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Do not include this file directly. +// Include absl/flags/flag.h instead. 
+
+// MSVC debug builds do not implement initialization with constexpr
+// constructors correctly. To work around this we add a level of indirection,
+// so that the class `absl::Flag<T>` contains an `internal::Flag<T>*` (instead
+// of being an alias to that class) and dynamically allocates an instance when
+// necessary. We also forward all calls to internal::Flag methods via
+// trampoline methods. In this setup the `absl::Flag<T>` class does not have
+// constructors or virtual methods, all the data members are public, and thus
+// MSVC is able to initialize it at link time. To deal with multiple threads
+// accessing the flag for the first time concurrently we use an atomic boolean
+// indicating whether the flag object is initialized. We also employ the
+// double-checked locking pattern, where the second level of protection is a
+// global Mutex, so that if two threads attempt to construct the flag
+// concurrently only one wins.
+//
+// This solution is based on a recommendation here:
+// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html?childToView=648454#comment-648454
+
+namespace flags_internal {
+absl::Mutex* GetGlobalConstructionGuard();
+}  // namespace flags_internal
+
+// Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
+// See https://abseil.io/docs/cpp/guides/flags
+template <typename T>
+class Flag {
+ public:
+  // No constructor and destructor to ensure this is an aggregate type.
+  // Visual Studio 2015 still requires the constructor for the class to be
+  // constexpr initializable.
+#if _MSC_VER <= 1900
+  constexpr Flag(const char* name, const char* filename,
+                 const flags_internal::HelpGenFunc help_gen,
+                 const flags_internal::FlagDfltGenFunc default_value_gen)
+      : name_(name),
+        filename_(filename),
+        help_gen_(help_gen),
+        default_value_gen_(default_value_gen),
+        inited_(false),
+        impl_(nullptr) {}
+#endif
+
+  flags_internal::Flag<T>& GetImpl() const {
+    if (!inited_.load(std::memory_order_acquire)) {
+      absl::MutexLock l(flags_internal::GetGlobalConstructionGuard());
+
+      if (inited_.load(std::memory_order_acquire)) {
+        return *impl_;
+      }
+
+      impl_ = new flags_internal::Flag<T>(
+          name_, filename_,
+          {flags_internal::FlagHelpMsg(help_gen_),
+           flags_internal::FlagHelpKind::kGenFunc},
+          {flags_internal::FlagDefaultSrc(default_value_gen_),
+           flags_internal::FlagDefaultKind::kGenFunc});
+      inited_.store(true, std::memory_order_release);
+    }
+
+    return *impl_;
+  }
+
+  // Public methods of `absl::Flag<T>` are NOT part of the Abseil Flags API.
+ // See https://abseil.io/docs/cpp/guides/flags + bool IsRetired() const { return GetImpl().IsRetired(); } + absl::string_view Name() const { return GetImpl().Name(); } + std::string Help() const { return GetImpl().Help(); } + bool IsModified() const { return GetImpl().IsModified(); } + bool IsSpecifiedOnCommandLine() const { + return GetImpl().IsSpecifiedOnCommandLine(); + } + std::string Filename() const { return GetImpl().Filename(); } + std::string DefaultValue() const { return GetImpl().DefaultValue(); } + std::string CurrentValue() const { return GetImpl().CurrentValue(); } + template + inline bool IsOfType() const { + return GetImpl().template IsOfType(); + } + T Get() const { + return flags_internal::FlagImplPeer::InvokeGet(GetImpl()); + } + void Set(const T& v) { + flags_internal::FlagImplPeer::InvokeSet(GetImpl(), v); + } + void InvokeCallback() { GetImpl().InvokeCallback(); } + + const CommandLineFlag& Reflect() const { + return flags_internal::FlagImplPeer::InvokeReflect(GetImpl()); + } + + // The data members are logically private, but they need to be public for + // this to be an aggregate type. + const char* name_; + const char* filename_; + const flags_internal::HelpGenFunc help_gen_; + const flags_internal::FlagDfltGenFunc default_value_gen_; + + mutable std::atomic inited_; + mutable flags_internal::Flag* impl_; +}; diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/parse.h b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h new file mode 100644 index 0000000..de706c8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h @@ -0,0 +1,59 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PARSE_H_ +#define ABSL_FLAGS_INTERNAL_PARSE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/declare.h" +#include "absl/strings/string_view.h" + +ABSL_DECLARE_FLAG(std::vector, flagfile); +ABSL_DECLARE_FLAG(std::vector, fromenv); +ABSL_DECLARE_FLAG(std::vector, tryfromenv); +ABSL_DECLARE_FLAG(std::vector, undefok); + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +enum class ArgvListAction { kRemoveParsedArgs, kKeepParsedArgs }; +enum class UsageFlagsAction { kHandleUsage, kIgnoreUsage }; +enum class OnUndefinedFlag { + kIgnoreUndefined, + kReportUndefined, + kAbortIfUndefined +}; + +std::vector ParseCommandLineImpl(int argc, char* argv[], + ArgvListAction arg_list_act, + UsageFlagsAction usage_flag_act, + OnUndefinedFlag on_undef_flag); + +// -------------------------------------------------------------------- +// Inspect original command line + +// Returns true if flag with specified name was either present on the original +// command line or specified in flag file present on the original command line. 
+bool WasPresentOnCommandLine(absl::string_view flag_name); + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h new file mode 100644 index 0000000..a6594d3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h @@ -0,0 +1,62 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ +#define ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// A portable interface that returns the basename of the filename passed as an +// argument. It is similar to basename(3) +// . +// For example: +// flags_internal::Basename("a/b/prog/file.cc") +// returns "file.cc" +// flags_internal::Basename("file.cc") +// returns "file.cc" +inline absl::string_view Basename(absl::string_view filename) { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos + ? filename + : filename.substr(last_slash_pos + 1); +} + +// A portable interface that returns the directory name of the filename +// passed as an argument, including the trailing slash. +// Returns the empty string if a slash is not found in the input file name. +// For example: +// flags_internal::Package("a/b/prog/file.cc") +// returns "a/b/prog/" +// flags_internal::Package("file.cc") +// returns "" +inline absl::string_view Package(absl::string_view filename) { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos + ? absl::string_view() + : filename.substr(0, last_slash_pos + 1); +} + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h new file mode 100644 index 0000000..c64435c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h @@ -0,0 +1,61 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ +#define ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/strings/string_view.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// This class serves as a trampoline to access private methods of +// CommandLineFlag. This class is intended for use exclusively internally inside +// of the Abseil Flags implementation. +class PrivateHandleAccessor { + public: + // Access to CommandLineFlag::TypeId. + static FlagFastTypeId TypeId(const CommandLineFlag& flag); + + // Access to CommandLineFlag::SaveState. + static std::unique_ptr SaveState(CommandLineFlag& flag); + + // Access to CommandLineFlag::IsSpecifiedOnCommandLine. + static bool IsSpecifiedOnCommandLine(const CommandLineFlag& flag); + + // Access to CommandLineFlag::ValidateInputValue. + static bool ValidateInputValue(const CommandLineFlag& flag, + absl::string_view value); + + // Access to CommandLineFlag::CheckDefaultValueParsingRoundtrip. + static void CheckDefaultValueParsingRoundtrip(const CommandLineFlag& flag); + + static bool ParseFrom(CommandLineFlag& flag, absl::string_view value, + flags_internal::FlagSettingMode set_mode, + flags_internal::ValueSource source, std::string& error); +}; + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h new file mode 100644 index 0000000..b99b94f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h @@ -0,0 +1,50 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ +#define ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ + +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Program name + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// Returns program invocation name or "UNKNOWN" if `SetProgramInvocationName()` +// is never called. At the moment this is always set to argv[0] as part of +// library initialization. +std::string ProgramInvocationName(); + +// Returns base name for program invocation name. For example, if +// ProgramInvocationName() == "a/b/mybinary" +// then +// ShortProgramInvocationName() == "mybinary" +std::string ShortProgramInvocationName(); + +// Sets program invocation name to a new value. Should only be called once +// during program initialization, before any threads are spawned. 
+void SetProgramInvocationName(absl::string_view prog_name_str); + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/registry.h b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h new file mode 100644 index 0000000..4b68c85 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h @@ -0,0 +1,97 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_REGISTRY_H_ +#define ABSL_FLAGS_INTERNAL_REGISTRY_H_ + +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Global flags registry API. + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// Executes specified visitor for each non-retired flag in the registry. While +// callback are executed, the registry is locked and can't be changed. +void ForEachFlag(std::function visitor); + +//----------------------------------------------------------------------------- + +bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename); + +void FinalizeRegistry(); + +//----------------------------------------------------------------------------- +// Retired registrations: +// +// Retired flag registrations are treated specially. A 'retired' flag is +// provided only for compatibility with automated invocations that still +// name it. A 'retired' flag: +// - is not bound to a C++ FLAGS_ reference. +// - has a type and a value, but that value is intentionally inaccessible. +// - does not appear in --help messages. +// - is fully supported by _all_ flag parsing routines. +// - consumes args normally, and complains about type mismatches in its +// argument. +// - emits a complaint but does not die (e.g. LOG(ERROR)) if it is +// accessed by name through the flags API for parsing or otherwise. +// +// The registrations for a flag happen in an unspecified order as the +// initializers for the namespace-scope objects of a program are run. +// Any number of weak registrations for a flag can weakly define the flag. +// One non-weak registration will upgrade the flag from weak to non-weak. +// Further weak registrations of a non-weak flag are ignored. +// +// This mechanism is designed to support moving dead flags into a +// 'graveyard' library. An example migration: +// +// 0: Remove references to this FLAGS_flagname in the C++ codebase. +// 1: Register as 'retired' in old_lib. +// 2: Make old_lib depend on graveyard. +// 3: Add a redundant 'retired' registration to graveyard. +// 4: Remove the old_lib 'retired' registration. +// 5: Eventually delete the graveyard registration entirely. +// + +// Retire flag with name "name" and type indicated by ops. 
+void Retire(const char* name, FlagFastTypeId type_id, char* buf); + +constexpr size_t kRetiredFlagObjSize = 3 * sizeof(void*); +constexpr size_t kRetiredFlagObjAlignment = alignof(void*); + +// Registered a retired flag with name 'flag_name' and type 'T'. +template +class RetiredFlag { + public: + void Retire(const char* flag_name) { + flags_internal::Retire(flag_name, base_internal::FastTypeId(), buf_); + } + + private: + alignas(kRetiredFlagObjAlignment) char buf_[kRetiredFlagObjSize]; +}; + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_REGISTRY_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h new file mode 100644 index 0000000..36318ab --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h @@ -0,0 +1,187 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ +#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ + +#include +#include + +#include +#include +#include + +#include "absl/base/optimization.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// Align 'x' up to the nearest 'align' bytes. +inline constexpr size_t AlignUp(size_t x, size_t align) { + return align * ((x + align - 1) / align); +} + +// A SequenceLock implements lock-free reads. A sequence counter is incremented +// before and after each write, and readers access the counter before and after +// accessing the protected data. If the counter is verified to not change during +// the access, and the sequence counter value was even, then the reader knows +// that the read was race-free and valid. Otherwise, the reader must fall back +// to a Mutex-based code path. +// +// This particular SequenceLock starts in an "uninitialized" state in which +// TryRead() returns false. It must be enabled by calling MarkInitialized(). +// This serves as a marker that the associated flag value has not yet been +// initialized and a slow path needs to be taken. +// +// The memory reads and writes protected by this lock must use the provided +// `TryRead()` and `Write()` functions. These functions behave similarly to +// `memcpy()`, with one oddity: the protected data must be an array of +// `std::atomic`. This is to comply with the C++ standard, which +// considers data races on non-atomic objects to be undefined behavior. See "Can +// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J. +// Boehm for more details. +// +// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf +class SequenceLock { + public: + constexpr SequenceLock() : lock_(kUninitialized) {} + + // Mark that this lock is ready for use. 
+  void MarkInitialized() {
+    assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
+    lock_.store(0, std::memory_order_release);
+  }
+
+  // Copy "size" bytes of data from "src" to "dst", protected as a read-side
+  // critical section of the sequence lock.
+  //
+  // Unlike traditional sequence lock implementations which loop until getting
+  // a clean read, this implementation returns false in the case of concurrent
+  // calls to `Write`. In such a case, the caller should fall back to a
+  // locking-based slow path.
+  //
+  // Returns false if the sequence lock was not yet marked as initialized.
+  //
+  // NOTE: If this returns false, "dst" may be overwritten with undefined
+  // (potentially uninitialized) data.
+  bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
+    // The acquire barrier ensures that no loads done by the copy below are
+    // reordered above the first load of the sequence counter.
+    int64_t seq_before = lock_.load(std::memory_order_acquire);
+    if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
+    RelaxedCopyFromAtomic(dst, src, size);
+    // Another acquire fence ensures that the load of 'lock_' below is
+    // strictly ordered after the RelaxedCopyFromAtomic call above.
+    std::atomic_thread_fence(std::memory_order_acquire);
+    int64_t seq_after = lock_.load(std::memory_order_relaxed);
+    return ABSL_PREDICT_TRUE(seq_before == seq_after);
+  }
+
+  // Copy "size" bytes from "src" to "dst" as a write-side critical section
+  // of the sequence lock. Any concurrent readers will be forced to retry
+  // until they get a read that does not conflict with this write.
+  //
+  // This call must be externally synchronized against other calls to Write,
+  // but may proceed concurrently with reads.
+  void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
+    // We can use relaxed instructions to increment the counter since we
+    // are externally synchronized. The std::atomic_thread_fence below
+    // ensures that the counter updates don't get interleaved with the
+    // copy to the data.
+    int64_t orig_seq = lock_.load(std::memory_order_relaxed);
+    assert((orig_seq & 1) == 0);  // Must be initially unlocked.
+    lock_.store(orig_seq + 1, std::memory_order_relaxed);
+
+    // We put a release fence between the update to lock_ and the writes to
+    // shared data. Thus all stores to the shared data are effectively release
+    // operations, and the update to lock_ above cannot be re-ordered past any
+    // of them. Note that this barrier is not for the counter store above; a
+    // release barrier for that store would be before it, not after.
+    std::atomic_thread_fence(std::memory_order_release);
+    RelaxedCopyToAtomic(dst, src, size);
+    // "Release" semantics ensure that none of the writes done by
+    // RelaxedCopyToAtomic() can be reordered after the following modification.
+    lock_.store(orig_seq + 2, std::memory_order_release);
+  }
+
+  // Return the number of times that Write() has been called.
+  //
+  // REQUIRES: This must be externally synchronized against concurrent calls to
+  // `Write()` or `IncrementModificationCount()`.
+  // REQUIRES: `MarkInitialized()` must have been previously called.
+  int64_t ModificationCount() const {
+    int64_t val = lock_.load(std::memory_order_relaxed);
+    assert(val != kUninitialized && (val & 1) == 0);
+    return val / 2;
+  }
+
+  // REQUIRES: This must be externally synchronized against concurrent calls to
+  // `Write()` or `ModificationCount()`.
+  // REQUIRES: `MarkInitialized()` must have been previously called.
+ void IncrementModificationCount() { + int64_t val = lock_.load(std::memory_order_relaxed); + assert(val != kUninitialized); + lock_.store(val + 2, std::memory_order_relaxed); + } + + private: + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. + static void RelaxedCopyFromAtomic(void* dst, const std::atomic* src, + size_t size) { + char* dst_byte = static_cast(dst); + while (size >= sizeof(uint64_t)) { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, sizeof(word)); + dst_byte += sizeof(word); + src++; + size -= sizeof(word); + } + if (size > 0) { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, size); + } + } + + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. + static void RelaxedCopyToAtomic(std::atomic* dst, const void* src, + size_t size) { + const char* src_byte = static_cast(src); + while (size >= sizeof(uint64_t)) { + uint64_t word; + std::memcpy(&word, src_byte, sizeof(word)); + dst->store(word, std::memory_order_relaxed); + src_byte += sizeof(word); + dst++; + size -= sizeof(word); + } + if (size > 0) { + uint64_t word = 0; + std::memcpy(&word, src_byte, size); + dst->store(word, std::memory_order_relaxed); + } + } + + static constexpr int64_t kUninitialized = -1; + std::atomic lock_; +}; + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/usage.h b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h new file mode 100644 index 0000000..c0bcac5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h @@ -0,0 +1,104 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_INTERNAL_USAGE_H_ +#define ABSL_FLAGS_INTERNAL_USAGE_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/declare.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Usage reporting interfaces + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { + +// The format to report the help messages in. +enum class HelpFormat { + kHumanReadable, +}; + +// Streams the help message describing `flag` to `out`. +// The default value for `flag` is included in the output. +void FlagHelp(std::ostream& out, const CommandLineFlag& flag, + HelpFormat format = HelpFormat::kHumanReadable); + +// Produces the help messages for all flags matching the filter. A flag matches +// the filter if it is defined in a file with a filename which includes +// filter string as a substring. You can use '/' and '.' to restrict the +// matching to a specific file names. For example: +// FlagsHelp(out, "/path/to/file."); +// restricts help to only flags which resides in files named like: +// .../path/to/file. +// for any extension 'ext'. 
If the filter is empty this function produces help
+// messages for all flags.
+void FlagsHelp(std::ostream& out, absl::string_view filter,
+               HelpFormat format, absl::string_view program_usage_message);
+
+// --------------------------------------------------------------------
+
+// If any of the 'usage' related command line flags (listed at the bottom of
+// this file) has been set, this routine produces the corresponding help
+// message on the specified output stream and returns:
+//  0 - if "version" or "only_check_flags" flags were set and handled.
+//  1 - if some other 'usage' related flag was set and handled.
+// -1 - if no usage flags were set on the command line.
+// Non-negative return values are expected to be used as the exit code for a
+// binary.
+int HandleUsageFlags(std::ostream& out,
+                     absl::string_view program_usage_message);
+
+// --------------------------------------------------------------------
+// Globals representing usage reporting flags
+
+enum class HelpMode {
+  kNone,
+  kImportant,
+  kShort,
+  kFull,
+  kPackage,
+  kMatch,
+  kVersion,
+  kOnlyCheckArgs
+};
+
+// Returns the substring used to filter help output (--help=substr argument).
+std::string GetFlagsHelpMatchSubstr();
+// Returns the requested help mode.
+HelpMode GetFlagsHelpMode();
+// Returns the requested help format.
+HelpFormat GetFlagsHelpFormat();
+
+// These are the corresponding setters for the attributes above.
+void SetFlagsHelpMatchSubstr(absl::string_view);
+void SetFlagsHelpMode(HelpMode);
+void SetFlagsHelpFormat(HelpFormat);
+
+// Deduces usage flags from the input argument in the form --name=value or
+// --name. The argument is already split into name and value before we call
+// this function.
+bool DeduceUsageFlags(absl::string_view name, absl::string_view value);
+
+}  // namespace flags_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_FLAGS_INTERNAL_USAGE_H_
diff --git a/CAPI/cpp/grpc/include/absl/flags/marshalling.h b/CAPI/cpp/grpc/include/absl/flags/marshalling.h
new file mode 100644
index 0000000..b1e2ffa
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/flags/marshalling.h
@@ -0,0 +1,356 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: marshalling.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for extending Abseil flag support to
+// custom types, and defines the set of overloads for fundamental types.
+//
+// Out of the box, the Abseil flags library supports the following types:
+//
+// * `bool`
+// * `int16_t`
+// * `uint16_t`
+// * `int32_t`
+// * `uint32_t`
+// * `int64_t`
+// * `uint64_t`
+// * `float`
+// * `double`
+// * `std::string`
+// * `std::vector<std::string>`
+// * `std::optional<T>`
+// * `absl::LogSeverity` (provided natively for layering reasons)
+//
+// Note that support for integral types is implemented using overloads for
+// variable-width fundamental types (`short`, `int`, `long`, etc.). However,
+// you should prefer the fixed-width integral types (`int32_t`, `uint64_t`,
+// etc.) we've noted above within flag definitions.
+//
+// In addition, several Abseil libraries provide their own custom support for
+// Abseil flags. Documentation for these formats is provided in the type's
+// `AbslParseFlag()` definition.
+//
+// The Abseil time library provides the following support for civil time
+// values:
+//
+// * `absl::CivilSecond`
+// * `absl::CivilMinute`
+// * `absl::CivilHour`
+// * `absl::CivilDay`
+// * `absl::CivilMonth`
+// * `absl::CivilYear`
+//
+// and also provides support for the following absolute time values:
+//
+// * `absl::Duration`
+// * `absl::Time`
+//
+// Additional support for Abseil types will be noted here as it is added.
+//
+// You can also provide your own custom flags by adding overloads for
+// `AbslParseFlag()` and `AbslUnparseFlag()` to your type definitions. (See
+// below.)
+//
+// -----------------------------------------------------------------------------
+// Optional Flags
+// -----------------------------------------------------------------------------
+//
+// The Abseil flags library supports flags of type `std::optional<T>` where
+// `T` is a type of one of the supported flags. We refer to this flag type as
+// an "optional flag." An optional flag is either "valueless", holding no value
+// of type `T` (indicating that the flag has not been set), or holds a value of
+// type `T`. The valueless state in C++ code is represented by a value of
+// `std::nullopt` for the optional flag.
+//
+// Using `std::nullopt` as an optional flag's default value allows you to check
+// whether such a flag was ever specified on the command line:
+//
+//   if (absl::GetFlag(FLAGS_foo).has_value()) {
+//     // flag was set on command line
+//   } else {
+//     // flag was not passed on command line
+//   }
+//
+// Using an optional flag in this manner avoids common workarounds for
+// indicating such an unset flag (such as using sentinel values to indicate
+// this state).
+//
+// An optional flag also allows a developer to pass a flag in an "unset"
+// valueless state on the command line, allowing the flag to later be set in
+// binary logic. An optional flag's valueless state is indicated by the special
+// notation of passing the value as an empty string through the syntax
+// `--flag=` or `--flag ""`.
+//
+//   $ binary_with_optional --flag_in_unset_state=
+//   $ binary_with_optional --flag_in_unset_state ""
+//
+// Note: as a result of the above syntax requirements, an optional flag cannot
+// be set to a `T` of any value which unparses to the empty string.
+//
+// -----------------------------------------------------------------------------
+// Adding Type Support for Abseil Flags
+// -----------------------------------------------------------------------------
+//
+// To add support for your user-defined type, add overloads of `AbslParseFlag()`
+// and `AbslUnparseFlag()` as free (non-member) functions to your type.
If `T` +// is a class type, these functions can be friend function definitions. These +// overloads must be added to the same namespace where the type is defined, so +// that they can be discovered by Argument-Dependent Lookup (ADL). +// +// Example: +// +// namespace foo { +// +// enum OutputMode { kPlainText, kHtml }; +// +// // AbslParseFlag converts from a string to OutputMode. +// // Must be in same namespace as OutputMode. +// +// // Parses an OutputMode from the command line flag value `text`. Returns +// // `true` and sets `*mode` on success; returns `false` and sets `*error` +// // on failure. +// bool AbslParseFlag(absl::string_view text, +// OutputMode* mode, +// std::string* error) { +// if (text == "plaintext") { +// *mode = kPlainText; +// return true; +// } +// if (text == "html") { +// *mode = kHtml; +// return true; +// } +// *error = "unknown value for enumeration"; +// return false; +// } +// +// // AbslUnparseFlag converts from an OutputMode to a string. +// // Must be in same namespace as OutputMode. +// +// // Returns a textual flag value corresponding to the OutputMode `mode`. +// std::string AbslUnparseFlag(OutputMode mode) { +// switch (mode) { +// case kPlainText: return "plaintext"; +// case kHtml: return "html"; +// } +// return absl::StrCat(mode); +// } +// +// Notice that neither `AbslParseFlag()` nor `AbslUnparseFlag()` are class +// members, but free functions. `AbslParseFlag/AbslUnparseFlag()` overloads +// for a type should only be declared in the same file and namespace as said +// type. The proper `AbslParseFlag/AbslUnparseFlag()` implementations for a +// given type will be discovered via Argument-Dependent Lookup (ADL). +// +// `AbslParseFlag()` may need, in turn, to parse simpler constituent types +// using `absl::ParseFlag()`. For example, a custom struct `MyFlagType` +// consisting of a `std::pair` would add an `AbslParseFlag()` +// overload for its `MyFlagType` like so: +// +// Example: +// +// namespace my_flag_type { +// +// struct MyFlagType { +// std::pair my_flag_data; +// }; +// +// bool AbslParseFlag(absl::string_view text, MyFlagType* flag, +// std::string* err); +// +// std::string AbslUnparseFlag(const MyFlagType&); +// +// // Within the implementation, `AbslParseFlag()` will, in turn invoke +// // `absl::ParseFlag()` on its constituent `int` and `std::string` types +// // (which have built-in Abseil flag support). +// +// bool AbslParseFlag(absl::string_view text, MyFlagType* flag, +// std::string* err) { +// std::pair tokens = +// absl::StrSplit(text, ','); +// if (!absl::ParseFlag(tokens.first, &flag->my_flag_data.first, err)) +// return false; +// if (!absl::ParseFlag(tokens.second, &flag->my_flag_data.second, err)) +// return false; +// return true; +// } +// +// // Similarly, for unparsing, we can simply invoke `absl::UnparseFlag()` on +// // the constituent types. 
+// std::string AbslUnparseFlag(const MyFlagType& flag) { +// return absl::StrCat(absl::UnparseFlag(flag.my_flag_data.first), +// ",", +// absl::UnparseFlag(flag.my_flag_data.second)); +// } +#ifndef ABSL_FLAGS_MARSHALLING_H_ +#define ABSL_FLAGS_MARSHALLING_H_ + +#include "absl/base/config.h" + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +#include +#endif +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Forward declaration to be used inside composable flag parse/unparse +// implementations +template +inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); +template +inline std::string UnparseFlag(const T& v); + +namespace flags_internal { + +// Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. +bool AbslParseFlag(absl::string_view, bool*, std::string*); +bool AbslParseFlag(absl::string_view, short*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, unsigned short*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, int*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, unsigned int*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, long*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, unsigned long*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, long long*, std::string*); // NOLINT +bool AbslParseFlag(absl::string_view, unsigned long long*, // NOLINT + std::string*); +bool AbslParseFlag(absl::string_view, float*, std::string*); +bool AbslParseFlag(absl::string_view, double*, std::string*); +bool AbslParseFlag(absl::string_view, std::string*, std::string*); +bool AbslParseFlag(absl::string_view, std::vector*, std::string*); + +template +bool AbslParseFlag(absl::string_view text, absl::optional* f, + std::string* err) { + if (text.empty()) { + *f = absl::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) return false; + + *f = std::move(value); + return true; +} + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +template +bool AbslParseFlag(absl::string_view text, std::optional* f, + std::string* err) { + if (text.empty()) { + *f = std::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) return false; + + *f = std::move(value); + return true; +} +#endif + +template +bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { + // Comment on next line provides a good compiler error message if T + // does not have AbslParseFlag(absl::string_view, T*, std::string*). + return AbslParseFlag(input, dst, err); // Is T missing AbslParseFlag? +} + +// Strings and std:: containers do not have the same overload resolution +// considerations as fundamental types. Naming these 'AbslUnparseFlag' means we +// can avoid the need for additional specializations of Unparse (below). +std::string AbslUnparseFlag(absl::string_view v); +std::string AbslUnparseFlag(const std::vector&); + +template +std::string AbslUnparseFlag(const absl::optional& f) { + return f.has_value() ? absl::UnparseFlag(*f) : ""; +} + +#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) +template +std::string AbslUnparseFlag(const std::optional& f) { + return f.has_value() ? absl::UnparseFlag(*f) : ""; +} +#endif + +template +std::string Unparse(const T& v) { + // Comment on next line provides a good compiler error message if T does not + // have UnparseFlag. 
+ return AbslUnparseFlag(v); // Is T missing AbslUnparseFlag? +} + +// Overloads for builtin types. +std::string Unparse(bool v); +std::string Unparse(short v); // NOLINT +std::string Unparse(unsigned short v); // NOLINT +std::string Unparse(int v); // NOLINT +std::string Unparse(unsigned int v); // NOLINT +std::string Unparse(long v); // NOLINT +std::string Unparse(unsigned long v); // NOLINT +std::string Unparse(long long v); // NOLINT +std::string Unparse(unsigned long long v); // NOLINT +std::string Unparse(float v); +std::string Unparse(double v); + +} // namespace flags_internal + +// ParseFlag() +// +// Parses a string value into a flag value of type `T`. Do not add overloads of +// this function for your type directly; instead, add an `AbslParseFlag()` +// free function as documented above. +// +// Some implementations of `AbslParseFlag()` for types which consist of other, +// constituent types which already have Abseil flag support, may need to call +// `absl::ParseFlag()` on those consituent string values. (See above.) +template +inline bool ParseFlag(absl::string_view input, T* dst, std::string* error) { + return flags_internal::InvokeParseFlag(input, dst, error); +} + +// UnparseFlag() +// +// Unparses a flag value of type `T` into a string value. Do not add overloads +// of this function for your type directly; instead, add an `AbslUnparseFlag()` +// free function as documented above. +// +// Some implementations of `AbslUnparseFlag()` for types which consist of other, +// constituent types which already have Abseil flag support, may want to call +// `absl::UnparseFlag()` on those constituent types. (See above.) +template +inline std::string UnparseFlag(const T& v) { + return flags_internal::Unparse(v); +} + +// Overloads for `absl::LogSeverity` can't (easily) appear alongside that type's +// definition because it is layered below flags. See proper documentation in +// base/log_severity.h. +enum class LogSeverity : int; +bool AbslParseFlag(absl::string_view, absl::LogSeverity*, std::string*); +std::string AbslUnparseFlag(absl::LogSeverity); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_MARSHALLING_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/parse.h b/CAPI/cpp/grpc/include/absl/flags/parse.h new file mode 100644 index 0000000..929de2c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/parse.h @@ -0,0 +1,60 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: parse.h +// ----------------------------------------------------------------------------- +// +// This file defines the main parsing function for Abseil flags: +// `absl::ParseCommandLine()`. 
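+//
+// A minimal usage sketch (assuming the program's flags are defined elsewhere
+// via `ABSL_FLAG`):
+//
+//   int main(int argc, char* argv[]) {
+//     std::vector<char*> positional = absl::ParseCommandLine(argc, argv);
+//     // positional[0] is the program invocation name; remaining entries are
+//     // the positional arguments.
+//     return 0;
+//   }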
+ +#ifndef ABSL_FLAGS_PARSE_H_ +#define ABSL_FLAGS_PARSE_H_ + +#include + +#include "absl/base/config.h" +#include "absl/flags/internal/parse.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// ParseCommandLine() +// +// Parses the set of command-line arguments passed in the `argc` (argument +// count) and `argv[]` (argument vector) parameters from `main()`, assigning +// values to any defined Abseil flags. (Any arguments passed after the +// flag-terminating delimiter (`--`) are treated as positional arguments and +// ignored.) +// +// Any command-line flags (and arguments to those flags) are parsed into Abseil +// Flag values, if those flags are defined. Any undefined flags will either +// return an error, or be ignored if that flag is designated using `undefok` to +// indicate "undefined is OK." +// +// Any command-line positional arguments not part of any command-line flag (or +// arguments to a flag) are returned in a vector, with the program invocation +// name at position 0 of that vector. (Note that this includes positional +// arguments after the flag-terminating delimiter `--`.) +// +// After all flags and flag arguments are parsed, this function looks for any +// built-in usage flags (e.g. `--help`), and if any were specified, it reports +// help messages and then exits the program. +std::vector ParseCommandLine(int argc, char* argv[]); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/reflection.h b/CAPI/cpp/grpc/include/absl/flags/reflection.h new file mode 100644 index 0000000..e6baf5d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/reflection.h @@ -0,0 +1,90 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: reflection.h +// ----------------------------------------------------------------------------- +// +// This file defines the routines to access and operate on an Abseil Flag's +// reflection handle. + +#ifndef ABSL_FLAGS_REFLECTION_H_ +#define ABSL_FLAGS_REFLECTION_H_ + +#include + +#include "absl/base/config.h" +#include "absl/container/flat_hash_map.h" +#include "absl/flags/commandlineflag.h" +#include "absl/flags/internal/commandlineflag.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace flags_internal { +class FlagSaverImpl; +} // namespace flags_internal + +// FindCommandLineFlag() +// +// Returns the reflection handle of an Abseil flag of the specified name, or +// `nullptr` if not found. This function will emit a warning if the name of a +// 'retired' flag is specified. +absl::CommandLineFlag* FindCommandLineFlag(absl::string_view name); + +// Returns current state of the Flags registry in a form of mapping from flag +// name to a flag reflection handle. 
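+// For example (a usage sketch that only prints the registered flag names):
+//
+//   for (const auto& name_and_flag : absl::GetAllFlags()) {
+//     std::cout << name_and_flag.first << "\n";
+//   }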
+absl::flat_hash_map GetAllFlags(); + +//------------------------------------------------------------------------------ +// FlagSaver +//------------------------------------------------------------------------------ +// +// A FlagSaver object stores the state of flags in the scope where the FlagSaver +// is defined, allowing modification of those flags within that scope and +// automatic restoration of the flags to their previous state upon leaving the +// scope. +// +// A FlagSaver can be used within tests to temporarily change the test +// environment and restore the test case to its previous state. +// +// Example: +// +// void MyFunc() { +// absl::FlagSaver fs; +// ... +// absl::SetFlag(&FLAGS_myFlag, otherValue); +// ... +// } // scope of FlagSaver left, flags return to previous state +// +// This class is thread-safe. + +class FlagSaver { + public: + FlagSaver(); + ~FlagSaver(); + + FlagSaver(const FlagSaver&) = delete; + void operator=(const FlagSaver&) = delete; + + private: + flags_internal::FlagSaverImpl* impl_; +}; + +//----------------------------------------------------------------------------- + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_REFLECTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage.h b/CAPI/cpp/grpc/include/absl/flags/usage.h new file mode 100644 index 0000000..ad12ab7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/usage.h @@ -0,0 +1,43 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_FLAGS_USAGE_H_ +#define ABSL_FLAGS_USAGE_H_ + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// -------------------------------------------------------------------- +// Usage reporting interfaces + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Sets the "usage" message to be used by help reporting routines. +// For example: +// absl::SetProgramUsageMessage( +// absl::StrCat("This program does nothing. Sample usage:\n", argv[0], +// " ")); +// Do not include commandline flags in the usage: we do that for you! +// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit. +void SetProgramUsageMessage(absl::string_view new_usage_message); + +// Returns the usage message set by SetProgramUsageMessage(). +absl::string_view ProgramUsageMessage(); + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FLAGS_USAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage_config.h b/CAPI/cpp/grpc/include/absl/flags/usage_config.h new file mode 100644 index 0000000..ded7030 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/flags/usage_config.h @@ -0,0 +1,135 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: usage_config.h +// ----------------------------------------------------------------------------- +// +// This file defines the main usage reporting configuration interfaces and +// documents Abseil's supported built-in usage flags. If these flags are found +// when parsing a command-line, Abseil will exit the program and display +// appropriate help messages. +#ifndef ABSL_FLAGS_USAGE_CONFIG_H_ +#define ABSL_FLAGS_USAGE_CONFIG_H_ + +#include +#include + +#include "absl/base/config.h" +#include "absl/strings/string_view.h" + +// ----------------------------------------------------------------------------- +// Built-in Usage Flags +// ----------------------------------------------------------------------------- +// +// Abseil supports the following built-in usage flags. When passed, these flags +// exit the program and : +// +// * --help +// Shows help on important flags for this binary +// * --helpfull +// Shows help on all flags +// * --helpshort +// Shows help on only the main module for this program +// * --helppackage +// Shows help on all modules in the main package +// * --version +// Shows the version and build info for this binary and exits +// * --only_check_args +// Exits after checking all flags +// * --helpon +// Shows help on the modules named by this flag value +// * --helpmatch +// Shows help on modules whose name contains the specified substring + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace flags_internal { +using FlagKindFilter = std::function; +} // namespace flags_internal + +// FlagsUsageConfig +// +// This structure contains the collection of callbacks for changing the behavior +// of the usage reporting routines in Abseil Flags. +struct FlagsUsageConfig { + // Returns true if flags defined in the given source code file should be + // reported with --helpshort flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helpshort_flags("path/to/my/code.cc") returns true, invoking the + // program with --helpshort will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helpshort_flags; + + // Returns true if flags defined in the filename should be reported with + // --help flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_help_flags("path/to/my/code.cc") returns true, invoking the + // program with --help will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_help_flags; + + // Returns true if flags defined in the filename should be reported with + // --helppackage flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helppackage_flags("path/to/my/code.cc") returns true, invoking the + // program with --helppackage will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helppackage_flags; + + // Generates string containing program version. 
This is the string reported + // when user specifies --version in a command line. + std::function version_string; + + // Normalizes the filename specific to the build system/filesystem used. This + // routine is used when we report the information about the flag definition + // location. For instance, if your build resides at some location you do not + // want to expose in the usage output, you can trim it to show only relevant + // part. + // For example: + // normalize_filename("/my_company/some_long_path/src/project/file.cc") + // might produce + // "project/file.cc". + std::function normalize_filename; +}; + +// SetFlagsUsageConfig() +// +// Sets the usage reporting configuration callbacks. If any of the callbacks are +// not set in usage_config instance, then the default value of the callback is +// used. +void SetFlagsUsageConfig(FlagsUsageConfig usage_config); + +namespace flags_internal { + +FlagsUsageConfig GetUsageConfig(); + +void ReportUsageError(absl::string_view msg, bool is_fatal); + +} // namespace flags_internal +ABSL_NAMESPACE_END +} // namespace absl + +extern "C" { + +// Additional report of fatal usage error message before we std::exit. Error is +// fatal if is_fatal argument to ReportUsageError is true. +void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( + absl::string_view); + +} // extern "C" + +#endif // ABSL_FLAGS_USAGE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h new file mode 100644 index 0000000..0c5faca --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h @@ -0,0 +1,313 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: any_invocable.h +// ----------------------------------------------------------------------------- +// +// This header file defines an `absl::AnyInvocable` type that assumes ownership +// and wraps an object of an invocable type. (Invocable types adhere to the +// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.) +// +// In general, prefer `absl::AnyInvocable` when you need a type-erased +// function parameter that needs to take ownership of the type. +// +// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function` +// abstraction, but has a slightly different API and is not designed to be a +// drop-in replacement or C++11-compatible backfill of that type. + +#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ + +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/functional/internal/any_invocable.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::AnyInvocable +// +// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that +// assumes ownership of an invocable object. 
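+// (For instance, an `absl::AnyInvocable<int()>` can take ownership of a
+// moved-in lambda or `std::function`, as the examples below illustrate.)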
Unlike `std::function`, an +// `absl::AnyInvocable` is more type-safe and provides the following additional +// benefits: +// +// * Properly adheres to const correctness of the underlying type +// * Is move-only so avoids concurrency problems with copied invocables and +// unnecessary copies in general. +// * Supports reference qualifiers allowing it to perform unique actions (noted +// below). +// +// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation +// may wrap any invocable object with a compatible function signature, e.g. +// having arguments and return types convertible to types matching the +// `absl::AnyInvocable` signature, and also matching any stated reference +// qualifiers, as long as that type is moveable. It therefore provides broad +// type erasure for functional objects. +// +// An `absl::AnyInvocable` is typically used as a type-erased function parameter +// for accepting various functional objects: +// +// // Define a function taking an AnyInvocable parameter. +// void my_func(absl::AnyInvocable f) { +// ... +// }; +// +// // That function can accept any invocable type: +// +// // Accept a function reference. We don't need to move a reference. +// int func1() { return 0; }; +// my_func(func1); +// +// // Accept a lambda. We use std::move here because otherwise my_func would +// // copy the lambda. +// auto lambda = []() { return 0; }; +// my_func(std::move(lambda)); +// +// // Accept a function pointer. We don't need to move a function pointer. +// func2 = &func1; +// my_func(func2); +// +// // Accept an std::function by moving it. Note that the lambda is copyable +// // (satisfying std::function requirements) and moveable (satisfying +// // absl::AnyInvocable requirements). +// std::function func6 = []() { return 0; }; +// my_func(std::move(func6)); +// +// `AnyInvocable` also properly respects `const` qualifiers, reference +// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as +// part of the user-specified function type (e.g. +// `AnyInvocable`). These qualifiers will be applied to +// the `AnyInvocable` object's `operator()`, and the underlying invocable must +// be compatible with those qualifiers. +// +// Comparison of const and non-const function types: +// +// // Store a closure inside of `func` with the function type `int()`. +// // Note that we have made `func` itself `const`. +// const AnyInvocable func = [](){ return 0; }; +// +// func(); // Compile-error: the passed type `int()` isn't `const`. +// +// // Store a closure inside of `const_func` with the function type +// // `int() const`. +// // Note that we have also made `const_func` itself `const`. +// const AnyInvocable const_func = [](){ return 0; }; +// +// const_func(); // Fine: `int() const` is `const`. +// +// In the above example, the call `func()` would have compiled if +// `std::function` were used even though the types are not const compatible. +// This is a bug, and using `absl::AnyInvocable` properly detects that bug. +// +// In addition to affecting the signature of `operator()`, the `const` and +// reference qualifiers of the function type also appropriately constrain which +// kinds of invocable objects you are allowed to place into the `AnyInvocable` +// instance. If you specify a function type that is const-qualified, then +// anything that you attempt to put into the `AnyInvocable` must be callable on +// a `const` instance of that type. +// +// Constraint example: +// +// // Fine because the lambda is callable when `const`. 
+// AnyInvocable func = [=](){ return 0; }; +// +// // This is a compile-error because the lambda isn't callable when `const`. +// AnyInvocable error = [=]() mutable { return 0; }; +// +// An `&&` qualifier can be used to express that an `absl::AnyInvocable` +// instance should be invoked at most once: +// +// // Invokes `continuation` with the logical result of an operation when +// // that operation completes (common in asynchronous code). +// void CallOnCompletion(AnyInvocable continuation) { +// int result_of_foo = foo(); +// +// // `std::move` is required because the `operator()` of `continuation` is +// // rvalue-reference qualified. +// std::move(continuation)(result_of_foo); +// } +// +// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original +// implementation. +template +class AnyInvocable : private internal_any_invocable::Impl { + private: + static_assert( + std::is_function::value, + "The template argument of AnyInvocable must be a function type."); + + using Impl = internal_any_invocable::Impl; + + public: + // The return type of Sig + using result_type = typename Impl::result_type; + + // Constructors + + // Constructs the `AnyInvocable` in an empty state. + AnyInvocable() noexcept = default; + AnyInvocable(std::nullptr_t) noexcept {} // NOLINT + + // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. + // Note that `f` is not guaranteed to be empty after move-construction, + // although it may be. + AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; + + // Constructs an `AnyInvocable` from an invocable object. + // + // Upon construction, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template ::value>> + AnyInvocable(F&& f) // NOLINT + : Impl(internal_any_invocable::ConversionConstruct(), + std::forward(f)) {} + + // Constructs an `AnyInvocable` that holds an invocable object of type `T`, + // which is constructed in-place from the given arguments. + // + // Example: + // + // AnyInvocable func( + // absl::in_place_type, arg1, arg2); + // + template ::value>> + explicit AnyInvocable(absl::in_place_type_t, Args&&... args) + : Impl(absl::in_place_type>, + std::forward(args)...) { + static_assert(std::is_same>::value, + "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Overload of the above constructor to support list-initialization. + template &, Args...>::value>> + explicit AnyInvocable(absl::in_place_type_t, + std::initializer_list ilist, Args&&... args) + : Impl(absl::in_place_type>, ilist, + std::forward(args)...) { + static_assert(std::is_same>::value, + "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Assignment Operators + + // Assigns an `AnyInvocable` through move-assignment. + // Note that `f` is not guaranteed to be empty after move-assignment + // although it may be. + AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; + + // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If + // not empty, destroys the target, putting `*this` into an empty state. + AnyInvocable& operator=(std::nullptr_t) noexcept { + this->Clear(); + return *this; + } + + // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. 
+ // + // Upon assignment, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template ::value>> + AnyInvocable& operator=(F&& f) { + *this = AnyInvocable(std::forward(f)); + return *this; + } + + // Assigns an `AnyInvocable` from a reference to an invocable object. + // Upon assignment, stores a reference to the invocable object in the + // `AnyInvocable` instance. + template < + class F, + typename = absl::enable_if_t< + internal_any_invocable::CanAssignReferenceWrapper::value>> + AnyInvocable& operator=(std::reference_wrapper f) noexcept { + *this = AnyInvocable(f); + return *this; + } + + // Destructor + + // If not empty, destroys the target. + ~AnyInvocable() = default; + + // absl::AnyInvocable::swap() + // + // Exchanges the targets of `*this` and `other`. + void swap(AnyInvocable& other) noexcept { std::swap(*this, other); } + + // abl::AnyInvocable::operator bool() + // + // Returns `true` if `*this` is not empty. + explicit operator bool() const noexcept { return this->HasValue(); } + + // Invokes the target object of `*this`. `*this` must not be empty. + // + // Note: The signature of this function call operator is the same as the + // template parameter `Sig`. + using Impl::operator(); + + // Equality operators + + // Returns `true` if `*this` is empty. + friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept { + return !f.HasValue(); + } + + // Returns `true` if `*this` is empty. + friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept { + return !f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept { + return f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept { + return f.HasValue(); + } + + // swap() + // + // Exchanges the targets of `f1` and `f2`. + friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); } + + private: + // Friending other instantiations is necessary for conversions. + template + friend class internal_any_invocable::CoreImpl; +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/bind_front.h b/CAPI/cpp/grpc/include/absl/functional/bind_front.h new file mode 100644 index 0000000..f9075bd --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/bind_front.h @@ -0,0 +1,193 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: bind_front.h +// ----------------------------------------------------------------------------- +// +// `absl::bind_front()` returns a functor by binding a number of arguments to +// the front of a provided (usually more generic) functor. 
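+// (For instance, `absl::bind_front(Minus, 3)` yields a functor `f` for which
+// `f(2)` evaluates `Minus(3, 2)`; see the examples below.)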
Unlike `std::bind`, +// it does not require the use of argument placeholders. The simpler syntax of +// `absl::bind_front()` allows you to avoid known misuses with `std::bind()`. +// +// `absl::bind_front()` is meant as a drop-in replacement for C++20's upcoming +// `std::bind_front()`, which similarly resolves these issues with +// `std::bind()`. Both `bind_front()` alternatives, unlike `std::bind()`, allow +// partial function application. (See +// https://en.wikipedia.org/wiki/Partial_application). + +#ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_ +#define ABSL_FUNCTIONAL_BIND_FRONT_H_ + +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#include // For std::bind_front. +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + +#include "absl/functional/internal/front_binder.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// bind_front() +// +// Binds the first N arguments of an invocable object and stores them by value. +// +// Like `std::bind()`, `absl::bind_front()` is implicitly convertible to +// `std::function`. In particular, it may be used as a simpler replacement for +// `std::bind()` in most cases, as it does not require placeholders to be +// specified. More importantly, it provides more reliable correctness guarantees +// than `std::bind()`; while `std::bind()` will silently ignore passing more +// parameters than expected, for example, `absl::bind_front()` will report such +// mis-uses as errors. In C++20, `absl::bind_front` is replaced by +// `std::bind_front`. +// +// absl::bind_front(a...) can be seen as storing the results of +// std::make_tuple(a...). +// +// Example: Binding a free function. +// +// int Minus(int a, int b) { return a - b; } +// +// assert(absl::bind_front(Minus)(3, 2) == 3 - 2); +// assert(absl::bind_front(Minus, 3)(2) == 3 - 2); +// assert(absl::bind_front(Minus, 3, 2)() == 3 - 2); +// +// Example: Binding a member function. +// +// struct Math { +// int Double(int a) const { return 2 * a; } +// }; +// +// Math math; +// +// assert(absl::bind_front(&Math::Double)(&math, 3) == 2 * 3); +// // Stores a pointer to math inside the functor. +// assert(absl::bind_front(&Math::Double, &math)(3) == 2 * 3); +// // Stores a copy of math inside the functor. +// assert(absl::bind_front(&Math::Double, math)(3) == 2 * 3); +// // Stores std::unique_ptr inside the functor. +// assert(absl::bind_front(&Math::Double, +// std::unique_ptr(new Math))(3) == 2 * 3); +// +// Example: Using `absl::bind_front()`, instead of `std::bind()`, with +// `std::function`. +// +// class FileReader { +// public: +// void ReadFileAsync(const std::string& filename, std::string* content, +// const std::function& done) { +// // Calls Executor::Schedule(std::function). +// Executor::DefaultExecutor()->Schedule( +// absl::bind_front(&FileReader::BlockingRead, this, +// filename, content, done)); +// } +// +// private: +// void BlockingRead(const std::string& filename, std::string* content, +// const std::function& done) { +// CHECK_OK(file::GetContents(filename, content, {})); +// done(); +// } +// }; +// +// `absl::bind_front()` stores bound arguments explicitly using the type passed +// rather than implicitly based on the type accepted by its functor. +// +// Example: Binding arguments explicitly. 
+// +// void LogStringView(absl::string_view sv) { +// LOG(INFO) << sv; +// } +// +// Executor* e = Executor::DefaultExecutor(); +// std::string s = "hello"; +// absl::string_view sv = s; +// +// // absl::bind_front(LogStringView, arg) makes a copy of arg and stores it. +// e->Schedule(absl::bind_front(LogStringView, sv)); // ERROR: dangling +// // string_view. +// +// e->Schedule(absl::bind_front(LogStringView, s)); // OK: stores a copy of +// // s. +// +// To store some of the arguments passed to `absl::bind_front()` by reference, +// use std::ref()` and `std::cref()`. +// +// Example: Storing some of the bound arguments by reference. +// +// class Service { +// public: +// void Serve(const Request& req, std::function* done) { +// // The request protocol buffer won't be deleted until done is called. +// // It's safe to store a reference to it inside the functor. +// Executor::DefaultExecutor()->Schedule( +// absl::bind_front(&Service::BlockingServe, this, std::cref(req), +// done)); +// } +// +// private: +// void BlockingServe(const Request& req, std::function* done); +// }; +// +// Example: Storing bound arguments by reference. +// +// void Print(const std::string& a, const std::string& b) { +// std::cerr << a << b; +// } +// +// std::string hi = "Hello, "; +// std::vector names = {"Chuk", "Gek"}; +// // Doesn't copy hi. +// for_each(names.begin(), names.end(), +// absl::bind_front(Print, std::ref(hi))); +// +// // DO NOT DO THIS: the functor may outlive "hi", resulting in +// // dangling references. +// foo->DoInFuture(absl::bind_front(Print, std::ref(hi), "Guest")); // BAD! +// auto f = absl::bind_front(Print, std::ref(hi), "Guest"); // BAD! +// +// Example: Storing reference-like types. +// +// void Print(absl::string_view a, const std::string& b) { +// std::cerr << a << b; +// } +// +// std::string hi = "Hello, "; +// // Copies "hi". +// absl::bind_front(Print, hi)("Chuk"); +// +// // Compile error: std::reference_wrapper is not implicitly +// // convertible to string_view. +// // absl::bind_front(Print, std::cref(hi))("Chuk"); +// +// // Doesn't copy "hi". +// absl::bind_front(Print, absl::string_view(hi))("Chuk"); +// +#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +using std::bind_front; +#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +template +constexpr functional_internal::bind_front_t bind_front( + F&& func, BoundArgs&&... args) { + return functional_internal::bind_front_t( + absl::in_place, absl::forward(func), + absl::forward(args)...); +} +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_BIND_FRONT_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/function_ref.h new file mode 100644 index 0000000..f977960 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/function_ref.h @@ -0,0 +1,143 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: function_ref.h +// ----------------------------------------------------------------------------- +// +// This header file defines the `absl::FunctionRef` type for holding a +// non-owning reference to an object of any invocable type. This function +// reference is typically most useful as a type-erased argument type for +// accepting function types that neither take ownership nor copy the type; using +// the reference type in this case avoids a copy and an allocation. Best +// practices of other non-owning reference-like objects (such as +// `absl::string_view`) apply here. +// +// An `absl::FunctionRef` is similar in usage to a `std::function` but has the +// following differences: +// +// * It doesn't own the underlying object. +// * It doesn't have a null or empty state. +// * It never performs deep copies or allocations. +// * It's much faster and cheaper to construct. +// * It's trivially copyable and destructable. +// +// Generally, `absl::FunctionRef` should not be used as a return value, data +// member, or to initialize a `std::function`. Such usages will often lead to +// problematic lifetime issues. Once you convert something to an +// `absl::FunctionRef` you cannot make a deep copy later. +// +// This class is suitable for use wherever a "const std::function<>&" +// would be used without making a copy. ForEach functions and other versions of +// the visitor pattern are a good example of when this class should be used. +// +// This class is trivial to copy and should be passed by value. +#ifndef ABSL_FUNCTIONAL_FUNCTION_REF_H_ +#define ABSL_FUNCTIONAL_FUNCTION_REF_H_ + +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/functional/internal/function_ref.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// FunctionRef +// +// Dummy class declaration to allow the partial specialization based on function +// types below. +template +class FunctionRef; + +// FunctionRef +// +// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with +// a compatible signature. Generally, an `absl::FunctionRef` should only be used +// as an argument type and should be preferred as an argument over a const +// reference to a `std::function`. `absl::FunctionRef` itself does not allocate, +// although the wrapped invokable may. +// +// Example: +// +// // The following function takes a function callback by const reference +// bool Visitor(const std::function& callback); +// +// // Assuming that the function is not stored or otherwise copied, it can be +// // replaced by an `absl::FunctionRef`: +// bool Visitor(absl::FunctionRef +// callback); +// +// Note: the assignment operator within an `absl::FunctionRef` is intentionally +// deleted to prevent misuse; because the `absl::FunctionRef` does not own the +// underlying type, assignment likely indicates misuse. +template +class FunctionRef { + private: + // Used to disable constructors for objects that are not compatible with the + // signature of this FunctionRef. + template > + using EnableIfCompatible = + typename std::enable_if::value || + std::is_convertible::value>::type; + + public: + // Constructs a FunctionRef from any invokable type. 
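+  // For example (a sketch with a hypothetical `Visitor` function):
+  //
+  //   bool Visitor(absl::FunctionRef<bool(int)> callback);
+  //   Visitor([](int x) { return x > 0; });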
+ template > + // NOLINTNEXTLINE(runtime/explicit) + FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) + : invoker_(&absl::functional_internal::InvokeObject) { + absl::functional_internal::AssertNonNull(f); + ptr_.obj = &f; + } + + // Overload for function pointers. This eliminates a level of indirection that + // would happen if the above overload was used (it lets us store the pointer + // instead of a pointer to a pointer). + // + // This overload is also used for references to functions, since references to + // functions can decay to function pointers implicitly. + template < + typename F, typename = EnableIfCompatible, + absl::functional_internal::EnableIf::value> = 0> + FunctionRef(F* f) // NOLINT(runtime/explicit) + : invoker_(&absl::functional_internal::InvokeFunction) { + assert(f != nullptr); + ptr_.fun = reinterpret_cast(f); + } + + // To help prevent subtle lifetime bugs, FunctionRef is not assignable. + // Typically, it should only be used as an argument type. + FunctionRef& operator=(const FunctionRef& rhs) = delete; + FunctionRef(const FunctionRef& rhs) = default; + + // Call the underlying object. + R operator()(Args... args) const { + return invoker_(ptr_, std::forward(args)...); + } + + private: + absl::functional_internal::VoidPtr ptr_; + absl::functional_internal::Invoker invoker_; +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h new file mode 100644 index 0000000..f353139 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h @@ -0,0 +1,857 @@ +// Copyright 2022 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Implementation details for `absl::AnyInvocable` + +#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ +#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ + +//////////////////////////////////////////////////////////////////////////////// +// // +// This implementation of the proposed `any_invocable` uses an approach that // +// chooses between local storage and remote storage for the contained target // +// object based on the target object's size, alignment requirements, and // +// whether or not it has a nothrow move constructor. Additional optimizations // +// are performed when the object is a trivially copyable type [basic.types]. // +// // +// There are three datamembers per `AnyInvocable` instance // +// // +// 1) A union containing either // +// - A pointer to the target object referred to via a void*, or // +// - the target object, emplaced into a raw char buffer // +// // +// 2) A function pointer to a "manager" function operation that takes a // +// discriminator and logically branches to either perform a move operation // +// or destroy operation based on that discriminator. 
// +// // +// 3) A function pointer to an "invoker" function operation that invokes the // +// target object, directly returning the result. // +// // +// When in the logically empty state, the manager function is an empty // +// function and the invoker function is one that would be undefined-behavior // +// to call. // +// // +// An additional optimization is performed when converting from one // +// AnyInvocable to another where only the noexcept specification and/or the // +// cv/ref qualifiers of the function type differ. In these cases, the // +// conversion works by "moving the guts", similar to if they were the same // +// exact type, as opposed to having to perform an additional layer of // +// wrapping through remote storage. // +// // +//////////////////////////////////////////////////////////////////////////////// + +// IWYU pragma: private, include "absl/functional/any_invocable.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/invoke.h" +#include "absl/base/macros.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Helper macro used to prevent spelling `noexcept` in language versions older +// than C++17, where it is not part of the type system, in order to avoid +// compilation failures and internal compiler errors. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex) +#else +#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) +#endif + +// Defined in functional/any_invocable.h +template +class AnyInvocable; + +namespace internal_any_invocable { + +// Constants relating to the small-object-storage for AnyInvocable +enum StorageProperty : std::size_t { + kAlignment = alignof(std::max_align_t), // The alignment of the storage + kStorageSize = sizeof(void*) * 2 // The size of the storage +}; + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction for checking if a type is an AnyInvocable instantiation. +// This is used during conversion operations. +template +struct IsAnyInvocable : std::false_type {}; + +template +struct IsAnyInvocable> : std::true_type {}; +// +//////////////////////////////////////////////////////////////////////////////// + +// A type trait that tells us whether or not a target function type should be +// stored locally in the small object optimization storage +template +using IsStoredLocally = std::integral_constant< + bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && + kAlignment % alignof(T) == 0 && + std::is_nothrow_move_constructible::value>; + +// An implementation of std::remove_cvref_t of C++20. +template +using RemoveCVRef = + typename std::remove_cv::type>::type; + +//////////////////////////////////////////////////////////////////////////////// +// +// An implementation of the C++ standard INVOKE pseudo-macro, operation is +// equivalent to std::invoke except that it forces an implicit conversion to the +// specified return type. If "R" is void, the function is executed and the +// return value is simply ignored. +template ::value>> +void InvokeR(F&& f, P&&... args) { + absl::base_internal::invoke(std::forward(f), std::forward
<P>
(args)...); +} + +template ::value, int> = 0> +ReturnType InvokeR(F&& f, P&&... args) { + return absl::base_internal::invoke(std::forward(f), + std::forward
<P>
(args)...); +} + +// +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// +/// +// A metafunction that takes a "T" corresponding to a parameter type of the +// user's specified function type, and yields the parameter type to use for the +// type-erased invoker. In order to prevent observable moves, this must be +// either a reference or, if the type is trivial, the original parameter type +// itself. Since the parameter type may be incomplete at the point that this +// metafunction is used, we can only do this optimization for scalar types +// rather than for any trivial type. +template +T ForwardImpl(std::true_type); + +template +T&& ForwardImpl(std::false_type); + +// NOTE: We deliberately use an intermediate struct instead of a direct alias, +// as a workaround for b/206991861 on MSVC versions < 1924. +template +struct ForwardedParameter { + using type = decltype(( + ForwardImpl)(std::integral_constant::value>())); +}; + +template +using ForwardedParameterType = typename ForwardedParameter::type; +// +//////////////////////////////////////////////////////////////////////////////// + +// A discriminator when calling the "manager" function that describes operation +// type-erased operation should be invoked. +// +// "relocate_from_to" specifies that the manager should perform a move. +// +// "dispose" specifies that the manager should perform a destroy. +enum class FunctionToCall : bool { relocate_from_to, dispose }; + +// The portion of `AnyInvocable` state that contains either a pointer to the +// target object or the object itself in local storage +union TypeErasedState { + struct { + // A pointer to the type-erased object when remotely stored + void* target; + // The size of the object for `RemoteManagerTrivial` + std::size_t size; + } remote; + + // Local-storage for the type-erased object when small and trivial enough + alignas(kAlignment) char storage[kStorageSize]; +}; + +// A typed accessor for the object in `TypeErasedState` storage +template +T& ObjectInLocalStorage(TypeErasedState* const state) { + // We launder here because the storage may be reused with the same type. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + return *std::launder(reinterpret_cast(&state->storage)); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return *__builtin_launder(reinterpret_cast(&state->storage)); +#else + + // When `std::launder` or equivalent are not available, we rely on undefined + // behavior, which works as intended on Abseil's officially supported + // platforms as of Q2 2022. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#pragma GCC diagnostic push +#endif + return *reinterpret_cast(&state->storage); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + +#endif +} + +// The type for functions issuing lifetime-related operations: move and dispose +// A pointer to such a function is contained in each `AnyInvocable` instance. +// NOTE: When specifying `FunctionToCall::`dispose, the same state must be +// passed as both "from" and "to". +using ManagerType = void(FunctionToCall /*operation*/, + TypeErasedState* /*from*/, TypeErasedState* /*to*/) + ABSL_INTERNAL_NOEXCEPT_SPEC(true); + +// The type for functions issuing the actual invocation of the object +// A pointer to such a function is contained in each AnyInvocable instance. 
+template <bool SigIsNoexcept, class ReturnType, class... P>
+using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType<P>
...) + ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); + +// The manager that is used when AnyInvocable is empty +inline void EmptyManager(FunctionToCall /*operation*/, + TypeErasedState* /*from*/, + TypeErasedState* /*to*/) noexcept {} + +// The manager that is used when a target function is in local storage and is +// a trivially copyable type. +inline void LocalManagerTrivial(FunctionToCall /*operation*/, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + // This single statement without branching handles both possible operations. + // + // For FunctionToCall::dispose, "from" and "to" point to the same state, and + // so this assignment logically would do nothing. + // + // Note: Correctness here relies on http://wg21.link/p0593, which has only + // become standard in C++20, though implementations do not break it in + // practice for earlier versions of C++. + // + // The correct way to do this without that paper is to first placement-new a + // default-constructed T in "to->storage" prior to the memmove, but doing so + // requires a different function to be created for each T that is stored + // locally, which can cause unnecessary bloat and be less cache friendly. + *to = *from; + + // Note: Because the type is trivially copyable, the destructor does not need + // to be called ("trivially copyable" requires a trivial destructor). +} + +// The manager that is used when a target function is in local storage and is +// not a trivially copyable type. +template +void LocalManagerNontrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + static_assert(IsStoredLocally::value, + "Local storage must only be used for supported types."); + static_assert(!std::is_trivially_copyable::value, + "Locally stored types must be trivially copyable."); + + T& from_object = (ObjectInLocalStorage)(from); + + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + ::new (static_cast(&to->storage)) T(std::move(from_object)); + ABSL_FALLTHROUGH_INTENDED; + case FunctionToCall::dispose: + from_object.~T(); // Must not throw. // NOLINT + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The invoker that is used when a target function is in local storage +// Note: QualTRef here is the target function type along with cv and reference +// qualifiers that must be used when calling the function. +template +ReturnType LocalInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { + using RawT = RemoveCVRef; + static_assert( + IsStoredLocally::value, + "Target object must be in local storage in order to be invoked from it."); + + auto& f = (ObjectInLocalStorage)(state); + return (InvokeR)(static_cast(f), + static_cast>(args)...); +} + +// The manager that is used when a target function is in remote storage and it +// has a trivial destructor +inline void RemoteManagerTrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote = from->remote; + return; + case FunctionToCall::dispose: +#if defined(__cpp_sized_deallocation) + ::operator delete(from->remote.target, from->remote.size); +#else // __cpp_sized_deallocation + ::operator delete(from->remote.target); +#endif // __cpp_sized_deallocation + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The manager that is used when a target function is in remote storage and the +// destructor of the type is not trivial +template +void RemoteManagerNontrivial(FunctionToCall operation, + TypeErasedState* const from, + TypeErasedState* const to) noexcept { + static_assert(!IsStoredLocally::value, + "Remote storage must only be used for types that do not " + "qualify for local storage."); + + switch (operation) { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote.target = from->remote.target; + return; + case FunctionToCall::dispose: + ::delete static_cast(from->remote.target); // Must not throw. + return; + } + ABSL_INTERNAL_UNREACHABLE; +} + +// The invoker that is used when a target function is in remote storage +template +ReturnType RemoteInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { + using RawT = RemoveCVRef; + static_assert(!IsStoredLocally::value, + "Target object must be in remote storage in order to be " + "invoked from it."); + + auto& f = *static_cast(state->remote.target); + return (InvokeR)(static_cast(f), + static_cast>(args)...); +} + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction that checks if a type T is an instantiation of +// absl::in_place_type_t (needed for constructor constraints of AnyInvocable). +template +struct IsInPlaceType : std::false_type {}; + +template +struct IsInPlaceType> : std::true_type {}; +// +//////////////////////////////////////////////////////////////////////////////// + +// A constructor name-tag used with CoreImpl (below) to request the +// conversion-constructor. QualDecayedTRef is the decayed-type of the object to +// wrap, along with the cv and reference qualifiers that must be applied when +// performing an invocation of the wrapped object. +template +struct TypedConversionConstruct {}; + +// A helper base class for all core operations of AnyInvocable. Most notably, +// this class creates the function call operator and constraint-checkers so that +// the top-level class does not have to be a series of partial specializations. +// +// Note: This definition exists (as opposed to being a declaration) so that if +// the user of the top-level template accidentally passes a template argument +// that is not a function type, they will get a static_assert in AnyInvocable's +// class body rather than an error stating that Impl is not defined. +template +class Impl {}; // Note: This is partially-specialized later. + +// A std::unique_ptr deleter that deletes memory allocated via ::operator new. +#if defined(__cpp_sized_deallocation) +class TrivialDeleter { + public: + explicit TrivialDeleter(std::size_t size) : size_(size) {} + + void operator()(void* target) const { + ::operator delete(target, size_); + } + + private: + std::size_t size_; +}; +#else // __cpp_sized_deallocation +class TrivialDeleter { + public: + explicit TrivialDeleter(std::size_t) {} + + void operator()(void* target) const { ::operator delete(target); } +}; +#endif // __cpp_sized_deallocation + +template +class CoreImpl; + +constexpr bool IsCompatibleConversion(void*, void*) { return false; } +template +constexpr bool IsCompatibleConversion(CoreImpl*, + CoreImpl*) { + return !NoExceptDest || NoExceptSrc; +} + +// A helper base class for all core operations of AnyInvocable that do not +// depend on the cv/ref qualifiers of the function type. +template +class CoreImpl { + public: + using result_type = ReturnType; + + CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {} + + enum class TargetType : int { + kPointer = 0, + kCompatibleAnyInvocable = 1, + kIncompatibleAnyInvocable = 2, + kOther = 3, + }; + + // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with + // the invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(TypedConversionConstruct, F&& f) { + using DecayedT = RemoveCVRef; + + constexpr TargetType kTargetType = + (std::is_pointer::value || + std::is_member_pointer::value) + ? TargetType::kPointer + : IsCompatibleAnyInvocable::value + ? TargetType::kCompatibleAnyInvocable + : IsAnyInvocable::value + ? 
TargetType::kIncompatibleAnyInvocable + : TargetType::kOther; + // NOTE: We only use integers instead of enums as template parameters in + // order to work around a bug on C++14 under MSVC 2017. + // See b/236131881. + Initialize(kTargetType), QualDecayedTRef>( + std::forward(f)); + } + + // Note: QualTRef here includes the cv-ref qualifiers associated with the + // invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(absl::in_place_type_t, Args&&... args) { + InitializeStorage(std::forward(args)...); + } + + CoreImpl(CoreImpl&& other) noexcept { + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + } + + CoreImpl& operator=(CoreImpl&& other) noexcept { + // Put the left-hand operand in an empty state. + // + // Note: A full reset that leaves us with an object that has its invariants + // intact is necessary in order to handle self-move. This is required by + // types that are used with certain operations of the standard library, such + // as the default definition of std::swap when both operands target the same + // object. + Clear(); + + // Perform the actual move/destroy operation on the target function. + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + + return *this; + } + + ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); } + + // Check whether or not the AnyInvocable is in the empty state. + bool HasValue() const { return invoker_ != nullptr; } + + // Effects: Puts the object into its empty state. + void Clear() { + manager_(FunctionToCall::dispose, &state_, &state_); + manager_ = EmptyManager; + invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) { +// This condition handles types that decay into pointers, which includes +// function references. Since function references cannot be null, GCC warns +// against comparing their decayed form with nullptr. +// Since this is template-heavy code, we prefer to disable these warnings +// locally instead of adding yet another overload of this function. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#endif + if (static_cast>(f) == nullptr) { +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + manager_ = EmptyManager; + invoker_ = nullptr; + return; + } + InitializeStorage(std::forward(f)); + } + + template = 0> + void Initialize(F&& f) { + // In this case we can "steal the guts" of the other AnyInvocable. + f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); + manager_ = f.manager_; + invoker_ = f.invoker_; + + f.manager_ = EmptyManager; + f.invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) { + if (f.HasValue()) { + InitializeStorage(std::forward(f)); + } else { + manager_ = EmptyManager; + invoker_ = nullptr; + } + } + + template > + void Initialize(F&& f) { + InitializeStorage(std::forward(f)); + } + + // Use local (inline) storage for applicable target object types. + template >::value>> + void InitializeStorage(Args&&...
args) { + using RawT = RemoveCVRef; + ::new (static_cast(&state_.storage)) + RawT(std::forward(args)...); + + invoker_ = LocalInvoker; + // We can simplify our manager if we know the type is trivially copyable. + InitializeLocalManager(); + } + + // Use remote storage for target objects that cannot be stored locally. + template >::value, + int> = 0> + void InitializeStorage(Args&&... args) { + InitializeRemoteManager>(std::forward(args)...); + // This is set after everything else in case an exception is thrown in an + // earlier step of the initialization. + invoker_ = RemoteInvoker; + } + + template ::value>> + void InitializeLocalManager() { + manager_ = LocalManagerTrivial; + } + + template ::value, int> = 0> + void InitializeLocalManager() { + manager_ = LocalManagerNontrivial; + } + + template + using HasTrivialRemoteStorage = + std::integral_constant::value && + alignof(T) <= + ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; + + template ::value>> + void InitializeRemoteManager(Args&&... args) { + // unique_ptr is used for exception-safety in case construction throws. + std::unique_ptr uninitialized_target( + ::operator new(sizeof(T)), TrivialDeleter(sizeof(T))); + ::new (uninitialized_target.get()) T(std::forward(args)...); + state_.remote.target = uninitialized_target.release(); + state_.remote.size = sizeof(T); + manager_ = RemoteManagerTrivial; + } + + template ::value, int> = 0> + void InitializeRemoteManager(Args&&... args) { + state_.remote.target = ::new T(std::forward(args)...); + manager_ = RemoteManagerNontrivial; + } + + ////////////////////////////////////////////////////////////////////////////// + // + // Type trait to determine if the template argument is an AnyInvocable whose + // function type is compatible enough with ours such that we can + // "move the guts" out of it when moving, rather than having to place a new + // object into remote storage. + + template + struct IsCompatibleAnyInvocable { + static constexpr bool value = false; + }; + + template + struct IsCompatibleAnyInvocable> { + static constexpr bool value = + (IsCompatibleConversion)(static_cast< + typename AnyInvocable::CoreImpl*>( + nullptr), + static_cast(nullptr)); + }; + + // + ////////////////////////////////////////////////////////////////////////////// + + TypeErasedState state_; + ManagerType* manager_; + InvokerType* invoker_; +}; + +// A constructor name-tag used with Impl to request the +// conversion-constructor +struct ConversionConstruct {}; + +//////////////////////////////////////////////////////////////////////////////// +// +// A metafunction that is normally an identity metafunction except that when +// given a std::reference_wrapper, it yields T&. This is necessary because +// currently std::reference_wrapper's operator() is not conditionally noexcept, +// so when checking if such an Invocable is nothrow-invocable, we must pull out +// the underlying type. +template +struct UnwrapStdReferenceWrapperImpl { + using type = T; +}; + +template +struct UnwrapStdReferenceWrapperImpl> { + using type = T&; +}; + +template +using UnwrapStdReferenceWrapper = + typename UnwrapStdReferenceWrapperImpl::type; +// +//////////////////////////////////////////////////////////////////////////////// + +// An alias that always yields std::true_type (used with constraints) where +// substitution failures happen when forming the template arguments. 
+template +using True = + std::integral_constant*) != 0>; + +/*SFINAE constraints for the conversion-constructor.*/ +template , AnyInvocable>::value>> +using CanConvert = + True>::value>, + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + +/*SFINAE constraints for the std::in_place constructors.*/ +template +using CanEmplace = True< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, Args...>::value>>; + +/*SFINAE constraints for the conversion-assign operator.*/ +template , AnyInvocable>::value>> +using CanAssign = + True::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, F>::value>>; + +/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ +template +using CanAssignReferenceWrapper = + True::template CallIsValid>::value>, + absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept< + std::reference_wrapper>::value>>; + +//////////////////////////////////////////////////////////////////////////////// +// +// The constraint for checking whether or not a call meets the noexcept +// callability requirements. This is a preprocessor macro because specifying it +// this way as opposed to a disjunction/branch can improve the user-side error +// messages and avoids an instantiation of std::is_nothrow_invocable_r in the +// cases where the user did not specify a noexcept function type. +// +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \ + ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) + +// The disjunction below is because we can't rely on std::is_nothrow_invocable_r +// to give the right result when ReturnType is non-moveable in toolchains that +// don't treat non-moveable result types correctly. For example this was the +// case in libc++ before commit c3a24882 (2022-05). +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ + absl::enable_if_t> inv_quals, \ + P...>, \ + std::conjunction< \ + std::is_nothrow_invocable< \ + UnwrapStdReferenceWrapper> inv_quals, P...>, \ + std::is_same< \ + ReturnType, \ + absl::base_internal::invoke_result_t< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>>>>::value> + +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals) +// +//////////////////////////////////////////////////////////////////////////////// + +// A macro to generate partial specializations of Impl with the different +// combinations of supported cv/reference qualifiers and noexcept specifier. +// +// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any, +// inv_quals is the reference type to be used when invoking the target, and +// noex is "true" if the function type is noexcept, or false if it is not. +// +// The CallIsValid condition is more complicated than simply using +// absl::base_internal::is_invocable_r because we can't rely on it to give the +// right result when ReturnType is non-moveable in toolchains that don't treat +// non-moveable result types correctly. For example this was the case in libc++ +// before commit c3a24882 (2022-05). 
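A hedged usage sketch of how these constraints surface through the public absl::AnyInvocable wrapper (the Add function and the lambdas are illustrative, not taken from the vendored sources):

#include <utility>
#include "absl/functional/any_invocable.h"

int Add(int a, int b) { return a + b; }

void AnyInvocableSketch() {
  // Conversion-construction from a function pointer (TargetType::kPointer in
  // CoreImpl above).
  absl::AnyInvocable<int(int, int)> f = &Add;
  f(1, 2);  // returns 3

  // For a noexcept signature (C++17 and later), CanConvert only admits
  // targets that are nothrow-invocable, which is what
  // CallIsNoexceptIfSigIsNoexcept checks.
  absl::AnyInvocable<int(int, int) noexcept> g =
      [](int a, int b) noexcept { return a + b; };
  g(3, 4);

  // AnyInvocable is move-only; moving relocates the type-erased state via
  // the relocate_from_to operation.
  absl::AnyInvocable<int(int, int)> h = std::move(f);
  h(5, 6);
}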
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ + template \ + class Impl \ + : public CoreImpl { \ + public: \ + /*The base class, which contains the datamembers and core operations*/ \ + using Core = CoreImpl; \ + \ + /*SFINAE constraint to check if F is invocable with the proper signature*/ \ + template \ + using CallIsValid = True inv_quals, P...>, \ + std::is_same inv_quals, P...>>>::value>>; \ + \ + /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ + template \ + using CallIsNoexceptIfSigIsNoexcept = \ + True; \ + \ + /*Put the AnyInvocable into an empty state.*/ \ + Impl() = default; \ + \ + /*The implementation of a conversion-constructor from "f*/ \ + /*This forwards to Core, attaching inv_quals so that the base class*/ \ + /*knows how to properly type-erase the invocation.*/ \ + template \ + explicit Impl(ConversionConstruct, F&& f) \ + : Core(TypedConversionConstruct< \ + typename std::decay::type inv_quals>(), \ + std::forward(f)) {} \ + \ + /*Forward along the in-place construction parameters.*/ \ + template \ + explicit Impl(absl::in_place_type_t, Args&&... args) \ + : Core(absl::in_place_type inv_quals>, \ + std::forward(args)...) {} \ + \ + /*The actual invocation operation with the proper signature*/ \ + ReturnType operator()(P... args) cv ref noexcept(noex) { \ + assert(this->invoker_ != nullptr); \ + return this->invoker_(const_cast(&this->state_), \ + static_cast>(args)...); \ + } \ + } + +// Define the `noexcept(true)` specialization only for C++17 and beyond, when +// `noexcept` is part of the type system. +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L +// A convenience macro that defines specializations for the noexcept(true) and +// noexcept(false) forms, given the other properties. +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) +#else +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) +#endif + +// Non-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); + +// Lvalue-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); + +// Rvalue-ref-qualified partial specializations +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); +ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); + +// Undef the detail-only macros. +#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL +#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_ +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true +#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT +#undef ABSL_INTERNAL_NOEXCEPT_SPEC + +} // namespace internal_any_invocable +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h new file mode 100644 index 0000000..45f52de --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h @@ -0,0 +1,95 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implementation details for `absl::bind_front()`. + +#ifndef ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ +#define ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace functional_internal { + +// Invoke the method, expanding the tuple of bound arguments. +template +R Apply(Tuple&& bound, absl::index_sequence, Args&&... free) { + return base_internal::invoke( + absl::forward(bound).template get()..., + absl::forward(free)...); +} + +template +class FrontBinder { + using BoundArgsT = absl::container_internal::CompressedTuple; + using Idx = absl::make_index_sequence; + + BoundArgsT bound_args_; + + public: + template + constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) + : bound_args_(absl::forward(ts)...) {} + + template > + R operator()(FreeArgs&&... free_args) & { + return functional_internal::Apply(bound_args_, Idx(), + absl::forward(free_args)...); + } + + template > + R operator()(FreeArgs&&... free_args) const& { + return functional_internal::Apply(bound_args_, Idx(), + absl::forward(free_args)...); + } + + template > + R operator()(FreeArgs&&... free_args) && { + // This overload is called when *this is an rvalue. If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), + absl::forward(free_args)...); + } + + template > + R operator()(FreeArgs&&... free_args) const&& { + // This overload is called when *this is an rvalue. If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), + absl::forward(free_args)...); + } +}; + +template +using bind_front_t = FrontBinder, absl::decay_t...>; + +} // namespace functional_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h new file mode 100644 index 0000000..b5bb8b4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h @@ -0,0 +1,106 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
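For orientation, the FrontBinder class above is the machinery behind the public absl::bind_front(); a minimal usage sketch (the Minus function is an illustrative assumption):

#include "absl/functional/bind_front.h"

int Minus(int a, int b) { return a - b; }

void BindFrontSketch() {
  // bind_front fixes the leading arguments; the remaining ones stay free,
  // mirroring the bound/free split in FrontBinder::operator().
  auto fifty_minus = absl::bind_front(Minus, 50);
  fifty_minus(3);  // evaluates Minus(50, 3) == 47
}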
+ +#ifndef ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ +#define ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ + +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/meta/type_traits.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace functional_internal { + +// Like a void* that can handle function pointers as well. The standard does not +// allow function pointers to round-trip through void*, but void(*)() is fine. +// +// Note: It's important that this class remains trivial and is the same size as +// a pointer, since this allows the compiler to perform tail-call optimizations +// when the underlying function is a callable object with a matching signature. +union VoidPtr { + const void* obj; + void (*fun)(); +}; + +// Chooses the best type for passing T as an argument. +// Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are +// passed by value. +template +constexpr bool PassByValue() { + return !std::is_lvalue_reference::value && + absl::is_trivially_copy_constructible::value && + absl::is_trivially_copy_assignable< + typename std::remove_cv::type>::value && + std::is_trivially_destructible::value && + sizeof(T) <= 2 * sizeof(void*); +} + +template +struct ForwardT : std::conditional(), T, T&&> {}; + +// An Invoker takes a pointer to the type-erased invokable object, followed by +// the arguments that the invokable object expects. +// +// Note: The order of arguments here is an optimization, since member functions +// have an implicit "this" pointer as their first argument, putting VoidPtr +// first allows the compiler to perform tail-call optimization in many cases. +template +using Invoker = R (*)(VoidPtr, typename ForwardT::type...); + +// +// InvokeObject and InvokeFunction provide static "Invoke" functions that can be +// used as Invokers for objects or functions respectively. +// +// static_cast handles the case the return type is void. +template +R InvokeObject(VoidPtr ptr, typename ForwardT::type... args) { + auto o = static_cast(ptr.obj); + return static_cast( + absl::base_internal::invoke(*o, std::forward(args)...)); +} + +template +R InvokeFunction(VoidPtr ptr, typename ForwardT::type... args) { + auto f = reinterpret_cast(ptr.fun); + return static_cast( + absl::base_internal::invoke(f, std::forward(args)...)); +} + +template +void AssertNonNull(const std::function& f) { + assert(f != nullptr); + (void)f; +} + +template +void AssertNonNull(const F&) {} + +template +void AssertNonNull(F C::*f) { + assert(f != nullptr); + (void)f; +} + +template +using EnableIf = typename ::std::enable_if::type; + +} // namespace functional_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash.h b/CAPI/cpp/grpc/include/absl/hash/hash.h new file mode 100644 index 0000000..74e2d7c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/hash.h @@ -0,0 +1,421 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: hash.h +// ----------------------------------------------------------------------------- +// +// This header file defines the Abseil `hash` library and the Abseil hashing +// framework. This framework consists of the following: +// +// * The `absl::Hash` functor, which is used to invoke the hasher within the +// Abseil hashing framework. `absl::Hash` supports most basic types and +// a number of Abseil types out of the box. +// * `AbslHashValue`, an extension point that allows you to extend types to +// support Abseil hashing without requiring you to define a hashing +// algorithm. +// * `HashState`, a type-erased class which implements the manipulation of the +// hash state (H) itself; contains member functions `combine()`, +// `combine_contiguous()`, and `combine_unordered()`; and which you can use +// to contribute to an existing hash state when hashing your types. +// +// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework +// provides most of its utility by abstracting away the hash algorithm (and its +// implementation) entirely. Instead, a type invokes the Abseil hashing +// framework by simply combining its state with the state of known, hashable +// types. Hashing of that combined state is separately done by `absl::Hash`. +// +// One should assume that a hash algorithm is chosen randomly at the start of +// each process. E.g., `absl::Hash<int>{}(9)` in one process and +// `absl::Hash<int>{}(9)` in another process are likely to differ. +// +// `absl::Hash` may also produce different values from different dynamically +// loaded libraries. For this reason, `absl::Hash` values must never cross +// boundaries in dynamically loaded libraries (including when used in types like +// hash containers.) +// +// `absl::Hash` is intended to strongly mix input bits with a target of passing +// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect). +// +// Example: +// +// // Suppose we have a class `Circle` for which we want to add hashing: +// class Circle { +// public: +// ... +// private: +// std::pair<int, int> center_; +// int radius_; +// }; +// +// // To add hashing support to `Circle`, we simply need to add a free +// // (non-member) function `AbslHashValue()`, and return the combined hash +// // state of the existing hash state and the class state. You can add such a +// // free function using a friend declaration within the body of the class: +// class Circle { +// public: +// ... +// template <typename H> +// friend H AbslHashValue(H h, const Circle& c) { +// return H::combine(std::move(h), c.center_, c.radius_); +// } +// ... +// }; +// +// For more information, see Adding Type Support to `absl::Hash` below. +// +#ifndef ABSL_HASH_HASH_H_ +#define ABSL_HASH_HASH_H_ + +#include +#include + +#include "absl/functional/function_ref.h" +#include "absl/hash/internal/hash.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// ----------------------------------------------------------------------------- +// `absl::Hash` +// ----------------------------------------------------------------------------- +// +// `absl::Hash` is a convenient general-purpose hash functor for any type `T` +// satisfying any of the following conditions (in order): +// +// * T is an arithmetic or pointer type +// * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary +// hash state `H`.
+// - T defines a specialization of `std::hash` +// +// `absl::Hash` intrinsically supports the following types: +// +// * All integral types (including bool) +// * All enum types +// * All floating-point types (although hashing them is discouraged) +// * All pointer types, including nullptr_t +// * std::pair, if T1 and T2 are hashable +// * std::tuple, if all the Ts... are hashable +// * std::unique_ptr and std::shared_ptr +// * All string-like types including: +// * absl::Cord +// * std::string +// * std::string_view (as well as any instance of std::basic_string that +// uses char and std::char_traits) +// * All the standard sequence containers (provided the elements are hashable) +// * All the standard associative containers (provided the elements are +// hashable) +// * absl types such as the following: +// * absl::string_view +// * absl::uint128 +// * absl::Time, absl::Duration, and absl::TimeZone +// * absl containers (provided the elements are hashable) such as the +// following: +// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set +// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map +// * absl::btree_multiset, absl::btree_multimap +// * absl::InlinedVector +// * absl::FixedArray +// +// When absl::Hash is used to hash an unordered container with a custom hash +// functor, the elements are hashed using default absl::Hash semantics, not +// the custom hash functor. This is consistent with the behavior of +// operator==() on unordered containers, which compares elements pairwise with +// operator==() rather than the custom equality functor. It is usually a +// mistake to use either operator==() or absl::Hash on unordered collections +// that use functors incompatible with operator==() equality. +// +// Note: the list above is not meant to be exhaustive. Additional type support +// may be added, in which case the above list will be updated. +// +// ----------------------------------------------------------------------------- +// absl::Hash Invocation Evaluation +// ----------------------------------------------------------------------------- +// +// When invoked, `absl::Hash` searches for supplied hash functions in the +// following order: +// +// * Natively supported types out of the box (see above) +// * Types for which an `AbslHashValue()` overload is provided (such as +// user-defined types). See "Adding Type Support to `absl::Hash`" below. +// * Types which define a `std::hash` specialization +// +// The fallback to legacy hash functions exists mainly for backwards +// compatibility. If you have a choice, prefer defining an `AbslHashValue` +// overload instead of specializing any legacy hash functors. +// +// ----------------------------------------------------------------------------- +// The Hash State Concept, and using `HashState` for Type Erasure +// ----------------------------------------------------------------------------- +// +// The `absl::Hash` framework relies on the Concept of a "hash state." Such a +// hash state is used in several places: +// +// * Within existing implementations of `absl::Hash` to store the hashed +// state of an object. Note that it is up to the implementation how it stores +// such state. A hash table, for example, may mix the state to produce an +// integer value; a testing framework may simply hold a vector of that state. +// * Within implementations of `AbslHashValue()` used to extend user-defined +// types. (See "Adding Type Support to absl::Hash" below.) 
+// * Inside a `HashState`, providing type erasure for the concept of a hash +// state, which you can use to extend the `absl::Hash` framework for types +// that are otherwise difficult to extend using `AbslHashValue()`. (See the +// `HashState` class below.) +// +// The "hash state" concept contains three member functions for mixing hash +// state: +// +// * `H::combine(state, values...)` +// +// Combines an arbitrary number of values into a hash state, returning the +// updated state. Note that the existing hash state is move-only and must be +// passed by value. +// +// Each of the value types T must be hashable by H. +// +// NOTE: +// +// state = H::combine(std::move(state), value1, value2, value3); +// +// must be guaranteed to produce the same hash expansion as +// +// state = H::combine(std::move(state), value1); +// state = H::combine(std::move(state), value2); +// state = H::combine(std::move(state), value3); +// +// * `H::combine_contiguous(state, data, size)` +// +// Combines a contiguous array of `size` elements into a hash state, +// returning the updated state. Note that the existing hash state is +// move-only and must be passed by value. +// +// NOTE: +// +// state = H::combine_contiguous(std::move(state), data, size); +// +// need NOT be guaranteed to produce the same hash expansion as a loop +// (it may perform internal optimizations). If you need this guarantee, use a +// loop instead. +// +// * `H::combine_unordered(state, begin, end)` +// +// Combines a set of elements denoted by an iterator pair into a hash +// state, returning the updated state. Note that the existing hash +// state is move-only and must be passed by value. +// +// Unlike the other two methods, the hashing is order-independent. +// This can be used to hash unordered collections. +// +// ----------------------------------------------------------------------------- +// Adding Type Support to `absl::Hash` +// ----------------------------------------------------------------------------- +// +// To add support for your user-defined type, add a proper `AbslHashValue()` +// overload as a free (non-member) function. The overload will take an +// existing hash state and should combine that state with state from the type. +// +// Example: +// +// template +// H AbslHashValue(H state, const MyType& v) { +// return H::combine(std::move(state), v.field1, ..., v.fieldN); +// } +// +// where `(field1, ..., fieldN)` are the members you would use on your +// `operator==` to define equality. +// +// Notice that `AbslHashValue` is not a class member, but an ordinary function. +// An `AbslHashValue` overload for a type should only be declared in the same +// file and namespace as said type. The proper `AbslHashValue` implementation +// for a given type will be discovered via ADL. +// +// Note: unlike `std::hash', `absl::Hash` should never be specialized. It must +// only be extended by adding `AbslHashValue()` overloads. +// +template +using Hash = absl::hash_internal::Hash; + +// HashOf +// +// absl::HashOf() is a helper that generates a hash from the values of its +// arguments. It dispatches to absl::Hash directly, as follows: +// * HashOf(t) == absl::Hash{}(t) +// * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c)) +// +// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when +// * The argument lists have pairwise identical C++ types +// * a1 == b1 && a2 == b2 && ... +// +// The requirement that the arguments match in both type and value is critical. 
+// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if +// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`. +template +size_t HashOf(const Types&... values) { + auto tuple = std::tie(values...); + return absl::Hash{}(tuple); +} + +// HashState +// +// A type erased version of the hash state concept, for use in user-defined +// `AbslHashValue` implementations that can't use templates (such as PImpl +// classes, virtual functions, etc.). The type erasure adds overhead so it +// should be avoided unless necessary. +// +// Note: This wrapper will only erase calls to +// combine_contiguous(H, const unsigned char*, size_t) +// RunCombineUnordered(H, CombinerF) +// +// All other calls will be handled internally and will not invoke overloads +// provided by the wrapped class. +// +// Users of this class should still define a template `AbslHashValue` function, +// but can use `absl::HashState::Create(&state)` to erase the type of the hash +// state and dispatch to their private hashing logic. +// +// This state can be used like any other hash state. In particular, you can call +// `HashState::combine()` and `HashState::combine_contiguous()` on it. +// +// Example: +// +// class Interface { +// public: +// template +// friend H AbslHashValue(H state, const Interface& value) { +// state = H::combine(std::move(state), std::type_index(typeid(*this))); +// value.HashValue(absl::HashState::Create(&state)); +// return state; +// } +// private: +// virtual void HashValue(absl::HashState state) const = 0; +// }; +// +// class Impl : Interface { +// private: +// void HashValue(absl::HashState state) const override { +// absl::HashState::combine(std::move(state), v1_, v2_); +// } +// int v1_; +// std::string v2_; +// }; +class HashState : public hash_internal::HashStateBase { + public: + // HashState::Create() + // + // Create a new `HashState` instance that wraps `state`. All calls to + // `combine()` and `combine_contiguous()` on the new instance will be + // redirected to the original `state` object. The `state` object must outlive + // the `HashState` instance. + template + static HashState Create(T* state) { + HashState s; + s.Init(state); + return s; + } + + HashState(const HashState&) = delete; + HashState& operator=(const HashState&) = delete; + HashState(HashState&&) = default; + HashState& operator=(HashState&&) = default; + + // HashState::combine() + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. + using HashState::HashStateBase::combine; + + // HashState::combine_contiguous() + // + // Combines a contiguous array of `size` elements into a hash state, returning + // the updated state. 
+ static HashState combine_contiguous(HashState hash_state, + const unsigned char* first, size_t size) { + hash_state.combine_contiguous_(hash_state.state_, first, size); + return hash_state; + } + using HashState::HashStateBase::combine_contiguous; + + private: + HashState() = default; + + friend class HashState::HashStateBase; + + template + static void CombineContiguousImpl(void* p, const unsigned char* first, + size_t size) { + T& state = *static_cast(p); + state = T::combine_contiguous(std::move(state), first, size); + } + + template + void Init(T* state) { + state_ = state; + combine_contiguous_ = &CombineContiguousImpl; + run_combine_unordered_ = &RunCombineUnorderedImpl; + } + + template + struct CombineUnorderedInvoker { + template + void operator()(T inner_state, ConsumerT inner_cb) { + f(HashState::Create(&inner_state), + [&](HashState& inner_erased) { inner_cb(inner_erased.Real()); }); + } + + absl::FunctionRef)> f; + }; + + template + static HashState RunCombineUnorderedImpl( + HashState state, + absl::FunctionRef)> + f) { + // Note that this implementation assumes that inner_state and outer_state + // are the same type. This isn't true in the SpyHash case, but SpyHash + // types are move-convertible to each other, so this still works. + T& real_state = state.Real(); + real_state = T::RunCombineUnordered( + std::move(real_state), CombineUnorderedInvoker{f}); + return state; + } + + template + static HashState RunCombineUnordered(HashState state, CombinerT combiner) { + auto* run = state.run_combine_unordered_; + return run(std::move(state), std::ref(combiner)); + } + + // Do not erase an already erased state. + void Init(HashState* state) { + state_ = state->state_; + combine_contiguous_ = state->combine_contiguous_; + run_combine_unordered_ = state->run_combine_unordered_; + } + + template + T& Real() { + return *static_cast(state_); + } + + void* state_; + void (*combine_contiguous_)(void*, const unsigned char*, size_t); + HashState (*run_combine_unordered_)( + HashState state, + absl::FunctionRef)>); +}; + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_HASH_HASH_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash_testing.h b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h new file mode 100644 index 0000000..1e1c574 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h @@ -0,0 +1,378 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_HASH_HASH_TESTING_H_ +#define ABSL_HASH_HASH_TESTING_H_ + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/hash/internal/spy_hash_state.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/str_cat.h" +#include "absl/types/variant.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Run the absl::Hash algorithm over all the elements passed in and verify that +// their hash expansion is congruent with their `==` operator. +// +// It is used in conjunction with EXPECT_TRUE. 
Failures will output information +// on what requirement failed and on which objects. +// +// Users should pass a collection of types as either an initializer list or a +// container of cases. +// +// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( +// {v1, v2, ..., vN})); +// +// std::vector cases; +// // Fill cases... +// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); +// +// Users can pass a variety of types for testing heterogeneous lookup with +// `std::make_tuple`: +// +// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( +// std::make_tuple(v1, v2, ..., vN))); +// +// +// Ideally, the values passed should provide enough coverage of the `==` +// operator and the AbslHashValue implementations. +// For dynamically sized types, the empty state should usually be included in +// the values. +// +// The function accepts an optional comparator function, in case that `==` is +// not enough for the values provided. +// +// Usage: +// +// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( +// std::make_tuple(v1, v2, ..., vN), MyCustomEq{})); +// +// It checks the following requirements: +// 1. The expansion for a value is deterministic. +// 2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates +// to true, then their hash expansion must be equal. +// 3. If `a == b` evaluates to false their hash expansion must be unequal. +// 4. If `a == b` evaluates to false neither hash expansion can be a +// suffix of the other. +// 5. AbslHashValue overloads should not be called by the user. They are only +// meant to be called by the framework. Users should call H::combine() and +// H::combine_contiguous(). +// 6. No moved-from instance of the hash state is used in the implementation +// of AbslHashValue. +// +// The values do not have to have the same type. This can be useful for +// equivalent types that support heterogeneous lookup. +// +// A possible reason for breaking (2) is combining state in the hash expansion +// that was not used in `==`. +// For example: +// +// struct Bad2 { +// int a, b; +// template +// friend H AbslHashValue(H state, Bad2 x) { +// // Uses a and b. +// return H::combine(std::move(state), x.a, x.b); +// } +// friend bool operator==(Bad2 x, Bad2 y) { +// // Only uses a. +// return x.a == y.a; +// } +// }; +// +// As for (3), breaking this usually means that there is state being passed to +// the `==` operator that is not used in the hash expansion. +// For example: +// +// struct Bad3 { +// int a, b; +// template +// friend H AbslHashValue(H state, Bad3 x) { +// // Only uses a. +// return H::combine(std::move(state), x.a); +// } +// friend bool operator==(Bad3 x, Bad3 y) { +// // Uses a and b. +// return x.a == y.a && x.b == y.b; +// } +// }; +// +// Finally, a common way to break 4 is by combining dynamic ranges without +// combining the size of the range. +// For example: +// +// struct Bad4 { +// int *p, size; +// template +// friend H AbslHashValue(H state, Bad4 x) { +// return H::combine_contiguous(std::move(state), x.p, x.p + x.size); +// } +// friend bool operator==(Bad4 x, Bad4 y) { +// // Compare two ranges for equality. C++14 code can instead use std::equal. 
+// return absl::equal(x.p, x.p + x.size, y.p, y.p + y.size); +// } +// }; +// +// An easy solution to this is to combine the size after combining the range, +// like so: +// template +// friend H AbslHashValue(H state, Bad4 x) { +// return H::combine( +// H::combine_contiguous(std::move(state), x.p, x.p + x.size), x.size); +// } +// +template +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(const Container& values); + +template +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals); + +template +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values); + +template +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, + Eq equals); + +namespace hash_internal { + +struct PrintVisitor { + size_t index; + template + std::string operator()(const T* value) const { + return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")"); + } +}; + +template +struct EqVisitor { + Eq eq; + template + bool operator()(const T* t, const U* u) const { + return eq(*t, *u); + } +}; + +struct ExpandVisitor { + template + SpyHashState operator()(const T* value) const { + return SpyHashState::combine(SpyHashState(), *value); + } +}; + +template +ABSL_MUST_USE_RESULT testing::AssertionResult +VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) { + using V = typename Container::value_type; + + struct Info { + const V& value; + size_t index; + std::string ToString() const { + return absl::visit(PrintVisitor{index}, value); + } + SpyHashState expand() const { return absl::visit(ExpandVisitor{}, value); } + }; + + using EqClass = std::vector; + std::vector classes; + + // Gather the values in equivalence classes. + size_t i = 0; + for (const auto& value : values) { + EqClass* c = nullptr; + for (auto& eqclass : classes) { + if (absl::visit(EqVisitor{equals}, value, eqclass[0].value)) { + c = &eqclass; + break; + } + } + if (c == nullptr) { + classes.emplace_back(); + c = &classes.back(); + } + c->push_back({value, i}); + ++i; + + // Verify potential errors captured by SpyHashState. + if (auto error = c->back().expand().error()) { + return testing::AssertionFailure() << *error; + } + } + + if (classes.size() < 2) { + return testing::AssertionFailure() + << "At least two equivalence classes are expected."; + } + + // We assume that equality is correctly implemented. + // Now we verify that AbslHashValue is also correctly implemented. + + for (const auto& c : classes) { + // All elements of the equivalence class must have the same hash + // expansion. + const SpyHashState expected = c[0].expand(); + for (const Info& v : c) { + if (v.expand() != v.expand()) { + return testing::AssertionFailure() + << "Hash expansion for " << v.ToString() + << " is non-deterministic."; + } + if (v.expand() != expected) { + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << v.ToString() + << " evaluate as equal but have an unequal hash expansion."; + } + } + + // Elements from other classes must have different hash expansion. 
+ for (const auto& c2 : classes) { + if (&c == &c2) continue; + const SpyHashState c2_hash = c2[0].expand(); + switch (SpyHashState::Compare(expected, c2_hash)) { + case SpyHashState::CompareResult::kEqual: + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << c2[0].ToString() + << " evaluate as unequal but have an equal hash expansion."; + case SpyHashState::CompareResult::kBSuffixA: + return testing::AssertionFailure() + << "Hash expansion of " << c2[0].ToString() + << " is a suffix of the hash expansion of " << c[0].ToString() + << "."; + case SpyHashState::CompareResult::kASuffixB: + return testing::AssertionFailure() + << "Hash expansion of " << c[0].ToString() + << " is a suffix of the hash expansion of " << c2[0].ToString() + << "."; + case SpyHashState::CompareResult::kUnequal: + break; + } + } + } + return testing::AssertionSuccess(); +} + +template +struct TypeSet { + template ...>::value> + struct Insert { + using type = TypeSet; + }; + template + struct Insert { + using type = TypeSet; + }; + + template

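In practice, the verifier declared above is used from a test; a hedged sketch, assuming a user-defined MyInt whose operator== and AbslHashValue agree (MyInt and the test names are illustrative):

#include <utility>
#include "gtest/gtest.h"
#include "absl/hash/hash_testing.h"

struct MyInt {
  int v;
  friend bool operator==(MyInt a, MyInt b) { return a.v == b.v; }
  template <typename H>
  friend H AbslHashValue(H state, MyInt x) {
    // Combine exactly the fields that operator== compares, so requirements
    // (2) and (3) above hold.
    return H::combine(std::move(state), x.v);
  }
};

TEST(MyIntHashTest, ImplementsAbslHashCorrectly) {
  // The implementation above requires at least two equivalence classes.
  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
      {MyInt{0}, MyInt{1}, MyInt{2}, MyInt{0}}));
}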
[GIT binary patch data omitted]
Date: Sat, 15 Apr 2023 23:49:21 +0800 Subject: [PATCH 04/10] build(deps): add grpc headers for windows --- CAPI/cpp/grpc/LICENSE | 610 + .../grpc/include/absl/algorithm/algorithm.h | 159 + .../grpc/include/absl/algorithm/container.h | 1774 ++ CAPI/cpp/grpc/include/absl/base/attributes.h | 762 + CAPI/cpp/grpc/include/absl/base/call_once.h | 219 + CAPI/cpp/grpc/include/absl/base/casts.h | 180 + CAPI/cpp/grpc/include/absl/base/config.h | 913 + CAPI/cpp/grpc/include/absl/base/const_init.h | 76 + .../include/absl/base/dynamic_annotations.h | 471 + .../include/absl/base/internal/atomic_hook.h | 200 + .../base/internal/atomic_hook_test_helper.h | 34 + .../include/absl/base/internal/cycleclock.h | 159 + .../include/absl/base/internal/direct_mmap.h | 169 + .../absl/base/internal/dynamic_annotations.h | 398 + .../grpc/include/absl/base/internal/endian.h | 282 + .../include/absl/base/internal/errno_saver.h | 43 + .../base/internal/exception_safety_testing.h | 1109 ++ .../absl/base/internal/exception_testing.h | 42 + .../include/absl/base/internal/fast_type_id.h | 50 + .../include/absl/base/internal/hide_ptr.h | 51 + .../include/absl/base/internal/identity.h | 37 + .../absl/base/internal/inline_variable.h | 107 + .../base/internal/inline_variable_testing.h | 46 + .../grpc/include/absl/base/internal/invoke.h | 241 +
.../absl/base/internal/low_level_alloc.h | 126 + .../absl/base/internal/low_level_scheduling.h | 134 + .../absl/base/internal/per_thread_tls.h | 52 + .../include/absl/base/internal/prefetch.h | 138 + .../absl/base/internal/pretty_function.h | 33 + .../include/absl/base/internal/raw_logging.h | 196 + .../absl/base/internal/scheduling_mode.h | 58 + .../absl/base/internal/scoped_set_env.h | 45 + .../include/absl/base/internal/spinlock.h | 256 + .../absl/base/internal/spinlock_akaros.inc | 35 + .../absl/base/internal/spinlock_linux.inc | 71 + .../absl/base/internal/spinlock_posix.inc | 46 + .../absl/base/internal/spinlock_wait.h | 95 + .../absl/base/internal/spinlock_win32.inc | 37 + .../include/absl/base/internal/strerror.h | 39 + .../grpc/include/absl/base/internal/sysinfo.h | 74 + .../absl/base/internal/thread_annotations.h | 271 + .../absl/base/internal/thread_identity.h | 265 + .../absl/base/internal/throw_delegate.h | 75 + .../absl/base/internal/tsan_mutex_interface.h | 68 + .../absl/base/internal/unaligned_access.h | 82 + .../absl/base/internal/unscaledcycleclock.h | 133 + .../cpp/grpc/include/absl/base/log_severity.h | 172 + CAPI/cpp/grpc/include/absl/base/macros.h | 158 + .../cpp/grpc/include/absl/base/optimization.h | 252 + CAPI/cpp/grpc/include/absl/base/options.h | 238 + .../grpc/include/absl/base/policy_checks.h | 111 + CAPI/cpp/grpc/include/absl/base/port.h | 25 + .../include/absl/base/thread_annotations.h | 335 + CAPI/cpp/grpc/include/absl/cleanup/cleanup.h | 140 + .../include/absl/cleanup/internal/cleanup.h | 100 + .../grpc/include/absl/container/btree_map.h | 851 + .../grpc/include/absl/container/btree_set.h | 793 + .../grpc/include/absl/container/btree_test.h | 166 + .../grpc/include/absl/container/fixed_array.h | 529 + .../include/absl/container/flat_hash_map.h | 613 + .../include/absl/container/flat_hash_set.h | 510 + .../include/absl/container/inlined_vector.h | 866 + .../include/absl/container/internal/btree.h | 2854 +++ .../absl/container/internal/btree_container.h | 699 + .../include/absl/container/internal/common.h | 207 + .../container/internal/compressed_tuple.h | 290 + .../container/internal/container_memory.h | 442 + .../container/internal/counting_allocator.h | 122 + .../internal/hash_function_defaults.h | 163 + .../internal/hash_generator_testing.h | 182 + .../container/internal/hash_policy_testing.h | 184 + .../container/internal/hash_policy_traits.h | 208 + .../absl/container/internal/hashtable_debug.h | 110 + .../internal/hashtable_debug_hooks.h | 85 + .../container/internal/hashtablez_sampler.h | 299 + .../absl/container/internal/inlined_vector.h | 953 + .../include/absl/container/internal/layout.h | 743 + .../container/internal/node_slot_policy.h | 92 + .../absl/container/internal/raw_hash_map.h | 198 + .../absl/container/internal/raw_hash_set.h | 2365 +++ .../internal/test_instance_tracker.h | 274 + .../include/absl/container/internal/tracked.h | 83 + .../internal/unordered_map_constructor_test.h | 494 + .../internal/unordered_map_lookup_test.h | 117 + .../internal/unordered_map_members_test.h | 87 + .../internal/unordered_map_modifiers_test.h | 352 + .../internal/unordered_set_constructor_test.h | 496 + .../internal/unordered_set_lookup_test.h | 91 + .../internal/unordered_set_members_test.h | 86 + .../internal/unordered_set_modifiers_test.h | 221 + .../include/absl/container/node_hash_map.h | 604 + .../include/absl/container/node_hash_set.h | 500 + .../absl/debugging/failure_signal_handler.h | 121 + .../debugging/internal/address_is_readable.h | 32 + 
.../absl/debugging/internal/demangle.h | 71 + .../absl/debugging/internal/elf_mem_image.h | 139 + .../absl/debugging/internal/examine_stack.h | 64 + .../debugging/internal/stack_consumption.h | 50 + .../internal/stacktrace_aarch64-inl.inc | 204 + .../debugging/internal/stacktrace_arm-inl.inc | 139 + .../debugging/internal/stacktrace_config.h | 88 + .../internal/stacktrace_emscripten-inl.inc | 110 + .../internal/stacktrace_generic-inl.inc | 108 + .../internal/stacktrace_powerpc-inl.inc | 258 + .../internal/stacktrace_riscv-inl.inc | 236 + .../internal/stacktrace_unimplemented-inl.inc | 24 + .../internal/stacktrace_win32-inl.inc | 93 + .../debugging/internal/stacktrace_x86-inl.inc | 369 + .../absl/debugging/internal/symbolize.h | 153 + .../absl/debugging/internal/vdso_support.h | 158 + .../grpc/include/absl/debugging/leak_check.h | 150 + .../grpc/include/absl/debugging/stacktrace.h | 231 + .../grpc/include/absl/debugging/symbolize.h | 99 + .../absl/debugging/symbolize_darwin.inc | 101 + .../include/absl/debugging/symbolize_elf.inc | 1613 ++ .../absl/debugging/symbolize_emscripten.inc | 72 + .../debugging/symbolize_unimplemented.inc | 40 + .../absl/debugging/symbolize_win32.inc | 81 + .../grpc/include/absl/flags/commandlineflag.h | 200 + CAPI/cpp/grpc/include/absl/flags/config.h | 68 + CAPI/cpp/grpc/include/absl/flags/declare.h | 73 + CAPI/cpp/grpc/include/absl/flags/flag.h | 310 + .../absl/flags/internal/commandlineflag.h | 68 + .../grpc/include/absl/flags/internal/flag.h | 800 + .../include/absl/flags/internal/flag_msvc.inc | 116 + .../grpc/include/absl/flags/internal/parse.h | 59 + .../include/absl/flags/internal/path_util.h | 62 + .../flags/internal/private_handle_accessor.h | 61 + .../absl/flags/internal/program_name.h | 50 + .../include/absl/flags/internal/registry.h | 97 + .../absl/flags/internal/sequence_lock.h | 187 + .../grpc/include/absl/flags/internal/usage.h | 104 + .../cpp/grpc/include/absl/flags/marshalling.h | 356 + CAPI/cpp/grpc/include/absl/flags/parse.h | 60 + CAPI/cpp/grpc/include/absl/flags/reflection.h | 90 + CAPI/cpp/grpc/include/absl/flags/usage.h | 43 + .../grpc/include/absl/flags/usage_config.h | 135 + .../include/absl/functional/any_invocable.h | 313 + .../grpc/include/absl/functional/bind_front.h | 193 + .../include/absl/functional/function_ref.h | 143 + .../absl/functional/internal/any_invocable.h | 857 + .../absl/functional/internal/front_binder.h | 95 + .../absl/functional/internal/function_ref.h | 106 + CAPI/cpp/grpc/include/absl/hash/hash.h | 421 + .../cpp/grpc/include/absl/hash/hash_testing.h | 378 + .../grpc/include/absl/hash/internal/city.h | 78 + .../grpc/include/absl/hash/internal/hash.h | 1291 ++ .../absl/hash/internal/low_level_hash.h | 50 + .../absl/hash/internal/spy_hash_state.h | 266 + CAPI/cpp/grpc/include/absl/memory/memory.h | 699 + CAPI/cpp/grpc/include/absl/meta/type_traits.h | 797 + CAPI/cpp/grpc/include/absl/numeric/bits.h | 178 + CAPI/cpp/grpc/include/absl/numeric/int128.h | 1165 ++ .../absl/numeric/int128_have_intrinsic.inc | 296 + .../absl/numeric/int128_no_intrinsic.inc | 311 + .../grpc/include/absl/numeric/internal/bits.h | 358 + .../absl/numeric/internal/representation.h | 55 + .../profiling/internal/exponential_biased.h | 130 + .../profiling/internal/periodic_sampler.h | 211 + .../absl/profiling/internal/sample_recorder.h | 245 + .../absl/random/bernoulli_distribution.h | 200 + .../include/absl/random/beta_distribution.h | 427 + .../grpc/include/absl/random/bit_gen_ref.h | 185 + .../absl/random/discrete_distribution.h | 247 + 
.../grpc/include/absl/random/distributions.h | 452 + .../absl/random/exponential_distribution.h | 165 + .../absl/random/gaussian_distribution.h | 275 + .../include/absl/random/internal/chi_square.h | 89 + .../random/internal/distribution_caller.h | 95 + .../random/internal/distribution_test_util.h | 113 + .../absl/random/internal/explicit_seed_seq.h | 92 + .../absl/random/internal/fast_uniform_bits.h | 269 + .../include/absl/random/internal/fastmath.h | 57 + .../absl/random/internal/generate_real.h | 144 + .../random/internal/iostream_state_saver.h | 245 + .../absl/random/internal/mock_helpers.h | 135 + .../absl/random/internal/mock_overload_set.h | 100 + .../absl/random/internal/nanobenchmark.h | 172 + .../absl/random/internal/nonsecure_base.h | 161 + .../include/absl/random/internal/pcg_engine.h | 308 + .../include/absl/random/internal/platform.h | 171 + .../include/absl/random/internal/pool_urbg.h | 131 + .../include/absl/random/internal/randen.h | 96 + .../absl/random/internal/randen_detect.h | 33 + .../absl/random/internal/randen_engine.h | 264 + .../absl/random/internal/randen_hwaes.h | 50 + .../absl/random/internal/randen_slow.h | 40 + .../absl/random/internal/randen_traits.h | 88 + .../absl/random/internal/salted_seed_seq.h | 165 + .../absl/random/internal/seed_material.h | 104 + .../absl/random/internal/sequence_urbg.h | 60 + .../include/absl/random/internal/traits.h | 149 + .../absl/random/internal/uniform_helper.h | 244 + .../absl/random/internal/wide_multiply.h | 96 + .../random/log_uniform_int_distribution.h | 256 + .../include/absl/random/mock_distributions.h | 266 + .../include/absl/random/mocking_bit_gen.h | 240 + .../absl/random/poisson_distribution.h | 261 + CAPI/cpp/grpc/include/absl/random/random.h | 189 + .../include/absl/random/seed_gen_exception.h | 55 + .../grpc/include/absl/random/seed_sequences.h | 111 + .../absl/random/uniform_int_distribution.h | 275 + .../absl/random/uniform_real_distribution.h | 202 + .../include/absl/random/zipf_distribution.h | 272 + .../absl/status/internal/status_internal.h | 86 + .../absl/status/internal/statusor_internal.h | 396 + CAPI/cpp/grpc/include/absl/status/status.h | 892 + .../absl/status/status_payload_printer.h | 51 + CAPI/cpp/grpc/include/absl/status/statusor.h | 776 + CAPI/cpp/grpc/include/absl/strings/ascii.h | 242 + CAPI/cpp/grpc/include/absl/strings/charconv.h | 120 + CAPI/cpp/grpc/include/absl/strings/cord.h | 1642 ++ .../grpc/include/absl/strings/cord_analysis.h | 44 + .../grpc/include/absl/strings/cord_buffer.h | 572 + .../include/absl/strings/cord_test_helpers.h | 122 + .../include/absl/strings/cordz_test_helpers.h | 151 + CAPI/cpp/grpc/include/absl/strings/escaping.h | 164 + .../include/absl/strings/internal/char_map.h | 156 + .../absl/strings/internal/charconv_bigint.h | 423 + .../absl/strings/internal/charconv_parse.h | 99 + .../absl/strings/internal/cord_data_edge.h | 63 + .../absl/strings/internal/cord_internal.h | 655 + .../absl/strings/internal/cord_rep_btree.h | 924 + .../internal/cord_rep_btree_navigator.h | 267 + .../strings/internal/cord_rep_btree_reader.h | 212 + .../absl/strings/internal/cord_rep_consume.h | 50 + .../absl/strings/internal/cord_rep_crc.h | 102 + .../absl/strings/internal/cord_rep_flat.h | 187 + .../absl/strings/internal/cord_rep_ring.h | 607 + .../strings/internal/cord_rep_ring_reader.h | 118 + .../strings/internal/cord_rep_test_util.h | 205 + .../absl/strings/internal/cordz_functions.h | 85 + .../absl/strings/internal/cordz_handle.h | 131 + .../absl/strings/internal/cordz_info.h | 298 + 
.../strings/internal/cordz_sample_token.h | 97 +
.../absl/strings/internal/cordz_statistics.h | 88 +
.../strings/internal/cordz_update_scope.h | 71 +
.../strings/internal/cordz_update_tracker.h | 123 +
.../include/absl/strings/internal/escaping.h | 58 +
.../strings/internal/escaping_test_common.h | 133 +
.../include/absl/strings/internal/memutil.h | 148 +
.../strings/internal/numbers_test_common.h | 184 +
.../absl/strings/internal/ostringstream.h | 89 +
.../absl/strings/internal/pow10_helper.h | 40 +
.../strings/internal/resize_uninitialized.h | 119 +
.../absl/strings/internal/stl_type_traits.h | 248 +
.../absl/strings/internal/str_format/arg.h | 526 +
.../absl/strings/internal/str_format/bind.h | 248 +
.../strings/internal/str_format/checker.h | 338 +
.../strings/internal/str_format/extension.h | 448 +
.../internal/str_format/float_conversion.h | 37 +
.../absl/strings/internal/str_format/output.h | 97 +
.../absl/strings/internal/str_format/parser.h | 359 +
.../absl/strings/internal/str_join_internal.h | 317 +
.../strings/internal/str_split_internal.h | 430 +
.../absl/strings/internal/string_constant.h | 72 +
.../grpc/include/absl/strings/internal/utf8.h | 50 +
CAPI/cpp/grpc/include/absl/strings/match.h | 100 +
CAPI/cpp/grpc/include/absl/strings/numbers.h | 303 +
CAPI/cpp/grpc/include/absl/strings/str_cat.h | 415 +
.../grpc/include/absl/strings/str_format.h | 812 +
CAPI/cpp/grpc/include/absl/strings/str_join.h | 287 +
.../grpc/include/absl/strings/str_replace.h | 219 +
.../cpp/grpc/include/absl/strings/str_split.h | 547 +
.../grpc/include/absl/strings/string_view.h | 711 +
CAPI/cpp/grpc/include/absl/strings/strip.h | 93 +
.../grpc/include/absl/strings/substitute.h | 729 +
.../include/absl/synchronization/barrier.h | 79 +
.../absl/synchronization/blocking_counter.h | 101 +
.../internal/create_thread_identity.h | 56 +
.../absl/synchronization/internal/futex.h | 154 +
.../synchronization/internal/graphcycles.h | 141 +
.../synchronization/internal/kernel_timeout.h | 156 +
.../synchronization/internal/per_thread_sem.h | 110 +
.../synchronization/internal/thread_pool.h | 93 +
.../absl/synchronization/internal/waiter.h | 161 +
.../grpc/include/absl/synchronization/mutex.h | 1090 ++
.../absl/synchronization/notification.h | 124 +
CAPI/cpp/grpc/include/absl/time/civil_time.h | 538 +
CAPI/cpp/grpc/include/absl/time/clock.h | 74 +
.../internal/cctz/include/cctz/civil_time.h | 332 +
.../cctz/include/cctz/civil_time_detail.h | 632 +
.../internal/cctz/include/cctz/time_zone.h | 459 +
.../cctz/include/cctz/zone_info_source.h | 102 +
.../time/internal/cctz/src/time_zone_fixed.h | 52 +
.../time/internal/cctz/src/time_zone_if.h | 77 +
.../time/internal/cctz/src/time_zone_impl.h | 93 +
.../time/internal/cctz/src/time_zone_info.h | 137 +
.../time/internal/cctz/src/time_zone_libc.h | 55 +
.../time/internal/cctz/src/time_zone_posix.h | 132 +
.../absl/time/internal/cctz/src/tzfile.h | 122 +
.../time/internal/get_current_time_chrono.inc | 31 +
.../time/internal/get_current_time_posix.inc | 24 +
.../include/absl/time/internal/test_util.h | 33 +
.../include/absl/time/internal/zoneinfo.inc | 724 +
CAPI/cpp/grpc/include/absl/time/time.h | 1620 ++
CAPI/cpp/grpc/include/absl/types/any.h | 517 +
.../grpc/include/absl/types/bad_any_cast.h | 75 +
.../include/absl/types/bad_optional_access.h | 78 +
.../include/absl/types/bad_variant_access.h | 82 +
CAPI/cpp/grpc/include/absl/types/compare.h | 600 +
.../absl/types/internal/conformance_aliases.h | 447 +
.../types/internal/conformance_archetype.h | 978 +
.../absl/types/internal/conformance_profile.h | 933 +
.../absl/types/internal/conformance_testing.h | 1386 ++
.../internal/conformance_testing_helpers.h | 391 +
.../include/absl/types/internal/optional.h | 404 +
.../include/absl/types/internal/parentheses.h | 34 +
.../grpc/include/absl/types/internal/span.h | 128 +
.../absl/types/internal/transform_args.h | 246 +
.../include/absl/types/internal/variant.h | 1646 ++
CAPI/cpp/grpc/include/absl/types/optional.h | 779 +
CAPI/cpp/grpc/include/absl/types/span.h | 727 +
CAPI/cpp/grpc/include/absl/types/variant.h | 866 +
CAPI/cpp/grpc/include/absl/utility/utility.h | 350 +
CAPI/cpp/grpc/include/ares.h | 755 +
CAPI/cpp/grpc/include/ares_build.h | 43 +
CAPI/cpp/grpc/include/ares_dns.h | 112 +
CAPI/cpp/grpc/include/ares_nameser.h | 484 +
CAPI/cpp/grpc/include/ares_rules.h | 125 +
CAPI/cpp/grpc/include/ares_version.h | 24 +
CAPI/cpp/grpc/include/google/protobuf/any.h | 157 +
.../cpp/grpc/include/google/protobuf/any.pb.h | 384 +
.../grpc/include/google/protobuf/any.proto | 158 +
.../cpp/grpc/include/google/protobuf/api.pb.h | 1437 ++
.../grpc/include/google/protobuf/api.proto | 208 +
CAPI/cpp/grpc/include/google/protobuf/arena.h | 851 +
.../grpc/include/google/protobuf/arena_impl.h | 686 +
.../include/google/protobuf/arenastring.h | 480 +
.../include/google/protobuf/arenaz_sampler.h | 207 +
.../google/protobuf/compiler/code_generator.h | 207 +
.../compiler/command_line_interface.h | 464 +
.../protobuf/compiler/cpp/cpp_generator.h | 6 +
.../google/protobuf/compiler/cpp/file.h | 209 +
.../google/protobuf/compiler/cpp/generator.h | 107 +
.../google/protobuf/compiler/cpp/helpers.h | 1064 ++
.../google/protobuf/compiler/cpp/names.h | 97 +
.../compiler/csharp/csharp_doc_comment.h | 51 +
.../compiler/csharp/csharp_generator.h | 70 +
.../protobuf/compiler/csharp/csharp_names.h | 109 +
.../protobuf/compiler/csharp/csharp_options.h | 81 +
.../google/protobuf/compiler/importer.h | 338 +
.../google/protobuf/compiler/java/generator.h | 77 +
.../protobuf/compiler/java/java_generator.h | 6 +
.../protobuf/compiler/java/kotlin_generator.h | 74 +
.../google/protobuf/compiler/java/names.h | 100 +
.../objectivec/objectivec_generator.h | 79 +
.../compiler/objectivec/objectivec_helpers.h | 353 +
.../include/google/protobuf/compiler/parser.h | 602 +
.../protobuf/compiler/php/php_generator.h | 92 +
.../include/google/protobuf/compiler/plugin.h | 96 +
.../google/protobuf/compiler/plugin.pb.h | 1901 ++
.../google/protobuf/compiler/plugin.proto | 183 +
.../protobuf/compiler/python/generator.h | 185 +
.../protobuf/compiler/python/pyi_generator.h | 120 +
.../compiler/python/python_generator.h | 6 +
.../protobuf/compiler/ruby/ruby_generator.h | 67 +
.../grpc/include/google/protobuf/descriptor.h | 2440 +++
.../include/google/protobuf/descriptor.pb.h | 14821 ++++++++++++++++
.../include/google/protobuf/descriptor.proto | 921 +
.../google/protobuf/descriptor_database.h | 398 +
.../include/google/protobuf/duration.pb.h | 278 +
.../include/google/protobuf/duration.proto | 116 +
.../include/google/protobuf/dynamic_message.h | 227 +
.../grpc/include/google/protobuf/empty.pb.h | 198 +
.../grpc/include/google/protobuf/empty.proto | 51 +
.../cpp/grpc/include/google/protobuf/endian.h | 198 +
.../google/protobuf/explicitly_constructed.h | 97 +
.../include/google/protobuf/extension_set.h | 1561 ++
.../google/protobuf/extension_set_inl.h | 285 +
.../google/protobuf/field_access_listener.h | 172 +
.../include/google/protobuf/field_mask.pb.h | 317 +
.../include/google/protobuf/field_mask.proto | 245 +
.../protobuf/generated_enum_reflection.h | 100 +
.../google/protobuf/generated_enum_util.h | 85 +
.../google/protobuf/generated_message_bases.h | 87 +
.../protobuf/generated_message_reflection.h | 354 +
.../protobuf/generated_message_tctable_decl.h | 312 +
.../protobuf/generated_message_tctable_impl.h | 553 +
.../google/protobuf/generated_message_util.h | 214 +
.../grpc/include/google/protobuf/has_bits.h | 117 +
.../google/protobuf/implicit_weak_message.h | 213 +
.../google/protobuf/inlined_string_field.h | 532 +
.../include/google/protobuf/io/coded_stream.h | 1799 ++
.../include/google/protobuf/io/gzip_stream.h | 205 +
.../include/google/protobuf/io/io_win32.h | 141 +
.../grpc/include/google/protobuf/io/printer.h | 387 +
.../grpc/include/google/protobuf/io/strtod.h | 55 +
.../include/google/protobuf/io/tokenizer.h | 442 +
.../google/protobuf/io/zero_copy_stream.h | 260 +
.../protobuf/io/zero_copy_stream_impl.h | 336 +
.../protobuf/io/zero_copy_stream_impl_lite.h | 413 +
CAPI/cpp/grpc/include/google/protobuf/map.h | 1448 ++
.../grpc/include/google/protobuf/map_entry.h | 134 +
.../include/google/protobuf/map_entry_lite.h | 563 +
.../grpc/include/google/protobuf/map_field.h | 946 +
.../include/google/protobuf/map_field_inl.h | 375 +
.../include/google/protobuf/map_field_lite.h | 209 +
.../google/protobuf/map_type_handler.h | 736 +
.../grpc/include/google/protobuf/message.h | 1497 ++
.../include/google/protobuf/message_lite.h | 591 +
.../grpc/include/google/protobuf/metadata.h | 36 +
.../include/google/protobuf/metadata_lite.h | 316 +
.../include/google/protobuf/parse_context.h | 1025 ++
CAPI/cpp/grpc/include/google/protobuf/port.h | 80 +
.../grpc/include/google/protobuf/port_def.inc | 928 +
.../include/google/protobuf/port_undef.inc | 160 +
.../grpc/include/google/protobuf/reflection.h | 570 +
.../include/google/protobuf/reflection_ops.h | 92 +
.../include/google/protobuf/repeated_field.h | 1219 ++
.../google/protobuf/repeated_ptr_field.h | 1970 ++
.../grpc/include/google/protobuf/service.h | 295 +
.../google/protobuf/source_context.pb.h | 282 +
.../google/protobuf/source_context.proto | 48 +
.../grpc/include/google/protobuf/struct.pb.h | 1177 ++
.../grpc/include/google/protobuf/struct.proto | 95 +
.../google/protobuf/stubs/bytestream.h | 351 +
.../include/google/protobuf/stubs/callback.h | 583 +
.../include/google/protobuf/stubs/casts.h | 138 +
.../include/google/protobuf/stubs/common.h | 197 +
.../grpc/include/google/protobuf/stubs/hash.h | 114 +
.../include/google/protobuf/stubs/logging.h | 239 +
.../include/google/protobuf/stubs/macros.h | 93 +
.../include/google/protobuf/stubs/map_util.h | 769 +
.../include/google/protobuf/stubs/mutex.h | 218 +
.../grpc/include/google/protobuf/stubs/once.h | 55 +
.../google/protobuf/stubs/platform_macros.h | 138 +
.../grpc/include/google/protobuf/stubs/port.h | 413 +
.../include/google/protobuf/stubs/status.h | 196 +
.../include/google/protobuf/stubs/stl_util.h | 90 +
.../google/protobuf/stubs/stringpiece.h | 402 +
.../include/google/protobuf/stubs/strutil.h | 950 +
.../google/protobuf/stubs/template_util.h | 138 +
.../include/google/protobuf/text_format.h | 693 +
.../include/google/protobuf/timestamp.pb.h | 278 +
.../include/google/protobuf/timestamp.proto | 147 +
.../grpc/include/google/protobuf/type.pb.h | 2571 +++
.../grpc/include/google/protobuf/type.proto | 187 +
.../google/protobuf/unknown_field_set.h | 407 +
.../protobuf/util/delimited_message_util.h | 109 +
.../google/protobuf/util/field_comparator.h | 288 +
.../google/protobuf/util/field_mask_util.h | 263 +
.../include/google/protobuf/util/json_util.h | 204 +
.../protobuf/util/message_differencer.h | 980 +
.../include/google/protobuf/util/time_util.h | 314 +
.../google/protobuf/util/type_resolver.h | 77 +
.../google/protobuf/util/type_resolver_util.h | 58 +
.../include/google/protobuf/wire_format.h | 414 +
.../google/protobuf/wire_format_lite.h | 1908 ++
.../include/google/protobuf/wrappers.pb.h | 1741 ++
.../include/google/protobuf/wrappers.proto | 123 +
CAPI/cpp/grpc/include/grpc++/alarm.h | 28 +
CAPI/cpp/grpc/include/grpc++/channel.h | 28 +
CAPI/cpp/grpc/include/grpc++/client_context.h | 28 +
.../grpc/include/grpc++/completion_queue.h | 28 +
CAPI/cpp/grpc/include/grpc++/create_channel.h | 28 +
.../include/grpc++/create_channel_posix.h | 28 +
...alth_check_service_server_builder_option.h | 28 +
.../grpc++/generic/async_generic_service.h | 28 +
.../include/grpc++/generic/generic_stub.h | 28 +
CAPI/cpp/grpc/include/grpc++/grpc++.h | 28 +
.../grpc++/health_check_service_interface.h | 28 +
CAPI/cpp/grpc/include/grpc++/impl/call.h | 28 +
.../grpc++/impl/channel_argument_option.h | 28 +
.../include/grpc++/impl/client_unary_call.h | 28 +
.../grpc++/impl/codegen/async_stream.h | 28 +
.../grpc++/impl/codegen/async_unary_call.h | 28 +
.../include/grpc++/impl/codegen/byte_buffer.h | 28 +
.../grpc/include/grpc++/impl/codegen/call.h | 28 +
.../include/grpc++/impl/codegen/call_hook.h | 28 +
.../grpc++/impl/codegen/channel_interface.h | 28 +
.../grpc++/impl/codegen/client_context.h | 28 +
.../grpc++/impl/codegen/client_unary_call.h | 28 +
.../grpc++/impl/codegen/completion_queue.h | 28 +
.../impl/codegen/completion_queue_tag.h | 28 +
.../grpc/include/grpc++/impl/codegen/config.h | 28 +
.../grpc++/impl/codegen/config_protobuf.h | 28 +
.../grpc++/impl/codegen/core_codegen.h | 28 +
.../impl/codegen/core_codegen_interface.h | 28 +
.../grpc++/impl/codegen/create_auth_context.h | 28 +
.../grpc++/impl/codegen/grpc_library.h | 28 +
.../grpc++/impl/codegen/metadata_map.h | 28 +
.../grpc++/impl/codegen/method_handler_impl.h | 28 +
.../include/grpc++/impl/codegen/proto_utils.h | 28 +
.../include/grpc++/impl/codegen/rpc_method.h | 28 +
.../grpc++/impl/codegen/rpc_service_method.h | 28 +
.../impl/codegen/security/auth_context.h | 28 +
.../impl/codegen/serialization_traits.h | 28 +
.../grpc++/impl/codegen/server_context.h | 28 +
.../grpc++/impl/codegen/server_interface.h | 28 +
.../grpc++/impl/codegen/service_type.h | 28 +
.../grpc/include/grpc++/impl/codegen/slice.h | 28 +
.../grpc/include/grpc++/impl/codegen/status.h | 28 +
.../grpc++/impl/codegen/status_code_enum.h | 28 +
.../include/grpc++/impl/codegen/string_ref.h | 28 +
.../grpc++/impl/codegen/stub_options.h | 28 +
.../include/grpc++/impl/codegen/sync_stream.h | 28 +
.../grpc/include/grpc++/impl/codegen/time.h | 28 +
.../grpc/include/grpc++/impl/grpc_library.h | 28 +
.../include/grpc++/impl/method_handler_impl.h | 28 +
.../cpp/grpc/include/grpc++/impl/rpc_method.h | 28 +
.../include/grpc++/impl/rpc_service_method.h | 28 +
.../grpc++/impl/serialization_traits.h | 28 +
.../grpc++/impl/server_builder_option.h | 28 +
.../grpc++/impl/server_builder_plugin.h | 28 +
.../include/grpc++/impl/server_initializer.h | 28 +
.../grpc/include/grpc++/impl/service_type.h | 28 +
CAPI/cpp/grpc/include/grpc++/resource_quota.h | 28 +
.../include/grpc++/security/auth_context.h | 28 +
.../grpc++/security/auth_metadata_processor.h | 28 +
.../include/grpc++/security/credentials.h | 28 +
.../grpc++/security/server_credentials.h | 28 +
CAPI/cpp/grpc/include/grpc++/server.h | 28 +
CAPI/cpp/grpc/include/grpc++/server_builder.h | 28 +
CAPI/cpp/grpc/include/grpc++/server_context.h | 28 +
CAPI/cpp/grpc/include/grpc++/server_posix.h | 28 +
.../include/grpc++/support/async_stream.h | 28 +
.../include/grpc++/support/async_unary_call.h | 28 +
.../grpc/include/grpc++/support/byte_buffer.h | 28 +
.../grpc++/support/channel_arguments.h | 28 +
CAPI/cpp/grpc/include/grpc++/support/config.h | 28 +
.../include/grpc++/support/error_details.h | 28 +
CAPI/cpp/grpc/include/grpc++/support/slice.h | 28 +
CAPI/cpp/grpc/include/grpc++/support/status.h | 28 +
.../include/grpc++/support/status_code_enum.h | 28 +
.../grpc/include/grpc++/support/string_ref.h | 28 +
.../include/grpc++/support/stub_options.h | 28 +
.../grpc/include/grpc++/support/sync_stream.h | 28 +
CAPI/cpp/grpc/include/grpc++/support/time.h | 28 +
CAPI/cpp/grpc/include/grpc/byte_buffer.h | 27 +
.../grpc/include/grpc/byte_buffer_reader.h | 26 +
CAPI/cpp/grpc/include/grpc/census.h | 40 +
CAPI/cpp/grpc/include/grpc/compression.h | 75 +
.../grpc/event_engine/endpoint_config.h | 43 +
.../include/grpc/event_engine/event_engine.h | 446 +
.../internal/memory_allocator_impl.h | 68 +
.../grpc/event_engine/memory_allocator.h | 211 +
.../grpc/event_engine/memory_request.h | 57 +
.../cpp/grpc/include/grpc/event_engine/port.h | 39 +
.../grpc/include/grpc/event_engine/slice.h | 286 +
.../include/grpc/event_engine/slice_buffer.h | 118 +
CAPI/cpp/grpc/include/grpc/fork.h | 26 +
CAPI/cpp/grpc/include/grpc/grpc.h | 605 +
CAPI/cpp/grpc/include/grpc/grpc_posix.h | 63 +
CAPI/cpp/grpc/include/grpc/grpc_security.h | 1270 ++
.../include/grpc/grpc_security_constants.h | 152 +
CAPI/cpp/grpc/include/grpc/impl/codegen/atm.h | 97 +
.../grpc/impl/codegen/atm_gcc_atomic.h | 93 +
.../include/grpc/impl/codegen/atm_gcc_sync.h | 87 +
.../include/grpc/impl/codegen/atm_windows.h | 134 +
.../include/grpc/impl/codegen/byte_buffer.h | 103 +
.../grpc/impl/codegen/byte_buffer_reader.h | 44 +
.../grpc/impl/codegen/compression_types.h | 109 +
.../grpc/impl/codegen/connectivity_state.h | 47 +
.../cpp/grpc/include/grpc/impl/codegen/fork.h | 50 +
.../include/grpc/impl/codegen/gpr_slice.h | 71 +
.../include/grpc/impl/codegen/gpr_types.h | 62 +
.../include/grpc/impl/codegen/grpc_types.h | 812 +
CAPI/cpp/grpc/include/grpc/impl/codegen/log.h | 112 +
.../include/grpc/impl/codegen/port_platform.h | 782 +
.../grpc/impl/codegen/propagation_bits.h | 54 +
.../grpc/include/grpc/impl/codegen/slice.h | 132 +
.../grpc/include/grpc/impl/codegen/status.h | 156 +
.../cpp/grpc/include/grpc/impl/codegen/sync.h | 68 +
.../include/grpc/impl/codegen/sync_abseil.h | 38 +
.../include/grpc/impl/codegen/sync_custom.h | 40 +
.../include/grpc/impl/codegen/sync_generic.h | 51 +
.../include/grpc/impl/codegen/sync_posix.h | 54 +
.../include/grpc/impl/codegen/sync_windows.h | 42 +
CAPI/cpp/grpc/include/grpc/load_reporting.h | 48 +
CAPI/cpp/grpc/include/grpc/slice.h | 161 +
CAPI/cpp/grpc/include/grpc/slice_buffer.h | 84 +
CAPI/cpp/grpc/include/grpc/status.h | 26 +
CAPI/cpp/grpc/include/grpc/support/alloc.h | 52 +
CAPI/cpp/grpc/include/grpc/support/atm.h | 26 +
.../include/grpc/support/atm_gcc_atomic.h | 26 +
.../grpc/include/grpc/support/atm_gcc_sync.h | 26 +
.../grpc/include/grpc/support/atm_windows.h | 26 +
CAPI/cpp/grpc/include/grpc/support/cpu.h | 44 +
CAPI/cpp/grpc/include/grpc/support/log.h | 26 +
.../grpc/include/grpc/support/log_windows.h | 38 +
.../grpc/include/grpc/support/port_platform.h | 24 +
.../grpc/include/grpc/support/string_util.h | 51 +
CAPI/cpp/grpc/include/grpc/support/sync.h | 282 +
.../grpc/include/grpc/support/sync_abseil.h | 26 +
.../grpc/include/grpc/support/sync_custom.h | 26 +
.../grpc/include/grpc/support/sync_generic.h | 26 +
.../grpc/include/grpc/support/sync_posix.h | 26 +
.../grpc/include/grpc/support/sync_windows.h | 26 +
CAPI/cpp/grpc/include/grpc/support/thd_id.h | 44 +
CAPI/cpp/grpc/include/grpc/support/time.h | 92 +
.../include/grpc/support/workaround_list.h | 31 +
CAPI/cpp/grpc/include/grpcpp/alarm.h | 101 +
CAPI/cpp/grpc/include/grpcpp/channel.h | 126 +
CAPI/cpp/grpc/include/grpcpp/client_context.h | 39 +
.../grpc/include/grpcpp/completion_queue.h | 24 +
CAPI/cpp/grpc/include/grpcpp/create_channel.h | 78 +
.../include/grpcpp/create_channel_binder.h | 126 +
.../include/grpcpp/create_channel_posix.h | 71 +
.../include/grpcpp/ext/call_metric_recorder.h | 94 +
...alth_check_service_server_builder_option.h | 47 +
.../grpcpp/generic/async_generic_service.h | 24 +
.../include/grpcpp/generic/generic_stub.h | 175 +
CAPI/cpp/grpc/include/grpcpp/grpcpp.h | 68 +
.../grpcpp/health_check_service_interface.h | 58 +
CAPI/cpp/grpc/include/grpcpp/impl/call.h | 24 +
.../grpcpp/impl/channel_argument_option.h | 37 +
.../include/grpcpp/impl/client_unary_call.h | 24 +
.../impl/codegen/async_generic_service.h | 137 +
.../grpcpp/impl/codegen/async_stream.h | 1132 ++
.../grpcpp/impl/codegen/async_unary_call.h | 420 +
.../include/grpcpp/impl/codegen/byte_buffer.h | 239 +
.../grpc/include/grpcpp/impl/codegen/call.h | 95 +
.../include/grpcpp/impl/codegen/call_hook.h | 41 +
.../include/grpcpp/impl/codegen/call_op_set.h | 1045 ++
.../impl/codegen/call_op_set_interface.h | 61 +
.../grpcpp/impl/codegen/callback_common.h | 222 +
.../grpcpp/impl/codegen/channel_interface.h | 172 +
.../grpcpp/impl/codegen/client_callback.h | 1229 ++
.../grpcpp/impl/codegen/client_context.h | 518 +
.../grpcpp/impl/codegen/client_interceptor.h | 199 +
.../grpcpp/impl/codegen/client_unary_call.h | 105 +
.../grpcpp/impl/codegen/completion_queue.h | 466 +
.../impl/codegen/completion_queue_tag.h | 56 +
.../grpc/include/grpcpp/impl/codegen/config.h | 46 +
.../grpcpp/impl/codegen/config_protobuf.h | 108 +
.../grpcpp/impl/codegen/core_codegen.h | 131 +
.../impl/codegen/core_codegen_interface.h | 169 +
.../grpcpp/impl/codegen/create_auth_context.h | 35 +
.../grpcpp/impl/codegen/delegating_channel.h | 93 +
.../grpcpp/impl/codegen/grpc_library.h | 67 +
.../grpcpp/impl/codegen/intercepted_channel.h | 85 +
.../include/grpcpp/impl/codegen/interceptor.h | 233 +
.../grpcpp/impl/codegen/interceptor_common.h | 540 +
.../grpcpp/impl/codegen/message_allocator.h | 73 +
.../grpcpp/impl/codegen/metadata_map.h | 107 +
.../grpcpp/impl/codegen/method_handler.h | 406 +
.../grpcpp/impl/codegen/method_handler_impl.h | 24 +
.../grpcpp/impl/codegen/proto_buffer_reader.h | 151 +
.../grpcpp/impl/codegen/proto_buffer_writer.h | 181 +
.../include/grpcpp/impl/codegen/proto_utils.h | 121 +
.../include/grpcpp/impl/codegen/rpc_method.h | 82 +
.../grpcpp/impl/codegen/rpc_service_method.h | 155 +
.../impl/codegen/security/auth_context.h | 101 +
.../impl/codegen/serialization_traits.h | 64 +
.../grpcpp/impl/codegen/server_callback.h | 799 +
.../impl/codegen/server_callback_handlers.h | 889 +
.../grpcpp/impl/codegen/server_context.h | 679 +
.../grpcpp/impl/codegen/server_interceptor.h | 141 +
.../grpcpp/impl/codegen/server_interface.h | 370 +
.../grpcpp/impl/codegen/service_type.h | 238 +
.../grpc/include/grpcpp/impl/codegen/slice.h | 156 +
.../grpc/include/grpcpp/impl/codegen/status.h | 141 +
.../grpcpp/impl/codegen/status_code_enum.h | 145 +
.../include/grpcpp/impl/codegen/string_ref.h | 151 +
.../grpcpp/impl/codegen/stub_options.h | 44 +
.../grpc/include/grpcpp/impl/codegen/sync.h | 158 +
.../include/grpcpp/impl/codegen/sync_stream.h | 950 +
.../grpc/include/grpcpp/impl/codegen/time.h | 91 +
.../grpc/include/grpcpp/impl/grpc_library.h | 60 +
.../include/grpcpp/impl/method_handler_impl.h | 24 +
.../cpp/grpc/include/grpcpp/impl/rpc_method.h | 24 +
.../include/grpcpp/impl/rpc_service_method.h | 24 +
.../grpcpp/impl/serialization_traits.h | 24 +
.../grpcpp/impl/server_builder_option.h | 43 +
.../grpcpp/impl/server_builder_plugin.h | 66 +
.../include/grpcpp/impl/server_initializer.h | 54 +
.../grpc/include/grpcpp/impl/service_type.h | 24 +
CAPI/cpp/grpc/include/grpcpp/resource_quota.h | 68 +
.../include/grpcpp/security/alts_context.h | 69 +
.../grpc/include/grpcpp/security/alts_util.h | 50 +
.../include/grpcpp/security/auth_context.h | 24 +
.../grpcpp/security/auth_metadata_processor.h | 73 +
.../security/authorization_policy_provider.h | 88 +
.../grpcpp/security/binder_credentials.h | 43 +
.../grpcpp/security/binder_security_policy.h | 82 +
.../include/grpcpp/security/credentials.h | 349 +
.../grpcpp/security/server_credentials.h | 145 +
.../security/tls_certificate_provider.h | 127 +
.../security/tls_certificate_verifier.h | 238 +
.../grpcpp/security/tls_credentials_options.h | 157 +
CAPI/cpp/grpc/include/grpcpp/server.h | 347 +
CAPI/cpp/grpc/include/grpcpp/server_builder.h | 419 +
CAPI/cpp/grpc/include/grpcpp/server_context.h | 24 +
CAPI/cpp/grpc/include/grpcpp/server_posix.h | 43 +
.../include/grpcpp/support/async_stream.h | 24 +
.../include/grpcpp/support/async_unary_call.h | 24 +
.../grpc/include/grpcpp/support/byte_buffer.h | 31 +
.../grpcpp/support/channel_arguments.h | 146 +
.../include/grpcpp/support/client_callback.h | 24 +
.../grpcpp/support/client_interceptor.h | 24 +
CAPI/cpp/grpc/include/grpcpp/support/config.h | 24 +
.../include/grpcpp/support/error_details.h | 76 +
.../grpc/include/grpcpp/support/interceptor.h | 24 +
.../grpcpp/support/message_allocator.h | 24 +
.../include/grpcpp/support/method_handler.h | 24 +
.../grpcpp/support/proto_buffer_reader.h | 24 +
.../grpcpp/support/proto_buffer_writer.h | 24 +
.../include/grpcpp/support/server_callback.h | 24 +
.../grpcpp/support/server_interceptor.h | 24 +
CAPI/cpp/grpc/include/grpcpp/support/slice.h | 26 +
CAPI/cpp/grpc/include/grpcpp/support/status.h | 24 +
.../include/grpcpp/support/status_code_enum.h | 24 +
.../grpc/include/grpcpp/support/string_ref.h | 24 +
.../include/grpcpp/support/stub_options.h | 24 +
.../grpc/include/grpcpp/support/sync_stream.h | 24 +
CAPI/cpp/grpc/include/grpcpp/support/time.h | 24 +
.../grpcpp/support/validate_service_config.h | 36 +
.../grpc/include/grpcpp/xds_server_builder.h | 123 +
.../include/openssl/__DECC_INCLUDE_EPILOGUE.H | 22 +
.../include/openssl/__DECC_INCLUDE_PROLOGUE.H | 26 +
CAPI/cpp/grpc/include/openssl/aes.h | 111 +
CAPI/cpp/grpc/include/openssl/asn1.h | 1128 ++
CAPI/cpp/grpc/include/openssl/asn1_mac.h | 10 +
CAPI/cpp/grpc/include/openssl/asn1err.h | 140 +
CAPI/cpp/grpc/include/openssl/asn1t.h | 946 +
CAPI/cpp/grpc/include/openssl/async.h | 96 +
CAPI/cpp/grpc/include/openssl/asyncerr.h | 29 +
CAPI/cpp/grpc/include/openssl/bio.h | 886 +
CAPI/cpp/grpc/include/openssl/bioerr.h | 65 +
CAPI/cpp/grpc/include/openssl/blowfish.h | 78 +
CAPI/cpp/grpc/include/openssl/bn.h | 583 +
CAPI/cpp/grpc/include/openssl/bnerr.h | 47 +
CAPI/cpp/grpc/include/openssl/buffer.h | 62 +
CAPI/cpp/grpc/include/openssl/buffererr.h | 25 +
CAPI/cpp/grpc/include/openssl/camellia.h | 117 +
CAPI/cpp/grpc/include/openssl/cast.h | 71 +
CAPI/cpp/grpc/include/openssl/cmac.h | 52 +
CAPI/cpp/grpc/include/openssl/cmp.h | 592 +
CAPI/cpp/grpc/include/openssl/cmp_util.h | 56 +
CAPI/cpp/grpc/include/openssl/cmperr.h | 112 +
CAPI/cpp/grpc/include/openssl/cms.h | 493 +
CAPI/cpp/grpc/include/openssl/cmserr.h | 122 +
CAPI/cpp/grpc/include/openssl/comp.h | 59 +
CAPI/cpp/grpc/include/openssl/comperr.h | 31 +
CAPI/cpp/grpc/include/openssl/conf.h | 211 +
CAPI/cpp/grpc/include/openssl/conf_api.h | 46 +
CAPI/cpp/grpc/include/openssl/conferr.h | 51 +
CAPI/cpp/grpc/include/openssl/configuration.h | 146 +
CAPI/cpp/grpc/include/openssl/conftypes.h | 44 +
CAPI/cpp/grpc/include/openssl/core.h | 233 +
CAPI/cpp/grpc/include/openssl/core_dispatch.h | 943 +
CAPI/cpp/grpc/include/openssl/core_names.h | 556 +
CAPI/cpp/grpc/include/openssl/core_object.h | 41 +
CAPI/cpp/grpc/include/openssl/crmf.h | 227 +
CAPI/cpp/grpc/include/openssl/crmferr.h | 50 +
CAPI/cpp/grpc/include/openssl/crypto.h | 558 +
CAPI/cpp/grpc/include/openssl/cryptoerr.h | 46 +
.../grpc/include/openssl/cryptoerr_legacy.h | 1466 ++
CAPI/cpp/grpc/include/openssl/ct.h | 573 +
CAPI/cpp/grpc/include/openssl/cterr.h | 45 +
CAPI/cpp/grpc/include/openssl/decoder.h | 133 +
CAPI/cpp/grpc/include/openssl/decodererr.h | 28 +
CAPI/cpp/grpc/include/openssl/des.h | 211 +
CAPI/cpp/grpc/include/openssl/dh.h | 328 +
CAPI/cpp/grpc/include/openssl/dherr.h | 57 +
CAPI/cpp/grpc/include/openssl/dsa.h | 275 +
CAPI/cpp/grpc/include/openssl/dsaerr.h | 43 +
CAPI/cpp/grpc/include/openssl/dtls1.h | 57 +
CAPI/cpp/grpc/include/openssl/e_os2.h | 305 +
CAPI/cpp/grpc/include/openssl/ebcdic.h | 39 +
CAPI/cpp/grpc/include/openssl/ec.h | 1569 ++
CAPI/cpp/grpc/include/openssl/ecdh.h | 10 +
CAPI/cpp/grpc/include/openssl/ecdsa.h | 10 +
CAPI/cpp/grpc/include/openssl/ecerr.h | 103 +
CAPI/cpp/grpc/include/openssl/encoder.h | 124 +
CAPI/cpp/grpc/include/openssl/encodererr.h | 28 +
CAPI/cpp/grpc/include/openssl/engine.h | 833 +
CAPI/cpp/grpc/include/openssl/engineerr.h | 63 +
CAPI/cpp/grpc/include/openssl/err.h | 492 +
CAPI/cpp/grpc/include/openssl/ess.h | 128 +
CAPI/cpp/grpc/include/openssl/esserr.h | 32 +
CAPI/cpp/grpc/include/openssl/evp.h | 2170 +++
CAPI/cpp/grpc/include/openssl/evperr.h | 134 +
CAPI/cpp/grpc/include/openssl/fips_names.h | 60 +
CAPI/cpp/grpc/include/openssl/fipskey.h | 36 +
CAPI/cpp/grpc/include/openssl/hmac.h | 62 +
CAPI/cpp/grpc/include/openssl/http.h | 109 +
CAPI/cpp/grpc/include/openssl/httperr.h | 55 +
CAPI/cpp/grpc/include/openssl/idea.h | 82 +
CAPI/cpp/grpc/include/openssl/kdf.h | 138 +
CAPI/cpp/grpc/include/openssl/kdferr.h | 16 +
CAPI/cpp/grpc/include/openssl/lhash.h | 288 +
CAPI/cpp/grpc/include/openssl/macros.h | 304 +
CAPI/cpp/grpc/include/openssl/md2.h | 56 +
CAPI/cpp/grpc/include/openssl/md4.h | 63 +
CAPI/cpp/grpc/include/openssl/md5.h | 62 +
CAPI/cpp/grpc/include/openssl/mdc2.h | 55 +
CAPI/cpp/grpc/include/openssl/modes.h | 219 +
CAPI/cpp/grpc/include/openssl/obj_mac.h | 5481 ++++++
CAPI/cpp/grpc/include/openssl/objects.h | 183 +
CAPI/cpp/grpc/include/openssl/objectserr.h | 28 +
CAPI/cpp/grpc/include/openssl/ocsp.h | 483 +
CAPI/cpp/grpc/include/openssl/ocsperr.h | 53 +
CAPI/cpp/grpc/include/openssl/opensslconf.h | 17 +
CAPI/cpp/grpc/include/openssl/opensslv.h | 114 +
CAPI/cpp/grpc/include/openssl/ossl_typ.h | 16 +
CAPI/cpp/grpc/include/openssl/param_build.h | 63 +
CAPI/cpp/grpc/include/openssl/params.h | 160 +
CAPI/cpp/grpc/include/openssl/pem.h | 538 +
CAPI/cpp/grpc/include/openssl/pem2.h | 19 +
CAPI/cpp/grpc/include/openssl/pemerr.h | 58 +
CAPI/cpp/grpc/include/openssl/pkcs12.h | 350 +
CAPI/cpp/grpc/include/openssl/pkcs12err.h | 45 +
CAPI/cpp/grpc/include/openssl/pkcs7.h | 427 +
CAPI/cpp/grpc/include/openssl/pkcs7err.h | 63 +
CAPI/cpp/grpc/include/openssl/prov_ssl.h | 34 +
CAPI/cpp/grpc/include/openssl/proverr.h | 148 +
CAPI/cpp/grpc/include/openssl/provider.h | 60 +
CAPI/cpp/grpc/include/openssl/rand.h | 123 +
CAPI/cpp/grpc/include/openssl/randerr.h | 68 +
CAPI/cpp/grpc/include/openssl/rc2.h | 68 +
CAPI/cpp/grpc/include/openssl/rc4.h | 47 +
CAPI/cpp/grpc/include/openssl/rc5.h | 79 +
CAPI/cpp/grpc/include/openssl/ripemd.h | 59 +
CAPI/cpp/grpc/include/openssl/rsa.h | 604 +
CAPI/cpp/grpc/include/openssl/rsaerr.h | 107 +
CAPI/cpp/grpc/include/openssl/safestack.h | 297 +
CAPI/cpp/grpc/include/openssl/seed.h | 113 +
CAPI/cpp/grpc/include/openssl/self_test.h | 92 +
CAPI/cpp/grpc/include/openssl/sha.h | 138 +
CAPI/cpp/grpc/include/openssl/srp.h | 285 +
CAPI/cpp/grpc/include/openssl/srtp.h | 56 +
CAPI/cpp/grpc/include/openssl/ssl.h | 2599 +++
CAPI/cpp/grpc/include/openssl/ssl2.h | 30 +
CAPI/cpp/grpc/include/openssl/ssl3.h | 347 +
CAPI/cpp/grpc/include/openssl/sslerr.h | 343 +
CAPI/cpp/grpc/include/openssl/sslerr_legacy.h | 468 +
CAPI/cpp/grpc/include/openssl/stack.h | 90 +
CAPI/cpp/grpc/include/openssl/store.h | 369 +
CAPI/cpp/grpc/include/openssl/storeerr.h | 49 +
CAPI/cpp/grpc/include/openssl/symhacks.h | 39 +
CAPI/cpp/grpc/include/openssl/tls1.h | 1223 ++
CAPI/cpp/grpc/include/openssl/trace.h | 312 +
CAPI/cpp/grpc/include/openssl/ts.h | 503 +
CAPI/cpp/grpc/include/openssl/tserr.h | 67 +
CAPI/cpp/grpc/include/openssl/txt_db.h | 63 +
CAPI/cpp/grpc/include/openssl/types.h | 236 +
CAPI/cpp/grpc/include/openssl/ui.h | 407 +
CAPI/cpp/grpc/include/openssl/uierr.h | 38 +
CAPI/cpp/grpc/include/openssl/whrlpool.h | 62 +
CAPI/cpp/grpc/include/openssl/x509.h | 1276 ++
CAPI/cpp/grpc/include/openssl/x509_vfy.h | 894 +
CAPI/cpp/grpc/include/openssl/x509err.h | 68 +
CAPI/cpp/grpc/include/openssl/x509v3.h | 1450 ++
CAPI/cpp/grpc/include/openssl/x509v3err.h | 93 +
CAPI/cpp/grpc/include/re2/filtered_re2.h | 114 +
CAPI/cpp/grpc/include/re2/re2.h | 1017 ++
CAPI/cpp/grpc/include/re2/set.h | 85 +
CAPI/cpp/grpc/include/re2/stringpiece.h | 210 +
CAPI/cpp/grpc/include/upb/arena.h | 225 +
CAPI/cpp/grpc/include/upb/array.h | 83 +
CAPI/cpp/grpc/include/upb/bindings/lua/upb.h | 132 +
CAPI/cpp/grpc/include/upb/collections.h | 36 +
CAPI/cpp/grpc/include/upb/decode.h | 95 +
CAPI/cpp/grpc/include/upb/decode_fast.h | 153 +
CAPI/cpp/grpc/include/upb/decode_internal.h | 36 +
CAPI/cpp/grpc/include/upb/def.h | 416 +
CAPI/cpp/grpc/include/upb/def.hpp | 441 +
CAPI/cpp/grpc/include/upb/encode.h | 81 +
.../cpp/grpc/include/upb/extension_registry.h | 84 +
CAPI/cpp/grpc/include/upb/internal/decode.h | 211 +
.../upb/internal/mini_table_accessors.h | 59 +
CAPI/cpp/grpc/include/upb/internal/table.h | 385 +
CAPI/cpp/grpc/include/upb/internal/upb.h | 68 +
.../include/upb/internal/vsnprintf_compat.h | 52 +
CAPI/cpp/grpc/include/upb/json_decode.h | 47 +
CAPI/cpp/grpc/include/upb/json_encode.h | 65 +
CAPI/cpp/grpc/include/upb/map.h | 117 +
CAPI/cpp/grpc/include/upb/message_value.h | 66 +
CAPI/cpp/grpc/include/upb/mini_descriptor.h | 58 +
CAPI/cpp/grpc/include/upb/mini_table.h | 189 +
CAPI/cpp/grpc/include/upb/mini_table.hpp | 112 +
.../grpc/include/upb/mini_table_accessors.h | 271 +
.../upb/mini_table_accessors_internal.h | 36 +
CAPI/cpp/grpc/include/upb/msg.h | 71 +
CAPI/cpp/grpc/include/upb/msg_internal.h | 837 + CAPI/cpp/grpc/include/upb/port_def.inc | 262 + CAPI/cpp/grpc/include/upb/port_undef.inc | 63 + CAPI/cpp/grpc/include/upb/reflection.h | 110 + CAPI/cpp/grpc/include/upb/reflection.hpp | 37 + CAPI/cpp/grpc/include/upb/status.h | 66 + CAPI/cpp/grpc/include/upb/table_internal.h | 36 + CAPI/cpp/grpc/include/upb/text_encode.h | 64 + CAPI/cpp/grpc/include/upb/upb.h | 184 + CAPI/cpp/grpc/include/upb/upb.hpp | 115 + CAPI/cpp/grpc/include/upb/upb_internal.h | 36 + CAPI/cpp/grpc/include/upb/util/compare.h | 66 + CAPI/cpp/grpc/include/upb/util/def_to_proto.h | 62 + .../grpc/include/upb/util/required_fields.h | 94 + CAPI/cpp/grpc/include/zconf.h | 544 + CAPI/cpp/grpc/include/zlib.h | 1935 ++ 892 files changed, 242964 insertions(+) create mode 100644 CAPI/cpp/grpc/LICENSE create mode 100644 CAPI/cpp/grpc/include/absl/algorithm/algorithm.h create mode 100644 CAPI/cpp/grpc/include/absl/algorithm/container.h create mode 100644 CAPI/cpp/grpc/include/absl/base/attributes.h create mode 100644 CAPI/cpp/grpc/include/absl/base/call_once.h create mode 100644 CAPI/cpp/grpc/include/absl/base/casts.h create mode 100644 CAPI/cpp/grpc/include/absl/base/config.h create mode 100644 CAPI/cpp/grpc/include/absl/base/const_init.h create mode 100644 CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/endian.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/identity.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/invoke.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/prefetch.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/spinlock.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h create mode 100644 
CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/strerror.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h create mode 100644 CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h create mode 100644 CAPI/cpp/grpc/include/absl/base/log_severity.h create mode 100644 CAPI/cpp/grpc/include/absl/base/macros.h create mode 100644 CAPI/cpp/grpc/include/absl/base/optimization.h create mode 100644 CAPI/cpp/grpc/include/absl/base/options.h create mode 100644 CAPI/cpp/grpc/include/absl/base/policy_checks.h create mode 100644 CAPI/cpp/grpc/include/absl/base/port.h create mode 100644 CAPI/cpp/grpc/include/absl/base/thread_annotations.h create mode 100644 CAPI/cpp/grpc/include/absl/cleanup/cleanup.h create mode 100644 CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h create mode 100644 CAPI/cpp/grpc/include/absl/container/btree_map.h create mode 100644 CAPI/cpp/grpc/include/absl/container/btree_set.h create mode 100644 CAPI/cpp/grpc/include/absl/container/btree_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/fixed_array.h create mode 100644 CAPI/cpp/grpc/include/absl/container/flat_hash_map.h create mode 100644 CAPI/cpp/grpc/include/absl/container/flat_hash_set.h create mode 100644 CAPI/cpp/grpc/include/absl/container/inlined_vector.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/btree.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/btree_container.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/common.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/container_memory.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/layout.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/tracked.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h create mode 100644 
CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h create mode 100644 CAPI/cpp/grpc/include/absl/container/node_hash_map.h create mode 100644 CAPI/cpp/grpc/include/absl/container/node_hash_set.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_aarch64-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_arm-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_emscripten-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_generic-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_powerpc-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_riscv-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_unimplemented-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_win32-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_x86-inl.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/leak_check.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/stacktrace.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize.h create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize_darwin.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize_elf.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize_emscripten.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize_unimplemented.inc create mode 100644 CAPI/cpp/grpc/include/absl/debugging/symbolize_win32.inc create mode 100644 CAPI/cpp/grpc/include/absl/flags/commandlineflag.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/config.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/declare.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/flag.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/flag.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/flag_msvc.inc create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/parse.h create mode 100644 
CAPI/cpp/grpc/include/absl/flags/internal/path_util.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/program_name.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/registry.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/internal/usage.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/marshalling.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/parse.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/reflection.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/usage.h create mode 100644 CAPI/cpp/grpc/include/absl/flags/usage_config.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/any_invocable.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/bind_front.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/function_ref.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h create mode 100644 CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/hash.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/hash_testing.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/internal/city.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/internal/hash.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/internal/low_level_hash.h create mode 100644 CAPI/cpp/grpc/include/absl/hash/internal/spy_hash_state.h create mode 100644 CAPI/cpp/grpc/include/absl/memory/memory.h create mode 100644 CAPI/cpp/grpc/include/absl/meta/type_traits.h create mode 100644 CAPI/cpp/grpc/include/absl/numeric/bits.h create mode 100644 CAPI/cpp/grpc/include/absl/numeric/int128.h create mode 100644 CAPI/cpp/grpc/include/absl/numeric/int128_have_intrinsic.inc create mode 100644 CAPI/cpp/grpc/include/absl/numeric/int128_no_intrinsic.inc create mode 100644 CAPI/cpp/grpc/include/absl/numeric/internal/bits.h create mode 100644 CAPI/cpp/grpc/include/absl/numeric/internal/representation.h create mode 100644 CAPI/cpp/grpc/include/absl/profiling/internal/exponential_biased.h create mode 100644 CAPI/cpp/grpc/include/absl/profiling/internal/periodic_sampler.h create mode 100644 CAPI/cpp/grpc/include/absl/profiling/internal/sample_recorder.h create mode 100644 CAPI/cpp/grpc/include/absl/random/bernoulli_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/beta_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/bit_gen_ref.h create mode 100644 CAPI/cpp/grpc/include/absl/random/discrete_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/distributions.h create mode 100644 CAPI/cpp/grpc/include/absl/random/exponential_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/gaussian_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/chi_square.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/distribution_caller.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/distribution_test_util.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/explicit_seed_seq.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/fast_uniform_bits.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/fastmath.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/generate_real.h create mode 100644 
CAPI/cpp/grpc/include/absl/random/internal/iostream_state_saver.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/mock_helpers.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/mock_overload_set.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/nanobenchmark.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/nonsecure_base.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/pcg_engine.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/platform.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/pool_urbg.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen_detect.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen_engine.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen_hwaes.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen_slow.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/randen_traits.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/salted_seed_seq.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/seed_material.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/sequence_urbg.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/traits.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/uniform_helper.h create mode 100644 CAPI/cpp/grpc/include/absl/random/internal/wide_multiply.h create mode 100644 CAPI/cpp/grpc/include/absl/random/log_uniform_int_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/mock_distributions.h create mode 100644 CAPI/cpp/grpc/include/absl/random/mocking_bit_gen.h create mode 100644 CAPI/cpp/grpc/include/absl/random/poisson_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/random.h create mode 100644 CAPI/cpp/grpc/include/absl/random/seed_gen_exception.h create mode 100644 CAPI/cpp/grpc/include/absl/random/seed_sequences.h create mode 100644 CAPI/cpp/grpc/include/absl/random/uniform_int_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/uniform_real_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/random/zipf_distribution.h create mode 100644 CAPI/cpp/grpc/include/absl/status/internal/status_internal.h create mode 100644 CAPI/cpp/grpc/include/absl/status/internal/statusor_internal.h create mode 100644 CAPI/cpp/grpc/include/absl/status/status.h create mode 100644 CAPI/cpp/grpc/include/absl/status/status_payload_printer.h create mode 100644 CAPI/cpp/grpc/include/absl/status/statusor.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/ascii.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/charconv.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/cord.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/cord_analysis.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/cord_buffer.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/cord_test_helpers.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/cordz_test_helpers.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/escaping.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/char_map.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/charconv_bigint.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/charconv_parse.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_data_edge.h create mode 100644 
CAPI/cpp/grpc/include/absl/strings/internal/cord_internal.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_navigator.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_btree_reader.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_consume.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_crc.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_flat.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_ring_reader.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cord_rep_test_util.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_functions.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_handle.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_info.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_sample_token.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_statistics.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_scope.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/cordz_update_tracker.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/escaping.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/escaping_test_common.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/memutil.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/numbers_test_common.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/ostringstream.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/pow10_helper.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/resize_uninitialized.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/stl_type_traits.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/arg.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/bind.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/checker.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/extension.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/float_conversion.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/output.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_format/parser.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_join_internal.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/str_split_internal.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/string_constant.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/internal/utf8.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/match.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/numbers.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/str_cat.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/str_format.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/str_join.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/str_replace.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/str_split.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/string_view.h create mode 100644 CAPI/cpp/grpc/include/absl/strings/strip.h create mode 100644 
CAPI/cpp/grpc/include/absl/strings/substitute.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/barrier.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/blocking_counter.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/create_thread_identity.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/futex.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/graphcycles.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/kernel_timeout.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/per_thread_sem.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/thread_pool.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/internal/waiter.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/mutex.h
 create mode 100644 CAPI/cpp/grpc/include/absl/synchronization/notification.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/civil_time.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/clock.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/civil_time_detail.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/time_zone.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/include/cctz/zone_info_source.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_fixed.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_if.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_impl.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_info.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_libc.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/time_zone_posix.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/cctz/src/tzfile.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/get_current_time_chrono.inc
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/get_current_time_posix.inc
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/test_util.h
 create mode 100644 CAPI/cpp/grpc/include/absl/time/internal/zoneinfo.inc
 create mode 100644 CAPI/cpp/grpc/include/absl/time/time.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/any.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/bad_any_cast.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/bad_optional_access.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/bad_variant_access.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/compare.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/conformance_aliases.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/conformance_archetype.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/conformance_profile.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/conformance_testing.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/conformance_testing_helpers.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/optional.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/parentheses.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/span.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/transform_args.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/internal/variant.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/optional.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/span.h
 create mode 100644 CAPI/cpp/grpc/include/absl/types/variant.h
 create mode 100644 CAPI/cpp/grpc/include/absl/utility/utility.h
 create mode 100644 CAPI/cpp/grpc/include/ares.h
 create mode 100644 CAPI/cpp/grpc/include/ares_build.h
 create mode 100644 CAPI/cpp/grpc/include/ares_dns.h
 create mode 100644 CAPI/cpp/grpc/include/ares_nameser.h
 create mode 100644 CAPI/cpp/grpc/include/ares_rules.h
 create mode 100644 CAPI/cpp/grpc/include/ares_version.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/any.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/any.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/any.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/api.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/api.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/arena.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/arena_impl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/arenastring.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/arenaz_sampler.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/code_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/command_line_interface.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/cpp_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/file.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/helpers.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/cpp/names.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_doc_comment.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_names.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/csharp/csharp_options.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/importer.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/java/generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/java/java_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/java/kotlin_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/java/names.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/objectivec/objectivec_helpers.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/parser.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/php/php_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/plugin.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/python/generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/python/pyi_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/python/python_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/compiler/ruby/ruby_generator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/descriptor.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/descriptor.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/descriptor.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/descriptor_database.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/duration.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/duration.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/dynamic_message.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/empty.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/empty.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/endian.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/explicitly_constructed.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/extension_set.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/extension_set_inl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/field_access_listener.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/field_mask.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/field_mask.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_enum_reflection.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_enum_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_message_bases.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_message_reflection.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_message_tctable_decl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_message_tctable_impl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/generated_message_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/has_bits.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/implicit_weak_message.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/inlined_string_field.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/coded_stream.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/gzip_stream.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/io_win32.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/printer.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/strtod.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/tokenizer.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/zero_copy_stream.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/zero_copy_stream_impl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/io/zero_copy_stream_impl_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_entry.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_entry_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_field.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_field_inl.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_field_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/map_type_handler.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/message.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/message_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/metadata.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/metadata_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/parse_context.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/port.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/port_def.inc
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/port_undef.inc
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/reflection.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/reflection_ops.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/repeated_field.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/repeated_ptr_field.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/service.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/source_context.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/source_context.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/struct.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/struct.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/bytestream.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/callback.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/casts.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/common.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/hash.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/logging.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/macros.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/map_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/mutex.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/once.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/platform_macros.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/port.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/status.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/stl_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/stringpiece.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/strutil.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/stubs/template_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/text_format.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/timestamp.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/timestamp.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/type.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/type.proto
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/unknown_field_set.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/delimited_message_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/field_comparator.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/field_mask_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/json_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/message_differencer.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/time_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/type_resolver.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/util/type_resolver_util.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/wire_format.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/wire_format_lite.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/wrappers.pb.h
 create mode 100644 CAPI/cpp/grpc/include/google/protobuf/wrappers.proto
 create mode 100644 CAPI/cpp/grpc/include/grpc++/alarm.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/client_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/completion_queue.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/create_channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/create_channel_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/ext/health_check_service_server_builder_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/generic/async_generic_service.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/generic/generic_stub.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/grpc++.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/health_check_service_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/channel_argument_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/client_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/async_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/async_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/call_hook.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/channel_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/client_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/client_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/completion_queue.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/completion_queue_tag.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/config.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/config_protobuf.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/core_codegen.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/core_codegen_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/create_auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/grpc_library.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/metadata_map.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/method_handler_impl.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/proto_utils.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/rpc_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/rpc_service_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/security/auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/serialization_traits.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/server_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/server_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/service_type.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/status_code_enum.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/string_ref.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/stub_options.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/sync_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/codegen/time.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/grpc_library.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/method_handler_impl.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/rpc_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/rpc_service_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/serialization_traits.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/server_builder_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/server_builder_plugin.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/server_initializer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/impl/service_type.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/resource_quota.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/security/auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/security/auth_metadata_processor.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/security/credentials.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/security/server_credentials.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/server.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/server_builder.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/server_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/server_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/async_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/async_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/channel_arguments.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/config.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/error_details.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/status_code_enum.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/string_ref.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/stub_options.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/sync_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpc++/support/time.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/byte_buffer_reader.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/census.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/compression.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/endpoint_config.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/event_engine.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/internal/memory_allocator_impl.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/memory_allocator.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/memory_request.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/port.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/event_engine/slice_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/fork.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/grpc.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/grpc_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/grpc_security.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/grpc_security_constants.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/atm.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/atm_gcc_atomic.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/atm_gcc_sync.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/atm_windows.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/byte_buffer_reader.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/compression_types.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/connectivity_state.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/fork.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/gpr_slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/gpr_types.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/grpc_types.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/log.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/port_platform.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/propagation_bits.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync_abseil.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync_custom.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync_generic.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/impl/codegen/sync_windows.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/load_reporting.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/slice_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/alloc.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/atm.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/atm_gcc_atomic.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/atm_gcc_sync.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/atm_windows.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/cpu.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/log.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/log_windows.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/port_platform.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/string_util.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync_abseil.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync_custom.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync_generic.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/sync_windows.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/thd_id.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/time.h
 create mode 100644 CAPI/cpp/grpc/include/grpc/support/workaround_list.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/alarm.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/client_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/completion_queue.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/create_channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/create_channel_binder.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/create_channel_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/ext/call_metric_recorder.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/ext/health_check_service_server_builder_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/generic/async_generic_service.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/generic/generic_stub.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/grpcpp.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/health_check_service_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/channel_argument_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/client_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/async_generic_service.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/async_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/async_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/call_hook.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/call_op_set.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/call_op_set_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/callback_common.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/channel_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/client_callback.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/client_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/client_interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/client_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/completion_queue.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/completion_queue_tag.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/config.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/config_protobuf.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/core_codegen.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/core_codegen_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/create_auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/delegating_channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/grpc_library.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/intercepted_channel.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/interceptor_common.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/message_allocator.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/metadata_map.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/method_handler.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/method_handler_impl.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/proto_buffer_reader.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/proto_buffer_writer.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/proto_utils.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/rpc_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/rpc_service_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/security/auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/serialization_traits.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/server_callback.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/server_callback_handlers.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/server_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/server_interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/server_interface.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/service_type.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/status_code_enum.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/string_ref.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/stub_options.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/sync.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/sync_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/codegen/time.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/grpc_library.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/method_handler_impl.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/rpc_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/rpc_service_method.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/serialization_traits.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/server_builder_option.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/server_builder_plugin.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/server_initializer.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/impl/service_type.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/resource_quota.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/alts_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/alts_util.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/auth_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/auth_metadata_processor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/authorization_policy_provider.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/binder_credentials.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/binder_security_policy.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/credentials.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/server_credentials.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/tls_certificate_provider.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/tls_certificate_verifier.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/security/tls_credentials_options.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/server.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/server_builder.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/server_context.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/server_posix.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/async_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/async_unary_call.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/byte_buffer.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/channel_arguments.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/client_callback.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/client_interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/config.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/error_details.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/message_allocator.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/method_handler.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/proto_buffer_reader.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/proto_buffer_writer.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/server_callback.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/server_interceptor.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/slice.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/status.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/status_code_enum.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/string_ref.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/stub_options.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/sync_stream.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/time.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/support/validate_service_config.h
 create mode 100644 CAPI/cpp/grpc/include/grpcpp/xds_server_builder.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/__DECC_INCLUDE_EPILOGUE.H
 create mode 100644 CAPI/cpp/grpc/include/openssl/__DECC_INCLUDE_PROLOGUE.H
 create mode 100644 CAPI/cpp/grpc/include/openssl/aes.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/asn1.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/asn1_mac.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/asn1err.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/asn1t.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/async.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/asyncerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/bio.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/bioerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/blowfish.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/bn.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/bnerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/buffer.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/buffererr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/camellia.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cast.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cmac.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cmp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cmp_util.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cmperr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cms.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cmserr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/comp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/comperr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/conf.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/conf_api.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/conferr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/configuration.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/conftypes.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/core.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/core_dispatch.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/core_names.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/core_object.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/crmf.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/crmferr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/crypto.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cryptoerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cryptoerr_legacy.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ct.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/cterr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/decoder.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/decodererr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/des.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/dh.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/dherr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/dsa.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/dsaerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/dtls1.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/e_os2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ebcdic.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ec.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ecdh.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ecdsa.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ecerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/encoder.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/encodererr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/engine.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/engineerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/err.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ess.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/esserr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/evp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/evperr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/fips_names.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/fipskey.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/hmac.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/http.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/httperr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/idea.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/kdf.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/kdferr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/lhash.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/macros.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/md2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/md4.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/md5.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/mdc2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/modes.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/obj_mac.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/objects.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/objectserr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ocsp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ocsperr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/opensslconf.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/opensslv.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ossl_typ.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/param_build.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/params.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pem.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pem2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pemerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pkcs12.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pkcs12err.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pkcs7.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/pkcs7err.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/prov_ssl.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/proverr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/provider.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rand.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/randerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rc2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rc4.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rc5.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ripemd.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rsa.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/rsaerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/safestack.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/seed.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/self_test.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/sha.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/srp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/srtp.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ssl.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ssl2.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ssl3.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/sslerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/sslerr_legacy.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/stack.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/store.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/storeerr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/symhacks.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/tls1.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/trace.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ts.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/tserr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/txt_db.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/types.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/ui.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/uierr.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/whrlpool.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/x509.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/x509_vfy.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/x509err.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/x509v3.h
 create mode 100644 CAPI/cpp/grpc/include/openssl/x509v3err.h
 create mode 100644 CAPI/cpp/grpc/include/re2/filtered_re2.h
 create mode 100644 CAPI/cpp/grpc/include/re2/re2.h
 create mode 100644 CAPI/cpp/grpc/include/re2/set.h
 create mode 100644 CAPI/cpp/grpc/include/re2/stringpiece.h
 create mode 100644 CAPI/cpp/grpc/include/upb/arena.h
 create mode 100644 CAPI/cpp/grpc/include/upb/array.h
 create mode 100644 CAPI/cpp/grpc/include/upb/bindings/lua/upb.h
 create mode 100644 CAPI/cpp/grpc/include/upb/collections.h
 create mode 100644 CAPI/cpp/grpc/include/upb/decode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/decode_fast.h
 create mode 100644 CAPI/cpp/grpc/include/upb/decode_internal.h
 create mode 100644 CAPI/cpp/grpc/include/upb/def.h
 create mode 100644 CAPI/cpp/grpc/include/upb/def.hpp
 create mode 100644 CAPI/cpp/grpc/include/upb/encode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/extension_registry.h
 create mode 100644 CAPI/cpp/grpc/include/upb/internal/decode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/internal/mini_table_accessors.h
 create mode 100644 CAPI/cpp/grpc/include/upb/internal/table.h
 create mode 100644 CAPI/cpp/grpc/include/upb/internal/upb.h
 create mode 100644 CAPI/cpp/grpc/include/upb/internal/vsnprintf_compat.h
 create mode 100644 CAPI/cpp/grpc/include/upb/json_decode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/json_encode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/map.h
 create mode 100644 CAPI/cpp/grpc/include/upb/message_value.h
 create mode 100644 CAPI/cpp/grpc/include/upb/mini_descriptor.h
 create mode 100644 CAPI/cpp/grpc/include/upb/mini_table.h
 create mode 100644 CAPI/cpp/grpc/include/upb/mini_table.hpp
 create mode 100644 CAPI/cpp/grpc/include/upb/mini_table_accessors.h
 create mode 100644 CAPI/cpp/grpc/include/upb/mini_table_accessors_internal.h
 create mode 100644 CAPI/cpp/grpc/include/upb/msg.h
 create mode 100644 CAPI/cpp/grpc/include/upb/msg_internal.h
 create mode 100644 CAPI/cpp/grpc/include/upb/port_def.inc
 create mode 100644 CAPI/cpp/grpc/include/upb/port_undef.inc
 create mode 100644 CAPI/cpp/grpc/include/upb/reflection.h
 create mode 100644 CAPI/cpp/grpc/include/upb/reflection.hpp
 create mode 100644 CAPI/cpp/grpc/include/upb/status.h
 create mode 100644 CAPI/cpp/grpc/include/upb/table_internal.h
 create mode 100644 CAPI/cpp/grpc/include/upb/text_encode.h
 create mode 100644 CAPI/cpp/grpc/include/upb/upb.h
 create mode 100644 CAPI/cpp/grpc/include/upb/upb.hpp
 create mode 100644 CAPI/cpp/grpc/include/upb/upb_internal.h
 create mode 100644 CAPI/cpp/grpc/include/upb/util/compare.h
 create mode 100644 CAPI/cpp/grpc/include/upb/util/def_to_proto.h
 create mode 100644 CAPI/cpp/grpc/include/upb/util/required_fields.h
 create mode 100644 CAPI/cpp/grpc/include/zconf.h
 create mode 100644 CAPI/cpp/grpc/include/zlib.h

diff --git a/CAPI/cpp/grpc/LICENSE b/CAPI/cpp/grpc/LICENSE
new file mode 100644
index 0000000..0e09a3e
--- /dev/null
+++ b/CAPI/cpp/grpc/LICENSE
@@ -0,0 +1,610 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +----------------------------------------------------------- + +BSD 3-Clause License + +Copyright 2016, Google Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------------------------------------------------
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11. "Patent Claims" of a Contributor
+    means any patent claim(s), including without limitation, method,
+    process, and apparatus claims, in any patent Licensable by such
+    Contributor that would be infringed, but for the grant of the
+    License, by the making, using, selling, offering for sale, having
+    made, import, or transfer of either its Contributions or its
+    Contributor Version.
+
+1.12. "Secondary License"
+    means either the GNU General Public License, Version 2.0, the GNU
+    Lesser General Public License, Version 2.1, the GNU Affero General
+    Public License, Version 3.0, or any later versions of those
+    licenses.
+
+1.13. "Source Code Form"
+    means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+    means an individual or a legal entity exercising rights under this
+    License. For legal entities, "You" includes any entity that
+    controls, is controlled by, or is under common control with You. For
+    purposes of this definition, "control" means (a) the power, direct
+    or indirect, to cause the direction or management of such entity,
+    whether by contract or otherwise, or (b) ownership of more than
+    fifty percent (50%) of the outstanding shares or beneficial
+    ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+    Licensable by such Contributor to use, reproduce, make available,
+    modify, display, perform, distribute, and otherwise exploit its
+    Contributions, either on an unmodified basis, with Modifications, or
+    as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+    for sale, have made, import, and otherwise transfer either its
+    Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+    or
+
+(b) for infringements caused by: (i) Your and any other third party's
+    modifications of Covered Software, or (ii) the combination of its
+    Contributions with other software (except as part of its Contributor
+    Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+    its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+    Form, as described in Section 3.1, and You must inform recipients of
+    the Executable Form how they can obtain a copy of such Source Code
+    Form by reasonable means in a timely manner, at a charge no more
+    than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+    License, or sublicense it under different terms, provided that the
+    license for the Executable Form does not attempt to limit or alter
+    the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+*                                                                      *
+*  6. Disclaimer of Warranty                                           *
+*  -------------------------                                           *
+*                                                                      *
+*  Covered Software is provided under this License on an "as is"       *
+*  basis, without warranty of any kind, either expressed, implied, or  *
+*  statutory, including, without limitation, warranties that the       *
+*  Covered Software is free of defects, merchantable, fit for a        *
+*  particular purpose or non-infringing. The entire risk as to the     *
+*  quality and performance of the Covered Software is with You.        *
+*  Should any Covered Software prove defective in any respect, You     *
+*  (not any Contributor) assume the cost of any necessary servicing,   *
+*  repair, or correction. This disclaimer of warranty constitutes an   *
+*  essential part of this License. No use of any Covered Software is   *
+*  authorized under this License except under this disclaimer.         *
+*                                                                      *
+************************************************************************
+
+************************************************************************
+*                                                                      *
+*  7. Limitation of Liability                                          *
+*  --------------------------                                          *
+*                                                                      *
+*  Under no circumstances and under no legal theory, whether tort      *
+*  (including negligence), contract, or otherwise, shall any           *
+*  Contributor, or anyone who distributes Covered Software as          *
+*  permitted above, be liable to You for any direct, indirect,         *
+*  special, incidental, or consequential damages of any character      *
+*  including, without limitation, damages for lost profits, loss of    *
+*  goodwill, work stoppage, computer failure or malfunction, or any    *
+*  and all other commercial damages or losses, even if such party      *
+*  shall have been informed of the possibility of such damages. This   *
+*  limitation of liability shall not apply to liability for death or   *
+*  personal injury resulting from such party's negligence to the       *
+*  extent applicable law prohibits such limitation. Some               *
+*  jurisdictions do not allow the exclusion or limitation of           *
+*  incidental or consequential damages, so this exclusion and          *
+*  limitation may not apply to You.                                    *
+*                                                                      *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
diff --git a/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h
new file mode 100644
index 0000000..e9b4733
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h
@@ -0,0 +1,159 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: algorithm.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains Google extensions to the standard <algorithm> C++
+// header.
+
+#ifndef ABSL_ALGORITHM_ALGORITHM_H_
+#define ABSL_ALGORITHM_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace algorithm_internal {
+
+// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
+struct EqualTo {
+  template <typename T, typename U>
+  bool operator()(const T& a, const U& b) const {
+    return a == b;
+  }
+};
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, Pred pred, std::input_iterator_tag,
+               std::input_iterator_tag) {
+  while (true) {
+    if (first1 == last1) return first2 == last2;
+    if (first2 == last2) return false;
+    if (!pred(*first1, *first2)) return false;
+    ++first1;
+    ++first2;
+  }
+}
+
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, Pred&& pred, std::random_access_iterator_tag,
+               std::random_access_iterator_tag) {
+  return (last1 - first1 == last2 - first2) &&
+         std::equal(first1, last1, first2, std::forward<Pred>(pred));
+}
+
+// When we are using our own internal predicate that just applies operator==, we
+// forward to the non-predicate form of std::equal. This enables an optimization
+// in libstdc++ that can result in std::memcmp being used for integer types.
+template <typename InputIter1, typename InputIter2>
+bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+               InputIter2 last2, algorithm_internal::EqualTo /* unused */,
+               std::random_access_iterator_tag,
+               std::random_access_iterator_tag) {
+  return (last1 - first1 == last2 - first2) &&
+         std::equal(first1, last1, first2);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::true_type) {
+  return std::rotate(first, middle, last);
+}
+
+template <typename It>
+It RotateImpl(It first, It middle, It last, std::false_type) {
+  std::rotate(first, middle, last);
+  return std::next(first, std::distance(middle, last));
+}
+
+}  // namespace algorithm_internal
+
+// equal()
+//
+// Compares the equality of two ranges specified by pairs of iterators, using
+// the given predicate, returning true iff for each corresponding iterator i1
+// and i2 in the first and second range respectively, pred(*i1, *i2) == true
+//
+// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
+// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
+// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
+// then the predicate is never invoked and the function returns false.
+//
+// This is a C++11-compatible implementation of C++14 `std::equal`. See
+// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
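+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included
+// by the caller):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {1, 2, 3};
+//   bool same = absl::equal(a.begin(), a.end(), b.begin(), b.end());  // true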
+template <typename InputIter1, typename InputIter2, typename Pred>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+           InputIter2 last2, Pred&& pred) {
+  return algorithm_internal::EqualImpl(
+      first1, last1, first2, last2, std::forward<Pred>(pred),
+      typename std::iterator_traits<InputIter1>::iterator_category{},
+      typename std::iterator_traits<InputIter2>::iterator_category{});
+}
+
+// Overload of equal() that performs comparison of two ranges specified by pairs
+// of iterators using operator==.
+template <typename InputIter1, typename InputIter2>
+bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
+           InputIter2 last2) {
+  return absl::equal(first1, last1, first2, last2,
+                     algorithm_internal::EqualTo{});
+}
+
+// linear_search()
+//
+// Performs a linear search for `value` using the iterator `first` up to
+// but not including `last`, returning true if [`first`, `last`) contains an
+// element equal to `value`.
+//
+// A linear search is of O(n) complexity which is guaranteed to make at most
+// n = (`last` - `first`) comparisons. A linear search over short containers
+// may be faster than a binary search, even when the container is sorted.
+template <typename InputIterator, typename EqualityComparable>
+bool linear_search(InputIterator first, InputIterator last,
+                   const EqualityComparable& value) {
+  return std::find(first, last, value) != last;
+}
+
+// rotate()
+//
+// Performs a left rotation on a range of elements (`first`, `last`) such that
+// `middle` is now the first element. `rotate()` returns an iterator pointing to
+// the first element before rotation. This function is exactly the same as
+// `std::rotate`, but fixes a bug in gcc
+// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
+//
+// The complexity of this algorithm is the same as that of `std::rotate`, but if
+// `ForwardIterator` is not a random-access iterator, then `absl::rotate`
+// performs an additional pass over the range to construct the return value.
+template <typename ForwardIterator>
+ForwardIterator rotate(ForwardIterator first, ForwardIterator middle,
+                       ForwardIterator last) {
+  return algorithm_internal::RotateImpl(
+      first, middle, last,
+      std::is_same<decltype(std::rotate(first, middle, last)),
+                   ForwardIterator>());
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_ALGORITHM_H_
diff --git a/CAPI/cpp/grpc/include/absl/algorithm/container.h b/CAPI/cpp/grpc/include/absl/algorithm/container.h
new file mode 100644
index 0000000..26b1952
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/algorithm/container.h
@@ -0,0 +1,1774 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+//   * Algorithmic <iterator> functions
+//   * Algorithmic <numeric> functions
+//   * <algorithm> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<T>> gives vector<T>::const_iterator,
+// while ContainerIter<vector<T>> gives vector<T>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+    decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
+template <typename C>
+using ContainerDifferenceType =
+    decltype(std::distance(std::declval<ContainerIter<C>>(),
+                           std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+    typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+//   using std::begin;
+//   using std::end;
+//   std::foo(begin(c), end(c));
+// becomes
+//   std::foo(container_algorithm_internal::begin(c),
+//            container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) { return begin(c); }
+
+template <typename C>
+ContainerIter<C> c_end(C& c) { return end(c); }
+
+template <typename T>
+struct IsUnorderedContainer : std::false_type {};
+
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_set<Key, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+// container_algorithm_internal::c_size. It is meant for internal use only.
+
+template <class C>
+auto c_size(C& c) -> decltype(c.size()) {
+  return c.size();
+}
+
+template <class T, std::size_t N>
+constexpr std::size_t c_size(T (&)[N]) {
+  return N;
+}
+
+}  // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of absl::linear_search() for performing a linear
+// search within a container.
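+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {3, 1, 4};
+//   bool found = absl::c_linear_search(v, 4);  // true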
+template +bool c_linear_search(const C& c, EqualityComparable&& value) { + return linear_search(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(value)); +} + +//------------------------------------------------------------------------------ +// algorithms +//------------------------------------------------------------------------------ + +// c_distance() +// +// Container-based version of the `std::distance()` function to +// return the number of elements within a container. +template +container_algorithm_internal::ContainerDifferenceType c_distance( + const C& c) { + return std::distance(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +//------------------------------------------------------------------------------ +// Non-modifying sequence operations +//------------------------------------------------------------------------------ + +// c_all_of() +// +// Container-based version of the `std::all_of()` function to +// test if all elements within a container satisfy a condition. +template +bool c_all_of(const C& c, Pred&& pred) { + return std::all_of(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_any_of() +// +// Container-based version of the `std::any_of()` function to +// test if any element in a container fulfills a condition. +template +bool c_any_of(const C& c, Pred&& pred) { + return std::any_of(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_none_of() +// +// Container-based version of the `std::none_of()` function to +// test if no elements in a container fulfill a condition. +template +bool c_none_of(const C& c, Pred&& pred) { + return std::none_of(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_for_each() +// +// Container-based version of the `std::for_each()` function to +// apply a function to a container's elements. +template +decay_t c_for_each(C&& c, Function&& f) { + return std::for_each(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(f)); +} + +// c_find() +// +// Container-based version of the `std::find()` function to find +// the first element containing the passed value within a container value. +template +container_algorithm_internal::ContainerIter c_find(C& c, T&& value) { + return std::find(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(value)); +} + +// c_find_if() +// +// Container-based version of the `std::find_if()` function to find +// the first element in a container matching the given condition. +template +container_algorithm_internal::ContainerIter c_find_if(C& c, Pred&& pred) { + return std::find_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_find_if_not() +// +// Container-based version of the `std::find_if_not()` function to +// find the first element in a container not matching the given condition. +template +container_algorithm_internal::ContainerIter c_find_if_not(C& c, + Pred&& pred) { + return std::find_if_not(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_find_end() +// +// Container-based version of the `std::find_end()` function to +// find the last subsequence within a container. 
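+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 2, 1, 2, 3};
+//   std::vector<int> sub = {1, 2};
+//   auto it = absl::c_find_end(v, sub);  // points at v[2], the last {1, 2}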
+template +container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence) { + return std::find_end(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(subsequence), + container_algorithm_internal::c_end(subsequence)); +} + +// Overload of c_find_end() for using a predicate evaluation other than `==` as +// the function's test condition. +template +container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { + return std::find_end(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(subsequence), + container_algorithm_internal::c_end(subsequence), + std::forward(pred)); +} + +// c_find_first_of() +// +// Container-based version of the `std::find_first_of()` function to +// find the first element within the container that is also within the options +// container. +template +container_algorithm_internal::ContainerIter c_find_first_of(C1& container, + C2& options) { + return std::find_first_of(container_algorithm_internal::c_begin(container), + container_algorithm_internal::c_end(container), + container_algorithm_internal::c_begin(options), + container_algorithm_internal::c_end(options)); +} + +// Overload of c_find_first_of() for using a predicate evaluation other than +// `==` as the function's test condition. +template +container_algorithm_internal::ContainerIter c_find_first_of( + C1& container, C2& options, BinaryPredicate&& pred) { + return std::find_first_of(container_algorithm_internal::c_begin(container), + container_algorithm_internal::c_end(container), + container_algorithm_internal::c_begin(options), + container_algorithm_internal::c_end(options), + std::forward(pred)); +} + +// c_adjacent_find() +// +// Container-based version of the `std::adjacent_find()` function to +// find equal adjacent elements within a container. +template +container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence) { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_adjacent_find() for using a predicate evaluation other than +// `==` as the function's test condition. +template +container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence, BinaryPredicate&& pred) { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(pred)); +} + +// c_count() +// +// Container-based version of the `std::count()` function to count +// values that match within a container. +template +container_algorithm_internal::ContainerDifferenceType c_count( + const C& c, T&& value) { + return std::count(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(value)); +} + +// c_count_if() +// +// Container-based version of the `std::count_if()` function to +// count values matching a condition within a container. 
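+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   auto evens = absl::c_count_if(v, [](int x) { return x % 2 == 0; });  // 2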
+template +container_algorithm_internal::ContainerDifferenceType c_count_if( + const C& c, Pred&& pred) { + return std::count_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_mismatch() +// +// Container-based version of the `std::mismatch()` function to +// return the first element where two ordered containers differ. Applies `==` to +// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). +template +container_algorithm_internal::ContainerIterPairType +c_mismatch(C1& c1, C2& c2) { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + // Negates equality because Cpp17EqualityComparable doesn't require clients + // to overload both `operator==` and `operator!=`. + if (!(*first1 == *first2)) { + break; + } + } + + return std::make_pair(first1, first2); +} + +// Overload of c_mismatch() for using a predicate evaluation other than `==` as +// the function's test condition. Applies `pred`to the first N elements of `c1` +// and `c2`, where N = min(size(c1), size(c2)). +template +container_algorithm_internal::ContainerIterPairType +c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + if (!pred(*first1, *first2)) { + break; + } + } + + return std::make_pair(first1, first2); +} + +// c_equal() +// +// Container-based version of the `std::equal()` function to +// test whether two containers are equal. +// +// NOTE: the semantics of c_equal() are slightly different than those of +// equal(): while the latter iterates over the second container only up to the +// size of the first container, c_equal() also checks whether the container +// sizes are equal. This better matches expectations about c_equal() based on +// its signature. +// +// Example: +// vector v1 = <1, 2, 3>; +// vector v2 = <1, 2, 3, 4>; +// equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true +// c_equal(v1, v2) returns false + +template +bool c_equal(const C1& c1, const C2& c2) { + return ((container_algorithm_internal::c_size(c1) == + container_algorithm_internal::c_size(c2)) && + std::equal(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2))); +} + +// Overload of c_equal() for using a predicate evaluation other than `==` as +// the function's test condition. +template +bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) { + return ((container_algorithm_internal::c_size(c1) == + container_algorithm_internal::c_size(c2)) && + std::equal(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + std::forward(pred))); +} + +// c_is_permutation() +// +// Container-based version of the `std::is_permutation()` function +// to test whether a container is a permutation of another. 
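+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {3, 1, 2};
+//   bool perm = absl::c_is_permutation(a, b);  // true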
+template +bool c_is_permutation(const C1& c1, const C2& c2) { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2)); +} + +// Overload of c_is_permutation() for using a predicate evaluation other than +// `==` as the function's test condition. +template +bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2), + std::forward(pred)); +} + +// c_search() +// +// Container-based version of the `std::search()` function to search +// a container for a subsequence. +template +container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence) { + return std::search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(subsequence), + container_algorithm_internal::c_end(subsequence)); +} + +// Overload of c_search() for using a predicate evaluation other than +// `==` as the function's test condition. +template +container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { + return std::search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(subsequence), + container_algorithm_internal::c_end(subsequence), + std::forward(pred)); +} + +// c_search_n() +// +// Container-based version of the `std::search_n()` function to +// search a container for the first sequence of N elements. +template +container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value) { + return std::search_n(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), count, + std::forward(value)); +} + +// Overload of c_search_n() for using a predicate evaluation other than +// `==` as the function's test condition. +template +container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) { + return std::search_n(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), count, + std::forward(value), + std::forward(pred)); +} + +//------------------------------------------------------------------------------ +// Modifying sequence operations +//------------------------------------------------------------------------------ + +// c_copy() +// +// Container-based version of the `std::copy()` function to copy a +// container's elements into an iterator. +template +OutputIterator c_copy(const InputSequence& input, OutputIterator output) { + return std::copy(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output); +} + +// c_copy_n() +// +// Container-based version of the `std::copy_n()` function to copy a +// container's first N elements into an iterator. +template +OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { + return std::copy_n(container_algorithm_internal::c_begin(input), n, output); +} + +// c_copy_if() +// +// Container-based version of the `std::copy_if()` function to copy +// a container's elements satisfying some condition into an iterator. 
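+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included;
+// std::back_inserter comes from <iterator>, already included above):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   std::vector<int> evens;
+//   absl::c_copy_if(v, std::back_inserter(evens),
+//                   [](int x) { return x % 2 == 0; });  // evens == {2, 4}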
+template +OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, + Pred&& pred) { + return std::copy_if(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output, + std::forward(pred)); +} + +// c_copy_backward() +// +// Container-based version of the `std::copy_backward()` function to +// copy a container's elements in reverse order into an iterator. +template +BidirectionalIterator c_copy_backward(const C& src, + BidirectionalIterator dest) { + return std::copy_backward(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_move() +// +// Container-based version of the `std::move()` function to move +// a container's elements into an iterator. +template +OutputIterator c_move(C&& src, OutputIterator dest) { + return std::move(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_move_backward() +// +// Container-based version of the `std::move_backward()` function to +// move a container's elements into an iterator in reverse order. +template +BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { + return std::move_backward(container_algorithm_internal::c_begin(src), + container_algorithm_internal::c_end(src), dest); +} + +// c_swap_ranges() +// +// Container-based version of the `std::swap_ranges()` function to +// swap a container's elements with another container's elements. Swaps the +// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). +template +container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + using std::swap; + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { + swap(*first1, *first2); + } + return first2; +} + +// c_transform() +// +// Container-based version of the `std::transform()` function to +// transform a container's elements using the unary operation, storing the +// result in an iterator pointing to the last transformed element in the output +// range. +template +OutputIterator c_transform(const InputSequence& input, OutputIterator output, + UnaryOp&& unary_op) { + return std::transform(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), output, + std::forward(unary_op)); +} + +// Overload of c_transform() for performing a transformation using a binary +// predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, +// where N = min(size(c1), size(c2)). +template +OutputIterator c_transform(const InputSequence1& input1, + const InputSequence2& input2, OutputIterator output, + BinaryOp&& binary_op) { + auto first1 = container_algorithm_internal::c_begin(input1); + auto last1 = container_algorithm_internal::c_end(input1); + auto first2 = container_algorithm_internal::c_begin(input2); + auto last2 = container_algorithm_internal::c_end(input2); + for (; first1 != last1 && first2 != last2; + ++first1, (void)++first2, ++output) { + *output = binary_op(*first1, *first2); + } + + return output; +} + +// c_replace() +// +// Container-based version of the `std::replace()` function to +// replace a container's elements of some value with a new value. The container +// is modified in place. 
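+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 2, 1};
+//   absl::c_replace(v, 1, 9);  // v == {9, 2, 9}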
+template +void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { + std::replace(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), old_value, + new_value); +} + +// c_replace_if() +// +// Container-based version of the `std::replace_if()` function to +// replace a container's elements of some value with a new value based on some +// condition. The container is modified in place. +template +void c_replace_if(C& c, Pred&& pred, T&& new_value) { + std::replace_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred), std::forward(new_value)); +} + +// c_replace_copy() +// +// Container-based version of the `std::replace_copy()` function to +// replace a container's elements of some value with a new value and return the +// results within an iterator. +template +OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, + T&& new_value) { + return std::replace_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(old_value), + std::forward(new_value)); +} + +// c_replace_copy_if() +// +// Container-based version of the `std::replace_copy_if()` function +// to replace a container's elements of some value with a new value based on +// some condition, and return the results within an iterator. +template +OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, + T&& new_value) { + return std::replace_copy_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred), + std::forward(new_value)); +} + +// c_fill() +// +// Container-based version of the `std::fill()` function to fill a +// container with some value. +template +void c_fill(C& c, T&& value) { + std::fill(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), std::forward(value)); +} + +// c_fill_n() +// +// Container-based version of the `std::fill_n()` function to fill +// the first N elements in a container with some value. +template +void c_fill_n(C& c, Size n, T&& value) { + std::fill_n(container_algorithm_internal::c_begin(c), n, + std::forward(value)); +} + +// c_generate() +// +// Container-based version of the `std::generate()` function to +// assign a container's elements to the values provided by the given generator. +template +void c_generate(C& c, Generator&& gen) { + std::generate(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(gen)); +} + +// c_generate_n() +// +// Container-based version of the `std::generate_n()` function to +// assign a container's first N elements to the values provided by the given +// generator. +template +container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, + Generator&& gen) { + return std::generate_n(container_algorithm_internal::c_begin(c), n, + std::forward(gen)); +} + +// Note: `c_xx()` container versions for `remove()`, `remove_if()`, +// and `unique()` are omitted, because it's not clear whether or not such +// functions should call erase on their supplied sequences afterwards. Either +// behavior would be surprising for a different set of users. + +// c_remove_copy() +// +// Container-based version of the `std::remove_copy()` function to +// copy a container's elements while removing any elements matching the given +// `value`. 
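+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 0, 2, 0, 3};
+//   std::vector<int> out;
+//   absl::c_remove_copy(v, std::back_inserter(out), 0);  // out == {1, 2, 3}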
+template +OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) { + return std::remove_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(value)); +} + +// c_remove_copy_if() +// +// Container-based version of the `std::remove_copy_if()` function +// to copy a container's elements while removing any elements matching the given +// condition. +template +OutputIterator c_remove_copy_if(const C& c, OutputIterator result, + Pred&& pred) { + return std::remove_copy_if(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred)); +} + +// c_unique_copy() +// +// Container-based version of the `std::unique_copy()` function to +// copy a container's elements while removing any elements containing duplicate +// values. +template +OutputIterator c_unique_copy(const C& c, OutputIterator result) { + return std::unique_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result); +} + +// Overload of c_unique_copy() for using a predicate evaluation other than +// `==` for comparing uniqueness of the element values. +template +OutputIterator c_unique_copy(const C& c, OutputIterator result, + BinaryPredicate&& pred) { + return std::unique_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), result, + std::forward(pred)); +} + +// c_reverse() +// +// Container-based version of the `std::reverse()` function to +// reverse a container's elements. +template +void c_reverse(Sequence& sequence) { + std::reverse(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// c_reverse_copy() +// +// Container-based version of the `std::reverse()` function to +// reverse a container's elements and write them to an iterator range. +template +OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { + return std::reverse_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + result); +} + +// c_rotate() +// +// Container-based version of the `std::rotate()` function to +// shift a container's elements leftward such that the `middle` element becomes +// the first element in the container. +template > +Iterator c_rotate(C& sequence, Iterator middle) { + return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence)); +} + +// c_rotate_copy() +// +// Container-based version of the `std::rotate_copy()` function to +// shift a container's elements leftward such that the `middle` element becomes +// the first element in a new iterator range. +template +OutputIterator c_rotate_copy( + const C& sequence, + container_algorithm_internal::ContainerIter middle, + OutputIterator result) { + return std::rotate_copy(container_algorithm_internal::c_begin(sequence), + middle, container_algorithm_internal::c_end(sequence), + result); +} + +// c_shuffle() +// +// Container-based version of the `std::shuffle()` function to +// randomly shuffle elements within the container using a `gen()` uniform random +// number generator. 
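+//
+// Example (editor's sketch, not upstream text; assumes <vector> and <random>
+// are included):
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   std::mt19937 gen(std::random_device{}());
+//   absl::c_shuffle(v, gen);  // v holds the same values, in random order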
+template +void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) { + std::shuffle(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(gen)); +} + +//------------------------------------------------------------------------------ +// Partition functions +//------------------------------------------------------------------------------ + +// c_is_partitioned() +// +// Container-based version of the `std::is_partitioned()` function +// to test whether all elements in the container for which `pred` returns `true` +// precede those for which `pred` is `false`. +template +bool c_is_partitioned(const C& c, Pred&& pred) { + return std::is_partitioned(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_partition() +// +// Container-based version of the `std::partition()` function +// to rearrange all elements in a container in such a way that all elements for +// which `pred` returns `true` precede all those for which it returns `false`, +// returning an iterator to the first element of the second group. +template +container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) { + return std::partition(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_stable_partition() +// +// Container-based version of the `std::stable_partition()` function +// to rearrange all elements in a container in such a way that all elements for +// which `pred` returns `true` precede all those for which it returns `false`, +// preserving the relative ordering between the two groups. The function returns +// an iterator to the first element of the second group. +template +container_algorithm_internal::ContainerIter c_stable_partition(C& c, + Pred&& pred) { + return std::stable_partition(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +// c_partition_copy() +// +// Container-based version of the `std::partition_copy()` function +// to partition a container's elements and return them into two iterators: one +// for which `pred` returns `true`, and one for which `pred` returns `false.` + +template +std::pair c_partition_copy( + const C& c, OutputIterator1 out_true, OutputIterator2 out_false, + Pred&& pred) { + return std::partition_copy(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), out_true, + out_false, std::forward(pred)); +} + +// c_partition_point() +// +// Container-based version of the `std::partition_point()` function +// to return the first element of an already partitioned container for which +// the given `pred` is not `true`. +template +container_algorithm_internal::ContainerIter c_partition_point(C& c, + Pred&& pred) { + return std::partition_point(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(pred)); +} + +//------------------------------------------------------------------------------ +// Sorting functions +//------------------------------------------------------------------------------ + +// c_sort() +// +// Container-based version of the `std::sort()` function +// to sort elements in ascending order of their values. +template +void c_sort(C& c) { + std::sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_sort() for performing a `comp` comparison other than the +// default `operator<`. 
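+//
+// Example (editor's sketch, not upstream text; assumes <vector> and
+// <functional> are included):
+//
+//   std::vector<int> v = {1, 3, 2};
+//   absl::c_sort(v, std::greater<int>());  // v == {3, 2, 1}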
+template +void c_sort(C& c, LessThan&& comp) { + std::sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_stable_sort() +// +// Container-based version of the `std::stable_sort()` function +// to sort elements in ascending order of their values, preserving the order +// of equivalents. +template +void c_stable_sort(C& c) { + std::stable_sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_stable_sort() for performing a `comp` comparison other than the +// default `operator<`. +template +void c_stable_sort(C& c, LessThan&& comp) { + std::stable_sort(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_is_sorted() +// +// Container-based version of the `std::is_sorted()` function +// to evaluate whether the given container is sorted in ascending order. +template +bool c_is_sorted(const C& c) { + return std::is_sorted(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// c_is_sorted() overload for performing a `comp` comparison other than the +// default `operator<`. +template +bool c_is_sorted(const C& c, LessThan&& comp) { + return std::is_sorted(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_partial_sort() +// +// Container-based version of the `std::partial_sort()` function +// to rearrange elements within a container such that elements before `middle` +// are sorted in ascending order. +template +void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle) { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_partial_sort() for performing a `comp` comparison other than +// the default `operator<`. +template +void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp) { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_partial_sort_copy() +// +// Container-based version of the `std::partial_sort_copy()` +// function to sort the elements in the given range `result` within the larger +// `sequence` in ascending order (and using `result` as the output parameter). +// At most min(result.last - result.first, sequence.last - sequence.first) +// elements from the sequence will be stored in the result. +template +container_algorithm_internal::ContainerIter +c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(result), + container_algorithm_internal::c_end(result)); +} + +// Overload of c_partial_sort_copy() for performing a `comp` comparison other +// than the default `operator<`. 
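+//
+// Example (editor's sketch, not upstream text; assumes <vector> and
+// <functional> are included):
+//
+//   std::vector<int> src = {5, 1, 4, 2};
+//   std::vector<int> top(2);
+//   absl::c_partial_sort_copy(src, top, std::greater<int>());  // top == {5, 4}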
+template +container_algorithm_internal::ContainerIter +c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, + LessThan&& comp) { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + container_algorithm_internal::c_begin(result), + container_algorithm_internal::c_end(result), + std::forward(comp)); +} + +// c_is_sorted_until() +// +// Container-based version of the `std::is_sorted_until()` function +// to return the first element within a container that is not sorted in +// ascending order as an iterator. +template +container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_is_sorted_until() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_is_sorted_until( + C& c, LessThan&& comp) { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_nth_element() +// +// Container-based version of the `std::nth_element()` function +// to rearrange the elements within a container such that the `nth` element +// would be in that position in an ordered sequence; other elements may be in +// any order, except that all preceding `nth` will be less than that element, +// and all following `nth` will be greater than that element. +template +void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth) { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_nth_element() for performing a `comp` comparison other than +// the default `operator<`. +template +void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth, + LessThan&& comp) { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Binary Search +//------------------------------------------------------------------------------ + +// c_lower_bound() +// +// Container-based version of the `std::lower_bound()` function +// to return an iterator pointing to the first element in a sorted container +// which does not compare less than `value`. +template +container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, T&& value) { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value)); +} + +// Overload of c_lower_bound() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, T&& value, LessThan&& comp) { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value), std::forward(comp)); +} + +// c_upper_bound() +// +// Container-based version of the `std::upper_bound()` function +// to return an iterator pointing to the first element in a sorted container +// which is greater than `value`. 
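+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 2, 2, 3};
+//   auto it = absl::c_upper_bound(v, 2);  // *it == 3, the first element > 2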
+template +container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, T&& value) { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value)); +} + +// Overload of c_upper_bound() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, T&& value, LessThan&& comp) { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value), std::forward(comp)); +} + +// c_equal_range() +// +// Container-based version of the `std::equal_range()` function +// to return an iterator pair pointing to the first and last elements in a +// sorted container which compare equal to `value`. +template +container_algorithm_internal::ContainerIterPairType +c_equal_range(Sequence& sequence, T&& value) { + return std::equal_range(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value)); +} + +// Overload of c_equal_range() for performing a `comp` comparison other than +// the default `operator<`. +template +container_algorithm_internal::ContainerIterPairType +c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) { + return std::equal_range(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value), std::forward(comp)); +} + +// c_binary_search() +// +// Container-based version of the `std::binary_search()` function +// to test if any element in the sorted container contains a value equivalent to +// 'value'. +template +bool c_binary_search(Sequence&& sequence, T&& value) { + return std::binary_search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value)); +} + +// Overload of c_binary_search() for performing a `comp` comparison other than +// the default `operator<`. +template +bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) { + return std::binary_search(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(value), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Merge functions +//------------------------------------------------------------------------------ + +// c_merge() +// +// Container-based version of the `std::merge()` function +// to merge two sorted containers into a single sorted iterator. +template +OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { + return std::merge(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), result); +} + +// Overload of c_merge() for performing a `comp` comparison other than +// the default `operator<`. +template +OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, + LessThan&& comp) { + return std::merge(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), result, + std::forward(comp)); +} + +// c_inplace_merge() +// +// Container-based version of the `std::inplace_merge()` function +// to merge a supplied iterator `middle` into a container. 
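+//
+// Example (editor's sketch, not upstream text; assumes <vector> is included):
+//
+//   std::vector<int> v = {1, 3, 5, 2, 4, 6};  // two sorted halves
+//   absl::c_inplace_merge(v, v.begin() + 3);  // v == {1, 2, 3, 4, 5, 6}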
+template +void c_inplace_merge(C& c, + container_algorithm_internal::ContainerIter middle) { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, + container_algorithm_internal::c_end(c)); +} + +// Overload of c_inplace_merge() for performing a merge using a `comp` other +// than `operator<`. +template +void c_inplace_merge(C& c, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp) { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_includes() +// +// Container-based version of the `std::includes()` function +// to test whether a sorted container `c1` entirely contains another sorted +// container `c2`. +template +bool c_includes(const C1& c1, const C2& c2) { + return std::includes(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2)); +} + +// Overload of c_includes() for performing a merge using a `comp` other than +// `operator<`. +template +bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { + return std::includes(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), + std::forward(comp)); +} + +// c_set_union() +// +// Container-based version of the `std::set_union()` function +// to return an iterator containing the union of two containers; duplicate +// values are not copied into the output. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { + return std::set_union(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_union() for performing a merge using a `comp` other than +// `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, + LessThan&& comp) { + return std::set_union(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_intersection() +// +// Container-based version of the `std::set_intersection()` function +// to return an iterator containing the intersection of two sorted containers. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_intersection(const C1& c1, const C2& c2, + OutputIterator output) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using operator<. 
+ assert(absl::c_is_sorted(c1)); + assert(absl::c_is_sorted(c2)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_intersection() for performing a merge using a `comp` other +// than `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_intersection(const C1& c1, const C2& c2, + OutputIterator output, LessThan&& comp) { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using the same comparator. + assert(absl::c_is_sorted(c1, comp)); + assert(absl::c_is_sorted(c2, comp)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_difference() +// +// Container-based version of the `std::set_difference()` function +// to return an iterator containing elements present in the first container but +// not in the second. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_difference(const C1& c1, const C2& c2, + OutputIterator output) { + return std::set_difference(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_difference() for performing a merge using a `comp` other +// than `operator<`. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_difference(const C1& c1, const C2& c2, + OutputIterator output, LessThan&& comp) { + return std::set_difference(container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +// c_set_symmetric_difference() +// +// Container-based version of the `std::set_symmetric_difference()` +// function to return an iterator containing elements present in either one +// container or the other, but not both. +template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, + OutputIterator output) { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output); +} + +// Overload of c_set_symmetric_difference() for performing a merge using a +// `comp` other than `operator<`. 
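+// Usage sketch (illustrative, not from the upstream header) for the set
+// operations above, on pre-sorted inputs:
+//
+//   std::vector<int> a = {1, 2, 3, 5};
+//   std::vector<int> b = {2, 3, 4};
+//   std::vector<int> out;
+//   absl::c_set_intersection(a, b, std::back_inserter(out));  // out == {2, 3}
+//   out.clear();
+//   absl::c_set_symmetric_difference(a, b, std::back_inserter(out));
+//   // out == {1, 4, 5}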
+template ::value, + void>::type, + typename = typename std::enable_if< + !container_algorithm_internal::IsUnorderedContainer::value, + void>::type> +OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, + OutputIterator output, + LessThan&& comp) { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), output, + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Heap functions +//------------------------------------------------------------------------------ + +// c_push_heap() +// +// Container-based version of the `std::push_heap()` function +// to push a value onto a container heap. +template +void c_push_heap(RandomAccessContainer& sequence) { + std::push_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_push_heap() for performing a push operation on a heap using a +// `comp` other than `operator<`. +template +void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::push_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_pop_heap() +// +// Container-based version of the `std::pop_heap()` function +// to pop a value from a heap container. +template +void c_pop_heap(RandomAccessContainer& sequence) { + std::pop_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_pop_heap() for performing a pop operation on a heap using a +// `comp` other than `operator<`. +template +void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::pop_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_make_heap() +// +// Container-based version of the `std::make_heap()` function +// to make a container a heap. +template +void c_make_heap(RandomAccessContainer& sequence) { + std::make_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_make_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::make_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_sort_heap() +// +// Container-based version of the `std::sort_heap()` function +// to sort a heap into ascending order (after which it is no longer a heap). +template +void c_sort_heap(RandomAccessContainer& sequence) { + std::sort_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_sort_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { + std::sort_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_is_heap() +// +// Container-based version of the `std::is_heap()` function +// to check whether the given container is a heap. 
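+// Usage sketch (illustrative, not from the upstream header) for the heap
+// functions above:
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   absl::c_make_heap(v);     // max-heap: v.front() == 5
+//   v.push_back(9);
+//   absl::c_push_heap(v);     // restores the heap; v.front() == 9
+//   assert(absl::c_is_heap(v));
+//   absl::c_pop_heap(v);      // moves 9 to v.back()
+//   v.pop_back();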
+template +bool c_is_heap(const RandomAccessContainer& sequence) { + return std::is_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_is_heap() for performing heap comparisons using a +// `comp` other than `operator<` +template +bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { + return std::is_heap(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_is_heap_until() +// +// Container-based version of the `std::is_heap_until()` function +// to find the first element in a given container which is not in heap order. +template +container_algorithm_internal::ContainerIter +c_is_heap_until(RandomAccessContainer& sequence) { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_is_heap_until() for performing heap comparisons using a +// `comp` other than `operator<` +template +container_algorithm_internal::ContainerIter +c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Min/max +//------------------------------------------------------------------------------ + +// c_min_element() +// +// Container-based version of the `std::min_element()` function +// to return an iterator pointing to the element with the smallest value, using +// `operator<` to make the comparisons. +template +container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence) { + return std::min_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_min_element() for performing a `comp` comparison other than +// `operator<`. +template +container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence, LessThan&& comp) { + return std::min_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_max_element() +// +// Container-based version of the `std::max_element()` function +// to return an iterator pointing to the element with the largest value, using +// `operator<` to make the comparisons. +template +container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence) { + return std::max_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence)); +} + +// Overload of c_max_element() for performing a `comp` comparison other than +// `operator<`. +template +container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence, LessThan&& comp) { + return std::max_element(container_algorithm_internal::c_begin(sequence), + container_algorithm_internal::c_end(sequence), + std::forward(comp)); +} + +// c_minmax_element() +// +// Container-based version of the `std::minmax_element()` function +// to return a pair of iterators pointing to the elements containing the +// smallest and largest values, respectively, using `operator<` to make the +// comparisons. 
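+// Usage sketch (illustrative, not from the upstream header):
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   auto mm = absl::c_minmax_element(v);
+//   // *mm.first == 1 (smallest), *mm.second == 5 (largest)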
+template +container_algorithm_internal::ContainerIterPairType +c_minmax_element(C& c) { + return std::minmax_element(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_minmax_element() for performing `comp` comparisons other than +// `operator<`. +template +container_algorithm_internal::ContainerIterPairType +c_minmax_element(C& c, LessThan&& comp) { + return std::minmax_element(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +//------------------------------------------------------------------------------ +// Lexicographical Comparisons +//------------------------------------------------------------------------------ + +// c_lexicographical_compare() +// +// Container-based version of the `std::lexicographical_compare()` +// function to lexicographically compare (e.g. sort words alphabetically) two +// container sequences. The comparison is performed using `operator<`. Note +// that capital letters ("A-Z") have ASCII values less than lowercase letters +// ("a-z"). +template +bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2)); +} + +// Overload of c_lexicographical_compare() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. +template +bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, + LessThan&& comp) { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2), + std::forward(comp)); +} + +// c_next_permutation() +// +// Container-based version of the `std::next_permutation()` function +// to rearrange a container's elements into the next lexicographically greater +// permutation. +template +bool c_next_permutation(C& c) { + return std::next_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_next_permutation() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. +template +bool c_next_permutation(C& c, LessThan&& comp) { + return std::next_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c), + std::forward(comp)); +} + +// c_prev_permutation() +// +// Container-based version of the `std::prev_permutation()` function +// to rearrange a container's elements into the next lexicographically lesser +// permutation. +template +bool c_prev_permutation(C& c) { + return std::prev_permutation(container_algorithm_internal::c_begin(c), + container_algorithm_internal::c_end(c)); +} + +// Overload of c_prev_permutation() for performing a lexicographical +// comparison using a `comp` operator instead of `operator<`. 
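+// Usage sketch (illustrative, not from the upstream header) for the
+// permutation helpers above: enumerating all permutations of a sorted range.
+//
+//   std::vector<int> v = {1, 2, 3};
+//   do {
+//     // visit v: 123, 132, 213, 231, 312, 321
+//   } while (absl::c_next_permutation(v));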
+template <typename C, typename LessThan>
+bool c_prev_permutation(C& c, LessThan&& comp) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the `std::iota()` function
+// to compute successive values of `value`, as if incremented with `++value`
+// after each element is written, and write them to the container.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, T&& value) {
+  std::iota(container_algorithm_internal::c_begin(sequence),
+            container_algorithm_internal::c_end(sequence),
+            std::forward<T>(value));
+}
+
+// c_accumulate()
+//
+// Container-based version of the `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init));
+}
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition for computing the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+                        BinaryOp&& binary_op) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init),
+                         std::forward<BinaryOp>(binary_op));
+}
+
+// c_inner_product()
+//
+// Container-based version of the `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product of the two containers' element pairs).
+template <typename Sequence1, typename Sequence2, typename T,
+          typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1,
+                           const Sequence2& factors2, T&& sum, BinaryOp1&& op1,
+                           BinaryOp2&& op2) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+                            std::forward<BinaryOp2>(op2));
+}
+
+// c_adjacent_difference()
+//
+// Container-based version of the `std::adjacent_difference()`
+// function to compute the difference between each element and the one
+// preceding it and write it to an iterator.
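+// Usage sketch (illustrative, not from the upstream header) for the
+// <numeric> wrappers above:
+//
+//   std::vector<int> v(5);
+//   absl::c_iota(v, 1);                         // v == {1, 2, 3, 4, 5}
+//   int sum = absl::c_accumulate(v, 0);         // 15
+//   std::vector<int> d(5);
+//   absl::c_adjacent_difference(v, d.begin());  // d == {1, 1, 1, 1, 1}
+//   int dot = absl::c_inner_product(v, v, 0);   // 1+4+9+16+25 == 55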
+template +OutputIt c_adjacent_difference(const InputSequence& input, + OutputIt output_first) { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first); +} + +// Overload of c_adjacent_difference() for using a binary operation other than +// subtraction to compute the adjacent difference. +template +OutputIt c_adjacent_difference(const InputSequence& input, + OutputIt output_first, BinaryOp&& op) { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first, std::forward(op)); +} + +// c_partial_sum() +// +// Container-based version of the `std::partial_sum()` function +// to compute the partial sum of the elements in a sequence and write them +// to an iterator. The partial sum is the sum of all element values so far in +// the sequence. +template +OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { + return std::partial_sum(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first); +} + +// Overload of c_partial_sum() for using a binary operation other than addition +// to compute the "partial sum". +template +OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, + BinaryOp&& op) { + return std::partial_sum(container_algorithm_internal::c_begin(input), + container_algorithm_internal::c_end(input), + output_first, std::forward(op)); +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_ALGORITHM_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/attributes.h b/CAPI/cpp/grpc/include/absl/base/attributes.h new file mode 100644 index 0000000..e4e7a3d --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/attributes.h @@ -0,0 +1,762 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. +// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. +// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). 
+ +#ifndef ABSL_BASE_ATTRIBUTES_H_ +#define ABSL_BASE_ATTRIBUTES_H_ + +#include "absl/base/config.h" + +// ABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define ABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// ABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. +#if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. +#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0 +#endif + +// ----------------------------------------------------------------------------- +// Function Attributes +// ----------------------------------------------------------------------------- +// +// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +// Clang: https://clang.llvm.org/docs/AttributeReference.html + +// ABSL_PRINTF_ATTRIBUTE +// ABSL_SCANF_ATTRIBUTE +// +// Tells the compiler to perform `printf` format string checking if the +// compiler supports it; see the 'format' attribute in +// . +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__scanf__, string_index, first_to_check))) +#else +#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) +#endif + +// ABSL_ATTRIBUTE_ALWAYS_INLINE +// ABSL_ATTRIBUTE_NOINLINE +// +// Forces functions to either inline or not inline. Introduced in gcc 3.1. +#if ABSL_HAVE_ATTRIBUTE(always_inline) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#else +#define ABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + +#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1 +#else +#define ABSL_ATTRIBUTE_NOINLINE +#endif + +// ABSL_ATTRIBUTE_NO_TAIL_CALL +// +// Prevents the compiler from optimizing away stack frames for functions which +// end in a call to another function. 
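+// Usage sketch (illustrative; `GetStackTrace` is a hypothetical function used
+// only to show the annotation, not an API defined here):
+//
+//   // Keep this frame visible to backtraces taken inside the callee.
+//   ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(void** result,
+//                                                 int max_depth);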
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) +#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define ABSL_ATTRIBUTE_NO_TAIL_CALL \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define ABSL_ATTRIBUTE_NO_TAIL_CALL +#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 +#endif + +// ABSL_ATTRIBUTE_WEAK +// +// Tags a function as weak for the purposes of compilation and linking. +// Weak attributes did not work properly in LLVM's Windows backend before +// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for further information. +// The MinGW compiler doesn't complain about the weak attribute until the link +// step, presumably because Windows doesn't use ELF binaries. +#if (ABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ + !defined(__MINGW32__) +#undef ABSL_ATTRIBUTE_WEAK +#define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) +#define ABSL_HAVE_ATTRIBUTE_WEAK 1 +#else +#define ABSL_ATTRIBUTE_WEAK +#define ABSL_HAVE_ATTRIBUTE_WEAK 0 +#endif + +// ABSL_ATTRIBUTE_NONNULL +// +// Tells the compiler either (a) that a particular function parameter +// should be a non-null pointer, or (b) that all pointer arguments should +// be non-null. +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +// +// Args are indexed starting at 1. +// +// For non-static class member functions, the implicit `this` argument +// is arg 1, and the first explicit argument is arg 2. For static class member +// functions, there is no implicit `this`, and the first explicit argument is +// arg 1. +// +// Example: +// +// /* arg_a cannot be null, but arg_b can */ +// void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1); +// +// class C { +// /* arg_a cannot be null, but arg_b can */ +// void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2); +// +// /* arg_a cannot be null, but arg_b can */ +// static void StaticMethod(void* arg_a, void* arg_b) +// ABSL_ATTRIBUTE_NONNULL(1); +// }; +// +// If no arguments are provided, then all pointer arguments should be non-null. +// +// /* No pointer arguments may be null. */ +// void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL(); +// +// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but +// ABSL_ATTRIBUTE_NONNULL does not. +#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) +#else +#define ABSL_ATTRIBUTE_NONNULL(...) +#endif + +// ABSL_ATTRIBUTE_NORETURN +// +// Tells the compiler that a given function never returns. +#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#elif defined(_MSC_VER) +#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn) +#else +#define ABSL_ATTRIBUTE_NORETURN +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +// +// Tells the AddressSanitizer (or other memory testing tools) to ignore a given +// function. 
Useful for cases when a function reads random locations on stack, +// calls _exit from a cloned subprocess, deliberately accesses buffer +// out of bounds or does other scary things with memory. +// NOTE: GCC supports AddressSanitizer(asan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address) +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#elif defined(_MSC_VER) && _MSC_VER >= 1928 +// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +// +// Tells the MemorySanitizer to relax the handling of a given function. All "Use +// of uninitialized value" warnings from such functions will be suppressed, and +// all values loaded from memory will be considered fully initialized. This +// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute +// above, but deals with initialized-ness rather than addressability issues. +// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory) +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +// +// Tells the ThreadSanitizer to not instrument a given function. +// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread) +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +// +// Tells the UndefinedSanitizer to ignore a given function. Useful for cases +// where certain behavior (eg. division by zero) is being used intentionally. +// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. +// https://gcc.gnu.org/gcc-4.9/changes.html +#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize_undefined)) +#elif ABSL_HAVE_ATTRIBUTE(no_sanitize) +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize("undefined"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_CFI +// +// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. +// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI +#endif + +// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +// +// Tells the SafeStack to not instrument a given function. +// See https://clang.llvm.org/docs/SafeStack.html for details. +#if ABSL_HAVE_ATTRIBUTE(no_sanitize) +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ + __attribute__((no_sanitize("safe-stack"))) +#else +#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +#endif + +// ABSL_ATTRIBUTE_RETURNS_NONNULL +// +// Tells the compiler that a particular function never returns a null pointer. +#if ABSL_HAVE_ATTRIBUTE(returns_nonnull) +#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) +#else +#define ABSL_ATTRIBUTE_RETURNS_NONNULL +#endif + +// ABSL_HAVE_ATTRIBUTE_SECTION +// +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. 
Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) || \
+       (defined(__GNUC__) && !defined(__clang__))) && \
+    !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+
+// ABSL_ATTRIBUTE_SECTION
+//
+// Tells the compiler/linker to put a given function into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker. Any function annotated with
+// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into
+// whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+  __attribute__((section(#name))) __attribute__((noinline))
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_VARIABLE
+//
+// Tells the compiler/linker to put a given variable into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments then you
+// might get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+#endif
+
+// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+//
+// A weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION should be in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void*>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void*>(__stop_##name))
+
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
+
+#endif  // ABSL_ATTRIBUTE_SECTION
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+//
+// Support for aligning the stack on 32-bit x86.
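+// Usage sketch (illustrative) for the section machinery defined above;
+// `my_section` is a hypothetical section name:
+//
+//   ABSL_ATTRIBUTE_SECTION(my_section) void Traced() { /* ... */ }
+//   ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(my_section);
+//   // Later, iterate the half-open range
+//   // [ABSL_ATTRIBUTE_SECTION_START(my_section),
+//   //  ABSL_ATTRIBUTE_SECTION_STOP(my_section)).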
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \ + (defined(__GNUC__) && !defined(__clang__)) +#if defined(__i386__) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ + __attribute__((force_align_arg_pointer)) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#elif defined(__x86_64__) +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#else // !__i386__ && !__x86_64 +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#endif // __i386__ +#else +#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#endif + +// ABSL_MUST_USE_RESULT +// +// Tells the compiler to warn about unused results. +// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard `[[nodiscard]]` directly over this macro. +// +// When annotating a function, it must appear as the first part of the +// declaration or definition. The compiler will warn if the return value from +// such a function is unused: +// +// ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); +// AllocateSprocket(); // Triggers a warning. +// +// When annotating a class, it is equivalent to annotating every function which +// returns an instance. +// +// class ABSL_MUST_USE_RESULT Sprocket {}; +// Sprocket(); // Triggers a warning. +// +// Sprocket MakeSprocket(); +// MakeSprocket(); // Triggers a warning. +// +// Note that references and pointers are not instances: +// +// Sprocket* SprocketPointer(); +// SprocketPointer(); // Does *not* trigger a warning. +// +// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result +// warning. For that, warn_unused_result is used only for clang but not for gcc. +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 +// +// Note: past advice was to place the macro after the argument list. +// +// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is +// compliant with the stricter [[nodiscard]]. +#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) +#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) +#else +#define ABSL_MUST_USE_RESULT +#endif + +// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD +// +// Tells GCC that a function is hot or cold. GCC can use this information to +// improve static analysis, i.e. a conditional branch to a cold function +// is likely to be not-taken. +// This annotation is used for function declarations. +// +// Example: +// +// int foo() ABSL_ATTRIBUTE_HOT; +#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_HOT __attribute__((hot)) +#else +#define ABSL_ATTRIBUTE_HOT +#endif + +#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_COLD __attribute__((cold)) +#else +#define ABSL_ATTRIBUTE_COLD +#endif + +// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS +// +// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT +// macro used as an attribute to mark functions that must always or never be +// instrumented by XRay. Currently, this is only supported in Clang/LLVM. +// +// For reference on the LLVM XRay instrumentation, see +// http://llvm.org/docs/XRay.html. +// +// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration +// will always get the XRay instrumentation sleds. These sleds may introduce +// some binary size and runtime overhead and must be used sparingly. 
+// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. +// +// XRay isn't currently supported on Android: +// https://github.com/android/ndk/issues/368 +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__) +#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define ABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define ABSL_XRAY_ALWAYS_INSTRUMENT +#define ABSL_XRAY_NEVER_INSTRUMENT +#define ABSL_XRAY_LOG_ARGS(N) +#endif + +// ABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define ABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// ABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +// +// For code or headers that are assured to only build with C++17 and up, prefer +// just using the standard '[[maybe_unused]]' directly over this macro. +// +// Due to differences in positioning requirements between the old, compiler +// specific __attribute__ syntax and the now standard [[maybe_unused]], this +// macro does not attempt to take advantage of '[[maybe_unused]]'. +#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef ABSL_ATTRIBUTE_UNUSED +#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define ABSL_ATTRIBUTE_UNUSED +#endif + +// ABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. 
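+// Usage sketch (illustrative):
+//
+//   static thread_local int tls_counter ABSL_ATTRIBUTE_INITIAL_EXEC = 0;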
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define ABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// ABSL_ATTRIBUTE_PACKED +// +// Instructs the compiler not to use natural alignment for a tagged data +// structure, but instead to reduce its alignment to 1. +// +// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing +// so can cause atomic variables to be mis-aligned and silently violate +// atomicity on x86. +// +// This attribute can either be applied to members of a structure or to a +// structure in its entirety. Applying this attribute (judiciously) to a +// structure in its entirety to optimize the memory footprint of very +// commonly-used structs is fine. Do not apply this attribute to a structure in +// its entirety if the purpose is to control the offsets of the members in the +// structure. Instead, apply this attribute only to structure members that need +// it. +// +// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the +// natural alignment of structure members not annotated is preserved. Aligned +// member accesses are faster than non-aligned member accesses even if the +// targeted microprocessor supports non-aligned accesses. +#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define ABSL_ATTRIBUTE_PACKED +#endif + +// ABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// ABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: When supported, GCC and Clang can issue a warning on switch labels +// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See +// clang documentation on language extensions for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has +// no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. + +#ifdef ABSL_FALLTHROUGH_INTENDED +#error "ABSL_FALLTHROUGH_INTENDED should not be defined." 
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] +#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough) +#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] +#else +#define ABSL_FALLTHROUGH_INTENDED \ + do { \ + } while (0) +#endif + +// ABSL_DEPRECATED() +// +// Marks a deprecated class, struct, enum, function, method and variable +// declarations. The macro argument is used as a custom diagnostic message (e.g. +// suggestion of a better alternative). +// +// For code or headers that are assured to only build with C++14 and up, prefer +// just using the standard `[[deprecated("message")]]` directly over this macro. +// +// Examples: +// +// class ABSL_DEPRECATED("Use Bar instead") Foo {...}; +// +// ABSL_DEPRECATED("Use Baz() instead") void Bar() {...} +// +// template +// ABSL_DEPRECATED("Use DoThat() instead") +// void DoThis(); +// +// enum FooEnum { +// kBar ABSL_DEPRECATED("Use kBaz instead"), +// }; +// +// Every usage of a deprecated entity will trigger a warning when compiled with +// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain +// turns this warning off by default, instead relying on clang-tidy to report +// new uses of deprecated code. +#if ABSL_HAVE_ATTRIBUTE(deprecated) +#define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) +#else +#define ABSL_DEPRECATED(message) +#endif + +// ABSL_CONST_INIT +// +// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. This is useful for variables with static and thread storage +// duration, because it guarantees that they will not suffer from the so-called +// "static init order fiasco". +// +// This attribute must be placed on the initializing declaration of the +// variable. Some compilers will give a -Wmissing-constinit warning when this +// attribute is placed on some other declaration but missing from the +// initializing declaration. +// +// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can +// also be used in a non-initializing declaration to tell the compiler that a +// variable is already initialized, reducing overhead that would otherwise be +// incurred by a hidden guard variable. Thus annotating all declarations with +// this attribute is recommended to potentially enhance optimization. +// +// Example: +// +// class MyClass { +// public: +// ABSL_CONST_INIT static MyType my_var; +// }; +// +// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); +// +// For code or headers that are assured to only build with C++20 and up, prefer +// just using the standard `constinit` keyword directly over this macro. +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if defined(__cpp_constinit) && __cpp_constinit >= 201907L +#define ABSL_CONST_INIT constinit +#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define ABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define ABSL_CONST_INIT +#endif + +// ABSL_ATTRIBUTE_PURE_FUNCTION +// +// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure" +// functions. A function is pure if its return value is only a function of its +// arguments. 
The pure attribute prohibits a function from modifying the state +// of the program that is observable by means other than inspecting the +// function's return value. Declaring such functions with the pure attribute +// allows the compiler to avoid emitting some calls in repeated invocations of +// the function with the same argument values. +// +// Example: +// +// ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); +#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]] +#elif ABSL_HAVE_ATTRIBUTE(pure) +#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure)) +#else +#define ABSL_ATTRIBUTE_PURE_FUNCTION +#endif + +// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function +// parameter or implicit object parameter is retained by the return value of the +// annotated function (or, for a parameter of a constructor, in the value of the +// constructed object). This attribute causes warnings to be produced if a +// temporary object does not live long enough. +// +// When applied to a reference parameter, the referenced object is assumed to be +// retained by the return value of the function. When applied to a non-reference +// parameter (for example, a pointer or a class type), all temporaries +// referenced by the parameter are assumed to be retained by the return value of +// the function. +// +// See also the upstream documentation: +// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] +#elif ABSL_HAVE_ATTRIBUTE(lifetimebound) +#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) +#else +#define ABSL_ATTRIBUTE_LIFETIME_BOUND +#endif + +#endif // ABSL_BASE_ATTRIBUTES_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/call_once.h b/CAPI/cpp/grpc/include/absl/base/call_once.h new file mode 100644 index 0000000..96109f5 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/call_once.h @@ -0,0 +1,219 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: call_once.h +// ----------------------------------------------------------------------------- +// +// This header file provides an Abseil version of `std::call_once` for invoking +// a given function at most once, across all threads. This Abseil version is +// faster than the C++11 version and incorporates the C++17 argument-passing +// fix, so that (for example) non-const references may be passed to the invoked +// function. 
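+// Usage sketch (illustrative; `Widget` and `InitWidget` are hypothetical
+// names used only for illustration):
+//
+//   absl::once_flag init_flag;
+//   Widget* widget = nullptr;
+//   void InitWidget() { widget = new Widget; }
+//   Widget* GetWidget() {
+//     absl::call_once(init_flag, InitWidget);  // runs InitWidget() once
+//     return widget;
+//   }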
+ +#ifndef ABSL_BASE_CALL_ONCE_H_ +#define ABSL_BASE_CALL_ONCE_H_ + +#include +#include +#include +#include +#include + +#include "absl/base/internal/invoke.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/spinlock_wait.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +class once_flag; + +namespace base_internal { +std::atomic* ControlWord(absl::once_flag* flag); +} // namespace base_internal + +// call_once() +// +// For all invocations using a given `once_flag`, invokes a given `fn` exactly +// once across all threads. The first call to `call_once()` with a particular +// `once_flag` argument (that does not throw an exception) will run the +// specified function with the provided `args`; other calls with the same +// `once_flag` argument will not run the function, but will wait +// for the provided function to finish running (if it is still running). +// +// This mechanism provides a safe, simple, and fast mechanism for one-time +// initialization in a multi-threaded process. +// +// Example: +// +// class MyInitClass { +// public: +// ... +// mutable absl::once_flag once_; +// +// MyInitClass* init() const { +// absl::call_once(once_, &MyInitClass::Init, this); +// return ptr_; +// } +// +template +void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); + +// once_flag +// +// Objects of this type are used to distinguish calls to `call_once()` and +// ensure the provided function is only invoked once across all threads. This +// type is not copyable or movable. However, it has a `constexpr` +// constructor, and is safe to use as a namespace-scoped global variable. +class once_flag { + public: + constexpr once_flag() : control_(0) {} + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + friend std::atomic* base_internal::ControlWord(once_flag* flag); + std::atomic control_; +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. +// Implementation details follow. +//------------------------------------------------------------------------------ + +namespace base_internal { + +// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to +// initialize entities used by the scheduler implementation. +template +void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); + +// Disables scheduling while on stack when scheduling mode is non-cooperative. +// No effect for cooperative scheduling modes. +class SchedulingHelper { + public: + explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { + guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); + } + } + + ~SchedulingHelper() { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { + base_internal::SchedulingGuard::EnableRescheduling(guard_result_); + } + } + + private: + base_internal::SchedulingMode mode_; + bool guard_result_; +}; + +// Bit patterns for call_once state machine values. Internal implementation +// detail, not for use by clients. +// +// The bit patterns are arbitrarily chosen from unlikely values, to aid in +// debugging. However, kOnceInit must be 0, so that a zero-initialized +// once_flag will be valid for immediate use. 
+enum { + kOnceInit = 0, + kOnceRunning = 0x65C2937B, + kOnceWaiter = 0x05A308D2, + // A very small constant is chosen for kOnceDone so that it fit in a single + // compare with immediate instruction for most common ISAs. This is verified + // for x86, POWER and ARM. + kOnceDone = 221, // Random Number +}; + +template +ABSL_ATTRIBUTE_NOINLINE +void CallOnceImpl(std::atomic* control, + base_internal::SchedulingMode scheduling_mode, Callable&& fn, + Args&&... args) { +#ifndef NDEBUG + { + uint32_t old_control = control->load(std::memory_order_relaxed); + if (old_control != kOnceInit && + old_control != kOnceRunning && + old_control != kOnceWaiter && + old_control != kOnceDone) { + ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", + static_cast(old_control)); // NOLINT + } + } +#endif // NDEBUG + static const base_internal::SpinLockWaitTransition trans[] = { + {kOnceInit, kOnceRunning, true}, + {kOnceRunning, kOnceWaiter, false}, + {kOnceDone, kOnceDone, true}}; + + // Must do this before potentially modifying control word's state. + base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); + // Short circuit the simplest case to avoid procedure call overhead. + // The base_internal::SpinLockWait() call returns either kOnceInit or + // kOnceDone. If it returns kOnceDone, it must have loaded the control word + // with std::memory_order_acquire and seen a value of kOnceDone. + uint32_t old_control = kOnceInit; + if (control->compare_exchange_strong(old_control, kOnceRunning, + std::memory_order_relaxed) || + base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, + scheduling_mode) == kOnceInit) { + base_internal::invoke(std::forward(fn), + std::forward(args)...); + old_control = + control->exchange(base_internal::kOnceDone, std::memory_order_release); + if (old_control == base_internal::kOnceWaiter) { + base_internal::SpinLockWake(control, true); + } + } // else *control is already kOnceDone +} + +inline std::atomic* ControlWord(once_flag* flag) { + return &flag->control_; +} + +template +void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) { + std::atomic* once = base_internal::ControlWord(flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { + base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, + std::forward(fn), + std::forward(args)...); + } +} + +} // namespace base_internal + +template +void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) { + std::atomic* once = base_internal::ControlWord(&flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { + base_internal::CallOnceImpl( + once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, + std::forward(fn), std::forward(args)...); + } +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_CALL_ONCE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/casts.h b/CAPI/cpp/grpc/include/absl/base/casts.h new file mode 100644 index 0000000..b99adb0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/casts.h @@ -0,0 +1,180 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.

+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit>  // For std::bit_cast.
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+// // If the context allows implicit conversion:
+// From from;
+// To to = from;
+//
+// // Such code can be replaced by:
+// implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+// return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+// // Assume an object C is convertible to B, which is implicitly convertible
+// // to A
+// A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
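+//
+// A short upcast sketch (Base/Derived are illustrative names, not types from
+// this library):
+//
+//   struct Base {};
+//   struct Derived : Base {};
+//   Derived d;
+//   Base* b = absl::implicit_cast<Base*>(&d);  // upcast: always well-formed
+//   // absl::implicit_cast<Derived*>(b);       // ill-formed: a downcast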
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
+ return to;
+}
+
+// bit_cast()
+//
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
+//
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int>(f);
+// // i = 0x40490fdb
+//
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
+//
+// Example:
+//
+// // WRONG
+// float f = 3.14159265358979;
+// int i = reinterpret_cast<int&>(f);   // Wrong
+// int j = *reinterpret_cast<int*>(&f); // Equally wrong
+// int k = *bit_cast<int*>(&f);         // Equally wrong
+//
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most values of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are stricter than those of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
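+//
+// A round-trip sketch (the hex constant is the IEEE-754 encoding of 1.0,
+// shown for illustration):
+//
+//   static_assert(sizeof(double) == sizeof(uint64_t), "sizes must match");
+//   double d = 1.0;
+//   uint64_t bits = absl::bit_cast<uint64_t>(d);  // 0x3ff0000000000000
+//   double back = absl::bit_cast<double>(bits);   // back == 1.0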
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <typename Dest, typename Source,
+ typename std::enable_if<
+ sizeof(Dest) == sizeof(Source) &&
+ type_traits_internal::is_trivially_copyable<Source>::value &&
+ type_traits_internal::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ && std::is_default_constructible<Dest>::value
+#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ ,
+ int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+ return __builtin_bit_cast(Dest, source);
+}
+#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline Dest bit_cast(const Source& source) {
+ Dest dest;
+ memcpy(static_cast<void*>(std::addressof(dest)),
+ static_cast<const void*>(std::addressof(source)), sizeof(dest));
+ return dest;
+}
+#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CASTS_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/config.h b/CAPI/cpp/grpc/include/absl/base/config.h
new file mode 100644
index 0000000..705ecea
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/config.h
@@ -0,0 +1,913 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+// #include "absl/base/config.h"
+//
+// #ifdef ABSL_HAVE_MMAP
+// #include "sys/mman.h"
+// #endif //ABSL_HAVE_MMAP
+//
+// ...
+// #ifdef ABSL_HAVE_MMAP
+// void *ptr = mmap(...);
+// ...
+// #endif // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif // __cplusplus
+
+// ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/options.h"
+#include "absl/base/policy_checks.h"
+
+// Abseil long-term support (LTS) releases will define
+// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the
+// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the
+// integer representing the patch-level for that release.
+//
+// For example, for LTS release version "20300401.2", this would give us
+// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2
+//
+// These symbols will not be defined in non-LTS code.
+//
+// Abseil recommends that clients live-at-head. Therefore, if you are using
+// these symbols to assert a minimum version requirement, we recommend you do it
+// as
+//
+// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401
+// #error Project foo requires Abseil LTS version >= 20300401
+// #endif
+//
+// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes
+// live-at-head clients from the minimum version assertion.
+//
+// See https://abseil.io/about/releases for more information on Abseil release
+// management.
+//
+// LTS releases can be obtained from
+// https://github.com/abseil/abseil-cpp/releases.
+#define ABSL_LTS_RELEASE_VERSION 20220623
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 1
+
+// Helper macro to convert a CPP variable to a string literal.
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
+#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
+
+// -----------------------------------------------------------------------------
+// Abseil namespace annotations
+// -----------------------------------------------------------------------------
+
+// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END
+//
+// An annotation placed at the beginning/end of each `namespace absl` scope.
+// This is used to inject an inline namespace.
+//
+// The proper way to write Abseil code in the `absl` namespace is:
+//
+// namespace absl {
+// ABSL_NAMESPACE_BEGIN
+//
+// void Foo(); // absl::Foo().
+//
+// ABSL_NAMESPACE_END
+// } // namespace absl
+//
+// Users of Abseil should not use these macros, because users of Abseil should
+// not write `namespace absl {` in their own code for any reason. (Abseil does
+// not support forward declarations of its own types, nor does it support
+// user-provided specialization of Abseil templates. Code that violates these
+// rules may be broken without warning.)
+#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \
+ !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#error options.h is misconfigured.
+#endif + +// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor "" +#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 + +#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) + +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "not be empty."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || + ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', + "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); + +#endif + +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_NAMESPACE_BEGIN +#define ABSL_NAMESPACE_END +#define ABSL_INTERNAL_C_SYMBOL(x) x +#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 +#define ABSL_NAMESPACE_BEGIN \ + inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { +#define ABSL_NAMESPACE_END } +#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v +#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) +#define ABSL_INTERNAL_C_SYMBOL(x) \ + ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) +#else +#error options.h is misconfigured. +#endif + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// ABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. +// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define ABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define ABSL_HAVE_BUILTIN(x) 0 +#endif + +#ifdef __has_feature +#define ABSL_HAVE_FEATURE(f) __has_feature(f) +#else +#define ABSL_HAVE_FEATURE(f) 0 +#endif + +// Portable check for GCC minimum version: +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html +#if defined(__GNUC__) && defined(__GNUC_MINOR__) +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ + (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 +#endif + +#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ + (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) +#else +#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 +#endif + +// ABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux or Asylo when compiled with Clang or +// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. +#ifdef ABSL_HAVE_TLS +#error ABSL_HAVE_TLS cannot be directly set +#elif (defined(__linux__) || defined(__ASYLO__)) && \ + (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define ABSL_HAVE_TLS 1 +#endif + +// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible` is supported. 
+//
+// Notes: All supported compilers using libc++ support this feature, as does
+// gcc >= 4.8.1 using libstdc++, and Visual Studio.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \
+ (!defined(__clang__) && defined(__GLIBCXX__) && \
+ ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8))
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible` and
+// `std::is_trivially_copy_constructible` are supported.
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable` is supported.
+
+// Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with
+// libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC).
+#if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE)
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \
+ (!defined(__clang__) && \
+ ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \
+ (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \
+ defined(_LIBCPP_VERSION)))) || \
+ (defined(_MSC_VER) && !defined(__NVCC__))
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#elif defined(__APPLE__)
+// Notes:
+// * Xcode's clang did not support `thread_local` until version 8, and
+// even then not for all iOS < 9.0.
+// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
+// targeting iOS 9.x.
+// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
+// making ABSL_HAVE_FEATURE unreliable there.
+//
+#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
+ !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+#else // !defined(__APPLE__)
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// There are platforms for which TLS should not be used even though the compiler
+// makes it seem like it's supported (Android NDK < r12b for example).
+// This is primarily because of linker problems and toolchain misconfiguration:
+// Abseil does not intend to support this indefinitely. Currently, the newest
+// toolchain that we intend to support that requires this behavior is the
+// r11 NDK - allowing for a 5 year support window on that means this option
+// is likely to be removed around June of 2021.
+// TLS isn't supported until NDK r12b per
+// https://developer.android.com/ndk/downloads/revision_history.html
+// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
+// <android/ndk-version.h>. For NDK < r16, users should define these macros,
+// e.g. `-D__NDK_MAJOR__=11 -D__NDK_MINOR__=0` for NDK r11.
+#if defined(__ANDROID__) && defined(__clang__)
+#if __has_include(<android/ndk-version.h>)
+#include <android/ndk-version.h>
+#endif // __has_include(<android/ndk-version.h>)
+#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
+ defined(__NDK_MINOR__) && \
+ ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#undef ABSL_HAVE_TLS
+#undef ABSL_HAVE_THREAD_LOCAL
+#endif
+#endif // defined(__ANDROID__) && defined(__clang__)
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type is
+// supported.
+//
+// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
+// supported, but we avoid using it in certain cases:
+// * On Clang:
+// * Building using Clang for Windows, where the Clang runtime library has
+// 128-bit support only on LP64 architectures, but Windows is LLP64.
+// * On Nvidia's nvcc:
+// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions
+// actually support __int128.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
+#elif defined(__SIZEOF_INT128__)
+#if (defined(__clang__) && !defined(_WIN32)) || \
+ (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
+ (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#elif defined(__CUDACC__)
+// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a
+// string explaining that it has been removed starting with CUDA 9. We use
+// nested #ifs because there is no short-circuiting in the preprocessor.
+// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined.
+#if __CUDACC_VER__ >= 70000
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#endif // __CUDACC_VER__ >= 70000
+#endif // defined(__CUDACC__)
+#endif // ABSL_HAVE_INTRINSIC_INT128
+
+// ABSL_HAVE_EXCEPTIONS
+//
+// Checks whether the compiler both supports and enables exceptions. Many
+// compilers support a "no exceptions" mode that disables exceptions.
+//
+// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
+//
+// * Code using `throw` and `try` may not compile.
+// * The `noexcept` specifier will still compile and behave as normal.
+// * The `noexcept` operator may still return `false`.
+//
+// For further details, consult the compiler's documentation.
+#ifdef ABSL_HAVE_EXCEPTIONS
+#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
+#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
+// Clang >= 3.6
+#if ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif // ABSL_HAVE_FEATURE(cxx_exceptions)
+#elif defined(__clang__)
+// Clang < 3.6
+// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
+#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+// Handle remaining special cases and default to exceptions being supported.
+#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ + !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define ABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// ABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_MMAP +#error ABSL_HAVE_MMAP cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ + defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ + defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ + defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ + defined(__QNX__) +#define ABSL_HAVE_MMAP 1 +#endif + +// ABSL_HAVE_PTHREAD_GETSCHEDPARAM +// +// Checks whether the platform implements the pthread_(get|set)schedparam(3) +// functions as defined in POSIX.1-2001. +#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM +#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \ + defined(__NetBSD__) +#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 +#endif + +// ABSL_HAVE_SCHED_GETCPU +// +// Checks whether sched_getcpu is available. +#ifdef ABSL_HAVE_SCHED_GETCPU +#error ABSL_HAVE_SCHED_GETCPU cannot be directly set +#elif defined(__linux__) +#define ABSL_HAVE_SCHED_GETCPU 1 +#endif + +// ABSL_HAVE_SCHED_YIELD +// +// Checks whether the platform implements sched_yield(2) as defined in +// POSIX.1-2001. +#ifdef ABSL_HAVE_SCHED_YIELD +#error ABSL_HAVE_SCHED_YIELD cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) +#define ABSL_HAVE_SCHED_YIELD 1 +#endif + +// ABSL_HAVE_SEMAPHORE_H +// +// Checks whether the platform supports the header and sem_init(3) +// family of functions as standardized in POSIX.1-2001. +// +// Note: While Apple provides for both iOS and macOS, it is +// explicitly deprecated and will cause build failures if enabled for those +// platforms. We side-step the issue by not defining it here for Apple +// platforms. +#ifdef ABSL_HAVE_SEMAPHORE_H +#error ABSL_HAVE_SEMAPHORE_H cannot be directly set +#elif defined(__linux__) || defined(__ros__) +#define ABSL_HAVE_SEMAPHORE_H 1 +#endif + +// ABSL_HAVE_ALARM +// +// Checks whether the platform supports the header and alarm(2) +// function as standardized in POSIX.1-2001. 
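+//
+// A guarding sketch:
+//
+//   #ifdef ABSL_HAVE_ALARM
+//   #include <unistd.h>
+//   alarm(5);  // request SIGALRM after roughly five seconds
+//   #endif // ABSL_HAVE_ALARM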
+#ifdef ABSL_HAVE_ALARM +#error ABSL_HAVE_ALARM cannot be directly set +#elif defined(__GOOGLE_GRTE_VERSION__) +// feature tests for Google's GRTE +#define ABSL_HAVE_ALARM 1 +#elif defined(__GLIBC__) +// feature test for glibc +#define ABSL_HAVE_ALARM 1 +#elif defined(_MSC_VER) +// feature tests for Microsoft's library +#elif defined(__MINGW32__) +// mingw32 doesn't provide alarm(2): +// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h +// mingw-w64 provides a no-op implementation: +// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c +#elif defined(__EMSCRIPTEN__) +// emscripten doesn't support signals +#elif defined(__Fuchsia__) +// Signals don't exist on fuchsia. +#elif defined(__native_client__) +#else +// other standard libraries +#define ABSL_HAVE_ALARM 1 +#endif + +// ABSL_IS_LITTLE_ENDIAN +// ABSL_IS_BIG_ENDIAN +// +// Checks the endianness of the platform. +// +// Notes: uses the built in endian macros provided by GCC (since 4.6) and +// Clang (since 3.2); see +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. +// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. +#if defined(ABSL_IS_BIG_ENDIAN) +#error "ABSL_IS_BIG_ENDIAN cannot be directly set." +#endif +#if defined(ABSL_IS_LITTLE_ENDIAN) +#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." +#endif + +#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define ABSL_IS_LITTLE_ENDIAN 1 +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define ABSL_IS_BIG_ENDIAN 1 +#elif defined(_WIN32) +#define ABSL_IS_LITTLE_ENDIAN 1 +#else +#error "absl endian detection needs to be set up for your compiler" +#endif + +// macOS < 10.13 and iOS < 11 don't let you use , , or +// even though the headers exist and are publicly noted to work, because the +// libc++ shared library shipped on the system doesn't have the requisite +// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and +// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// +// libc++ spells out the availability requirements in the file +// llvm-project/libcxx/include/__config via the #define +// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. +// +// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14 +// and iOS < 12 in the libc++ headers. This was corrected by +// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 +// which subsequently made it into the XCode 12.5 release. We need to match the +// old (incorrect) conditions when built with old XCode, but can use the +// corrected earlier versions with new XCode. 
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
+ ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \
+ (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))))
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
+#else
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available by checking whether <any> exists.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<any>) && defined(__cplusplus) && __cplusplus >= 201703L && \
+ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_ANY 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<optional>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_VARIANT
+//
+// Checks whether C++17 std::variant is available.
+#ifdef ABSL_HAVE_STD_VARIANT
+#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<variant>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_VARIANT 1
+#endif
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#endif
+
+#ifdef __has_include
+#if __has_include(<string_view>) && defined(__cplusplus) && \
+ __cplusplus >= 201703L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+#endif
+
+// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than
+// the support for <optional>, <any>, <string_view>, <variant>. So we use
+// _MSC_VER to check whether we have VS 2017 RTM (when <optional>, <any>,
+// <string_view>, <variant> is implemented) or higher. Also, `__cplusplus` is
+// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language
+// version.
+// TODO(zhangxy): fix tests before enabling aliasing for `std::any`.
+#if defined(_MSC_VER) && _MSC_VER >= 1910 && \
+ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \
+ (defined(__cplusplus) && __cplusplus > 201402))
+// #define ABSL_HAVE_STD_ANY 1
+#define ABSL_HAVE_STD_OPTIONAL 1
+#define ABSL_HAVE_STD_VARIANT 1
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
+// ABSL_USES_STD_ANY
+//
+// Indicates whether absl::any is an alias for std::any.
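+//
+// A guarding sketch:
+//
+//   #ifdef ABSL_USES_STD_ANY
+//   // absl::any and std::any name the same type here, so the two spellings
+//   // can be mixed freely.
+//   #endif // ABSL_USES_STD_ANY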
+#if !defined(ABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_ANY == 0 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) +#undef ABSL_USES_STD_ANY +#elif ABSL_OPTION_USE_STD_ANY == 1 || \ + (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) +#define ABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::optional is an alias for std::optional. +#if !defined(ABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) +#undef ABSL_USES_STD_OPTIONAL +#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) +#define ABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_VARIANT +// +// Indicates whether absl::variant is an alias for std::variant. +#if !defined(ABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) +#undef ABSL_USES_STD_VARIANT +#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ + (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) +#define ABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. +#endif + +// ABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::string_view is an alias for std::string_view. +#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(ABSL_HAVE_STD_STRING_VIEW)) +#undef ABSL_USES_STD_STRING_VIEW +#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(ABSL_HAVE_STD_STRING_VIEW)) +#define ABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define ABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +// ABSL_INTERNAL_MANGLED_NS +// ABSL_INTERNAL_MANGLED_BACKREFERENCE +// +// Internal macros for building up mangled names in our internal fork of CCTZ. +// This implementation detail is only needed and provided for the MSVC build. +// +// These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is +// the mangled spelling of the `absl` namespace, and +// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing +// the proper count to skip past the CCTZ fork namespace names. (This number +// is one larger when there is an inline namespace name to skip.) +#if defined(_MSC_VER) +#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 +#define ABSL_INTERNAL_MANGLED_NS "absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" +#else +#define ABSL_INTERNAL_MANGLED_NS \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl" +#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" +#endif +#endif + +// ABSL_DLL +// +// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` +// so we can annotate symbols appropriately as being exported. 
When used in +// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so +// that consumers know the symbol is defined inside the DLL. In all other cases, +// the macro expands to nothing. +#if defined(_MSC_VER) +#if defined(ABSL_BUILD_DLL) +#define ABSL_DLL __declspec(dllexport) +#elif defined(ABSL_CONSUME_DLL) +#define ABSL_DLL __declspec(dllimport) +#else +#define ABSL_DLL +#endif +#else +#define ABSL_DLL +#endif // defined(_MSC_VER) + +// ABSL_HAVE_MEMORY_SANITIZER +// +// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of +// a compiler instrumentation module and a run-time library. +#ifdef ABSL_HAVE_MEMORY_SANITIZER +#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." +#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) +#define ABSL_HAVE_MEMORY_SANITIZER 1 +#endif + +// ABSL_HAVE_THREAD_SANITIZER +// +// ThreadSanitizer (TSan) is a fast data race detector. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_THREAD__) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(thread_sanitizer) +#define ABSL_HAVE_THREAD_SANITIZER 1 +#endif + +// ABSL_HAVE_ADDRESS_SANITIZER +// +// AddressSanitizer (ASan) is a fast memory error detector. +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_ADDRESS__) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(address_sanitizer) +#define ABSL_HAVE_ADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." +#elif defined(__SANITIZE_HWADDRESS__) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) +#define ABSL_HAVE_HWADDRESS_SANITIZER 1 +#endif + +// ABSL_HAVE_LEAK_SANITIZER +// +// LeakSanitizer (or lsan) is a detector of memory leaks. +// https://clang.llvm.org/docs/LeakSanitizer.html +// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer +// +// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time +// whether the LeakSanitizer is potentially available. However, just because the +// LeakSanitizer is available does not mean it is active. Use the +// always-available run-time interface in //absl/debugging/leak_check.h for +// interacting with LeakSanitizer. +#ifdef ABSL_HAVE_LEAK_SANITIZER +#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." +#elif defined(LEAK_SANITIZER) +// GCC provides no method for detecting the presense of the standalone +// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also +// use -DLEAK_SANITIZER. +#define ABSL_HAVE_LEAK_SANITIZER 1 +// Clang standalone LeakSanitizer (-fsanitize=leak) +#elif ABSL_HAVE_FEATURE(leak_sanitizer) +#define ABSL_HAVE_LEAK_SANITIZER 1 +#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) +// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. +#define ABSL_HAVE_LEAK_SANITIZER 1 +#endif + +// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// +// Class template argument deduction is a language feature added in C++17. +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." 
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
+
+// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+//
+// Prior to C++17, static constexpr variables defined in classes required a
+// separate definition outside of the class body, for example:
+//
+// class Foo {
+// static constexpr int kBar = 0;
+// };
+// constexpr int Foo::kBar;
+//
+// In C++17, these variables defined in classes are considered inline variables,
+// and the extra declaration is redundant. Since some compilers warn on the
+// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used to
+// conditionally ignore them:
+//
+// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// constexpr int Foo::kBar;
+// #endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
+#endif
+
+// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
+// RTTI support.
+#ifdef ABSL_INTERNAL_HAS_RTTI
+#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
+#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_RTTI 1
+#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
+
+// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE
+#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
+#elif defined(__SSE__)
+#define ABSL_INTERNAL_HAVE_SSE 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
+// indicates that at least SSE was targeted with the /arch:SSE option.
+// All x86-64 processors support SSE, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
+#elif defined(__SSE2__)
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
+// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
+// All x86-64 processors support SSE2, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3
+// with MSVC requires either assuming that the code will only ever run on CPUs
+// that support SSSE3, or using __cpuid() to detect support at runtime and
+// fall back to a non-SSSE3 implementation when SSSE3 is unsupported by the
+// CPU.
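+//
+// A runtime-dispatch sketch for MSVC (SumSsse3/SumPortable are hypothetical
+// functions; CPUID leaf 1 reports SSSE3 in bit 9 of ECX):
+//
+//   #include <intrin.h>
+//   int regs[4];
+//   __cpuid(regs, 1);
+//   bool have_ssse3 = (regs[2] & (1 << 9)) != 0;
+//   int sum = have_ssse3 ? SumSsse3(data) : SumPortable(data);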
+#ifdef ABSL_INTERNAL_HAVE_SSSE3 +#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set +#elif defined(__SSSE3__) +#define ABSL_INTERNAL_HAVE_SSSE3 1 +#endif + +// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM +// SIMD). +#ifdef ABSL_INTERNAL_HAVE_ARM_NEON +#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set +#elif defined(__ARM_NEON) +#define ABSL_INTERNAL_HAVE_ARM_NEON 1 +#endif + +#endif // ABSL_BASE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/const_init.h b/CAPI/cpp/grpc/include/absl/base/const_init.h new file mode 100644 index 0000000..16520b6 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/const_init.h @@ -0,0 +1,76 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// kConstInit +// ----------------------------------------------------------------------------- +// +// A constructor tag used to mark an object as safe for use as a global +// variable, avoiding the usual lifetime issues that can affect globals. + +#ifndef ABSL_BASE_CONST_INIT_H_ +#define ABSL_BASE_CONST_INIT_H_ + +#include "absl/base/config.h" + +// In general, objects with static storage duration (such as global variables) +// can trigger tricky object lifetime situations. Attempting to access them +// from the constructors or destructors of other global objects can result in +// undefined behavior, unless their constructors and destructors are designed +// with this issue in mind. +// +// The normal way to deal with this issue in C++11 is to use constant +// initialization and trivial destructors. +// +// Constant initialization is guaranteed to occur before any other code +// executes. Constructors that are declared 'constexpr' are eligible for +// constant initialization. You can annotate a variable declaration with the +// ABSL_CONST_INIT macro to express this intent. For compilers that support +// it, this annotation will cause a compilation error for declarations that +// aren't subject to constant initialization (perhaps because a runtime value +// was passed as a constructor argument). +// +// On program shutdown, lifetime issues can be avoided on global objects by +// ensuring that they contain trivial destructors. A class has a trivial +// destructor unless it has a user-defined destructor, a virtual method or base +// class, or a data member or base class with a non-trivial destructor of its +// own. Objects with static storage duration and a trivial destructor are not +// cleaned up on program shutdown, and are thus safe to access from other code +// running during shutdown. +// +// For a few core Abseil classes, we make a best effort to allow for safe global +// instances, even though these classes have non-trivial destructors. These +// objects can be created with the absl::kConstInit tag. 
For example:
+// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
+//
+// The line above declares a global variable of type absl::Mutex which can be
+// accessed at any point during startup or shutdown. global_mutex's destructor
+// will still run, but will not invalidate the object. Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+ kConstInit,
+};
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CONST_INIT_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h
new file mode 100644
index 0000000..3ea7c15
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h
@@ -0,0 +1,471 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ABSL_ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+// In this case, macros expand to functions implemented by Thread Sanitizer,
+// when building with TSan. When not provided an external implementation,
+// dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#ifdef __cplusplus
+#include "absl/base/macros.h"
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export + +// ------------------------------------------------------------------------- +// Decide which features are enabled. + +#ifdef ABSL_HAVE_THREAD_SANITIZER + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +#if defined(__clang__) +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1 +#if !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif +#else +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#endif + +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED + +#endif // ABSL_HAVE_THREAD_SANITIZER + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 +// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are +// defined by the compiler-based santizer implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. +#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. 
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ABSL_ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. 
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockCreateStatic(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+ const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+ const volatile void* lock, long is_w); // NOLINT
+void AnnotateRWLockReleased(const char* file, int line,
+ const volatile void* lock, long is_w); // NOLINT
+void AnnotateBenignRace(const char* file, int line,
+ const volatile void* address, const char* description);
+void AnnotateBenignRaceSized(const char* file, int line,
+ const volatile void* address, size_t size,
+ const char* description);
+void AnnotateThreadName(const char* file, int line, const char* name);
+void AnnotateEnableRaceDetection(const char* file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name) // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
+
+#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+// TODO(rogeeff): remove this branch
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+
+#endif
+
+#endif // ABSL_HAVE_MEMORY_SANITIZER
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g.
AnnotateIgnoreReadsBegin) are +// defined by the compiler-based implementation, not by the Abseil +// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. + +// Request the analysis tool to ignore all reads in the current thread until +// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ABSL_ANNOTATE_UNPROTECTED_READ. +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) + +// Stop ignoring reads. +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreReadsBegin(const char* file, int line) + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; +void AnnotateIgnoreReadsEnd(const char* file, + int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; +ABSL_INTERNAL_END_EXTERN_C + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ + () + +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ + () + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsBegin)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} + +ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( + AbslInternalAnnotateIgnoreReadsEnd)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreWritesBegin(const char* file, int line); +void AnnotateIgnoreWritesEnd(const char* file, int line); +ABSL_INTERNAL_END_EXTERN_C + +#else + +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ABSL_ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ABSL_ANNOTATE_IGNORE_READS_END(); +// one can use +// ... 
= ABSL_ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ + ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ABSL_ANNOTATE_IGNORE_WRITES_END(); \ + ABSL_ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template +inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ABSL_ANNOTATE_IGNORE_READS_END(); + return res; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl +#endif + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. +#include + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + alignas(8) char x[8]; \ + } name + +#else + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h new file mode 100644 index 0000000..ae21cd7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h @@ -0,0 +1,200 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
+#if defined(_MSC_VER)
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
+#else
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
+// To workaround AtomicHook not being constant-initializable on some platforms,
+// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
+// instead of `ABSL_CONST_INIT`.
+#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
+#else
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+#endif
+
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks. It is a callable object that
+// dispatches to the registered hook. Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Hooks can be pre-registered via constant initialization, for example:
+//
+// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void (*)()>
+//     my_hook(DefaultAction);
+//
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  // Constructs an object that by default performs a no-op (and
+  // returns a default constructed object) when no hook has been registered.
+  constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+  // Constructs an object that by default dispatches to/returns the
+  // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(default_fn), default_fn_(default_fn) {}
+#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(kUninitialized), default_fn_(default_fn) {}
+#else
+  // As of January 2020, on all known versions of MSVC this constructor runs in
+  // the global constructor sequence. If `Store()` is called by a dynamic
+  // initializer, we want to preserve the value, even if this constructor runs
+  // after the call to `Store()`. If not, `hook_` will be
+  // zero-initialized by the linker and we have no need to set it.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+    static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+  }
+#endif
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once. Multiple calls are legal only if the
+  // same function pointer is provided for each call. The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
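+  //
+  // A hedged usage sketch (the names `MyHookFn`, `my_hook`, and
+  // `InstallMyHook` are illustrative, not part of this header):
+  //
+  //   using MyHookFn = void (*)(const char*);
+  //   ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static
+  //       absl::base_internal::AtomicHook<MyHookFn> my_hook;
+  //   void InstallMyHook(MyHookFn fn) { my_hook.Store(fn); }
+  //   void LogLine(const char* msg) { my_hook(msg); }  // no-op if unregistered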
+  void Store(FnPtr fn) {
+    bool success = DoStore(fn);
+    static_cast<void>(success);
+    assert(success);
+  }
+
+  // Invokes the registered callback. If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    return DoLoad()(std::forward<CallArgs>(args)...);
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer. Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = DoLoad();
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+
+  // Current versions of MSVC (as of September 2017) have a broken
+  // implementation of std::atomic<T*>: Its constructor attempts to do the
+  // equivalent of a reinterpret_cast in a constexpr context, which is not
+  // allowed.
+  //
+  // This causes an issue when building with LLVM under Windows. To avoid this,
+  // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Return the stored value, or DummyFunction if no value has been stored.
+  FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+  // Store the given value. Returns false if a different value was already
+  // stored to this object.
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = default_fn_;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == fn);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<FnPtr> hook_;
+#else  // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Use a sentinel value unlikely to be the address of an actual function.
+  static constexpr intptr_t kUninitialized = 0;
+
+  static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+                "intptr_t can't contain a function pointer");
+
+  FnPtr DoLoad() const {
+    const intptr_t value = hook_.load(std::memory_order_acquire);
+    if (value == kUninitialized) {
+      return default_fn_;
+    }
+    return reinterpret_cast<FnPtr>(value);
+  }
+
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    const auto value = reinterpret_cast<intptr_t>(fn);
+    intptr_t expected = kUninitialized;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == value);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<intptr_t> hook_;
+#endif
+
+  const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h
new file mode 100644
index 0000000..3e72b49
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+}  // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h
new file mode 100644
index 0000000..9704e38
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h
@@ -0,0 +1,159 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
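+//
+// A minimal usage sketch (hedged: CycleClock is an Abseil-internal API, and
+// the work being timed below is illustrative only):
+//
+//   int64_t start = absl::base_internal::CycleClock::Now();
+//   // ... code under measurement ...
+//   int64_t elapsed_cycles = absl::base_internal::CycleClock::Now() - start;
+//   double seconds =
+//       elapsed_cycles / absl::base_internal::CycleClock::Frequency();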
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using CycleClockSourceFunc = int64_t (*)();
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second. Note
+  // that this value may not necessarily match the core CPU clock frequency.
+  static double Frequency();
+
+ private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+  static CycleClockSourceFunc LoadCycleClockSource();
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+  // Not debug mode and the UnscaledCycleClock frequency is the CPU
+  // frequency. Scale the CycleClock to prevent overflow if someone
+  // tries to represent the time as cycles since the Unix epoch.
+  static constexpr int32_t kShift = 1;
+#else
+  // Not debug mode and the UnscaledCycleClock isn't operating at the
+  // raw CPU frequency. There is no need to do any scaling, so don't
+  // needlessly sacrifice precision.
+  static constexpr int32_t kShift = 0;
+#endif
+#else   // NDEBUG
+  // In debug mode use a different shift to discourage depending on a
+  // particular shift value.
+  static constexpr int32_t kShift = 2;
+#endif  // NDEBUG
+
+  static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+  ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+
+  friend class CycleClockSource;
+};
+
+class CycleClockSource {
+ private:
+  // CycleClockSource::Register()
+  //
+  // Register a function that provides an alternate source for the unscaled CPU
+  // cycle count value. The source function must be async signal safe, must not
+  // call CycleClock::Now(), and must have a frequency that matches that of the
+  // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+  // the default source.
+  static void Register(CycleClockSourceFunc source);
+};
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+  // Optimize for the common case (no callback) by first doing a relaxed load;
+  // this is significantly faster on non-x86 platforms.
+  if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+    return nullptr;
+  }
+#endif  // !defined(__x86_64__)
+
+  // This corresponds to the store(std::memory_order_release) in
+  // CycleClockSource::Register, and makes sure that any updates made prior to
+  // registering the callback are visible to this thread before the callback
+  // is invoked.
+  return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32 +inline int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} +#endif + +inline double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h new file mode 100644 index 0000000..e492bb0 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h @@ -0,0 +1,169 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_MMAP + +#include + +#ifdef __linux__ + +#include +#ifdef __BIONIC__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#if defined(__BIONIC__) || !defined(__GLIBC__) +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include +#else +#include +#endif // __BIONIC__ || !__GLIBC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. +#ifdef __BIONIC__ +extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, + off64_t offset) noexcept { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + defined(__m68k__) || defined(__sh__) || \ + (defined(__hppa__) && !defined(__LP64__)) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) || \ + (defined(__sparc__) && !defined(__arch64__)) + // On these architectures, implement mmap with mmap2. 
+  static int pagesize = 0;
+  if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+    pagesize = getpagesize();
+#else
+    pagesize = sysconf(_SC_PAGESIZE);
+#endif
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd, offset / pagesize);
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<size_t>(offset / pagesize)));
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  unsigned long buf[6] = {reinterpret_cast<unsigned long>(start),  // NOLINT
+                          static_cast<unsigned long>(length),      // NOLINT
+                          static_cast<unsigned long>(prot),        // NOLINT
+                          static_cast<unsigned long>(flags),       // NOLINT
+                          static_cast<unsigned long>(fd),          // NOLINT
+                          static_cast<unsigned long>(offset)};     // NOLINT
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension. We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off_t offset) {
+  return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return munmap(start, length);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // __linux__
+
+#endif  // ABSL_HAVE_MMAP
+
+#endif  // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h
new file mode 100644
index 0000000..b23c5ec
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h
@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer,
+//   when building with TSan. When not provided an external implementation,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+//   When building with a Clang compiler that supports thread-safety warnings,
+//   a subset of annotations can be statically-checked at compile-time. We
+//   expand these macros to static-inline functions that can be analyzed for
+//   thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+//   If neither Dynamic Annotations nor Clang thread-safety warnings are
+//   enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+  defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#endif + +// Memory annotations are also made available to LLVM's Memory Sanitizer +#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 +#endif + +#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 +#endif + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. +#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. 
+#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ANNOTATE_BENIGN_RACE(address, description) // empty +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ANNOTATE_THREAD_NAME(name) // empty +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. + +#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 + +#include + +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty +#endif + +#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. 
+ +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 + +// Request the analysis tool to ignore all reads in the current thread until +// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ANNOTATE_UNPROTECTED_READ. +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) + +// Stop ignoring reads. +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() + +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() + +#else + +#define ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +#else + +#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +#endif + +#else + +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. 
+#include + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + char x[8] __attribute__((aligned(8))); \ + } name + +#else + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/endian.h b/CAPI/cpp/grpc/include/absl/base/internal/endian.h new file mode 100644 index 0000000..50747d7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/endian.h @@ -0,0 +1,282 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +#include +#include + +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +inline uint64_t gbswap_64(uint64_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) + return __builtin_bswap64(host_int); +#elif defined(_MSC_VER) + return _byteswap_uint64(host_int); +#else + return (((host_int & uint64_t{0xFF}) << 56) | + ((host_int & uint64_t{0xFF00}) << 40) | + ((host_int & uint64_t{0xFF0000}) << 24) | + ((host_int & uint64_t{0xFF000000}) << 8) | + ((host_int & uint64_t{0xFF00000000}) >> 8) | + ((host_int & uint64_t{0xFF0000000000}) >> 24) | + ((host_int & uint64_t{0xFF000000000000}) >> 40) | + ((host_int & uint64_t{0xFF00000000000000}) >> 56)); +#endif +} + +inline uint32_t gbswap_32(uint32_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) + return __builtin_bswap32(host_int); +#elif defined(_MSC_VER) + return _byteswap_ulong(host_int); +#else + return (((host_int & uint32_t{0xFF}) << 24) | + ((host_int & uint32_t{0xFF00}) << 8) | + ((host_int & uint32_t{0xFF0000}) >> 8) | + ((host_int & uint32_t{0xFF000000}) >> 24)); +#endif +} + +inline uint16_t gbswap_16(uint16_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) + return __builtin_bswap16(host_int); +#elif defined(_MSC_VER) + return _byteswap_ushort(host_int); +#else + return (((host_int & uint16_t{0xFF}) << 8) | + ((host_int & uint16_t{0xFF00}) >> 8)); +#endif +} + +#ifdef ABSL_IS_LITTLE_ENDIAN + +// Portable definitions for htonl (host-to-network) and friends on little-endian +// architectures. +inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } +inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } +inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + +#elif defined ABSL_IS_BIG_ENDIAN + +// Portable definitions for htonl (host-to-network) etc on big-endian +// architectures. These definitions are simpler since the host byte order is the +// same as network byte order. +inline uint16_t ghtons(uint16_t x) { return x; } +inline uint32_t ghtonl(uint32_t x) { return x; } +inline uint64_t ghtonll(uint64_t x) { return x; } + +#else +#error \ + "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + +inline uint16_t gntohs(uint16_t x) { return ghtons(x); } +inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } +inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } + +// Utilities to convert numbers between the current hosts's native byte +// order and little-endian byte order +// +// Load/Store methods are alignment safe +namespace little_endian { +// Conversion functions. 
+#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast(FromHost16(bit_cast(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast(FromHost32(bit_cast(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast(FromHost64(bit_cast(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast(ToHost16(bit_cast(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast(ToHost32(bit_cast(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast(ToHost64(bit_cast(x))); +} + +// Functions to do unaligned loads and stores in little-endian order. 
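+//
+// For example (a hedged sketch; `buf` is an illustrative local): round-trip a
+// uint32_t through a byte buffer in little-endian order, independent of the
+// host's native byte order:
+//
+//   char buf[4];
+//   absl::little_endian::Store32(buf, 0x01020304);  // bytes 04 03 02 01
+//   uint32_t v = absl::little_endian::Load32(buf);  // v == 0x01020304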
+inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace little_endian + +// Utilities to convert numbers between the current hosts's native byte +// order and big-endian byte order (same as network byte order) +// +// Load/Store methods are alignment safe +namespace big_endian { +#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast(FromHost16(bit_cast(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast(FromHost32(bit_cast(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast(FromHost64(bit_cast(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast(ToHost16(bit_cast(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast(ToHost32(bit_cast(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast(ToHost64(bit_cast(x))); +} + +// Functions to do unaligned loads and stores in big-endian order. 
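+//
+// For example (a hedged sketch; `header` and `payload_size` are illustrative):
+// write a length prefix in network byte order, as a wire format might require:
+//
+//   char header[4];
+//   absl::big_endian::Store32(header, payload_size);  // MSB first on the wire
+//   uint32_t n = absl::big_endian::Load32(header);    // n == payload_size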
+inline uint16_t Load16(const void *p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(void *p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(const void *p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(void *p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(const void *p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(void *p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace big_endian + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h new file mode 100644 index 0000000..251de51 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h @@ -0,0 +1,43 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ +#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// `ErrnoSaver` captures the value of `errno` upon construction and restores it +// upon deletion. It is used in low-level code and must be super fast. Do not +// add instrumentation, even in debug modes. +class ErrnoSaver { + public: + ErrnoSaver() : saved_errno_(errno) {} + ~ErrnoSaver() { errno = saved_errno_; } + int operator()() const { return saved_errno_; } + + private: + const int saved_errno_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h new file mode 100644 index 0000000..77a5aec --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1109 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/utility/utility.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+std::string GetSpecString(TypeSpec);
+std::string GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class. We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+  explicit TestException(absl::string_view msg) : msg_(msg) {}
+  virtual ~TestException() {}
+  virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+  std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc. We use a child
+// class of std::bad_alloc so we can customize the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+  explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+  using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept;
+
+struct TrackedAddress {
+  bool is_alive;
+  std::string description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will destroy everything left over in its destructor.
+class ConstructorTracker {
+ public:
+  explicit ConstructorTracker(int count) : countdown_(count) {
+    assert(current_tracker_instance_ == nullptr);
+    current_tracker_instance_ = this;
+  }
+
+  ~ConstructorTracker() {
+    assert(current_tracker_instance_ == this);
+    current_tracker_instance_ = nullptr;
+
+    for (auto& it : address_map_) {
+      void* address = it.first;
+      TrackedAddress& tracked_address = it.second;
+      if (tracked_address.is_alive) {
+        ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                      countdown_, "Object was not destroyed.");
+      }
+    }
+  }
+
+  static void ObjectConstructed(void* address, std::string description) {
+    if (!CurrentlyTracking()) return;
+
+    TrackedAddress& tracked_address =
+        current_tracker_instance_->address_map_[address];
+    if (tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(
+          address, tracked_address.description,
+          current_tracker_instance_->countdown_,
+          "Object was re-constructed. Current object was constructed by " +
+              description);
+    }
+    tracked_address = {true, std::move(description)};
+  }
+
+  static void ObjectDestructed(void* address) {
+    if (!CurrentlyTracking()) return;
+
+    auto it = current_tracker_instance_->address_map_.find(address);
+    // Not tracked. Ignore.
+    if (it == current_tracker_instance_->address_map_.end()) return;
+
+    TrackedAddress& tracked_address = it->second;
+    if (!tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                    current_tracker_instance_->countdown_,
+                                    "Object was re-destroyed.");
+    }
+    tracked_address.is_alive = false;
+  }
+
+ private:
+  static bool CurrentlyTracking() {
+    return current_tracker_instance_ != nullptr;
+  }
+
+  static std::string ErrorMessage(void* address,
+                                  const std::string& address_description,
+                                  int countdown,
+                                  const std::string& error_description) {
+    return absl::Substitute(
+        "With countdown at $0:\n"
+        "  $1\n"
+        "  Object originally constructed by $2\n"
+        "  Object address: $3\n",
+        countdown, error_description, address_description, address);
+  }
+
+  std::unordered_map<void*, TrackedAddress> address_map_;
+  int countdown_;
+
+  static ConstructorTracker* current_tracker_instance_;
+};
+
+class TrackedObject {
+ public:
+  TrackedObject(const TrackedObject&) = delete;
+  TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+  explicit TrackedObject(std::string description) {
+    ConstructorTracker::ObjectConstructed(this, std::move(description));
+  }
+
+  ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+}  // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool. The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+  ThrowingBool(bool b) noexcept : b_(b) {}  // NOLINT(runtime/explicit)
+  operator bool() const {  // NOLINT
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return b_;
+  }
+
+ private:
+  bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+  kEverythingThrows = 0,
+  kNoThrowCopy = 1,
+  kNoThrowMove = 1 << 1,
+  kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is, it's a value type with the expected semantics. It also implements
+ * arithmetic operations. It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments. That is, to make a
+ * ThrowingValue which has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ *   ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowCopy> my_thrwr{val};
+ */
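+// For example (a sketch; the expectations are illustrative):
+//
+//   ThrowingValue<> v(7);                        // every operation may throw
+//   ThrowingValue<TypeSpec::kNoThrowCopy> c(7);  // copies are noexcept
+//   static_assert(
+//       std::is_nothrow_copy_constructible<
+//           ThrowingValue<TypeSpec::kNoThrowCopy>>::value, "");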
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(TypeSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+  static constexpr int kDefaultValue = 0;
+  static constexpr int kBadValue = 938550620;
+
+ public:
+  ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = kDefaultValue;
+  }
+
+  ThrowingValue(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  ThrowingValue(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = i;
+  }
+
+  ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+      : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+  // absl expects nothrow destructors
+  ~ThrowingValue() noexcept = default;
+
+  ThrowingValue& operator=(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  // Arithmetic Operators
+  ThrowingValue operator+(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator+() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(-dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue& operator++() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    ++dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator++(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    ++dummy_;
+    return out;
+  }
+
+  ThrowingValue& operator--() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    --dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator--(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    --dummy_;
+    return out;
+  }
+
+  ThrowingValue operator*(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator/(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator%(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator<<(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ << shift, nothrow_ctor);
+  }
+
+  ThrowingValue operator>>(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+  }
+
+  // Comparison Operators
+  // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+  friend ThrowingBool operator==(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ == b.dummy_;
+  }
+  friend ThrowingBool operator!=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ != b.dummy_;
+  }
+  friend ThrowingBool operator<(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ < b.dummy_;
+  }
+  friend ThrowingBool operator<=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ <= b.dummy_;
+  }
+  friend ThrowingBool operator>(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ > b.dummy_;
+  }
+  friend ThrowingBool operator>=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ >= b.dummy_;
+  }
+
+  // Logical Operators
+  ThrowingBool operator!() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return !dummy_;
+  }
+
+  ThrowingBool operator&&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ && other.dummy_;
+  }
+
+  ThrowingBool operator||(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ || other.dummy_;
+  }
+
+  // Bitwise Logical Operators
+  ThrowingValue operator~() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(~dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator&(const ThrowingValue& other) const {
+
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + static void* operator new(size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void* operator new[](size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + + template + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward(args)...); + } + + template + static void* operator new[](size_t s, Args&&... 
args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + void operator delete(void* p) noexcept { ::operator delete(p); } + + template + void operator delete(void* p, Args&&... args) noexcept { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void* p) noexcept { return ::operator delete[](p); } + + template + void operator delete[](void* p, Args&&... args) noexcept { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. + int& Get() noexcept { return dummy_; } + const int& Get() const noexcept { return dummy_; } + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingValue<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + int dummy_; +}; +// While not having to do with exceptions, explicitly delete comma operator, to +// make sure we don't use it on user-supplied types. +template +void operator,(const ThrowingValue&, T&&) = delete; +template +void operator,(T&&, const ThrowingValue&) = delete; + +/* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. + * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ +enum class AllocSpec { + kEverythingThrows = 0, + kNoThrowAllocate = 1, +}; + +/* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. + */ +template +class ThrowingAllocator : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(AllocSpec spec) { + return static_cast(Spec & spec); + } + + public: + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using void_pointer = void*; + using const_void_pointer = const void*; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared(next_id_++); + } + + template + ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. 
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + template + ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=( + const ThrowingAllocator& other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept { + dummy_ = std::move(other.State()); + return *this; + } + + template + struct rebind { + using other = ThrowingAllocator; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void construct(U* ptr, Args&&... args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template + void destroy(U* p) noexcept { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return *this; + } + + template + bool operator==(const ThrowingAllocator& other) const noexcept { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator& other) const noexcept { + return dummy_ != other.dummy_; + } + + template + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingAllocator<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + const std::shared_ptr& State() const { return dummy_; } + std::shared_ptr& State() { return dummy_; } + + void ReadState() { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); + } + } + + static int next_id_; + std::shared_ptr dummy_; +}; + +template +int ThrowingAllocator::next_id_ = 0; + +// Tests for resource leaks by attempting to construct a T using args repeatedly +// until successful, using the countdown method. Side effects can then be +// tested for resource leaks. +template +void TestThrowingCtor(Args&&... 
args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException&) {
+    }
+  }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+  struct Cleanup {
+    Cleanup() { exceptions_internal::SetCountdown(); }
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  try {
+    operation();
+    return testing::AssertionSuccess();
+  } catch (const exceptions_internal::TestException&) {
+    return testing::AssertionFailure()
+           << "TestException thrown during call to operation() when nothrow "
+              "guarantee was expected.";
+  } catch (...) {
+    return testing::AssertionFailure()
+           << "Unknown exception thrown during call to operation() when "
+              "nothrow guarantee was expected.";
+  }
+}
+
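+// Example uses of the two helpers above (sketches; the lambda body and the
+// tested type are illustrative):
+//
+//   // Leak-checks T(int) against an exception thrown from each successive
+//   // throw point:
+//   TestThrowingCtor<ThrowingValue<>>(42);
+//
+//   // Asserts that a nothrow-specified move really cannot throw:
+//   ThrowingValue<TypeSpec::kNoThrowMove> v(1, nothrow_ctor);
+//   EXPECT_TRUE(TestNothrowOp([&v]() {
+//     ThrowingValue<TypeSpec::kNoThrowMove> moved(std::move(v));
+//   }));
+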
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+  explicit DefaultFactory(const T& t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+  T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyContractsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+  using type = decltype(std::declval<const Factory&>()());
+
+  static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+  using Factory = std::function<std::unique_ptr<T>()>;
+  using Operation = std::function<void(T*)>;
+  using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+  template <typename... Contracts>
+  explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+                               const Contracts&... contracts)
+      : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+  AssertionResult Test() const {
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      for (const auto& contract : contracts_) {
+        auto t_ptr = factory_();
+        try {
+          SetCountdown(count);
+          operation_(t_ptr.get());
+          // Unset for the case that the operation throws no exceptions, which
+          // would leave the countdown set and break the *next* exception safety
+          // test after this one.
+          UnsetCountdown();
+          return AssertionSuccess();
+        } catch (const exceptions_internal::TestException& e) {
+          if (!contract(t_ptr.get())) {
+            return AssertionFailure() << e.what() << " failed contract check";
+          }
+        }
+      }
+    }
+  }
+
+ private:
+  template <typename ContractFn>
+  Contract WrapContract(const ContractFn& contract) {
+    return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+  }
+
+  Contract WrapContract(StrongGuaranteeTagType) {
+    return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ *   tester.WithContracts(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Contract assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type contracts of the
+ *   provided T instance hold. If the type contracts of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Contracts... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+   * on the provided T instance. The existing factory will not be included in
+   * the newly created tester instance. The created factory returns a new T
+   * instance by copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+  WithInitialValue(const T& t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+   * included. The existing factory will not be included in the newly-created
+   * tester instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
+ */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. + */ + template + ExceptionSafetyTestBuilder, Contracts...> + WithOperation(const NewOperation& new_operation) const { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const { + return { + factory_, operation_, + std::tuple_cat(contracts_, std::tuple...>( + more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. + */ + template < + typename NewOperation, + typename = EnableIfTestable> + testing::AssertionResult Test(const NewOperation& new_operation) const { + return TestImpl(new_operation, absl::index_sequence_for()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. 
+ */ + template < + typename LazyOperation = Operation, + typename = EnableIfTestable> + testing::AssertionResult Test() const { + return Test(operation_); + } + + private: + template + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() {} + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, + const std::tuple& i) + : factory_(f), operation_(o), contracts_(i) {} + + template + testing::AssertionResult TestImpl(SelectedOperation selected_operation, + absl::index_sequence) const { + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)...) + .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple contracts_; +}; + +} // namespace exceptions_internal + +} // namespace testing + +#endif // ABSL_HAVE_EXCEPTIONS + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h new file mode 100644 index 0000000..01b5465 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h @@ -0,0 +1,42 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Testing utilities for ABSL types which throw exceptions. + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ + +#include "gtest/gtest.h" +#include "absl/base/config.h" + +// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception +// if exceptions are enabled, or for death with a specified text in the error +// message +#ifdef ABSL_HAVE_EXCEPTIONS + +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_THROW(expr, exception_t) + +#elif defined(__ANDROID__) +// Android asserts do not log anywhere that gtest can currently inspect. +// So we expect exit, but cannot match the message. +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH(expr, ".*") +#else +#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ + EXPECT_DEATH_IF_SUPPORTED(expr, text) + +#endif + +#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h new file mode 100644 index 0000000..a547b3a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h @@ -0,0 +1,50 @@ +// +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+  constexpr static char dummy_var = 0;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+#endif
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. These are meant to be a good match for keys into maps or
+// straight up comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+  return &FastTypeTag<Type>::dummy_var;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h
new file mode 100644
index 0000000..1dba809
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h
@@ -0,0 +1,51 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+  return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T*>(hidden ^ HideMask());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/identity.h b/CAPI/cpp/grpc/include/absl/base/internal/identity.h
new file mode 100644
index 0000000..a3154ed
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/identity.h
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_ +#define ABSL_BASE_INTERNAL_IDENTITY_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace internal { + +template +struct identity { + typedef T type; +}; + +template +using identity_t = typename identity::type; + +} // namespace internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h new file mode 100644 index 0000000..130d8c2 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ +#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ + +#include + +#include "absl/base/internal/identity.h" + +// File: +// This file define a macro that allows the creation of or emulation of C++17 +// inline variables based on whether or not the feature is supported. + +//////////////////////////////////////////////////////////////////////////////// +// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) +// +// Description: +// Expands to the equivalent of an inline constexpr instance of the specified +// `type` and `name`, initialized to the value `init`. If the compiler being +// used is detected as supporting actual inline variables as a language +// feature, then the macro expands to an actual inline variable definition. +// +// Requires: +// `type` is a type that is usable in an extern variable declaration. +// +// Requires: `name` is a valid identifier +// +// Requires: +// `init` is an expression that can be used in the following definition: +// constexpr type name = init; +// +// Usage: +// +// // Equivalent to: `inline constexpr size_t variant_npos = -1;` +// ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1); +// +// Differences in implementation: +// For a direct, language-level inline variable, decltype(name) will be the +// type that was specified along with const qualification, whereas for +// emulated inline variables, decltype(name) may be different (in practice +// it will likely be a reference type). +//////////////////////////////////////////////////////////////////////////////// + +#ifdef __cpp_inline_variables + +// Clang's -Wmissing-variable-declarations option erroneously warned that +// inline constexpr objects need to be pre-declared. 
This has now been fixed, +// but we will need to support this workaround for people building with older +// versions of clang. +// +// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862 +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#if defined(__clang__) +#define ABSL_INTERNAL_EXTERN_DECL(type, name) \ + extern const ::absl::internal::identity_t name; +#else // Otherwise, just define the macro to do nothing. +#define ABSL_INTERNAL_EXTERN_DECL(type, name) +#endif // defined(__clang__) + +// See above comment at top of file for details. +#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ + ABSL_INTERNAL_EXTERN_DECL(type, name) \ + inline constexpr ::absl::internal::identity_t name = init + +#else + +// See above comment at top of file for details. +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template \ + struct AbslInternalInlineVariableHolder##name { \ + static constexpr ::absl::internal::identity_t kInstance = init; \ + }; \ + \ + template \ + constexpr ::absl::internal::identity_t \ + AbslInternalInlineVariableHolder##name::kInstance; \ + \ + static constexpr const ::absl::internal::identity_t& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, \ + "Silence unused variable warnings.") + +#endif // __cpp_inline_variables + +#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h new file mode 100644 index 0000000..3856b9f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_ +#define ABSL_BASE_INLINE_VARIABLE_TESTING_H_ + +#include "absl/base/internal/inline_variable.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace inline_variable_testing_internal { + +struct Foo { + int value = 5; +}; + +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); +ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); + +ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); +ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); + +ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr); + +const Foo& get_foo_a(); +const Foo& get_foo_b(); + +const int& get_int_a(); +const int& get_int_b(); + +} // namespace inline_variable_testing_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/invoke.h b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h new file mode 100644 index 0000000..643c2a4 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h @@ -0,0 +1,241 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::base_internal::invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. +// When compiled as C++17 and later versions, it is implemented as an alias of +// std::invoke. +// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within invoke() +// isn't an error. + +#ifndef ABSL_BASE_INTERNAL_INVOKE_H_ +#define ABSL_BASE_INTERNAL_INVOKE_H_ + +#include "absl/base/config.h" + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +using std::invoke; +using std::invoke_result_t; +using std::is_invocable_r; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#include +#include +#include + +#include "absl/meta/type_traits.h" + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. 
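+//
+// As a quick illustration of the five clauses (hypothetical struct S, shown
+// only to make the dispatch concrete):
+//
+//   struct S { int f(int x) { return x; } int d; };
+//   S s{}; S* p = &s;
+//   base_internal::invoke(&S::f, s, 1);  // clause 1: (t1.*f)(t2)
+//   base_internal::invoke(&S::f, p, 1);  // clause 2: ((*t1).*f)(t2)
+//   base_internal::invoke(&S::d, s);     // clause 3: t1.*f
+//   base_internal::invoke(&S::d, p);     // clause 4: (*t1).*f
+//   base_internal::invoke([] { return 0; });  // clause 5: f(...)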
+ +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// The five classes below each implement one of the clauses from the definition +// of INVOKE. The inner class template Accept checks whether the +// clause is applicable; static function template Invoke(f, args...) does the +// invocation. +// +// By separating the clause selection logic from invocation we make sure that +// Invoke() does exactly what the standard says. + +template +struct StrippedAccept { + template + struct Accept : Derived::template AcceptImpl::type>::type...> {}; +}; + +// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T. +struct MemFunAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::is_function::value> { + }; + + template + static decltype((std::declval().* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { +// Ignore bogus GCC warnings on this line. +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return (std::forward(obj).* + std::forward(mem_fun))(std::forward(args)...); +#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) +#pragma GCC diagnostic pop +#endif + } +}; + +// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item. +struct MemFunAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::is_function::value> { + }; + + template + static decltype(((*std::declval()).* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { + return ((*std::forward(ptr)).* + std::forward(mem_fun))(std::forward(args)...); + } +}; + +// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T. +struct DataMemAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::is_function::value> {}; + + template + static decltype(std::declval().*std::declval()) Invoke( + DataMem&& data_mem, Ref&& ref) { + return std::forward(ref).*std::forward(data_mem); + } +}; + +// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item. +struct DataMemAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::is_function::value> {}; + + template + static decltype((*std::declval()).*std::declval()) Invoke( + DataMem&& data_mem, Ptr&& ptr) { + return (*std::forward(ptr)).*std::forward(data_mem); + } +}; + +// f(t1, t2, ..., tN) in all other cases. +struct Callable { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template + static decltype(std::declval()(std::declval()...)) Invoke( + F&& f, Args&&... 
args) { + return std::forward(f)(std::forward(args)...); + } +}; + +// Resolves to the first matching clause. +template +struct Invoker { + typedef typename std::conditional< + MemFunAndRef::Accept::value, MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept::value, MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept::value, DataMemAndRef, + typename std::conditional::value, + DataMemAndPtr, Callable>::type>::type>:: + type>::type type; +}; + +// The result type of Invoke. +template +using invoke_result_t = decltype(Invoker::type::Invoke( + std::declval(), std::declval()...)); + +// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section +// [func.require] of the C++ standard. +template +invoke_result_t invoke(F&& f, Args&&... args) { + return Invoker::type::Invoke(std::forward(f), + std::forward(args)...); +} + +template +struct IsInvocableRImpl : std::false_type {}; + +template +struct IsInvocableRImpl< + absl::void_t >, R, F, + Args...> + : std::integral_constant< + bool, + std::is_convertible, + R>::value || + std::is_void::value> {}; + +// Type trait whose member `value` is true if invoking `F` with `Args` is valid, +// and either the return type is convertible to `R`, or `R` is void. +// C++11-compatible version of `std::is_invocable_r`. +template +using is_invocable_r = IsInvocableRImpl; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L + +#endif // ABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h new file mode 100644 index 0000000..db91951 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h @@ -0,0 +1,126 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ +#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ + +// A simple thread-safe memory allocator that does not depend on +// mutexes or thread-specific data. It is intended to be used +// sparingly, and only when malloc() would introduce an unwanted +// dependency, such as inside the heap-checker, or the Mutex +// implementation. + +// IWYU pragma: private, include "base/low_level_alloc.h" + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// LowLevelAlloc requires that the platform support low-level +// allocation of virtual memory. Platforms lacking this cannot use +// LowLevelAlloc. +#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING +#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set +#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32) +#define ABSL_LOW_LEVEL_ALLOC_MISSING 1 +#endif + +// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or +// asm.js / WebAssembly. +// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html +// for more information. 
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;  // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc(), the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc(). Does nothing if passed 0. "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc(). The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
+  // are to put all callers of MallocHook::Invoke* in this module
+  // into special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be ORed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+
+  // Construct a new arena. The allocation of the underlying metadata honors
+  // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(int32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
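+
+  // A usage sketch (illustrative; error handling omitted):
+  //
+  //   LowLevelAlloc::Arena* arena =
+  //       LowLevelAlloc::NewArena(LowLevelAlloc::kCallMallocHook);
+  //   void* block = LowLevelAlloc::AllocWithArena(128, arena);
+  //   LowLevelAlloc::Free(block);         // returns the block to its arena
+  //   LowLevelAlloc::DeleteArena(arena);  // succeeds once all blocks are freed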
+
+ private:
+  LowLevelAlloc();  // no instances
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h
new file mode 100644
index 0000000..9baccc0
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h
@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage. These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+}  // namespace synchronization_internal
+
+namespace base_internal {
+
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+class SpinLock;          // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks. This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a pagefault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+
+ private:
+  // Disable cooperative rescheduling of the calling thread. It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule. Nestable. The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // A scoped helper to enable rescheduling temporarily.
+  // REQUIRES: destructor must run in same thread as constructor.
+ class ScopedEnable { + public: + ScopedEnable(); + ~ScopedEnable(); + + private: + int scheduling_disabled_depth_; + }; + + // Access to SchedulingGuard is explicitly permitted. + friend class absl::CondVar; + friend class absl::Mutex; + friend class SchedulingHelper; + friend class SpinLock; + friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); +}; + +//------------------------------------------------------------------------------ +// End of public interfaces. +//------------------------------------------------------------------------------ + +inline bool SchedulingGuard::ReschedulingIsAllowed() { + return false; +} + +inline bool SchedulingGuard::DisableRescheduling() { + return false; +} + +inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { + return; +} + +inline SchedulingGuard::ScopedEnable::ScopedEnable() + : scheduling_disabled_depth_(0) {} +inline SchedulingGuard::ScopedEnable::~ScopedEnable() { + ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h b/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h new file mode 100644 index 0000000..cf5e97a --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/per_thread_tls.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ +#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ + +// This header defines two macros: +// +// If the platform supports thread-local storage: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a +// thread-local variable +// * ABSL_PER_THREAD_TLS is 1 +// +// Otherwise: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is empty +// * ABSL_PER_THREAD_TLS is 0 +// +// Microsoft C supports thread-local storage. +// GCC supports it if the appropriate version of glibc is available, +// which the programmer can indicate by defining ABSL_HAVE_TLS + +#include "absl/base/port.h" // For ABSL_HAVE_TLS + +#if defined(ABSL_PER_THREAD_TLS) +#error ABSL_PER_THREAD_TLS cannot be directly set +#elif defined(ABSL_PER_THREAD_TLS_KEYWORD) +#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set +#elif defined(ABSL_HAVE_TLS) +#define ABSL_PER_THREAD_TLS_KEYWORD __thread +#define ABSL_PER_THREAD_TLS 1 +#elif defined(_MSC_VER) +#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) +#define ABSL_PER_THREAD_TLS 1 +#else +#define ABSL_PER_THREAD_TLS_KEYWORD +#define ABSL_PER_THREAD_TLS 0 +#endif + +#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h new file mode 100644 index 0000000..0641928 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h @@ -0,0 +1,138 @@ +// Copyright 2022 The Abseil Authors. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_
+#define ABSL_BASE_INTERNAL_PREFETCH_H_
+
+#include "absl/base/config.h"
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE)
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+// Compatibility wrappers around __builtin_prefetch, to prefetch data
+// for read if supported by the toolchain.
+
+// Move data into the cache before it is read, or "prefetch" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// The function names specify the temporal locality heuristic applied,
+// using the names of Intel prefetch instructions:
+//
+// T0 - high degree of temporal locality; data should be left in as
+//      many levels of the cache as possible
+// T1 - moderate degree of temporal locality
+// T2 - low degree of temporal locality
+// Nta - no temporal locality, data need not be left in the cache
+//       after the read
+//
+// Incorrect or gratuitous use of these functions can degrade
+// performance, so use them only when representative benchmarks show
+// an improvement.
+//
+// Example usage:
+//
+//   absl::base_internal::PrefetchT0(addr);
+//
+// Currently, the different prefetch calls behave on some Intel
+// architectures as follows:
+//
+//                 SNB..SKL   SKX
+// PrefetchT0()   L1/L2/L3  L1/L2
+// PrefetchT1()      L2/L3     L2
+// PrefetchT2()      L2/L3     L2
+// PrefetchNta()  L1/--/L3    L1*
+//
+// * On SKX PrefetchNta() will bring the line into L1 but will evict
+//   from L3 cache. This might result in surprising behavior.
+//
+// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
+//
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+void PrefetchT0(const void* addr);
+void PrefetchT1(const void* addr);
+void PrefetchT2(const void* addr);
+void PrefetchNta(const void* addr);
+
+// Implementation details follow.
+
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+// These functions speculatively load for read only. This is
+// safe for all currently supported platforms. However, prefetch for
+// store may have problems depending on the target platform.
+//
+inline void PrefetchT0(const void* addr) {
+  // Note: this uses prefetcht0 on Intel.
+  __builtin_prefetch(addr, 0, 3);
+}
+inline void PrefetchT1(const void* addr) {
+  // Note: this uses prefetcht1 on Intel.
+  __builtin_prefetch(addr, 0, 2);
+}
+inline void PrefetchT2(const void* addr) {
+  // Note: this uses prefetcht2 on Intel.
+  __builtin_prefetch(addr, 0, 1);
+}
+inline void PrefetchNta(const void* addr) {
+  // Note: this uses prefetchnta on Intel.
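+  // (For all four wrappers the second __builtin_prefetch argument of 0
+  // requests a read prefetch, and the third argument is the locality hint:
+  // 3 keeps the data in as many cache levels as possible, 0 minimizes
+  // cache pollution.)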
+  __builtin_prefetch(addr, 0, 0);
+}
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+inline void PrefetchT0(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+inline void PrefetchT1(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
+}
+inline void PrefetchT2(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
+}
+inline void PrefetchNta(const void* addr) {
+  _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+#else
+inline void PrefetchT0(const void*) {}
+inline void PrefetchT1(const void*) {}
+inline void PrefetchT2(const void*) {}
+inline void PrefetchNta(const void*) {}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_PREFETCH_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h b/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h
new file mode 100644
index 0000000..35d5167
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h
new file mode 100644
index 0000000..0747c9d
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h
@@ -0,0 +1,196 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_logging_internal_basename =                 \
+        ::absl::raw_logging_internal::Basename(__FILE__,                       \
+                                               sizeof(__FILE__) - 1);          \
+    ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
+                                         absl_raw_logging_internal_basename,   \
+                                         __LINE__, __VA_ARGS__);               \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead. This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments. Use
+// StrCat if you need to build a richer message.
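+//
+// Example usage (illustrative; `path` and `fd` are placeholders):
+//
+//   ABSL_INTERNAL_LOG(INFO, absl::StrCat("opening ", path));
+//   ABSL_INTERNAL_CHECK(fd >= 0, "expected a valid descriptor");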
+#define ABSL_INTERNAL_LOG(severity, message) \ + do { \ + constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_filename, __LINE__, message); \ + if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ + ABSL_INTERNAL_UNREACHABLE; \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do { \ + if (ABSL_PREDICT_FALSE(!(condition))) { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) + +#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo +#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning +#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError +#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal +#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ + ::absl::NormalizeLogSeverity(severity) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace raw_logging_internal { + +// Helper function to implement ABSL_RAW_LOG +// Logs format... at "severity" level, reporting it +// as called from file:line. +// This does not allocate memory or acquire locks. +void RawLog(absl::LogSeverity severity, const char* file, int line, + const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + +// Writes the provided buffer directly to stderr, in a signal-safe, low-level +// manner. +void AsyncSignalSafeWriteToStderr(const char* s, size_t len); + +// compile-time function to get the "base" filename, that is, the part of +// a filename after the last "/" or "\" path separator. The search starts at +// the end of the string; the second parameter is the length of the string. +constexpr const char* Basename(const char* fname, int offset) { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' + ? fname + offset + : Basename(fname, offset - 1); +} + +// For testing only. +// Returns true if raw logging is fully supported. When it is not +// fully supported, no messages will be emitted, but a log at FATAL +// severity will cause an abort. +// +// TODO(gfalcon): Come up with a better name for this method. +bool RawLoggingFullySupported(); + +// Function type for a raw_logging customization hook for suppressing messages +// by severity, and for writing custom prefixes on non-suppressed messages. +// +// The installed hook is called for every raw log invocation. The message will +// be logged to stderr only if the hook returns true. FATAL errors will cause +// the process to abort, even if writing to stderr is suppressed. The hook is +// also provided with an output buffer, where it can write a custom log message +// prefix. +// +// The raw_logging system does not allocate memory or grab locks. User-provided +// hooks must avoid these operations, and must not throw exceptions. +// +// 'severity' is the severity level of the message being written. +// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro +// was located. +// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the +// hook writes a prefix, it must increment *buf and decrement *buf_size +// accordingly. 
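+//
+// (Note: per the contract above, a hook that unconditionally returns false
+// suppresses all raw log output except the abort performed for FATAL
+// messages.)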
+using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity,
+                                        const char* file, int line, char** buf,
+                                        int* buf_size);
+
+// Function type for a raw_logging customization hook called to abort a process
+// when a FATAL message is logged. If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The NUL-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogFilterAndPrefixHook.)
+//
+// The lifetime of the filename and message buffers will not end while the
+// process remains alive.
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(absl::LogSeverity severity,
+                                     const char* file, int line,
+                                     const std::string& message);
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
+    InternalLogFunction>
+    internal_log_function;
+
+// Registers hooks of the above types. Only a single hook of each type may be
+// registered. It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
+void RegisterAbortHook(AbortHook func);
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+}  // namespace raw_logging_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h
new file mode 100644
index 0000000..8be5ab6
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h
@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled. Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled. This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type. Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h
new file mode 100644
index 0000000..19ec7b5
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h
@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+  ScopedSetEnv(const char* var_name, const char* new_value);
+  ~ScopedSetEnv();
+
+ private:
+  std::string var_name_;
+  std::string old_value_;
+
+  // True if the environment variable was initially not set.
+  bool was_unset_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h
new file mode 100644
index 0000000..6d8d8dd
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h
@@ -0,0 +1,256 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Most users requiring mutual exclusion should use Mutex.
+// SpinLock is provided for use in two situations:
+//  - for use by Abseil internal code that Mutex itself depends on
+//  - for async signal safety (see below)
+
+// SpinLock is async signal safe. If a spinlock is used within a signal
+// handler, all code that acquires the lock must ensure that the signal cannot
+// arrive while they are holding the lock. Typically, this is done by blocking
+// the signal.
+//
+// Threads waiting on a SpinLock may be woken in an arbitrary order.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <atomic>
+
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers. Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+
+  // Constructor for global SpinLock instances. See absl/base/const_init.h.
+  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
+      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+  // For global SpinLock instances prefer trivial destructor when possible.
+  // Default but non-trivial destructor in some build configurations causes an
+  // extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+#else
+  ~SpinLock() = default;
+#endif
+
+  // Acquire this SpinLock.
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful. If the lock was not acquired, false is
+  // returned. If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+                                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
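+      // (SlowUnlock() forwards these wait cycles to any profiler registered
+      // via RegisterSpinLockProfiler() and wakes a queued waiter.)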
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held. When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+  // Return immediately if this thread holds the SpinLock exclusively.
+  // Otherwise, report an error by crashing with a diagnostic.
+  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+    if (!IsHeld()) {
+      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+    }
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static uint64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above. Use for testing only.
+  friend struct SpinLockTest;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether the current lock holder disabled scheduling when
+  //        acquiring the lock. Only set when kSpinLockHeld is also set.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  //        This is set by the lock holder to indicate how long it waited on
+  //        the lock before eventually acquiring it. The number of cycles is
+  //        encoded as a 29-bit unsigned int, or in the case that the current
+  //        holder did not wait but another waiter is queued, the LSB
+  //        (kSpinLockSleeper) is set. The implementation does not explicitly
+  //        track the number of queued waiters beyond this. It must always be
+  //        assumed that waiters may exist if the current holder was required
+  //        to queue.
+  //
+  // Invariant: if the lock is not held, the value is either 0 or
+  // kSpinLockCooperative.
+  static constexpr uint32_t kSpinLockHeld = 1;
+  static constexpr uint32_t kSpinLockCooperative = 2;
+  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+  static constexpr uint32_t kSpinLockSleeper = 8;
+  // Includes kSpinLockSleeper.
+  static constexpr uint32_t kWaitTimeMask =
+      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop();
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
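+//
+// Example usage (illustrative):
+//
+//   static SpinLock lock(base_internal::SCHEDULE_KERNEL_ONLY);
+//   void Increment() {
+//     SpinLockHolder holder(&lock);
+//     ++counter;  // counter is a placeholder for the guarded state
+//   }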
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended. The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles. This is thread-safe, but only a single
+// profiler can be registered. It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc
new file mode 100644
index 0000000..7b0cada
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_akaros.inc
@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock.
Our callers assume this will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc
new file mode 100644
index 0000000..fe8ba67
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent of the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int,
+    absl::base_internal::SchedulingMode) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+}  // extern "C"
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc
new file mode 100644
index 0000000..4f6f887
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_posix.inc
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
+
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h
new file mode 100644
index 0000000..9a1adcd
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h
@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to". It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w. Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a call to SpinLockWake(w).
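+// (On Linux, for example, this delay is implemented as a FUTEX_WAIT on *w;
+// see spinlock_linux.inc.)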
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker. This causes it to flag weak symbol overrides as ODR
+// violations. Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+                                                      bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+  (w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc
new file mode 100644
index 0000000..9d22481
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_win32.inc
@@ -0,0 +1,37 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000);
+  }
+}
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/strerror.h b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h
new file mode 100644
index 0000000..3500973
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h
@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread. The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them. `errno` will not be modified by a call to `absl::StrError`.
+std::string StrError(int errnum);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_STRERROR_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h
new file mode 100644
index 0000000..119cf1f
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h
@@ -0,0 +1,74 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on. It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0.
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor. This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused. Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill(). However,
+// it is recommended for portability that you use pthread_kill() instead.
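+//
+// Example usage (illustrative):
+//
+//   const pid_t tid = absl::base_internal::GetTID();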
+#ifdef _WIN32
+// On Windows, process ids and thread ids are the same type: the return types
+// of GetProcessId() and GetThreadId() are both DWORD, an unsigned 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h
new file mode 100644
index 0000000..4dab6a9
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h
@@ -0,0 +1,271 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: thread_annotations.h
+// -----------------------------------------------------------------------------
+//
+// WARNING: This is a backwards compatible header and it will be removed after
+// the migration to prefixed thread annotations is finished; please include
+// "absl/base/thread_annotations.h".
+//
+// This header file contains macro definitions for thread safety annotations
+// that allow developers to document the locking policies of multi-threaded
+// code. The annotations can also help program analysis tools to identify
+// potential thread safety issues.
+//
+// These annotations are implemented using compiler attributes. Using the macros
+// defined here instead of raw attributes allows for portability and future
+// compatibility.
+//
+// When referring to mutexes in the arguments of the attributes, you should
+// use variable names or more complex expressions (e.g. my_object->mutex_)
+// that evaluate to a concrete mutex object whenever possible. If the mutex
+// you want to refer to is not in scope, you may use a member pointer
+// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
+
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#endif
+
+// GUARDED_BY()
+//
+// Documents if a shared field or global variable needs to be protected by a
+// mutex. GUARDED_BY() allows the user to specify a particular mutex that
+// should be held when accessing the annotated variable.
+// +// Although this annotation (and PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ GUARDED_BY(mu_); +// ... +// }; +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +// PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +// ACQUIRED_AFTER() / ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER +// and ACQUIRED_BEFORE.) +// +// As with GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ACQUIRED_AFTER(m1_); +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a GUARDED_BY(mu1); +// int b GUARDED_BY(mu2); +// +// void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#define EXCLUSIVE_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) + +#define SHARED_LOCKS_REQUIRED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + +// LOCKS_EXCLUDED() +// +// Documents the locks acquired in the body of the function. These locks +// cannot be held when calling this function (as Abseil's `Mutex` locks are +// non-reentrant). +#define LOCKS_EXCLUDED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +// LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with LOCK_RETURNED. +#define LOCK_RETURNED(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +// LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). 
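+//
+// Example (illustrative, following the pattern used above):
+//
+//   class LOCKABLE Mutex {
+//     ...
+//   };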
+#define LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(lockable) + +// SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#define SCOPED_LOCKABLE \ + THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +// EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#define EXCLUSIVE_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) + +// SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#define SHARED_LOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) + +// UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#define UNLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) + +// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) + +#define SHARED_TRYLOCK_FUNCTION(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) + +// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#define ASSERT_EXCLUSIVE_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) + +#define ASSERT_SHARED_LOCK(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) + +// NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define TS_UNCHECKED(x) "" + +// TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to TS_UNCHECKED. +#define TS_FIXME(x) "" + +// Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of +// a particular function. 
However, this attribute is used to mark functions
+// that are incorrect and need to be fixed. It is used by automated tools to
+// avoid breaking the build when the analysis is updated.
+// Code owners are expected to eventually fix the routine.
+#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS

+// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
+// annotation that needs to be fixed, because it is producing a thread safety
+// warning. It disables the GUARDED_BY.
+#define GUARDED_BY_FIXME(x)
+
+// Disables warnings for a single read operation. This can be used to avoid
+// warnings when it is known that the read is not actually involved in a race,
+// but the compiler cannot confirm that.
+#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)
+
+
+namespace thread_safety_analysis {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+}  // namespace thread_safety_analysis
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h
new file mode 100644
index 0000000..659694b
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h
@@ -0,0 +1,265 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// various level interfaces. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.

+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representation of absl::Mutex and absl::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch *next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator. Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+  bool wake;             // This thread is to be woken from a Mutex.
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake. Used for an optimization
+                         // in Enqueue(). true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully
+                         // releases the lock. It may not be set to false
+                         // by a reader that decrements the count to
+                         // non-zero. protected by mutex spinlock
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face
+                               // of broken invariants. This is used within
+                               // fatal signal handlers to improve the
+                               // chances of debug logging information being
+                               // output successfully.
+  int priority;  // Priority of thread (updated every so often).
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waitlist.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State {
+    kAvailable,
+    kQueued
+  };
+  std::atomic<State> state;
+
+  // The wait parameters of the current wait. waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch. A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams* waitp;
+
+  intptr_t readers;  // Number of readers in mutex.
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld *all_locks;
+};
+
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
+struct ThreadIdentity {
+  // Must be the first member. The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.
We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    alignas(void*) char data[128];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself. The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime. The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value. 'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4. It's not
+// present in the upstream eglibc.
+// Note: Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS +#else +#define ABSL_THREAD_IDENTITY_MODE \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +#if ABSL_PER_THREAD_TLS +ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* + thread_identity_ptr; +#elif defined(ABSL_HAVE_THREAD_LOCAL) +ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; +#else +#error Thread-local storage not detected on this platform +#endif + +// thread_local variables cannot be in headers exposed by DLLs or in certain +// build configurations on Apple platforms. However, it is important for +// performance reasons in general that `CurrentThreadIdentityIfPresent` be +// inlined. In the other cases we opt to have the function not be inlined. Note +// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude +// this entire inline definition. +#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ + !defined(ABSL_CONSUME_DLL) +#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 +#endif + +#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT +inline ThreadIdentity* CurrentThreadIdentityIfPresent() { + return thread_identity_ptr; +} +#endif + +#elif ABSL_THREAD_IDENTITY_MODE != \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error Unknown ABSL_THREAD_IDENTITY_MODE +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h new file mode 100644 index 0000000..075f527 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h @@ -0,0 +1,75 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ +#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Helper functions that allow throwing exceptions consistently from anywhere. +// The main use case is for header-based libraries (eg templates), as they will +// be built by many different targets with their own compiler options. +// In particular, this will allow a safe way to throw exceptions even if the +// caller is compiled with -fno-exceptions. This is intended for implementing +// things like map<>::at(), which the standard documents as throwing an +// exception on error. +// +// Using other techniques like #if tricks could lead to ODR violations. +// +// You shouldn't use it unless you're writing code that you know will be built +// both with and without exceptions and you need to conform to an interface +// that uses exceptions. 
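A sketch of the intended use: a header-only container that wants std::out_of_range semantics via the delegates declared just below, so the same header can be built with or without -fno-exceptions; FixedBuffer is an illustrative name, not an Abseil type:

#include <cstddef>

#include "absl/base/internal/throw_delegate.h"

template <typename T, std::size_t N>
class FixedBuffer {
 public:
  T& at(std::size_t i) {
    if (i >= N) {
      // Delegates to a single out-of-line definition; with exceptions enabled
      // this throws std::out_of_range, otherwise it reports and aborts.
      absl::base_internal::ThrowStdOutOfRange("FixedBuffer::at");
    }
    return data_[i];
  }

 private:
  T data_[N];
};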
+ +[[noreturn]] void ThrowStdLogicError(const std::string& what_arg); +[[noreturn]] void ThrowStdLogicError(const char* what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); +[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); +[[noreturn]] void ThrowStdDomainError(const std::string& what_arg); +[[noreturn]] void ThrowStdDomainError(const char* what_arg); +[[noreturn]] void ThrowStdLengthError(const std::string& what_arg); +[[noreturn]] void ThrowStdLengthError(const char* what_arg); +[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); +[[noreturn]] void ThrowStdOutOfRange(const char* what_arg); +[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRuntimeError(const char* what_arg); +[[noreturn]] void ThrowStdRangeError(const std::string& what_arg); +[[noreturn]] void ThrowStdRangeError(const char* what_arg); +[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdOverflowError(const char* what_arg); +[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); +[[noreturn]] void ThrowStdUnderflowError(const char* what_arg); + +[[noreturn]] void ThrowStdBadFunctionCall(); +[[noreturn]] void ThrowStdBadAlloc(); + +// ThrowStdBadArrayNewLength() cannot be consistently supported because +// std::bad_array_new_length is missing in libstdc++ until 4.9.0. +// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html +// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html +// libcxx (as of 3.2) and msvc (as of 2015) both have it. +// [[noreturn]] void ThrowStdBadArrayNewLength(); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h b/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h new file mode 100644 index 0000000..39207d8 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/internal/tsan_mutex_interface.h @@ -0,0 +1,68 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is intended solely for spinlock.h. +// It provides ThreadSanitizer annotations for custom mutexes. +// See for meaning of these annotations. + +#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ + +#include "absl/base/config.h" + +// ABSL_INTERNAL_HAVE_TSAN_INTERFACE +// Macro intended only for internal use. +// +// Checks whether LLVM Thread Sanitizer interfaces are available. +// First made available in LLVM 5.0 (Sep 2017). +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." 
+#endif
+
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h
new file mode 100644
index 0000000..093dd9b
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h
@@ -0,0 +1,82 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only. The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
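Looking back at the TSan macros just defined, a sketch of how a hand-rolled synchronization primitive might be instrumented so ThreadSanitizer understands it; the calls compile away entirely when the interface is absent. SimpleSpinLock is illustrative, and the flags argument 0 denotes a plain, non-recursive mutex:

#include <atomic>

#include "absl/base/internal/tsan_mutex_interface.h"

class SimpleSpinLock {
 public:
  SimpleSpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
  ~SimpleSpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

  void Lock() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    while (locked_.exchange(true, std::memory_order_acquire)) {
      // Spin until the previous holder releases the lock.
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  void Unlock() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    locked_.store(false, std::memory_order_release);
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};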
+#if defined(__cplusplus)
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(const void *p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(const void *p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(const void *p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }
+
+inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::base_internal::UnalignedStore64(_p, _val))
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h
new file mode 100644
index 0000000..2cbeae3
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h
@@ -0,0 +1,133 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+// An UnscaledCycleClock yields the value and frequency of a cycle counter
+// that increments at a rate that is approximately constant.
+// This class is for internal use only, you should consider using CycleClock
+// instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.

+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/port.h"
+
+// The following platforms have an implementation of a hardware counter.
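And a small sketch of the unaligned helpers above in use: decoding a packed, host-byte-order record from an arbitrary position in a buffer, without the undefined behavior of dereferencing a misaligned pointer. Header and ParseHeader are illustrative names:

#include <cstdint>

#include "absl/base/internal/unaligned_access.h"

struct Header {
  uint32_t magic;
  uint16_t version;
};

// `p` may point anywhere inside a byte buffer; the memcpy-based loads make no
// alignment assumptions, unlike a cast to uint32_t*.
inline Header ParseHeader(const unsigned char* p) {
  Header h;
  h.magic = ABSL_INTERNAL_UNALIGNED_LOAD32(p);
  h.version = ABSL_INTERNAL_UNALIGNED_LOAD16(p + 4);
  return h;
}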
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) || defined(__riscv) ||     \
+    defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || (defined(__APPLE__)) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
+     defined(_M_IX86) || defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Returns how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a measured
+  // value.
+  static double Frequency();
+
+  // Allowed users
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return (high << 32) | low;
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/log_severity.h b/CAPI/cpp/grpc/include/absl/base/log_severity.h
new file mode 100644
index 0000000..8bdca38
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/log_severity.h
@@ -0,0 +1,172 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+#ifndef ABSL_BASE_LOG_SEVERITY_H_
+#define ABSL_BASE_LOG_SEVERITY_H_
+
+#include <array>
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::LogSeverity
+//
+// Four severity levels are defined. Logging APIs should terminate the program
+// when a message is logged at severity `kFatal`; the other levels have no
+// special semantics.
+//
+// Values other than the four defined levels (e.g. produced by `static_cast`)
+// are valid, but their semantics when passed to a function, macro, or flag
+// depend on the function, macro, or flag. The usual behavior is to normalize
+// such values to a defined severity level, however in some cases values other
+// than the defined levels are useful for comparison.
+//
+// Example:
+//
+//   // Effectively disables all logging:
+//   SetMinLogLevel(static_cast<absl::LogSeverity>(100));
+//
+// Abseil flags may be defined with type `LogSeverity`. Dependency layering
+// constraints require that the `AbslParseFlag()` overload be declared and
+// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
+// overload is defined there as well for consistency.
+//
+// absl::LogSeverity Flag String Representation
+//
+// An `absl::LogSeverity` has a string representation used for parsing
+// command-line flags based on the enumerator name (e.g. `kFatal`) or
+// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
+// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
+// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
+//
+// Additionally, the parser accepts arbitrary integers (as if the type were
+// `int`).
+//
+// Examples:
+//
+//   --my_log_level=kInfo
+//   --my_log_level=INFO
+//   --my_log_level=info
+//   --my_log_level=0
+//
+// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
+// the standard levels and a base-ten integer otherwise.
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+// LogSeverities()
+//
+// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
+// least to most severe.
+constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
+  return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
+           absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
+}
+
+// LogSeverityName()
+//
+// Returns the all-caps string representation (e.g. "INFO") of the specified
+// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  return s == absl::LogSeverity::kInfo
+             ? "INFO"
+             : s == absl::LogSeverity::kWarning
+                   ? "WARNING"
+                   : s == absl::LogSeverity::kError
+                         ? "ERROR"
+                         : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
+}
+
+// NormalizeLogSeverity()
+//
+// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
+// normalize to `kError` (**NOT** `kFatal`).
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  return s < absl::LogSeverity::kInfo
+             ?
absl::LogSeverity::kInfo
+             : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
+
+// operator<<
+//
+// The exact representation of a streamed `absl::LogSeverity` is deliberately
+// unspecified; do not rely on it.
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
+
+// Enums representing a lower bound for LogSeverity. APIs that only operate on
+// messages of at least a certain level (for example, `SetMinLogLevel()`) use
+// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
+// a level above all threshold levels and therefore no log message will
+// ever meet this threshold.
+enum class LogSeverityAtLeast : int {
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+  kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);
+
+// Enums representing an upper bound for LogSeverity. APIs that only operate on
+// messages of at most a certain level (for example, buffer all messages at or
+// below a certain level) use this type to specify that level.
+// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
+// levels and therefore will exclude all log messages.
+enum class LogSeverityAtMost : int {
+  kNegativeInfinity = -1000,
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T)                                         \
+  constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
+    return static_cast<absl::LogSeverity>(lhs) op1 rhs;             \
+  }                                                                 \
+  constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
+    return lhs op2 static_cast<absl::LogSeverity>(rhs);             \
+  }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+//   LogSeverity >= LogSeverityAtLeast
+//   LogSeverity < LogSeverityAtLeast
+//   LogSeverity <= LogSeverityAtMost
+//   LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_LOG_SEVERITY_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/macros.h b/CAPI/cpp/grpc/include/absl/base/macros.h
new file mode 100644
index 0000000..3e085a9
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/macros.h
@@ -0,0 +1,158 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
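A quick sketch of the log_severity.h helpers above in use; the expected values are noted in comments:

#include <iostream>

#include "absl/base/log_severity.h"

int main() {
  // Out-of-range values normalize toward the defined levels; note that
  // anything above kFatal becomes kError, not kFatal.
  absl::LogSeverity s = absl::NormalizeLogSeverity(7);  // kError
  std::cout << absl::LogSeverityName(s) << "\n";        // prints "ERROR"

  // Threshold comparisons work only in the documented direction.
  if (s >= absl::LogSeverityAtLeast::kWarning) {
    std::cout << "meets the logging threshold\n";
  }
}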
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems. Before making
+// any changes here, make sure that you're not breaking any platforms.

+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace macros_internal {
+// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
+// The function doesn't need a definition, as we only use its type.
+template <typename T, size_t N>
+auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
+}  // namespace macros_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//       ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                        "'c' must have the value of an unsigned char or EOF");
+//   #endif  // ABSL_BAD_CALL_IF
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+#define ABSL_BAD_CALL_IF(expr, msg) \
+  __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions. Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) \
+  (false ? static_cast<void>(expr) : static_cast<void>(0))
+#else
+#define ABSL_ASSERT(expr)                           \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { assert(false && #expr); }())  // NOLINT
+#endif
+
+// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()`
+// aborts the program in release mode (when NDEBUG is defined). The
+// implementation should abort the program as quickly as possible and ideally it
+// should not be possible to ignore the abort request.
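Stepping back to ABSL_ASSERT for a moment before the hardening definitions continue: the Divide example from the comment above, expanded into a self-contained sketch of why the comma-expression form matters in C++11 constexpr functions:

#include "absl/base/macros.h"

// A C++11 constexpr body is limited to a single return statement, so a
// statement-style assert() cannot appear; ABSL_ASSERT slots into the comma
// operator instead and still fires at runtime in debug builds.
constexpr double Divide(double a, double b) {
  return ABSL_ASSERT(b != 0), a / b;
}

double ratio = Divide(10.0, 4.0);  // 2.5; asserts in debug builds if b == 0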
+#if (ABSL_HAVE_BUILTIN(__builtin_trap) &&         \
+     ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_INTERNAL_HARDENING_ABORT() \
+  do {                                  \
+    __builtin_trap();                   \
+    __builtin_unreachable();            \
+  } while (false)
+#else
+#define ABSL_INTERNAL_HARDENING_ABORT() abort()
+#endif
+
+// ABSL_HARDENING_ASSERT()
+//
+// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement
+// runtime assertions that should be enabled in hardened builds even when
+// `NDEBUG` is defined.
+//
+// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to
+// `ABSL_ASSERT()`.
+//
+// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on
+// hardened mode.
+#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
+#define ABSL_HARDENING_ASSERT(expr)                 \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
+#else
+#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
+#endif
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY try
+#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
+#else  // ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY if (true)
+#define ABSL_INTERNAL_CATCH_ANY else if (false)
+#define ABSL_INTERNAL_RETHROW do {} while (false)
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
+// reaches one has undefined behavior, and the compiler may optimize
+// accordingly.
+#if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
+#define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_UNREACHABLE __assume(0)
+#else
+#define ABSL_INTERNAL_UNREACHABLE
+#endif
+
+#endif  // ABSL_BASE_MACROS_H_
diff --git a/CAPI/cpp/grpc/include/absl/base/optimization.h b/CAPI/cpp/grpc/include/absl/base/optimization.h
new file mode 100644
index 0000000..db5cc09
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/optimization.h
@@ -0,0 +1,252 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: optimization.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines portable macros for performance optimization.

+#ifndef ABSL_BASE_OPTIMIZATION_H_
+#define ABSL_BASE_OPTIMIZATION_H_
+
+#include <assert.h>
+
+#include "absl/base/config.h"
+
+// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
+//
+// Instructs the compiler to avoid optimizing tail-call recursion. This macro is
+// useful when you wish to preserve the existing function order within a stack
+// trace for logging, debugging, or profiling purposes.
+// +// Example: +// +// int f() { +// int result = g(); +// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +// return result; +// } +#if defined(__pnacl__) +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#elif defined(__clang__) +// Clang will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(__GNUC__) +// GCC will not tail call given inline volatile assembly. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(_MSC_VER) +#include +// The __nop() intrinsic blocks the optimisation. +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() +#else +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#endif + +// ABSL_CACHELINE_SIZE +// +// Explicitly defines the size of the L1 cache for purposes of alignment. +// Setting the cacheline size allows you to specify that certain objects be +// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations. +// (See below.) +// +// NOTE: this macro should be replaced with the following C++17 features, when +// those are generally available: +// +// * `std::hardware_constructive_interference_size` +// * `std::hardware_destructive_interference_size` +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +#if defined(__GNUC__) +// Cache line alignment +#if defined(__i386__) || defined(__x86_64__) +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__powerpc64__) +#define ABSL_CACHELINE_SIZE 128 +#elif defined(__aarch64__) +// We would need to read special register ctr_el0 to find out L1 dcache size. +// This value is a good estimate based on a real aarch64 machine. +#define ABSL_CACHELINE_SIZE 64 +#elif defined(__arm__) +// Cache line sizes for ARM: These values are not strictly correct since +// cache line sizes depend on implementations, not architectures. There +// are even implementations with cache line sizes configurable at boot +// time. +#if defined(__ARM_ARCH_5T__) +#define ABSL_CACHELINE_SIZE 32 +#elif defined(__ARM_ARCH_7A__) +#define ABSL_CACHELINE_SIZE 64 +#endif +#endif + +#ifndef ABSL_CACHELINE_SIZE +// A reasonable default guess. Note that overestimates tend to waste more +// space, while underestimates tend to waste more time. +#define ABSL_CACHELINE_SIZE 64 +#endif + +// ABSL_CACHELINE_ALIGNED +// +// Indicates that the declared object be cache aligned using +// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to +// load a set of related objects in the L1 cache for performance improvements. +// Cacheline aligning objects properly allows constructive memory sharing and +// prevents destructive (or "false") memory sharing. +// +// NOTE: callers should replace uses of this macro with `alignas()` using +// `std::hardware_constructive_interference_size` and/or +// `std::hardware_destructive_interference_size` when C++17 becomes available to +// them. +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +// +// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__` +// or `__declspec` attribute. For compilers where this is not known to work, +// the macro expands to nothing. +// +// No further guarantees are made here. The result of applying the macro +// to variables and types is always implementation-defined. 
+// +// WARNING: It is easy to use this attribute incorrectly, even to the point +// of causing bugs that are difficult to diagnose, crash, etc. It does not +// of itself guarantee that objects are aligned to a cache line. +// +// NOTE: Some compilers are picky about the locations of annotations such as +// this attribute, so prefer to put it at the beginning of your declaration. +// For example, +// +// ABSL_CACHELINE_ALIGNED static Foo* foo = ... +// +// class ABSL_CACHELINE_ALIGNED Bar { ... +// +// Recommendations: +// +// 1) Consult compiler documentation; this comment is not kept in sync as +// toolchains evolve. +// 2) Verify your use has the intended effect. This often requires inspecting +// the generated machine code. +// 3) Prefer applying this attribute to individual variables. Avoid +// applying it to types. This tends to localize the effect. +#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) +#elif defined(_MSC_VER) +#define ABSL_CACHELINE_SIZE 64 +#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) +#else +#define ABSL_CACHELINE_SIZE 64 +#define ABSL_CACHELINE_ALIGNED +#endif + +// ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE +// +// Enables the compiler to prioritize compilation using static analysis for +// likely paths within a boolean branch. +// +// Example: +// +// if (ABSL_PREDICT_TRUE(expression)) { +// return result; // Faster if more likely +// } else { +// return 0; +// } +// +// Compilers can use the information that a certain branch is not likely to be +// taken (for instance, a CHECK failure) to optimize for the common case in +// the absence of better information (ie. compiling gcc with `-fprofile-arcs`). +// +// Recommendation: Modern CPUs dynamically predict branch execution paths, +// typically with accuracy greater than 97%. As a result, annotating every +// branch in a codebase is likely counterproductive; however, annotating +// specific branches that are both hot and consistently mispredicted is likely +// to yield performance improvements. +#if ABSL_HAVE_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false)) +#define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define ABSL_PREDICT_FALSE(x) (x) +#define ABSL_PREDICT_TRUE(x) (x) +#endif + +// ABSL_ASSUME(cond) +// +// Informs the compiler that a condition is always true and that it can assume +// it to be true for optimization purposes. +// +// WARNING: If the condition is false, the program can produce undefined and +// potentially dangerous behavior. +// +// In !NDEBUG mode, the condition is checked with an assert(). +// +// NOTE: The expression must not have side effects, as it may only be evaluated +// in some compilation modes and not others. Some compilers may issue a warning +// if the compiler cannot prove the expression has no side effects. For example, +// the expression should not use a function call since the compiler cannot prove +// that a function call does not have side effects. +// +// Example: +// +// int x = ...; +// ABSL_ASSUME(x >= 0); +// // The compiler can optimize the division to a simple right shift using the +// // assumption specified above. 
+// int y = x / 16; +// +#if !defined(NDEBUG) +#define ABSL_ASSUME(cond) assert(cond) +#elif ABSL_HAVE_BUILTIN(__builtin_assume) +#define ABSL_ASSUME(cond) __builtin_assume(cond) +#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) +#define ABSL_ASSUME(cond) \ + do { \ + if (!(cond)) __builtin_unreachable(); \ + } while (0) +#elif defined(_MSC_VER) +#define ABSL_ASSUME(cond) __assume(cond) +#else +#define ABSL_ASSUME(cond) \ + do { \ + static_cast(false && (cond)); \ + } while (0) +#endif + +// ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) +// This macro forces small unique name on a static file level symbols like +// static local variables or static functions. This is intended to be used in +// macro definitions to optimize the cost of generated code. Do NOT use it on +// symbols exported from translation unit since it may cause a link time +// conflict. +// +// Example: +// +// #define MY_MACRO(txt) +// namespace { +// char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; +// const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); +// const char* VeryVeryLongFuncName() { return txt; } +// } +// + +#if defined(__GNUC__) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) +#else +#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() +#endif + +#endif // ABSL_BASE_OPTIMIZATION_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/options.h b/CAPI/cpp/grpc/include/absl/base/options.h new file mode 100644 index 0000000..13aeda3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/options.h @@ -0,0 +1,238 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: options.h +// ----------------------------------------------------------------------------- +// +// This file contains Abseil configuration options for setting specific +// implementations instead of letting Abseil determine which implementation to +// use at compile-time. Setting these options may be useful for package or build +// managers who wish to guarantee ABI stability within binary builds (which are +// otherwise difficult to enforce). +// +// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that +// maintainers of package managers who wish to package Abseil read and +// understand this file! *** +// +// Abseil contains a number of possible configuration endpoints, based on +// parameters such as the detected platform, language version, or command-line +// flags used to invoke the underlying binary. 
As is the case with all +// libraries, binaries which contain Abseil code must ensure that separate +// packages use the same compiled copy of Abseil to avoid a diamond dependency +// problem, which can occur if two packages built with different Abseil +// configuration settings are linked together. Diamond dependency problems in +// C++ may manifest as violations to the One Definition Rule (ODR) (resulting in +// linker errors), or undefined behavior (resulting in crashes). +// +// Diamond dependency problems can be avoided if all packages utilize the same +// exact version of Abseil. Building from source code with the same compilation +// parameters is the easiest way to avoid such dependency problems. However, for +// package managers who cannot control such compilation parameters, we are +// providing the file to allow you to inject ABI (Application Binary Interface) +// stability across builds. Settings options in this file will neither change +// API nor ABI, providing a stable copy of Abseil between packages. +// +// Care must be taken to keep options within these configurations isolated +// from any other dynamic settings, such as command-line flags which could alter +// these options. This file is provided specifically to help build and package +// managers provide a stable copy of Abseil within their libraries and binaries; +// other developers should not have need to alter the contents of this file. +// +// ----------------------------------------------------------------------------- +// Usage +// ----------------------------------------------------------------------------- +// +// For any particular package release, set the appropriate definitions within +// this file to whatever value makes the most sense for your package(s). Note +// that, by default, most of these options, at the moment, affect the +// implementation of types; future options may affect other implementation +// details. +// +// NOTE: the defaults within this file all assume that Abseil can select the +// proper Abseil implementation at compile-time, which will not be sufficient +// to guarantee ABI stability to package managers. + +#ifndef ABSL_BASE_OPTIONS_H_ +#define ABSL_BASE_OPTIONS_H_ + +// Include a standard library header to allow configuration based on the +// standard library in use. +#ifdef __cplusplus +#include +#endif + +// ----------------------------------------------------------------------------- +// Type Compatibility Options +// ----------------------------------------------------------------------------- +// +// ABSL_OPTION_USE_STD_ANY +// +// This option controls whether absl::any is implemented as an alias to +// std::any, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::any. This requires that all code +// using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::any is available. This option is +// useful when you are building your entire program, including all of its +// dependencies, from source. It should not be used otherwise -- for example, +// if you are distributing Abseil in a binary package manager -- since in +// mode 2, absl::any will name a different type, with a different mangled name +// and binary layout, depending on the compiler flags passed by the end user. 
+// For more info, see https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. + +#define ABSL_OPTION_USE_STD_ANY 0 + + +// ABSL_OPTION_USE_STD_OPTIONAL +// +// This option controls whether absl::optional is implemented as an alias to +// std::optional, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::optional. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::optional is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::optional will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. + +// User code should not inspect this macro. To check in the preprocessor if +// absl::optional is a typedef of std::optional, use the feature macro +// ABSL_USES_STD_OPTIONAL. + +#define ABSL_OPTION_USE_STD_OPTIONAL 0 + + +// ABSL_OPTION_USE_STD_STRING_VIEW +// +// This option controls whether absl::string_view is implemented as an alias to +// std::string_view, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::string_view. This requires that +// all code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::string_view is available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, absl::string_view will name a +// different type, with a different mangled name and binary layout, depending on +// the compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::string_view is a typedef of std::string_view, use the feature macro +// ABSL_USES_STD_STRING_VIEW. + +#define ABSL_OPTION_USE_STD_STRING_VIEW 0 + +// ABSL_OPTION_USE_STD_VARIANT +// +// This option controls whether absl::variant is implemented as an alias to +// std::variant, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::variant. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::variant is available. This option +// is useful when you are building your program from source. 
+// used otherwise -- for example, if you are distributing Abseil in a binary
+// package manager -- since in mode 2, absl::variant will name a different
+// type, with a different mangled name and binary layout, depending on the
+// compiler flags passed by the end user. For more info, see
+// https://abseil.io/about/design/dropin-types.
+//
+// User code should not inspect this macro. To check in the preprocessor if
+// absl::variant is a typedef of std::variant, use the feature macro
+// ABSL_USES_STD_VARIANT.
+
+#define ABSL_OPTION_USE_STD_VARIANT 0
+
+
+// ABSL_OPTION_USE_INLINE_NAMESPACE
+// ABSL_OPTION_INLINE_NAMESPACE_NAME
+//
+// These options control whether all entities in the absl namespace are
+// contained within an inner inline namespace. This does not affect the
+// user-visible API of Abseil, but it changes the mangled names of all symbols.
+//
+// This can be useful as a version tag if you are distributing Abseil in
+// precompiled form. This will prevent a binary library build of Abseil with
+// one inline namespace being used with headers configured with a different
+// inline namespace name. Binary packagers are reminded that Abseil does not
+// guarantee ABI stability, so any update of Abseil or configuration change in
+// such a binary package should be combined with a new, unique value for the
+// inline namespace name.
+//
+// A value of 0 means not to use inline namespaces.
+//
+// A value of 1 means to use an inline namespace with the given name inside
+// namespace absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also
+// be changed to a new, unique identifier name. In particular "head" is not
+// allowed.
+
+#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20220623
+
+// ABSL_OPTION_HARDENED
+//
+// This option enables a "hardened" build in release mode (in this context,
+// release mode is defined as a build where the `NDEBUG` macro is defined).
+//
+// A value of 0 means that "hardened" mode is not enabled.
+//
+// A value of 1 means that "hardened" mode is enabled.
+//
+// Hardened builds have additional security checks enabled when `NDEBUG` is
+// defined. Defining `NDEBUG` is normally used to turn the `assert()` macro
+// into a no-op, as well as disabling other bespoke program consistency checks.
+// By defining ABSL_OPTION_HARDENED to 1, a select set of checks remain enabled
+// in release mode. These checks guard against programming errors that may lead
+// to security vulnerabilities. In release mode, when one of these programming
+// errors is encountered, the program will immediately abort, possibly without
+// any attempt at logging.
+//
+// The checks enabled by this option are not free; they do incur runtime cost.
+//
+// The checks enabled by this option are always active when `NDEBUG` is not
+// defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The
+// checks enabled by this option may abort the program in a different way and
+// log additional information when `NDEBUG` is not defined.
+
+#define ABSL_OPTION_HARDENED 0
+
+#endif // ABSL_BASE_OPTIONS_H_
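The three-valued scheme above is easier to see with a concrete configuration. A minimal sketch of how a package maintainer might pin these options; the values and namespace name are illustrative, not a recommendation:

```cpp
// Hypothetical packager configuration (not part of this patch): alias the
// absl types to their std counterparts and tag the ABI with a unique
// inline-namespace name so mismatched binaries fail to link rather than
// misbehave at runtime.
#define ABSL_OPTION_USE_STD_ANY 1
#define ABSL_OPTION_USE_STD_OPTIONAL 1
#define ABSL_OPTION_USE_STD_STRING_VIEW 1
#define ABSL_OPTION_USE_STD_VARIANT 1
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
#define ABSL_OPTION_INLINE_NAMESPACE_NAME my_distro_lts_20220623
```

Mode 1 requires every translation unit that includes Abseil to be compiled as C++17 or later, which is exactly the property a packager can enforce across a distribution.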
diff --git a/CAPI/cpp/grpc/include/absl/base/policy_checks.h b/CAPI/cpp/grpc/include/absl/base/policy_checks.h
new file mode 100644
index 0000000..06b3243
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/base/policy_checks.h
@@ -0,0 +1,111 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: policy_checks.h
+// -----------------------------------------------------------------------------
+//
+// This header enforces a minimum set of policies at build time, such as the
+// supported compiler and library versions. Unsupported configurations are
+// reported with `#error`. This enforcement is best effort, so successfully
+// compiling this header does not guarantee a supported configuration.
+
+#ifndef ABSL_BASE_POLICY_CHECKS_H_
+#define ABSL_BASE_POLICY_CHECKS_H_
+
+// Included for the __GLIBC_PREREQ macro used below.
+#include <limits.h>
+
+// Included for the _STLPORT_VERSION macro used below.
+#if defined(__cplusplus)
+#include <cstddef>
+#endif
+
+// -----------------------------------------------------------------------------
+// Operating System Check
+// -----------------------------------------------------------------------------
+
+#if defined(__CYGWIN__)
+#error "Cygwin is not supported."
+#endif
+
+// -----------------------------------------------------------------------------
+// Toolchain Check
+// -----------------------------------------------------------------------------
+
+// We support MSVC++ 14.0 update 2 and later.
+// This minimum will go up.
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__)
+#error "This package requires Visual Studio 2015 Update 2 or higher."
+#endif
+
+// We support gcc 4.7 and later.
+// This minimum will go up.
+#if defined(__GNUC__) && !defined(__clang__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7)
+#error "This package requires gcc 4.7 or higher."
+#endif
+#endif
+
+// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later.
+// This corresponds to Apple Xcode version 4.5.
+// This minimum will go up.
+#if defined(__apple_build_version__) && __apple_build_version__ < 4211165
+#error "This package requires __apple_build_version__ of 4211165 or higher."
+#endif
+
+// -----------------------------------------------------------------------------
+// C++ Version Check
+// -----------------------------------------------------------------------------
+
+// Enforce C++11 as the minimum. Note that Visual Studio has not
+// advanced __cplusplus despite being good enough for our purposes, so
+// we exempt it from the check.
+#if defined(__cplusplus) && !defined(_MSC_VER)
+#if __cplusplus < 201103L
+#error "C++ versions less than C++11 are not supported."
+#endif
+#endif
+
+// -----------------------------------------------------------------------------
+// Standard Library Check
+// -----------------------------------------------------------------------------
+
+#if defined(_STLPORT_VERSION)
+#error "STLPort is not supported."
+#endif + +// ----------------------------------------------------------------------------- +// `char` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a +// platform where this is not the case, please provide us with the details about +// your platform so we can consider relaxing this requirement. +#if CHAR_BIT != 8 +#error "Abseil assumes CHAR_BIT == 8." +#endif + +// ----------------------------------------------------------------------------- +// `int` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes that an int is 4 bytes. If you would like to use +// Abseil on a platform where this is not the case, please provide us with the +// details about your platform so we can consider relaxing this requirement. +#if INT_MAX < 2147483647 +#error "Abseil assumes that int is at least 4 bytes. " +#endif + +#endif // ABSL_BASE_POLICY_CHECKS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/port.h b/CAPI/cpp/grpc/include/absl/base/port.h new file mode 100644 index 0000000..5bc4d6c --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/port.h @@ -0,0 +1,25 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This files is a forwarding header for other headers containing various +// portability macros and functions. + +#ifndef ABSL_BASE_PORT_H_ +#define ABSL_BASE_PORT_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/optimization.h" + +#endif // ABSL_BASE_PORT_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h new file mode 100644 index 0000000..bc8a620 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h @@ -0,0 +1,335 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. 
+// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. + +#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_THREAD_ANNOTATIONS_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +// TODO(mbonadei): Remove after the backward compatibility period. +#include "absl/base/internal/thread_annotations.h" // IWYU pragma: export + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#if ABSL_HAVE_ATTRIBUTE(guarded_by) +#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) +#else +#define ABSL_GUARDED_BY(x) +#endif + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... +// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) +#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) +#else +#define ABSL_PT_GUARDED_BY(x) +#endif + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#if ABSL_HAVE_ATTRIBUTE(acquired_after) +#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_AFTER(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(acquired_before) +#define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_BEFORE(...) +#endif + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. 
+// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + __attribute__((exclusive_locks_required(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_locks_required) +#define ABSL_SHARED_LOCKS_REQUIRED(...) \ + __attribute__((shared_locks_required(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCKS_REQUIRED(...) +#endif + +// ABSL_LOCKS_EXCLUDED() +// +// Documents the locks that cannot be held by callers of this function, as they +// might be acquired by this function (Abseil's `Mutex` locks are +// non-reentrant). +#if ABSL_HAVE_ATTRIBUTE(locks_excluded) +#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) +#else +#define ABSL_LOCKS_EXCLUDED(...) +#endif + +// ABSL_LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with ABSL_LOCK_RETURNED. +#if ABSL_HAVE_ATTRIBUTE(lock_returned) +#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x))) +#else +#define ABSL_LOCK_RETURNED(x) +#endif + +// ABSL_LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). +#if ABSL_HAVE_ATTRIBUTE(lockable) +#define ABSL_LOCKABLE __attribute__((lockable)) +#else +#define ABSL_LOCKABLE +#endif + +// ABSL_SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#if ABSL_HAVE_ATTRIBUTE(scoped_lockable) +#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable)) +#else +#define ABSL_SCOPED_LOCKABLE +#endif + +// ABSL_EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + __attribute__((exclusive_lock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) +#endif + +// ABSL_SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#if ABSL_HAVE_ATTRIBUTE(shared_lock_function) +#define ABSL_SHARED_LOCK_FUNCTION(...) \ + __attribute__((shared_lock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCK_FUNCTION(...) +#endif + +// ABSL_UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#if ABSL_HAVE_ATTRIBUTE(unlock_function) +#define ABSL_UNLOCK_FUNCTION(...) 
__attribute__((unlock_function(__VA_ARGS__))) +#else +#define ABSL_UNLOCK_FUNCTION(...) +#endif + +// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + __attribute__((exclusive_trylock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + __attribute__((shared_trylock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) +#endif + +// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ + __attribute__((assert_exclusive_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) +#define ABSL_ASSERT_SHARED_LOCK(...) \ + __attribute__((assert_shared_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_SHARED_LOCK(...) +#endif + +// ABSL_NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. +#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) +#define ABSL_NO_THREAD_SAFETY_ANALYSIS \ + __attribute__((no_thread_safety_analysis)) +#else +#define ABSL_NO_THREAD_SAFETY_ANALYSIS +#endif + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define ABSL_TS_UNCHECKED(x) "" + +// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED. +#define ABSL_TS_FIXME(x) "" + +// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body +// of a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS + +// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a +// ABSL_GUARDED_BY annotation that needs to be fixed, because it is producing +// thread safety warning. It disables the ABSL_GUARDED_BY. +#define ABSL_GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. 
+// This can be used to avoid warnings when it is known that the read is not
+// actually involved in a race, but the compiler cannot confirm that.
+#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Takes a reference to a guarded data member, and returns an unguarded
+// reference.
+// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
+template <typename T>
+inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+template <typename T>
+inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+  return v;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_
diff --git a/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h
new file mode 100644
index 0000000..960ccd0
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h
@@ -0,0 +1,140 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cleanup.h
+// -----------------------------------------------------------------------------
+//
+// `absl::Cleanup` implements the scope guard idiom, invoking the contained
+// callback's `operator()() &&` on scope exit.
+//
+// Example:
+//
+// ```
+// absl::Status CopyGoodData(const char* source_path, const char* sink_path) {
+//   FILE* source_file = fopen(source_path, "r");
+//   if (source_file == nullptr) {
+//     return absl::NotFoundError("No source file");  // No cleanups execute
+//   }
+//
+//   // C++17 style cleanup using class template argument deduction
+//   absl::Cleanup source_closer = [source_file] { fclose(source_file); };
+//
+//   FILE* sink_file = fopen(sink_path, "w");
+//   if (sink_file == nullptr) {
+//     return absl::NotFoundError("No sink file");  // First cleanup executes
+//   }
+//
+//   // C++11 style cleanup using the factory function
+//   auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); });
+//
+//   Data data;
+//   while (ReadData(source_file, &data)) {
+//     if (!data.IsGood()) {
+//       absl::Status result = absl::FailedPreconditionError("Read bad data");
+//       return result;  // Both cleanups execute
+//     }
+//     SaveData(sink_file, &data);
+//   }
+//
+//   return absl::OkStatus();  // Both cleanups execute
+// }
+// ```
+//
+// Methods:
+//
+// `std::move(cleanup).Cancel()` will prevent the callback from executing.
+//
+// `std::move(cleanup).Invoke()` will execute the callback early, before
+// destruction, and prevent the callback from executing in the destructor.
+//
+// Usage:
+//
+// `absl::Cleanup` is not an interface type. It is only intended to be used
+// within the body of a function. It is not a value type and instead models a
+// control flow construct. Check out `defer` in Golang for something similar.
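The `Cancel()` and `Invoke()` methods above read more clearly with a small example in hand. A minimal sketch against the public API documented in this header; the function and file names are illustrative:

```cpp
#include <cstdio>
#include <utility>

#include "absl/cleanup/cleanup.h"

// Hypothetical helper: shows disarming a guard and invoking one early.
void WriteLogEntry(const char* path) {
  std::FILE* f = std::fopen(path, "a");
  auto closer = absl::MakeCleanup([f] { std::fclose(f); });
  if (f == nullptr) {
    std::move(closer).Cancel();  // fopen failed: nothing to close, disarm
    return;
  }
  std::fputs("entry\n", f);
  std::move(closer).Invoke();  // close now; the destructor will not re-run it
}
```

Both calls require an rvalue (`std::move`), which makes the one-shot semantics explicit at the call site.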
+
+#ifndef ABSL_CLEANUP_CLEANUP_H_
+#define ABSL_CLEANUP_CLEANUP_H_
+
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/cleanup/internal/cleanup.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename Arg, typename Callback = void()>
+class ABSL_MUST_USE_RESULT Cleanup final {
+  static_assert(cleanup_internal::WasDeduced<Arg>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+ public:
+  Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT
+
+  Cleanup(Cleanup&& other) = default;
+
+  void Cancel() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.DestroyCallback();
+  }
+
+  void Invoke() && {
+    ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
+    storage_.InvokeCallback();
+    storage_.DestroyCallback();
+  }
+
+  ~Cleanup() {
+    if (storage_.IsCallbackEngaged()) {
+      storage_.InvokeCallback();
+      storage_.DestroyCallback();
+    }
+  }
+
+ private:
+  cleanup_internal::Storage<Callback> storage_;
+};
+
+// `absl::Cleanup c = /* callback */;`
+//
+// C++17 type deduction API for creating an instance of `absl::Cleanup`
+#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+template <typename Callback>
+Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
+#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
+
+// `auto c = absl::MakeCleanup(/* callback */);`
+//
+// C++11 type deduction API for creating an instance of `absl::Cleanup`
+template <typename... Args, typename Callback>
+absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
+  static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
+                "Explicit template parameters are not supported.");
+
+  static_assert(cleanup_internal::ReturnsVoid<Callback>(),
+                "Callbacks that return values are not supported.");
+
+  return {std::move(callback)};
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CLEANUP_CLEANUP_H_
diff --git a/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h
new file mode 100644
index 0000000..2783fcb
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h
@@ -0,0 +1,100 @@
+// Copyright 2021 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+#define ABSL_CLEANUP_INTERNAL_CLEANUP_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace cleanup_internal {
+
+struct Tag {};
+
+template <typename MightBeTag, typename... Args>
+constexpr bool WasDeduced() {
+  return (std::is_same<cleanup_internal::Tag, MightBeTag>::value) &&
+         (sizeof...(Args) == 0);
+}
+
+template <typename Callback>
+constexpr bool ReturnsVoid() {
+  return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
+}
+
+template <typename Callback>
+class Storage {
+ public:
+  Storage() = delete;
+
+  explicit Storage(Callback callback) {
+    // Placement-new into a character buffer is used for eager destruction when
+    // the cleanup is invoked or cancelled. To ensure this optimizes well, the
+    // behavior is implemented locally instead of using an absl::optional.
+    ::new (GetCallbackBuffer()) Callback(std::move(callback));
+    is_callback_engaged_ = true;
+  }
+
+  Storage(Storage&& other) {
+    ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());
+
+    ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
+    is_callback_engaged_ = true;
+
+    other.DestroyCallback();
+  }
+
+  Storage(const Storage& other) = delete;
+
+  Storage& operator=(Storage&& other) = delete;
+
+  Storage& operator=(const Storage& other) = delete;
+
+  void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }
+
+  Callback& GetCallback() {
+    return *reinterpret_cast<Callback*>(GetCallbackBuffer());
+  }
+
+  bool IsCallbackEngaged() const { return is_callback_engaged_; }
+
+  void DestroyCallback() {
+    is_callback_engaged_ = false;
+    GetCallback().~Callback();
+  }
+
+  void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
+    std::move(GetCallback())();
+  }
+
+ private:
+  bool is_callback_engaged_;
+  alignas(Callback) char callback_buffer_[sizeof(Callback)];
+};
+
+} // namespace cleanup_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
diff --git a/CAPI/cpp/grpc/include/absl/container/btree_map.h b/CAPI/cpp/grpc/include/absl/container/btree_map.h
new file mode 100644
index 0000000..286817f
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/container/btree_map.h
@@ -0,0 +1,851 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_map.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree maps: sorted associative containers mapping
+// keys to values.
+//
+// * `absl::btree_map<>`
+// * `absl::btree_multimap<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::map` and `std::multimap`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+//
+// Unlike `std::map` and `std::multimap`, which are commonly implemented using
+// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold
+// multiple values per node. Holding multiple values per node often makes
+// B-tree maps perform better than their `std::map` counterparts, because
+// multiple entries can be checked within the same cache hit.
+//
+// However, these types should not be considered drop-in replacements for
+// `std::map` and `std::multimap` as there are some API differences, which are
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
+// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. Another important difference is that key-types must be +// copy-constructible. + +#ifndef ABSL_CONTAINER_BTREE_MAP_H_ +#define ABSL_CONTAINER_BTREE_MAP_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace container_internal { + +template +struct map_params; + +} // namespace container_internal + +// absl::btree_map<> +// +// An `absl::btree_map` is an ordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::map` (in most cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_map` uses a default allocator of +// `std::allocator>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_map`. +// +template , + typename Alloc = std::allocator>> +class btree_map + : public container_internal::btree_map_container< + container_internal::btree>> { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map map1; + // + // * Initializer List constructor + // + // absl::btree_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map map7(v.begin(), v.end()); + btree_map() {} + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map`. 
+ using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::insert_or_assign() + // + // Inserts an element of the specified value into the `btree_map` provided + // that a value with the given key does not already exist, or replaces the + // corresponding mapped type with the forwarded `obj` argument if a key for + // that value already exists, returning an iterator pointing to the newly + // inserted element. Overloads are listed below. + // + // pair insert_or_assign(const key_type& k, M&& obj): + // pair insert_or_assign(key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map`. If the returned bool is true, insertion took place, and if + // it's false, assignment took place. 
+ // + // iterator insert_or_assign(const_iterator hint, + // const key_type& k, M&& obj): + // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. 
+ // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_map::merge() + // + // Extracts elements from a given `source` btree_map into this + // `btree_map`. If the destination `btree_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_map::swap(btree_map& other) + // + // Exchanges the contents of this `btree_map` with those of the `other` + // btree_map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // btree_map::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_map`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_map::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_map`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_map::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the `btree_map`. + using Base::equal_range; + + // btree_map::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_map::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_map::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. 
+ using Base::upper_bound; + + // btree_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `btree_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. Otherwise iterators are not affected and references are not + // invalidated. Overloads are listed below. + // + // T& operator[](key_type&& key): + // T& operator[](const key_type& key): + // + // Inserts a value_type object constructed in-place if the element with the + // given key does not exist. + using Base::operator[]; + + // btree_map::get_allocator() + // + // Returns the allocator function associated with this `btree_map`. + using Base::get_allocator; + + // btree_map::key_comp(); + // + // Returns the key comparator associated with this `btree_map`. + using Base::key_comp; + + // btree_map::value_comp(); + // + // Returns the value comparator associated with this `btree_map`. + using Base::value_comp; +}; + +// absl::swap(absl::btree_map<>, absl::btree_map<>) +// +// Swaps the contents of two `absl::btree_map` containers. +template +void swap(btree_map &x, btree_map &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_map<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. +template +typename btree_map::size_type erase_if( + btree_map &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); +} + +// absl::btree_multimap +// +// An `absl::btree_multimap` is an ordered associative container of +// keys and associated values designed to be a more efficient replacement for +// `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap +// allows multiple elements with equivalent keys. +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_multimap` uses a default allocator of +// `std::allocator>` to allocate (and deallocate) +// nodes, and construct and destruct values within those nodes. You may +// instead specify a custom allocator `A` (which in turn requires specifying a +// custom comparator `C`) as in `absl::btree_multimap`. 
+// +template , + typename Alloc = std::allocator>> +class btree_multimap + : public container_internal::btree_multimap_container< + container_internal::btree>> { + using Base = typename btree_multimap::btree_multimap_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multimap` supports the same overload set as `std::multimap` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multimap map1; + // + // * Initializer List constructor + // + // absl::btree_multimap map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_multimap map3(map2); + // + // * Copy assignment operator + // + // absl::btree_multimap map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multimap map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multimap map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_multimap map7(v.begin(), v.end()); + btree_multimap() {} + using Base::Base; + + // btree_multimap::begin() + // + // Returns an iterator to the beginning of the `btree_multimap`. + using Base::begin; + + // btree_multimap::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multimap`. + using Base::cbegin; + + // btree_multimap::end() + // + // Returns an iterator to the end of the `btree_multimap`. + using Base::end; + + // btree_multimap::cend() + // + // Returns a const iterator to the end of the `btree_multimap`. + using Base::cend; + + // btree_multimap::empty() + // + // Returns whether or not the `btree_multimap` is empty. + using Base::empty; + + // btree_multimap::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multimap` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multimap`. + using Base::max_size; + + // btree_multimap::size() + // + // Returns the number of elements currently within the `btree_multimap`. + using Base::size; + + // btree_multimap::clear() + // + // Removes all elements from the `btree_multimap`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multimap::erase() + // + // Erases elements within the `btree_multimap`. If an erase occurs, any + // references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multimap`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multimap::insert() + // + // Inserts an element of the specified value into the `btree_multimap`, + // returning an iterator pointing to the newly inserted element. 
+ // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multimap`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multimap`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multimap::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multimap::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multimap::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multimap` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multimap::merge() + // + // Extracts all elements from a given `source` btree_multimap into this + // `btree_multimap`. + using Base::merge; + + // btree_multimap::swap(btree_multimap& other) + // + // Exchanges the contents of this `btree_multimap` with those of the `other` + // btree_multimap, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multimap` remain valid, + // excepting for the past-the-end iterator, which is invalidated. 
+ using Base::swap; + + // btree_multimap::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multimap`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_multimap::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_multimap::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multimap`. + using Base::equal_range; + + // btree_multimap::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_multimap::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multimap::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multimap::get_allocator() + // + // Returns the allocator function associated with this `btree_multimap`. + using Base::get_allocator; + + // btree_multimap::key_comp(); + // + // Returns the key comparator associated with this `btree_multimap`. + using Base::key_comp; + + // btree_multimap::value_comp(); + // + // Returns the value comparator associated with this `btree_multimap`. + using Base::value_comp; +}; + +// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>) +// +// Swaps the contents of two `absl::btree_multimap` containers. +template +void swap(btree_multimap &x, btree_multimap &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_multimap<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. +template +typename btree_multimap::size_type erase_if( + btree_multimap &map, Pred pred) { + return container_internal::btree_access::erase_if(map, std::move(pred)); +} + +namespace container_internal { + +// A parameters structure for holding the type parameters for a btree_map. +// Compare and Alloc should be nothrow copy-constructible. +template +struct map_params : common_params> { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. 
It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + template + static auto key(const V &value) -> decltype(value.first) { + return value.first; + } + static const Key &key(const slot_type *s) { return slot_policy::key(s); } + static const Key &key(slot_type *s) { return slot_policy::key(s); } + // For use in node handle. + static auto mutable_key(slot_type *s) + -> decltype(slot_policy::mutable_key(s)) { + return slot_policy::mutable_key(s); + } + static mapped_type &value(value_type *value) { return value->second; } +}; + +} // namespace container_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_set.h b/CAPI/cpp/grpc/include/absl/container/btree_set.h new file mode 100644 index 0000000..695b09f --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/btree_set.h @@ -0,0 +1,793 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: btree_set.h +// ----------------------------------------------------------------------------- +// +// This header file defines B-tree sets: sorted associative containers of +// values. +// +// * `absl::btree_set<>` +// * `absl::btree_multiset<>` +// +// These B-tree types are similar to the corresponding types in the STL +// (`std::set` and `std::multiset`) and generally conform to the STL interfaces +// of those types. However, because they are implemented using B-trees, they +// are more efficient in most situations. +// +// Unlike `std::set` and `std::multiset`, which are commonly implemented using +// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold +// multiple values per node. Holding multiple values per node often makes +// B-tree sets perform better than their `std::set` counterparts, because +// multiple entries can be checked within the same cache hit. +// +// However, these types should not be considered drop-in replacements for +// `std::set` and `std::multiset` as there are some API differences, which are +// noted in this header file. The most consequential differences with respect to +// migrating to b-tree from the STL types are listed in the next paragraph. +// Other API differences are minor. +// +// Importantly, insertions and deletions may invalidate outstanding iterators, +// pointers, and references to elements. Such invalidations are typically only +// an issue if insertion and deletion operations are interleaved with the use of +// more than one iterator, pointer, or reference simultaneously. For this +// reason, `insert()` and `erase()` return a valid iterator at the current +// position. 
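+//
+// Example (editor's sketch, not part of the upstream header comment): the
+// iterator returned by `erase()` supports the erase-while-iterating idiom:
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // erase() returns the next valid iterator.
+//     } else {
+//       ++it;
+//     }
+//   }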
+ +#ifndef ABSL_CONTAINER_BTREE_SET_H_ +#define ABSL_CONTAINER_BTREE_SET_H_ + +#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree_container.h" // IWYU pragma: export + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace container_internal { + +template +struct set_slot_policy; + +template +struct set_params; + +} // namespace container_internal + +// absl::btree_set<> +// +// An `absl::btree_set` is an ordered associative container of unique key +// values designed to be a more efficient replacement for `std::set` (in most +// cases). +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_set` uses a default allocator of `std::allocator` to +// allocate (and deallocate) nodes, and construct and destruct values within +// those nodes. You may instead specify a custom allocator `A` (which in turn +// requires specifying a custom comparator `C`) as in +// `absl::btree_set`. +// +template , + typename Alloc = std::allocator> +class btree_set + : public container_internal::btree_set_container< + container_internal::btree>> { + using Base = typename btree_set::btree_set_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_set` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_set set1; + // + // * Initializer List constructor + // + // absl::btree_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_set set3(set2); + // + // * Copy assignment operator + // + // absl::btree_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_set set7(v.begin(), v.end()); + btree_set() {} + using Base::Base; + + // btree_set::begin() + // + // Returns an iterator to the beginning of the `btree_set`. + using Base::begin; + + // btree_set::cbegin() + // + // Returns a const iterator to the beginning of the `btree_set`. + using Base::cbegin; + + // btree_set::end() + // + // Returns an iterator to the end of the `btree_set`. + using Base::end; + + // btree_set::cend() + // + // Returns a const iterator to the end of the `btree_set`. + using Base::cend; + + // btree_set::empty() + // + // Returns whether or not the `btree_set` is empty. + using Base::empty; + + // btree_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_set` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_set`. + using Base::max_size; + + // btree_set::size() + // + // Returns the number of elements currently within the `btree_set`. + using Base::size; + + // btree_set::clear() + // + // Removes all elements from the `btree_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_set::erase() + // + // Erases elements within the `btree_set`. Overloads are listed below. 
+  //
+  // iterator erase(iterator position):
+  // iterator erase(const_iterator position):
+  //
+  // Erases the element at `position` of the `btree_set`, returning
+  // the iterator pointing to the element after the one that was erased
+  // (or end() if none exists).
+  //
+  // iterator erase(const_iterator first, const_iterator last):
+  //
+  // Erases the elements in the open interval [`first`, `last`), returning
+  // the iterator pointing to the element after the interval that was erased
+  // (or end() if none exists).
+  //
+  // template <typename K> size_type erase(const K& key):
+  //
+  // Erases the element with the matching key, if it exists, returning the
+  // number of elements erased (0 or 1).
+  using Base::erase;
+
+  // btree_set::insert()
+  //
+  // Inserts an element of the specified value into the `btree_set`,
+  // returning an iterator pointing to the newly inserted element, provided that
+  // an element with the given key does not already exist. If an insertion
+  // occurs, any references, pointers, or iterators are invalidated.
+  // Overloads are listed below.
+  //
+  // std::pair<iterator,bool> insert(const value_type& value):
+  //
+  // Inserts a value into the `btree_set`. Returns a pair consisting of an
+  // iterator to the inserted element (or to the element that prevented the
+  // insertion) and a bool denoting whether the insertion took place.
+  //
+  // std::pair<iterator,bool> insert(value_type&& value):
+  //
+  // Inserts a moveable value into the `btree_set`. Returns a pair
+  // consisting of an iterator to the inserted element (or to the element that
+  // prevented the insertion) and a bool denoting whether the insertion took
+  // place.
+  //
+  // iterator insert(const_iterator hint, const value_type& value):
+  // iterator insert(const_iterator hint, value_type&& value):
+  //
+  // Inserts a value, using the position of `hint` as a non-binding suggestion
+  // for where to begin the insertion search. Returns an iterator to the
+  // inserted element, or to the existing element that prevented the
+  // insertion.
+  //
+  // void insert(InputIterator first, InputIterator last):
+  //
+  // Inserts a range of values [`first`, `last`).
+  //
+  // void insert(std::initializer_list<value_type> ilist):
+  //
+  // Inserts the elements within the initializer list `ilist`.
+  using Base::insert;
+
+  // btree_set::emplace()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `btree_set`, provided that no element with the given key
+  // already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If an insertion occurs, any references, pointers, or iterators are
+  // invalidated.
+  using Base::emplace;
+
+  // btree_set::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `btree_set`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search, and only inserts
+  // provided that no element with the given key already exists.
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If an insertion occurs, any references, pointers, or iterators are
+  // invalidated.
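+  //
+  // Example (editor's sketch, not part of the upstream comments): a
+  // lower_bound() result can serve as the hint when the insertion position
+  // is already known:
+  //
+  //   absl::btree_set<int> s = {1, 3, 5};
+  //   auto hint = s.lower_bound(4);  // iterator to 5
+  //   s.emplace_hint(hint, 4);       // the search can start at the hint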
+  using Base::emplace_hint;
+
+  // btree_set::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  // Extracts the element at the indicated position and returns a node handle
+  // owning that extracted data.
+  //
+  // template <typename K> node_type extract(const K& k):
+  //
+  // Extracts the element with the key matching the passed key value and
+  // returns a node handle owning that extracted data. If the `btree_set`
+  // does not contain an element with a matching key, this function returns an
+  // empty node handle.
+  //
+  // NOTE: In this context, `node_type` refers to the C++17 concept of a
+  // move-only type that owns and provides access to the elements in associative
+  // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+  // It does NOT refer to the data layout of the underlying btree.
+  using Base::extract;
+
+  // btree_set::merge()
+  //
+  // Extracts elements from a given `source` btree_set into this
+  // `btree_set`. If the destination `btree_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // btree_set::swap(btree_set& other)
+  //
+  // Exchanges the contents of this `btree_set` with those of the `other`
+  // btree_set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `btree_set` remain valid, excepting
+  // for the past-the-end iterator, which is invalidated.
+  using Base::swap;
+
+  // btree_set::contains()
+  //
+  // template <typename K> bool contains(const K& key) const:
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `btree_set`, returning `true` if so or `false` otherwise.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::contains;
+
+  // btree_set::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::count;
+
+  // btree_set::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_set`.
+  using Base::equal_range;
+
+  // btree_set::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::find;
+
+  // btree_set::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element that is not less than `key` within the `btree_set`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
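+  //
+  // Example (editor's sketch, not part of the upstream comments): with a
+  // transparent comparator such as `std::less<>`, lookup does not need to
+  // construct a temporary `std::string` key:
+  //
+  //   absl::btree_set<std::string, std::less<>> s = {"apple", "pear"};
+  //   absl::string_view key = "apple";
+  //   auto it = s.lower_bound(key);  // heterogeneous lookup, no copy made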
+ using Base::lower_bound; + + // btree_set::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_set::get_allocator() + // + // Returns the allocator function associated with this `btree_set`. + using Base::get_allocator; + + // btree_set::key_comp(); + // + // Returns the key comparator associated with this `btree_set`. + using Base::key_comp; + + // btree_set::value_comp(); + // + // Returns the value comparator associated with this `btree_set`. The keys to + // sort the elements are the values themselves, therefore `value_comp` and its + // sibling member function `key_comp` are equivalent. + using Base::value_comp; +}; + +// absl::swap(absl::btree_set<>, absl::btree_set<>) +// +// Swaps the contents of two `absl::btree_set` containers. +template +void swap(btree_set &x, btree_set &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_set<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. +template +typename btree_set::size_type erase_if(btree_set &set, + Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); +} + +// absl::btree_multiset<> +// +// An `absl::btree_multiset` is an ordered associative container of +// keys and associated values designed to be a more efficient replacement +// for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree +// multiset allows equivalent elements. +// +// Keys are sorted using an (optional) comparison function, which defaults to +// `std::less`. +// +// An `absl::btree_multiset` uses a default allocator of `std::allocator` +// to allocate (and deallocate) nodes, and construct and destruct values within +// those nodes. You may instead specify a custom allocator `A` (which in turn +// requires specifying a custom comparator `C`) as in +// `absl::btree_multiset`. +// +template , + typename Alloc = std::allocator> +class btree_multiset + : public container_internal::btree_multiset_container< + container_internal::btree>> { + using Base = typename btree_multiset::btree_multiset_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multiset` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multiset set1; + // + // * Initializer List constructor + // + // absl::btree_multiset set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_multiset set3(set2); + // + // * Copy assignment operator + // + // absl::btree_multiset set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multiset set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multiset set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_multiset set7(v.begin(), v.end()); + btree_multiset() {} + using Base::Base; + + // btree_multiset::begin() + // + // Returns an iterator to the beginning of the `btree_multiset`. 
+ using Base::begin; + + // btree_multiset::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multiset`. + using Base::cbegin; + + // btree_multiset::end() + // + // Returns an iterator to the end of the `btree_multiset`. + using Base::end; + + // btree_multiset::cend() + // + // Returns a const iterator to the end of the `btree_multiset`. + using Base::cend; + + // btree_multiset::empty() + // + // Returns whether or not the `btree_multiset` is empty. + using Base::empty; + + // btree_multiset::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multiset` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multiset`. + using Base::max_size; + + // btree_multiset::size() + // + // Returns the number of elements currently within the `btree_multiset`. + using Base::size; + + // btree_multiset::clear() + // + // Removes all elements from the `btree_multiset`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multiset::erase() + // + // Erases elements within the `btree_multiset`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multiset`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multiset::insert() + // + // Inserts an element of the specified value into the `btree_multiset`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multiset`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multiset`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multiset::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`. Any references, pointers, or iterators are + // invalidated. 
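+  //
+  // Example (editor's sketch, not part of the upstream comments): unlike
+  // `btree_set`, repeated emplacements of an equivalent value all succeed:
+  //
+  //   absl::btree_multiset<int> m;
+  //   m.emplace(7);
+  //   m.emplace(7);
+  //   // m.count(7) == 2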
+  using Base::emplace;
+
+  // btree_multiset::emplace_hint()
+  //
+  // Inserts an element of the specified value by constructing it in-place
+  // within the `btree_multiset`, using the position of `hint` as a non-binding
+  // suggestion for where to begin the insertion search.
+  //
+  // Any references, pointers, or iterators are invalidated.
+  using Base::emplace_hint;
+
+  // btree_multiset::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  // node_type extract(const_iterator position):
+  //
+  // Extracts the element at the indicated position and returns a node handle
+  // owning that extracted data.
+  //
+  // template <typename K> node_type extract(const K& k):
+  //
+  // Extracts the element with the key matching the passed key value and
+  // returns a node handle owning that extracted data. If the `btree_multiset`
+  // does not contain an element with a matching key, this function returns an
+  // empty node handle.
+  //
+  // NOTE: In this context, `node_type` refers to the C++17 concept of a
+  // move-only type that owns and provides access to the elements in associative
+  // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+  // It does NOT refer to the data layout of the underlying btree.
+  using Base::extract;
+
+  // btree_multiset::merge()
+  //
+  // Extracts all elements from a given `source` btree_multiset into this
+  // `btree_multiset`.
+  using Base::merge;
+
+  // btree_multiset::swap(btree_multiset& other)
+  //
+  // Exchanges the contents of this `btree_multiset` with those of the `other`
+  // btree_multiset, avoiding invocation of any move, copy, or swap operations
+  // on individual elements.
+  //
+  // All iterators and references on the `btree_multiset` remain valid,
+  // excepting for the past-the-end iterator, which is invalidated.
+  using Base::swap;
+
+  // btree_multiset::contains()
+  //
+  // template <typename K> bool contains(const K& key) const:
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `btree_multiset`, returning `true` if so or `false` otherwise.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::contains;
+
+  // btree_multiset::count()
+  //
+  // template <typename K> size_type count(const K& key) const:
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::count;
+
+  // btree_multiset::equal_range()
+  //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+  // iterators, containing all elements with the passed key in the
+  // `btree_multiset`.
+  using Base::equal_range;
+
+  // btree_multiset::find()
+  //
+  // template <typename K> iterator find(const K& key):
+  // template <typename K> const_iterator find(const K& key) const:
+  //
+  // Finds an element with the passed `key` within the `btree_multiset`.
+  //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+  using Base::find;
+
+  // btree_multiset::lower_bound()
+  //
+  // template <typename K> iterator lower_bound(const K& key):
+  // template <typename K> const_iterator lower_bound(const K& key) const:
+  //
+  // Finds the first element that is not less than `key` within the
+  // `btree_multiset`.
+ // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multiset::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multiset::get_allocator() + // + // Returns the allocator function associated with this `btree_multiset`. + using Base::get_allocator; + + // btree_multiset::key_comp(); + // + // Returns the key comparator associated with this `btree_multiset`. + using Base::key_comp; + + // btree_multiset::value_comp(); + // + // Returns the value comparator associated with this `btree_multiset`. The + // keys to sort the elements are the values themselves, therefore `value_comp` + // and its sibling member function `key_comp` are equivalent. + using Base::value_comp; +}; + +// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) +// +// Swaps the contents of two `absl::btree_multiset` containers. +template +void swap(btree_multiset &x, btree_multiset &y) { + return x.swap(y); +} + +// absl::erase_if(absl::btree_multiset<>, Pred) +// +// Erases all elements that satisfy the predicate pred from the container. +// Returns the number of erased elements. +template +typename btree_multiset::size_type erase_if( + btree_multiset & set, Pred pred) { + return container_internal::btree_access::erase_if(set, std::move(pred)); +} + +namespace container_internal { + +// This type implements the necessary functions from the +// absl::container_internal::slot_type interface for btree_(multi)set. +template +struct set_slot_policy { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type &element(slot_type *slot) { return *slot; } + static const value_type &element(const slot_type *slot) { return *slot; } + + template + static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) { + absl::allocator_traits::construct(*alloc, slot, *other); + } + + template + static void destroy(Alloc *alloc, slot_type *slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { + construct(alloc, new_slot, old_slot); + destroy(alloc, old_slot); + } +}; + +// A parameters structure for holding the type parameters for a btree_set. +// Compare and Alloc should be nothrow copy-constructible. 
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool Multi>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+                                  set_slot_policy<Key>> {
+  using value_type = Key;
+  using slot_type = typename set_params::common_params::slot_type;
+
+  template <typename V>
+  static const V &key(const V &value) {
+    return value;
+  }
+  static const Key &key(const slot_type *slot) { return *slot; }
+  static const Key &key(slot_type *slot) { return *slot; }
+};
+
+}  // namespace container_internal
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_BTREE_SET_H_
diff --git a/CAPI/cpp/grpc/include/absl/container/btree_test.h b/CAPI/cpp/grpc/include/absl/container/btree_test.h
new file mode 100644
index 0000000..6249080
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/container/btree_test.h
@@ -0,0 +1,166 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_BTREE_TEST_H_
+#define ABSL_CONTAINER_BTREE_TEST_H_
+
+#include <algorithm>
+#include <cassert>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/strings/cord.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Like remove_const but propagates the removal through std::pair.
+template <typename T>
+struct remove_pair_const {
+  using type = typename std::remove_const<T>::type;
+};
+template <typename T, typename U>
+struct remove_pair_const<std::pair<T, U> > {
+  using type = std::pair<typename remove_pair_const<T>::type,
+                         typename remove_pair_const<U>::type>;
+};
+
+// Utility class to provide an accessor for a key given a value. The default
+// behavior is to treat the value as a pair and return the first element.
+template <typename K, typename V>
+struct KeyOfValue {
+  struct type {
+    const K& operator()(const V& p) const { return p.first; }
+  };
+};
+
+// Partial specialization of KeyOfValue class for when the key and value are
+// the same type such as in set<> and btree_set<>.
+template <typename K>
+struct KeyOfValue<K, K> {
+  struct type {
+    const K& operator()(const K& k) const { return k; }
+  };
+};
+
+inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
+  assert(val <= maxval);
+  constexpr unsigned kBase = 64;  // avoid integer division.
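+  // Editor's note (not upstream commentary): digits are emitted least
+  // significant first, moving right to left through `buf`, and `maxval`
+  // drives the loop so every value is padded to the same width. Because the
+  // digit characters `' ' + d` increase with d, the fixed-width strings
+  // compare in the same order as the numbers they encode.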
+ unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; +} + +template +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + K operator()(int i) const { + assert(i <= maxval); + return K(i); + } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + std::string operator()(int i) const { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } +}; + +template <> +struct Generator { + int maxval; + explicit Generator(int m) : maxval(m) {} + Cord operator()(int i) const { + char buf[16]; + return Cord(GenerateDigits(buf, i, maxval)); + } +}; + +template +struct Generator > { + Generator::type> tgen; + Generator::type> ugen; + + explicit Generator(int m) : tgen(m), ugen(m) {} + std::pair operator()(int i) const { + return std::make_pair(tgen(i), ugen(i)); + } +}; + +// Generate n values for our tests and benchmarks. Value range is [0, maxval]. +inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector values; + absl::flat_hash_set unique_values; + if (values.size() < n) { + for (int i = values.size(); i < n; i++) { + int value; + do { + value = static_cast(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; +} + +// Generates n values in the range [0, maxval]. +template +std::vector GenerateValuesWithSeed(int n, int maxval, int seed) { + const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator gen(maxval); + std::vector vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) { + vec.push_back(gen(nums[i])); + } + + return vec; +} + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/fixed_array.h b/CAPI/cpp/grpc/include/absl/container/fixed_array.h new file mode 100644 index 0000000..2aefae3 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/fixed_array.h @@ -0,0 +1,529 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: fixed_array.h +// ----------------------------------------------------------------------------- +// +// A `FixedArray` represents a non-resizable array of `T` where the length of +// the array can be determined at run-time. 
It is a good replacement for +// non-standard and deprecated uses of `alloca()` and variable length arrays +// within the GCC extension. (See +// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html). +// +// `FixedArray` allocates small arrays inline, keeping performance fast by +// avoiding heap operations. It also helps reduce the chances of +// accidentally overflowing your stack if large input is passed to +// your function. + +#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_ +#define ABSL_CONTAINER_FIXED_ARRAY_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/config.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/compressed_tuple.h" +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +constexpr static auto kFixedArrayUseDefault = static_cast(-1); + +// ----------------------------------------------------------------------------- +// FixedArray +// ----------------------------------------------------------------------------- +// +// A `FixedArray` provides a run-time fixed-size array, allocating a small array +// inline for efficiency. +// +// Most users should not specify an `inline_elements` argument and let +// `FixedArray` automatically determine the number of elements +// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the +// `FixedArray` implementation will use inline storage for arrays with a +// length <= `inline_elements`. +// +// Note that a `FixedArray` constructed with a `size_type` argument will +// default-initialize its values by leaving trivially constructible types +// uninitialized (e.g. int, int[4], double), and others default-constructed. +// This matches the behavior of c-style arrays and `std::array`, but not +// `std::vector`. +template > +class FixedArray { + static_assert(!std::is_array::value || std::extent::value > 0, + "Arrays with unknown bounds cannot be used with FixedArray."); + + static constexpr size_t kInlineBytesDefault = 256; + + using AllocatorTraits = std::allocator_traits; + // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, + // but this seems to be mostly pedantic. 
+ template + using EnableIfForwardIterator = absl::enable_if_t::iterator_category, + std::forward_iterator_tag>::value>; + static constexpr bool NoexceptCopyable() { + return std::is_nothrow_copy_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool NoexceptMovable() { + return std::is_nothrow_move_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool DefaultConstructorIsNonTrivial() { + return !absl::is_trivially_default_constructible::value; + } + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr size_type inline_elements = + (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) + : static_cast(N)); + + FixedArray( + const FixedArray& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable()) + : FixedArray(other.begin(), other.end(), a) {} + + FixedArray( + FixedArray&& other, + const allocator_type& a = allocator_type()) noexcept(NoexceptMovable()) + : FixedArray(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end()), a) {} + + // Creates an array object that can store `n` elements. + // Note that trivially constructible elements will be uninitialized. + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) + : storage_(n, a) { + if (DefaultConstructorIsNonTrivial()) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end()); + } + } + + // Creates an array initialized with `n` copies of `val`. + FixedArray(size_type n, const value_type& val, + const allocator_type& a = allocator_type()) + : storage_(n, a) { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), + storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, + const allocator_type& a = allocator_type()) + : FixedArray(init_list.begin(), init_list.end(), a) {} + + // Creates an array initialized with the elements from the input + // range. The array's size will always be `std::distance(first, last)`. + // REQUIRES: Iterator must be a forward_iterator or better. + template * = nullptr> + FixedArray(Iterator first, Iterator last, + const allocator_type& a = allocator_type()) + : storage_(std::distance(first, last), a) { + memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); + } + + ~FixedArray() noexcept { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } + + // Assignments are deleted because they break the invariant that the size of a + // `FixedArray` never changes. + void operator=(FixedArray&&) = delete; + void operator=(const FixedArray&) = delete; + + // FixedArray::size() + // + // Returns the length of the fixed array. 
+ size_type size() const { return storage_.size(); } + + // FixedArray::max_size() + // + // Returns the largest possible value of `std::distance(begin(), end())` for a + // `FixedArray`. This is equivalent to the most possible addressable bytes + // over the number of bytes taken by T. + constexpr size_type max_size() const { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + // FixedArray::empty() + // + // Returns whether or not the fixed array is empty. + bool empty() const { return size() == 0; } + + // FixedArray::memsize() + // + // Returns the memory size of the fixed array in bytes. + size_t memsize() const { return size() * sizeof(value_type); } + + // FixedArray::data() + // + // Returns a const T* pointer to elements of the `FixedArray`. This pointer + // can be used to access (but not modify) the contained elements. + const_pointer data() const { return AsValueType(storage_.begin()); } + + // Overload of FixedArray::data() to return a T* pointer to elements of the + // fixed array. This pointer can be used to access and modify the contained + // elements. + pointer data() { return AsValueType(storage_.begin()); } + + // FixedArray::operator[] + // + // Returns a reference the ith element of the fixed array. + // REQUIRES: 0 <= i < size() + reference operator[](size_type i) { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of FixedArray::operator()[] to return a const reference to the + // ith element of the fixed array. + // REQUIRES: 0 <= i < size() + const_reference operator[](size_type i) const { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // FixedArray::at + // + // Bounds-checked access. Returns a reference to the ith element of the fixed + // array, or throws std::out_of_range + reference at(size_type i) { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // Overload of FixedArray::at() to return a const reference to the ith element + // of the fixed array. + const_reference at(size_type i) const { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // FixedArray::front() + // + // Returns a reference to the first element of the fixed array. + reference front() { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of FixedArray::front() to return a reference to the first element + // of a fixed array of const values. + const_reference front() const { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // FixedArray::back() + // + // Returns a reference to the last element of the fixed array. + reference back() { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of FixedArray::back() to return a reference to the last element + // of a fixed array of const values. + const_reference back() const { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // FixedArray::begin() + // + // Returns an iterator to the beginning of the fixed array. + iterator begin() { return data(); } + + // Overload of FixedArray::begin() to return a const iterator to the + // beginning of the fixed array. + const_iterator begin() const { return data(); } + + // FixedArray::cbegin() + // + // Returns a const iterator to the beginning of the fixed array. 
+ const_iterator cbegin() const { return begin(); } + + // FixedArray::end() + // + // Returns an iterator to the end of the fixed array. + iterator end() { return data() + size(); } + + // Overload of FixedArray::end() to return a const iterator to the end of the + // fixed array. + const_iterator end() const { return data() + size(); } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const { return end(); } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() { return reverse_iterator(end()); } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. + const_reverse_iterator crbegin() const { return rbegin(); } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() { return reverse_iterator(begin()); } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const { return rend(); } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) { std::fill(begin(), end(), val); } + + // Relational operators. Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. + friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) { + return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) { + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), + rhs.end()); + } + + friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs < rhs); + } + + template + friend H AbslHashValue(H h, const FixedArray& v) { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), + v.size()); + } + + private: + // StorageElement + // + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. + // + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. 
+ // e.g.: + // + // FixedArray buf(1); + // sprintf(buf.data(), "foo"); + // + // error: call to int __builtin___sprintf_chk(etc...) + // will always overflow destination buffer [-Werror] + // + template , + size_t InnerN = std::extent::value> + struct StorageElementWrapper { + InnerT array[InnerN]; + }; + + using StorageElement = + absl::conditional_t::value, + StorageElementWrapper, value_type>; + + static pointer AsValueType(pointer ptr) { return ptr; } + static pointer AsValueType(StorageElementWrapper* ptr) { + return std::addressof(ptr->array); + } + + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + class NonEmptyInlinedStorage { + public: + StorageElement* data() { return reinterpret_cast(buff_); } + void AnnotateConstruct(size_type n); + void AnnotateDestruct(size_type n); + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + void* RedzoneBegin() { return &redzone_begin_; } + void* RedzoneEnd() { return &redzone_end_ + 1; } +#endif // ABSL_HAVE_ADDRESS_SANITIZER + + private: + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_); + }; + + class EmptyInlinedStorage { + public: + StorageElement* data() { return nullptr; } + void AnnotateConstruct(size_type) {} + void AnnotateDestruct(size_type) {} + }; + + using InlinedStorage = + absl::conditional_t; + + // Storage + // + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. 
+ // + class Storage : public InlinedStorage { + public: + Storage(size_type n, const allocator_type& a) + : size_alloc_(n, a), data_(InitializeData()) {} + + ~Storage() noexcept { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateDestruct(size()); + } else { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } + } + + size_type size() const { return size_alloc_.template get<0>(); } + StorageElement* begin() const { return data_; } + StorageElement* end() const { return begin() + size(); } + allocator_type& alloc() { return size_alloc_.template get<1>(); } + + private: + static bool UsingInlinedStorage(size_type n) { + return n <= inline_elements; + } + + StorageElement* InitializeData() { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); + } else { + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size())); + } + } + + // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s + container_internal::CompressedTuple size_alloc_; + StorageElement* data_; + }; + + Storage storage_; +}; + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +template +constexpr size_t FixedArray::kInlineBytesDefault; + +template +constexpr typename FixedArray::size_type + FixedArray::inline_elements; +#endif + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( + typename FixedArray::size_type n) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + if (!n) return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), + data() + n); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), + RedzoneBegin()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode +} + +template +void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( + typename FixedArray::size_type n) { +#ifdef ABSL_HAVE_ADDRESS_SANITIZER + if (!n) return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, + RedzoneEnd()); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), + data()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode +} +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FIXED_ARRAY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h new file mode 100644 index 0000000..e6bdbd9 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h @@ -0,0 +1,613 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_map.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container of +// unique keys and associated values designed to be a more efficient replacement +// for `std::unordered_map`. 
Like `unordered_map`, search, insertion, and +// deletion of map elements can be done as an `O(1)` operation. However, +// `flat_hash_map` (and other unordered associative containers known as the +// collection of Abseil "Swiss tables") contain other optimizations that result +// in both memory and computation advantages. +// +// In most cases, your default choice for a hash map should be a map of type +// `flat_hash_map`. + +#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_ +#define ABSL_CONTAINER_FLAT_HASH_MAP_H_ + +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/internal/container_memory.h" +#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +template +struct FlatHashMapPolicy; +} // namespace container_internal + +// ----------------------------------------------------------------------------- +// absl::flat_hash_map +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_map` is an unordered associative container which +// has been optimized for both speed and memory footprint in most common use +// cases. Its interface is similar to that of `std::unordered_map` with +// the following notable differences: +// +// * Requires keys that are CopyConstructible +// * Requires values that are MoveConstructible +// * Supports heterogeneous lookup, through `find()`, `operator[]()` and +// `insert()`, provided that the map is provided a compatible heterogeneous +// hashing function and equality operator. +// * Invalidates any references and pointers to elements within the table after +// `rehash()`. +// * Contains a `capacity()` member function indicating the number of element +// slots (open, deleted, and empty) within the hash map. +// * Returns `void` from the `erase(iterator)` overload. +// +// By default, `flat_hash_map` uses the `absl::Hash` hashing framework. +// All fundamental and Abseil types that support the `absl::Hash` framework have +// a compatible equality operator for comparing insertions into `flat_hash_map`. +// If your type is not yet supported by the `absl::Hash` framework, see +// absl/hash/hash.h for information on extending Abseil hashing to user-defined +// types. +// +// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded +// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may +// be randomized across dynamically loaded libraries. +// +// NOTE: A `flat_hash_map` stores its value types directly inside its +// implementation array to avoid memory indirection. Because a `flat_hash_map` +// is designed to move data when rehashed, map values will not retain pointer +// stability. If you require pointer stability, or if your values are large, +// consider using `absl::flat_hash_map>` instead. +// If your types are not moveable or you require pointer stability for keys, +// consider `absl::node_hash_map`. 
+// +// Example: +// +// // Create a flat hash map of three strings (that map to strings) +// absl::flat_hash_map ducks = +// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; +// +// // Insert a new element into the flat hash map +// ducks.insert({"d", "donald"}); +// +// // Force a rehash of the flat hash map +// ducks.rehash(0); +// +// // Find the element with the key "b" +// std::string search_key = "b"; +// auto result = ducks.find(search_key); +// if (result != ducks.end()) { +// std::cout << "Result: " << result->second << std::endl; +// } +template , + class Eq = absl::container_internal::hash_default_eq, + class Allocator = std::allocator>> +class flat_hash_map : public absl::container_internal::raw_hash_map< + absl::container_internal::FlatHashMapPolicy, + Hash, Eq, Allocator> { + using Base = typename flat_hash_map::raw_hash_map; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_map supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_map map1; + // + // * Initializer List constructor + // + // absl::flat_hash_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_map map3(map2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::flat_hash_map map7(v.begin(), v.end()); + flat_hash_map() {} + using Base::Base; + + // flat_hash_map::begin() + // + // Returns an iterator to the beginning of the `flat_hash_map`. + using Base::begin; + + // flat_hash_map::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_map`. + using Base::cbegin; + + // flat_hash_map::cend() + // + // Returns a const iterator to the end of the `flat_hash_map`. + using Base::cend; + + // flat_hash_map::end() + // + // Returns an iterator to the end of the `flat_hash_map`. + using Base::end; + + // flat_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_map`. + // + // NOTE: this member function is particular to `absl::flat_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // flat_hash_map::empty() + // + // Returns whether or not the `flat_hash_map` is empty. + using Base::empty; + + // flat_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_map` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_map`. + using Base::max_size; + + // flat_hash_map::size() + // + // Returns the number of elements currently within the `flat_hash_map`. + using Base::size; + + // flat_hash_map::clear() + // + // Removes all elements from the `flat_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. 
To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_map` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // map.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. 
If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. + // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // flat_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `flat_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // flat_hash_map::merge() + // + // Extracts elements from a given `source` flat hash map into this + // `flat_hash_map`. If the destination `flat_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_map::swap(flat_hash_map& other) + // + // Exchanges the contents of this `flat_hash_map` with those of the `other` + // flat hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_map::rehash(count) + // + // Rehashes the `flat_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_map`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_map::reserve(count) + // + // Sets the number of slots in the `flat_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // flat_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `flat_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // flat_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `flat_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `flat_hash_map`. 
+ using Base::count; + + // flat_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_map`. + using Base::equal_range; + + // flat_hash_map::find() + // + // Finds an element with the passed `key` within the `flat_hash_map`. + using Base::find; + + // flat_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `flat_hash_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs and results in a rehashing of the container, all + // iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // flat_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_map`. Note that + // because a flat hash map contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_map`. + using Base::bucket_count; + + // flat_hash_map::load_factor() + // + // Returns the current load factor of the `flat_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // flat_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_map`. Overloads are + // listed below. + // + // float flat_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_map`. + // + // void flat_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_map::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_map`. + using Base::get_allocator; + + // flat_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_map`. + using Base::hash_function; + + // flat_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; +}; + +// erase_if(flat_hash_map<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. +template +typename flat_hash_map::size_type erase_if( + flat_hash_map& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); +} + +namespace container_internal { + +template +struct FlatHashMapPolicy { + using slot_policy = container_internal::map_slot_policy; + using slot_type = typename slot_policy::slot_type; + using key_type = K; + using mapped_type = V; + using init_type = std::pair; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + slot_policy::destroy(alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + slot_policy::transfer(alloc, new_slot, old_slot); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... args) { + return absl::container_internal::DecomposePair(std::forward(f), + std::forward(args)...); + } + + static size_t space_used(const slot_type*) { return 0; } + + static std::pair& element(slot_type* slot) { return slot->value; } + + static V& value(std::pair* kv) { return kv->second; } + static const V& value(const std::pair* kv) { return kv->second; } +}; + +} // namespace container_internal + +namespace container_algorithm_internal { + +// Specialization of trait in absl/algorithm/container.h +template +struct IsUnorderedContainer< + absl::flat_hash_map> : std::true_type {}; + +} // namespace container_algorithm_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h new file mode 100644 index 0000000..4938c70 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h @@ -0,0 +1,510 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: flat_hash_set.h +// ----------------------------------------------------------------------------- +// +// An `absl::flat_hash_set` is an unordered associative container designed to +// be a more efficient replacement for `std::unordered_set`. Like +// `unordered_set`, search, insertion, and deletion of set elements can be done +// as an `O(1)` operation. However, `flat_hash_set` (and other unordered +// associative containers known as the collection of Abseil "Swiss tables") +// contain other optimizations that result in both memory and computation +// advantages. +// +// In most cases, your default choice for a hash set should be a set of type +// `flat_hash_set`. 
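A minimal usage sketch for the two containers above (illustrative only; `FlatHashSetDemo` is a hypothetical name, and `absl::erase_if` is the free function these headers declare for both the map and the set):

    #include <string>
    #include "absl/container/flat_hash_set.h"

    void FlatHashSetDemo() {
      absl::flat_hash_set<std::string> ducks = {"huey", "dewey", "louie"};
      ducks.insert("donald");
      bool found = ducks.contains("dewey");  // true
      // Remove every name longer than five characters ("donald"):
      auto erased = absl::erase_if(
          ducks, [](const std::string& s) { return s.size() > 5; });
      (void)found;
      (void)erased;
    }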
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
+#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"  // IWYU pragma: export
+#include "absl/container/internal/raw_hash_set.h"  // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <typename T>
+struct FlatHashSetPolicy;
+}  // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set<T>` is an unordered associative container which has
+// been optimized for both speed and memory footprint in most common use cases.
+// Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//   that the set is provided a compatible heterogeneous hashing function and
+//   equality operator.
+// * Invalidates any references and pointers to elements within the table after
+//   `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+//   slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
+// fundamental and Abseil types that support the `absl::Hash` framework have a
+// compatible equality operator for comparing insertions into `flat_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash` values
+// may be randomized across dynamically loaded libraries.
+//
+// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
+// array to avoid memory indirection. Because a `flat_hash_set` is designed to
+// move data when rehashed, set keys will not retain pointer stability. If you
+// require pointer stability, consider using
+// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
+// you require pointer stability, consider `absl::node_hash_set` instead.
+//
+// Example:
+//
+//   // Create a flat hash set of three strings
+//   absl::flat_hash_set<std::string> ducks =
+//     {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the flat hash set
+//   ducks.insert("donald");
+//
+//   // Force a rehash of the flat hash set
+//   ducks.rehash(0);
+//
+//   // See if "dewey" is present
+//   if (ducks.contains("dewey")) {
+//     std::cout << "We found dewey!"
<< std::endl; +// } +template , + class Eq = absl::container_internal::hash_default_eq, + class Allocator = std::allocator> +class flat_hash_set + : public absl::container_internal::raw_hash_set< + absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { + using Base = typename flat_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_set set1; + // + // * Initializer List constructor + // + // absl::flat_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::flat_hash_set set7(v.begin(), v.end()); + flat_hash_set() {} + using Base::Base; + + // flat_hash_set::begin() + // + // Returns an iterator to the beginning of the `flat_hash_set`. + using Base::begin; + + // flat_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_set`. + using Base::cbegin; + + // flat_hash_set::cend() + // + // Returns a const iterator to the end of the `flat_hash_set`. + using Base::cend; + + // flat_hash_set::end() + // + // Returns an iterator to the end of the `flat_hash_set`. + using Base::end; + + // flat_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_set`. + // + // NOTE: this member function is particular to `absl::flat_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // flat_hash_set::empty() + // + // Returns whether or not the `flat_hash_set` is empty. + using Base::empty; + + // flat_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_set`. + using Base::max_size; + + // flat_hash_set::size() + // + // Returns the number of elements currently within the `flat_hash_set`. + using Base::size; + + // flat_hash_set::clear() + // + // Removes all elements from the `flat_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_set::erase() + // + // Erases elements within the `flat_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_set`, returning + // `void`. 
+ // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_set` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // set.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_set::insert() + // + // Inserts an element of the specified value into the `flat_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `flat_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. 
+  //
+  // The element may be constructed even if there already is an element with the
+  // key in the container, in which case the newly constructed element will be
+  // destroyed immediately.
+  //
+  // If rehashing occurs due to the insertion, all iterators are invalidated.
+  using Base::emplace_hint;
+
+  // flat_hash_set::extract()
+  //
+  // Extracts the indicated element, erasing it in the process, and returns it
+  // as a C++17-compatible node handle. Overloads are listed below.
+  //
+  //   node_type extract(const_iterator position):
+  //
+  //     Extracts the element at the indicated position and returns a node handle
+  //     owning that extracted data.
+  //
+  //   node_type extract(const key_type& x):
+  //
+  //     Extracts the element with the key matching the passed key value and
+  //     returns a node handle owning that extracted data. If the `flat_hash_set`
+  //     does not contain an element with a matching key, this function returns an
+  //     empty node handle.
+  using Base::extract;
+
+  // flat_hash_set::merge()
+  //
+  // Extracts elements from a given `source` flat hash set into this
+  // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+  // element with an equivalent key, that element is not extracted.
+  using Base::merge;
+
+  // flat_hash_set::swap(flat_hash_set& other)
+  //
+  // Exchanges the contents of this `flat_hash_set` with those of the `other`
+  // flat hash set, avoiding invocation of any move, copy, or swap operations on
+  // individual elements.
+  //
+  // All iterators and references on the `flat_hash_set` remain valid, excepting
+  // for the past-the-end iterator, which is invalidated.
+  //
+  // `swap()` requires that the flat hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+  // non-member `swap()`. If the set's allocator has
+  // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+  // set to `true`, the allocators are also exchanged using an unqualified call
+  // to non-member `swap()`; otherwise, the allocators are not swapped.
+  using Base::swap;
+
+  // flat_hash_set::rehash(count)
+  //
+  // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+  // the passed value. If the new number of slots increases the load factor more
+  // than the current maximum load factor
+  // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+  // will be at least `size()` / `max_load_factor()`.
+  //
+  // To force a rehash, pass rehash(0).
+  //
+  // NOTE: unlike behavior in `std::unordered_set`, references are also
+  // invalidated upon a `rehash()`.
+  using Base::rehash;
+
+  // flat_hash_set::reserve(count)
+  //
+  // Sets the number of slots in the `flat_hash_set` to the number needed to
+  // accommodate at least `count` total elements without exceeding the current
+  // maximum load factor, and may rehash the container if needed.
+  using Base::reserve;
+
+  // flat_hash_set::contains()
+  //
+  // Determines whether an element comparing equal to the given `key` exists
+  // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+  using Base::contains;
+
+  // flat_hash_set::count(const Key& key) const
+  //
+  // Returns the number of elements comparing equal to the given `key` within
+  // the `flat_hash_set`. Note that this function will return either `1` or `0`
+  // since duplicate elements are not allowed within a `flat_hash_set`.
+ using Base::count; + + // flat_hash_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_set`. + using Base::equal_range; + + // flat_hash_set::find() + // + // Finds an element with the passed `key` within the `flat_hash_set`. + using Base::find; + + // flat_hash_set::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_set`. Note that + // because a flat hash set contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_set`. + using Base::bucket_count; + + // flat_hash_set::load_factor() + // + // Returns the current load factor of the `flat_hash_set` (the average number + // of slots occupied with a value within the hash set). + using Base::load_factor; + + // flat_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_set`. Overloads are + // listed below. + // + // float flat_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_set`. + // + // void flat_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_set::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_set`. + using Base::get_allocator; + + // flat_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_set`. + using Base::hash_function; + + // flat_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; +}; + +// erase_if(flat_hash_set<>, Pred) +// +// Erases all elements that satisfy the predicate `pred` from the container `c`. +// Returns the number of erased elements. +template +typename flat_hash_set::size_type erase_if( + flat_hash_set& c, Predicate pred) { + return container_internal::EraseIf(pred, &c); +} + +namespace container_internal { + +template +struct FlatHashSetPolicy { + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { + absl::allocator_traits::construct(*alloc, slot, + std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, + slot_type* old_slot) { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) { return *slot; } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()...)) + apply(F&& f, Args&&... 
args) { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)...); + } + + static size_t space_used(const T*) { return 0; } +}; +} // namespace container_internal + +namespace container_algorithm_internal { + +// Specialization of trait in absl/algorithm/container.h +template +struct IsUnorderedContainer> + : std::true_type {}; + +} // namespace container_algorithm_internal + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h new file mode 100644 index 0000000..bc1c4a7 --- /dev/null +++ b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h @@ -0,0 +1,866 @@ +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: inlined_vector.h +// ----------------------------------------------------------------------------- +// +// This header file contains the declaration and definition of an "inlined +// vector" which behaves in an equivalent fashion to a `std::vector`, except +// that storage for small sequences of the vector are provided inline without +// requiring any heap allocation. +// +// An `absl::InlinedVector` specifies the default capacity `N` as one of +// its template parameters. Instances where `size() <= N` hold contained +// elements in inline space. Typically `N` is very small so that sequences that +// are expected to be short do not require allocations. +// +// An `absl::InlinedVector` does not usually require a specific allocator. If +// the inlined vector grows beyond its initial constraints, it will need to +// allocate (as any normal `std::vector` would). This is usually performed with +// the default allocator (defined as `std::allocator`). Optionally, a custom +// allocator type may be specified as `A` in `absl::InlinedVector`. + +#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_ +#define ABSL_CONTAINER_INLINED_VECTOR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/algorithm.h" +#include "absl/base/internal/throw_delegate.h" +#include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/port.h" +#include "absl/container/internal/inlined_vector.h" +#include "absl/memory/memory.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +// ----------------------------------------------------------------------------- +// InlinedVector +// ----------------------------------------------------------------------------- +// +// An `absl::InlinedVector` is designed to be a drop-in replacement for +// `std::vector` for use cases where the vector's size is sufficiently small +// that it can be inlined. If the inlined vector does grow beyond its estimated +// capacity, it will trigger an initial allocation on the heap, and will behave +// as a `std::vector`. 
The API of the `absl::InlinedVector` within this file is +// designed to cover the same API footprint as covered by `std::vector`. +template > +class InlinedVector { + static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); + + using Storage = inlined_vector_internal::Storage; + + template + using AllocatorTraits = inlined_vector_internal::AllocatorTraits; + template + using MoveIterator = inlined_vector_internal::MoveIterator; + template + using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + template + using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; + template + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + template + using EnableIfAtLeastForwardIterator = absl::enable_if_t< + inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + template + using DisableIfAtLeastForwardIterator = absl::enable_if_t< + !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; + + public: + using allocator_type = A; + using value_type = inlined_vector_internal::ValueType; + using pointer = inlined_vector_internal::Pointer; + using const_pointer = inlined_vector_internal::ConstPointer; + using size_type = inlined_vector_internal::SizeType; + using difference_type = inlined_vector_internal::DifferenceType; + using reference = inlined_vector_internal::Reference; + using const_reference = inlined_vector_internal::ConstReference; + using iterator = inlined_vector_internal::Iterator; + using const_iterator = inlined_vector_internal::ConstIterator; + using reverse_iterator = inlined_vector_internal::ReverseIterator; + using const_reverse_iterator = + inlined_vector_internal::ConstReverseIterator; + + // --------------------------------------------------------------------------- + // InlinedVector Constructors and Destructor + // --------------------------------------------------------------------------- + + // Creates an empty inlined vector with a value-initialized allocator. + InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} + + // Creates an empty inlined vector with a copy of `allocator`. + explicit InlinedVector(const allocator_type& allocator) noexcept + : storage_(allocator) {} + + // Creates an inlined vector with `n` copies of `value_type()`. + explicit InlinedVector(size_type n, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(DefaultValueAdapter(), n); + } + + // Creates an inlined vector with `n` copies of `v`. + InlinedVector(size_type n, const_reference v, + const allocator_type& allocator = allocator_type()) + : storage_(allocator) { + storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); + } + + // Creates an inlined vector with copies of the elements of `list`. + InlinedVector(std::initializer_list list, + const allocator_type& allocator = allocator_type()) + : InlinedVector(list.begin(), list.end(), allocator) {} + + // Creates an inlined vector with elements constructed from the provided + // forward iterator range [`first`, `last`). + // + // NOTE: the `enable_if` prevents ambiguous interpretation between a call to + // this constructor with two integral arguments and a call to the above + // `InlinedVector(size_type, const_reference)` constructor. 
+  template <typename ForwardIterator,
+            EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
+  InlinedVector(ForwardIterator first, ForwardIterator last,
+                const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
+    storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
+                        static_cast<size_type>(std::distance(first, last)));
+  }
+
+  // Creates an inlined vector with elements constructed from the provided input
+  // iterator range [`first`, `last`).
+  template <typename InputIterator,
+            DisableIfAtLeastForwardIterator<InputIterator> = 0>
+  InlinedVector(InputIterator first, InputIterator last,
+                const allocator_type& allocator = allocator_type())
+      : storage_(allocator) {
+    std::copy(first, last, std::back_inserter(*this));
+  }
+
+  // Creates an inlined vector by copying the contents of `other` using
+  // `other`'s allocator.
+  InlinedVector(const InlinedVector& other)
+      : InlinedVector(other, other.storage_.GetAllocator()) {}
+
+  // Creates an inlined vector by copying the contents of `other` using the
+  // provided `allocator`.
+  InlinedVector(const InlinedVector& other, const allocator_type& allocator)
+      : storage_(allocator) {
+    if (other.empty()) {
+      // Empty; nothing to do.
+    } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
+      // Memcpy-able and do not need allocation.
+      storage_.MemcpyFrom(other.storage_);
+    } else {
+      storage_.InitFrom(other.storage_);
+    }
+  }
+
+  // Creates an inlined vector by moving in the contents of `other` without
+  // allocating. If `other` contains allocated memory, the newly-created inlined
+  // vector will take ownership of that memory. However, if `other` does not
+  // contain allocated memory, the newly-created inlined vector will perform
+  // element-wise move construction of the contents of `other`.
+  //
+  // NOTE: since no allocation is performed for the inlined vector in either
+  // case, the `noexcept(...)` specification depends on whether moving the
+  // underlying objects can throw. It is assumed that...
+  //  a) move constructors should only throw due to allocation failure.
+  //  b) if `value_type`'s move constructor allocates, it uses the same
+  //     allocation function as the inlined vector's allocator.
+  // Thus, the move constructor is non-throwing if the allocator is non-throwing
+  // or `value_type`'s move constructor is specified as `noexcept`.
+  InlinedVector(InlinedVector&& other) noexcept(
+      absl::allocator_is_nothrow<allocator_type>::value ||
+      std::is_nothrow_move_constructible<value_type>::value)
+      : storage_(other.storage_.GetAllocator()) {
+    if (IsMemcpyOk<A>::value) {
+      storage_.MemcpyFrom(other.storage_);
+
+      other.storage_.SetInlinedSize(0);
+    } else if (other.storage_.GetIsAllocated()) {
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
+      storage_.SetAllocatedSize(other.storage_.GetSize());
+
+      other.storage_.SetInlinedSize(0);
+    } else {
+      IteratorValueAdapter<A, MoveIterator<A>> other_values(
+          MoveIterator<A>(other.storage_.GetInlinedData()));
+
+      inlined_vector_internal::ConstructElements<A>(
+          storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
+          other.storage_.GetSize());
+
+      storage_.SetInlinedSize(other.storage_.GetSize());
+    }
+  }
+
+  // Creates an inlined vector by moving in the contents of `other` with a copy
+  // of `allocator`.
+  //
+  // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
+  // contains allocated memory, this move constructor will still allocate. Since
+  // allocation is performed, this constructor can only be `noexcept` if the
+  // specified allocator is also `noexcept`.
+  InlinedVector(
+      InlinedVector&& other,
+      const allocator_type&
+          allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+      : storage_(allocator) {
+    if (IsMemcpyOk<A>::value) {
+      storage_.MemcpyFrom(other.storage_);
+
+      other.storage_.SetInlinedSize(0);
+    } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
+               other.storage_.GetIsAllocated()) {
+      storage_.SetAllocation({other.storage_.GetAllocatedData(),
+                              other.storage_.GetAllocatedCapacity()});
+      storage_.SetAllocatedSize(other.storage_.GetSize());
+
+      other.storage_.SetInlinedSize(0);
+    } else {
+      storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
+                              MoveIterator<A>(other.data())),
+                          other.size());
+    }
+  }
+
+  ~InlinedVector() {}
+
+  // ---------------------------------------------------------------------------
+  // InlinedVector Member Accessors
+  // ---------------------------------------------------------------------------
+
+  // `InlinedVector::empty()`
+  //
+  // Returns whether the inlined vector contains no elements.
+  bool empty() const noexcept { return !size(); }
+
+  // `InlinedVector::size()`
+  //
+  // Returns the number of elements in the inlined vector.
+  size_type size() const noexcept { return storage_.GetSize(); }
+
+  // `InlinedVector::max_size()`
+  //
+  // Returns the maximum number of elements the inlined vector can hold.
+  size_type max_size() const noexcept {
+    // One bit of the size storage is used to indicate whether the inlined
+    // vector contains allocated memory. As a result, the maximum size that the
+    // inlined vector can express is half of the max for `size_type`.
+    return (std::numeric_limits<size_type>::max)() / 2;
+  }
+
+  // `InlinedVector::capacity()`
+  //
+  // Returns the number of elements that could be stored in the inlined vector
+  // without requiring a reallocation.
+  //
+  // NOTE: for most inlined vectors, `capacity()` should be equal to the
+  // template parameter `N`. For inlined vectors which exceed this capacity,
+  // they will no longer be inlined and `capacity()` will equal the capacity of
+  // the allocated memory.
+  size_type capacity() const noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
+                                     : storage_.GetInlinedCapacity();
+  }
+
+  // `InlinedVector::data()`
+  //
+  // Returns a `pointer` to the elements of the inlined vector. This pointer
+  // can be used to access and modify the contained elements.
+  //
+  // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+  pointer data() noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+                                     : storage_.GetInlinedData();
+  }
+
+  // Overload of `InlinedVector::data()` that returns a `const_pointer` to the
+  // elements of the inlined vector. This pointer can be used to access but not
+  // modify the contained elements.
+  //
+  // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+  const_pointer data() const noexcept {
+    return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+                                     : storage_.GetInlinedData();
+  }
+
+  // `InlinedVector::operator[](...)`
+  //
+  // Returns a `reference` to the `i`th element of the inlined vector.
+  reference operator[](size_type i) {
+    ABSL_HARDENING_ASSERT(i < size());
+    return data()[i];
+  }
+
+  // Overload of `InlinedVector::operator[](...)` that returns a
+  // `const_reference` to the `i`th element of the inlined vector.
+ const_reference operator[](size_type i) const { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // `InlinedVector::at(...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + reference at(size_type i) { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type)` failed bounds check"); + } + return data()[i]; + } + + // Overload of `InlinedVector::at(...)` that returns a `const_reference` to + // the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + const_reference at(size_type i) const { + if (ABSL_PREDICT_FALSE(i >= size())) { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type) const` failed bounds check"); + } + return data()[i]; + } + + // `InlinedVector::front()` + // + // Returns a `reference` to the first element of the inlined vector. + reference front() { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of `InlinedVector::front()` that returns a `const_reference` to + // the first element of the inlined vector. + const_reference front() const { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // `InlinedVector::back()` + // + // Returns a `reference` to the last element of the inlined vector. + reference back() { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of `InlinedVector::back()` that returns a `const_reference` to the + // last element of the inlined vector. + const_reference back() const { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // `InlinedVector::begin()` + // + // Returns an `iterator` to the beginning of the inlined vector. + iterator begin() noexcept { return data(); } + + // Overload of `InlinedVector::begin()` that returns a `const_iterator` to + // the beginning of the inlined vector. + const_iterator begin() const noexcept { return data(); } + + // `InlinedVector::end()` + // + // Returns an `iterator` to the end of the inlined vector. + iterator end() noexcept { return data() + size(); } + + // Overload of `InlinedVector::end()` that returns a `const_iterator` to the + // end of the inlined vector. + const_iterator end() const noexcept { return data() + size(); } + + // `InlinedVector::cbegin()` + // + // Returns a `const_iterator` to the beginning of the inlined vector. + const_iterator cbegin() const noexcept { return begin(); } + + // `InlinedVector::cend()` + // + // Returns a `const_iterator` to the end of the inlined vector. + const_iterator cend() const noexcept { return end(); } + + // `InlinedVector::rbegin()` + // + // Returns a `reverse_iterator` from the end of the inlined vector. + reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } + + // Overload of `InlinedVector::rbegin()` that returns a + // `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator rbegin() const noexcept { + return const_reverse_iterator(end()); + } + + // `InlinedVector::rend()` + // + // Returns a `reverse_iterator` from the beginning of the inlined vector. 
+ reverse_iterator rend() noexcept { return reverse_iterator(begin()); } + + // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` + // from the beginning of the inlined vector. + const_reverse_iterator rend() const noexcept { + return const_reverse_iterator(begin()); + } + + // `InlinedVector::crbegin()` + // + // Returns a `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator crbegin() const noexcept { return rbegin(); } + + // `InlinedVector::crend()` + // + // Returns a `const_reverse_iterator` from the beginning of the inlined + // vector. + const_reverse_iterator crend() const noexcept { return rend(); } + + // `InlinedVector::get_allocator()` + // + // Returns a copy of the inlined vector's allocator. + allocator_type get_allocator() const { return storage_.GetAllocator(); } + + // --------------------------------------------------------------------------- + // InlinedVector Member Mutators + // --------------------------------------------------------------------------- + + // `InlinedVector::operator=(...)` + // + // Replaces the elements of the inlined vector with copies of the elements of + // `list`. + InlinedVector& operator=(std::initializer_list list) { + assign(list.begin(), list.end()); + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that replaces the elements of + // the inlined vector with copies of the elements of `other`. + InlinedVector& operator=(const InlinedVector& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + const_pointer other_data = other.data(); + assign(other_data, other_data + other.size()); + } + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that moves the elements of + // `other` into the inlined vector. + // + // NOTE: as a result of calling this overload, `other` is left in a valid but + // unspecified state. + InlinedVector& operator=(InlinedVector&& other) { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { + if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size()); + storage_.DeallocateIfAllocated(); + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } else { + storage_.Assign(IteratorValueAdapter>( + MoveIterator(other.storage_.GetInlinedData())), + other.size()); + } + } + + return *this; + } + + // `InlinedVector::assign(...)` + // + // Replaces the contents of the inlined vector with `n` copies of `v`. + void assign(size_type n, const_reference v) { + storage_.Assign(CopyValueAdapter(std::addressof(v)), n); + } + + // Overload of `InlinedVector::assign(...)` that replaces the contents of the + // inlined vector with copies of the elements of `list`. + void assign(std::initializer_list list) { + assign(list.begin(), list.end()); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "forward" category or better. + template = 0> + void assign(ForwardIterator first, ForwardIterator last) { + storage_.Assign(IteratorValueAdapter(first), + static_cast(std::distance(first, last))); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "input" category. 
+ template = 0> + void assign(InputIterator first, InputIterator last) { + size_type i = 0; + for (; i < size() && first != last; ++i, static_cast(++first)) { + data()[i] = *first; + } + + erase(data() + i, data() + size()); + std::copy(first, last, std::back_inserter(*this)); + } + + // `InlinedVector::resize(...)` + // + // Resizes the inlined vector to contain `n` elements. + // + // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are value-initialized. + void resize(size_type n) { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(DefaultValueAdapter(), n); + } + + // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to + // contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are copied-constructed from `v`. + void resize(size_type n, const_reference v) { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(CopyValueAdapter(std::addressof(v)), n); + } + + // `InlinedVector::insert(...)` + // + // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly + // inserted element. + iterator insert(const_iterator pos, const_reference v) { + return emplace(pos, v); + } + + // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using + // move semantics, returning an `iterator` to the newly inserted element. + iterator insert(const_iterator pos, value_type&& v) { + return emplace(pos, std::move(v)); + } + + // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies + // of `v` starting at `pos`, returning an `iterator` pointing to the first of + // the newly inserted elements. + iterator insert(const_iterator pos, size_type n, const_reference v) { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(n != 0)) { + value_type dealias = v; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), + n); +#if !defined(__clang__) && defined(__GNUC__) +#pragma GCC diagnostic pop +#endif + } else { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts copies of the + // elements of `list` starting at `pos`, returning an `iterator` pointing to + // the first of the newly inserted elements. + iterator insert(const_iterator pos, std::initializer_list list) { + return insert(pos, list.begin(), list.end()); + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "forward" category or better. 
+ template = 0> + iterator insert(const_iterator pos, ForwardIterator first, + ForwardIterator last) { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(first != last)) { + return storage_.Insert(pos, + IteratorValueAdapter(first), + std::distance(first, last)); + } else { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "input" category. + template = 0> + iterator insert(const_iterator pos, InputIterator first, InputIterator last) { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + size_type index = std::distance(cbegin(), pos); + for (size_type i = index; first != last; ++i, static_cast(++first)) { + insert(data() + i, *first); + } + + return iterator(data() + index); + } + + // `InlinedVector::emplace(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `pos`, returning an `iterator` pointing to the newly emplaced element. + template + iterator emplace(const_iterator pos, Args&&... args) { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + value_type dealias(std::forward(args)...); + return storage_.Insert(pos, + IteratorValueAdapter>( + MoveIterator(std::addressof(dealias))), + 1); + } + + // `InlinedVector::emplace_back(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `end()`, returning a `reference` to the newly emplaced element. + template + reference emplace_back(Args&&... args) { + return storage_.EmplaceBack(std::forward(args)...); + } + + // `InlinedVector::push_back(...)` + // + // Inserts a copy of `v` in the inlined vector at `end()`. + void push_back(const_reference v) { static_cast(emplace_back(v)); } + + // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` + // using move semantics. + void push_back(value_type&& v) { + static_cast(emplace_back(std::move(v))); + } + + // `InlinedVector::pop_back()` + // + // Destroys the element at `back()`, reducing the size by `1`. + void pop_back() noexcept { + ABSL_HARDENING_ASSERT(!empty()); + + AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); + storage_.SubtractSize(1); + } + + // `InlinedVector::erase(...)` + // + // Erases the element at `pos`, returning an `iterator` pointing to where the + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator pos) { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos < end()); + + return storage_.Erase(pos, pos + 1); + } + + // Overload of `InlinedVector::erase(...)` that erases every element in the + // range [`from`, `to`), returning an `iterator` pointing to where the first + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator from, const_iterator to) { + ABSL_HARDENING_ASSERT(from >= begin()); + ABSL_HARDENING_ASSERT(from <= to); + ABSL_HARDENING_ASSERT(to <= end()); + + if (ABSL_PREDICT_TRUE(from != to)) { + return storage_.Erase(from, to); + } else { + return const_cast(from); + } + } + + // `InlinedVector::clear()` + // + // Destroys all elements in the inlined vector, setting the size to `0` and + // deallocating any held memory. 
+  void clear() noexcept {
+    inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+        storage_.GetAllocator(), data(), size());
+    storage_.DeallocateIfAllocated();
+
+    storage_.SetInlinedSize(0);
+  }
+
+  // `InlinedVector::reserve(...)`
+  //
+  // Ensures that there is enough room for at least `n` elements.
+  void reserve(size_type n) { storage_.Reserve(n); }
+
+  // `InlinedVector::shrink_to_fit()`
+  //
+  // Attempts to reduce memory usage by moving elements to (or keeping
+  // elements in) the smallest available buffer sufficient for containing
+  // `size()` elements.
+  //
+  // If `size()` is sufficiently small, the elements will be moved into (or
+  // kept in) the inlined space.
+  void shrink_to_fit() {
+    if (storage_.GetIsAllocated()) {
+      storage_.ShrinkToFit();
+    }
+  }
+
+  // `InlinedVector::swap(...)`
+  //
+  // Swaps the contents of the inlined vector with `other`.
+  void swap(InlinedVector& other) {
+    if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+      storage_.Swap(std::addressof(other.storage_));
+    }
+  }
+
+ private:
+  template <typename H, typename TheT, size_t TheN, typename TheA>
+  friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
+
+  Storage storage_;
+};
+
+// -----------------------------------------------------------------------------
+// InlinedVector Non-Member Functions
+// -----------------------------------------------------------------------------
+
+// `swap(...)`
+//
+// Swaps the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+void swap(absl::InlinedVector<T, N, A>& a,
+          absl::InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+  a.swap(b);
+}
+
+// `operator==(...)`
+//
+// Tests for value-equality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator==(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  auto a_data = a.data();
+  auto b_data = b.data();
+  return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+}
+
+// `operator!=(...)`
+//
+// Tests for value-inequality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator!=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(a == b);
+}
+
+// `operator<(...)`
+//
+// Tests whether the value of an inlined vector is less than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<(const absl::InlinedVector<T, N, A>& a,
+               const absl::InlinedVector<T, N, A>& b) {
+  auto a_data = a.data();
+  auto b_data = b.data();
+  return std::lexicographical_compare(a_data, a_data + a.size(), b_data,
+                                      b_data + b.size());
+}
+
+// `operator>(...)`
+//
+// Tests whether the value of an inlined vector is greater than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>(const absl::InlinedVector<T, N, A>& a,
+               const absl::InlinedVector<T, N, A>& b) {
+  return b < a;
+}
+
+// `operator<=(...)`
+//
+// Tests whether the value of an inlined vector is less than or equal to the
+// value of another inlined vector using a lexicographical comparison
+// algorithm.
+template <typename T, size_t N, typename A>
+bool operator<=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(b < a);
+}
+
+// `operator>=(...)`
+//
+// Tests whether the value of an inlined vector is greater than or equal to
+// the value of another inlined vector using a lexicographical comparison
+// algorithm.
+template <typename T, size_t N, typename A>
+bool operator>=(const absl::InlinedVector<T, N, A>& a,
+                const absl::InlinedVector<T, N, A>& b) {
+  return !(a < b);
+}
+
+// `AbslHashValue(...)`
+//
+// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to
+// call this directly.
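+//
+// A small usage sketch (illustrative only; hashing normally goes through
+// `absl::Hash` rather than calling this overload directly):
+//
+//   absl::InlinedVector<int, 8> v = {1, 2, 3};
+//   const size_t h = absl::Hash<absl::InlinedVector<int, 8>>{}(v);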
+template <typename H, typename T, size_t N, typename A>
+H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
+  auto size = a.size();
+  return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree.h b/CAPI/cpp/grpc/include/absl/container/internal/btree.h
new file mode 100644
index 0000000..01f4e74
--- /dev/null
+++ b/CAPI/cpp/grpc/include/absl/container/internal/btree.h
@@ -0,0 +1,2854 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks below).
+// The red-black tree implementation of STL set/map has an overhead of 3
+// pointers (left, right and parent) plus the node color information for each
+// stored value. So a set<int32_t> consumes 40 bytes for each value stored in
+// 64-bit mode. This btree implementation stores multiple values on fixed
+// size nodes (usually 256 bytes) and doesn't store child pointers for leaf
+// nodes. The result is that a btree_set<int32_t> may use much less memory per
+// stored value. For the random insertion benchmark in btree_bench.cc, a
+// btree_set<int32_t> with node-size of 256 uses 5.1 bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore,
+// this container does not provide pointer stability. This is notably different
+// from STL set/map which takes care to not invalidate iterators on
+// insert/erase except, of course, for iterators pointing to the value being
+// erased. A partial workaround when erasing is available: erase() returns an
+// iterator pointing to the item just after the one that was erased (or end()
+// if none exists).
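+//
+// A sketch of the erase() workaround described above, shown with the public
+// absl::btree_set wrapper that is built on this implementation (values are
+// illustrative only):
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // erase() returns the next valid iterator
+//     } else {
+//       ++it;
+//     }
+//   }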
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+    defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the nodes and
+// iterators. When iterators are used, we validate that the container has not
+// been mutated since the iterator was constructed.
+#define ABSL_BTREE_ENABLE_GENERATIONS
+#endif
+
+template <typename Compare, typename T, typename U>
+using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
+
+// A helper class that indicates if the Compare parameter is a key-compare-to
+// comparator.
+template <typename Compare, typename Key>
+using btree_is_key_compare_to =
+    std::is_convertible<compare_result_t<Compare, Key, Key>,
+                        absl::weak_ordering>;
+
+struct StringBtreeDefaultLess {
+  using is_transparent = void;
+
+  StringBtreeDefaultLess() = default;
+
+  // Compatibility constructor.
+  StringBtreeDefaultLess(std::less<std::string>) {}        // NOLINT
+  StringBtreeDefaultLess(std::less<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::less for use in key_comp()/value_comp().
+  explicit operator std::less<std::string>() const { return {}; }
+  explicit operator std::less<absl::string_view>() const { return {}; }
+  explicit operator std::less<absl::Cord>() const { return {}; }
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+  }
+  StringBtreeDefaultLess(std::less<absl::Cord>) {}  // NOLINT
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+  }
+};
+
+struct StringBtreeDefaultGreater {
+  using is_transparent = void;
+
+  StringBtreeDefaultGreater() = default;
+
+  StringBtreeDefaultGreater(std::greater<std::string>) {}        // NOLINT
+  StringBtreeDefaultGreater(std::greater<absl::string_view>) {}  // NOLINT
+
+  // Allow converting to std::greater for use in key_comp()/value_comp().
+  explicit operator std::greater<std::string>() const { return {}; }
+  explicit operator std::greater<absl::string_view>() const { return {}; }
+  explicit operator std::greater<absl::Cord>() const { return {}; }
+
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+  }
+  StringBtreeDefaultGreater(std::greater<absl::Cord>) {}  // NOLINT
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+  absl::weak_ordering operator()(const absl::Cord &lhs,
+                                 absl::string_view rhs) const {
+    return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+  }
+  absl::weak_ordering operator()(absl::string_view lhs,
+                                 const absl::Cord &rhs) const {
+    return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+  }
+};
+
+// See below comments for checked_compare.
+template <typename Compare, bool Compare_is_empty_class =
+                                std::is_class<Compare>::value &&
+                                std::is_empty<Compare>::value>
+struct checked_compare_base : Compare {
+  using Compare::Compare;
+  explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
+  const Compare &comp() const { return *this; }
+};
+template <typename Compare>
+struct checked_compare_base<Compare, false> {
+  explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
+  const Compare &comp() const { return compare; }
+  Compare compare;
+};
+
+// A mechanism for opting out of checked_compare for use only in btree_test.cc.
+struct BtreeTestOnlyCheckedCompareOptOutBase {};
+
+// A helper class to adapt the specified comparator for two use cases:
+// (1) When using common Abseil string types with common comparison functors,
+// convert a boolean comparison into a three-way comparison that returns an
+// `absl::weak_ordering`. This helper class is specialized for
+// less<std::string>, greater<std::string>, less<string_view>,
+// greater<string_view>, less<absl::Cord>, and greater<absl::Cord>.
+// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see
+// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever
+// a comparison is made, we will make assertions to verify that the comparator
+// is valid.
+template <typename Compare, typename Key>
+struct key_compare_adapter {
+  // Inherit from checked_compare_base to support function pointers and also
+  // keep empty-base-optimization (EBO) support for classes.
+  // Note: we can't use CompressedTuple here because that would interfere
+  // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a
+  // CompressedTuple and nested `CompressedTuple`s don't support EBO.
+  // TODO(b/214288561): use CompressedTuple instead once it supports EBO for
+  // nested `CompressedTuple`s.
+  struct checked_compare : checked_compare_base<Compare> {
+   private:
+    using Base = typename checked_compare::checked_compare_base;
+    using Base::comp;
+
+    // If possible, returns whether `k` is equivalent to itself. We can only do
+    // this for `Key`s because we can't be sure that it's safe to call
+    // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a
+    // compilation failure inside the implementation of the comparison
+    // operator.
+    bool is_self_equivalent(const Key &k) const {
+      // Note: this works for both boolean and three-way comparators.
+      return comp()(k, k) == 0;
+    }
+    // If we can't compare the argument with itself, returns true
+    // unconditionally.
+    template <typename T>
+    bool is_self_equivalent(const T &) const {
+      return true;
+    }
+
+   public:
+    using Base::Base;
+    checked_compare(Compare comp) : Base(std::move(comp)) {}  // NOLINT
+
+    // Allow converting to Compare for use in key_comp()/value_comp().
+    explicit operator Compare() const { return comp(); }
+
+    template <typename T, typename U,
+              absl::enable_if_t<
+                  std::is_same<bool, compare_result_t<Compare, T, U>>::value,
+                  int> = 0>
+    bool operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const bool lhs_comp_rhs = comp()(lhs, rhs);
+      assert(!lhs_comp_rhs || !comp()(rhs, lhs));
+      return lhs_comp_rhs;
+    }
+
+    template <
+        typename T, typename U,
+        absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
+                                              absl::weak_ordering>::value,
+                          int> = 0>
+    absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
+      // NOTE: if any of these assertions fail, then the comparator does not
+      // establish a strict-weak-ordering (see
+      // https://en.cppreference.com/w/cpp/named_req/Compare).
+      assert(is_self_equivalent(lhs));
+      assert(is_self_equivalent(rhs));
+      const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
+#ifndef NDEBUG
+      const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
+      if (lhs_comp_rhs > 0) {
+        assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
+      } else if (lhs_comp_rhs == 0) {
+        assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
+      } else {
+        assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+      }
+#endif
+      return lhs_comp_rhs;
+    }
+  };
+  using type = absl::conditional_t<
+      std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
+      Compare, checked_compare>;
+};
+
+template <>
+struct key_compare_adapter<std::less<std::string>, std::string> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<std::string>, std::string> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<absl::string_view>,
+                           absl::string_view> {
+  using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
+  using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
+  using type = StringBtreeDefaultGreater;
+};
+
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+//   class K {
+//    public:
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     ...
+//    private:
+//     friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+//     int k_;
+//   };
+//
+//   btree_map<K, V> m;  // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename Void = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename Void = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+    T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+    : T::absl_btree_prefer_linear_node_search {};
+
+template <typename Compare, typename Key>
+constexpr bool compare_has_valid_result_type() {
+  using compare_result_type = compare_result_t<Compare, Key, Key>;
+  return std::is_same<compare_result_type, bool>::value ||
+         std::is_convertible<compare_result_type, absl::weak_ordering>::value;
+}
+
+template <typename original_key_compare, typename value_type>
+class map_value_compare {
+  template <typename Params>
+  friend class btree;
+
+  // Note: this `protected` is part of the API of std::map::value_compare. See
+  // https://en.cppreference.com/w/cpp/container/map/value_compare.
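+  //
+  // A sketch of the resulting behavior, via the public absl::btree_map
+  // wrapper (illustrative values only):
+  //
+  //   absl::btree_map<std::string, int> m;
+  //   auto cmp = m.value_comp();
+  //   // Compares two value_type pairs by key only; mapped values are ignored.
+  //   bool lt = cmp({"a", 9}, {"b", 0});  // true, because "a" < "b"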
+ protected:
+  explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+  original_key_compare comp;  // NOLINT
+
+ public:
+  auto operator()(const value_type &lhs, const value_type &rhs) const
+      -> decltype(comp(lhs.first, rhs.first)) {
+    return comp(lhs.first, rhs.first);
+  }
+};
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+          bool IsMulti, bool IsMap, typename SlotPolicy>
+struct common_params {
+  using original_key_compare = Compare;
+
+  // If Compare is a common comparator for a string-like type, then we adapt it
+  // to use heterogeneous lookup and to be a key-compare-to comparator.
+  // We also adapt the comparator to diagnose invalid comparators in debug
+  // mode. We disable this when `Compare` is invalid in a way that will cause
+  // adaptation to fail (having invalid return type) so that we can give a
+  // better compilation failure in static_assert_validation. If we don't do
+  // this, then there will be cascading compilation failures that are confusing
+  // for users.
+  using key_compare =
+      absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
+                          Compare,
+                          typename key_compare_adapter<Compare, Key>::type>;
+
+  static constexpr bool kIsKeyCompareStringAdapted =
+      std::is_same<key_compare, StringBtreeDefaultLess>::value ||
+      std::is_same<key_compare, StringBtreeDefaultGreater>::value;
+  static constexpr bool kIsKeyCompareTransparent =
+      IsTransparent<original_key_compare>::value ||
+      kIsKeyCompareStringAdapted;
+  static constexpr bool kEnableGenerations =
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+      true;
+#else
+      false;
+#endif
+
+  // A type which indicates if we have a key-compare-to functor or a plain old
+  // key-compare functor.
+  using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+  using allocator_type = Alloc;
+  using key_type = Key;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using slot_policy = SlotPolicy;
+  using slot_type = typename slot_policy::slot_type;
+  using value_type = typename slot_policy::value_type;
+  using init_type = typename slot_policy::mutable_value_type;
+  using pointer = value_type *;
+  using const_pointer = const value_type *;
+  using reference = value_type &;
+  using const_reference = const value_type &;
+
+  using value_compare =
+      absl::conditional_t<IsMap,
+                          map_value_compare<original_key_compare, value_type>,
+                          original_key_compare>;
+  using is_map_container = std::integral_constant<bool, IsMap>;
+
+  // For the given lookup key type, returns whether we can have multiple
+  // equivalent keys in the btree. If this is a multi-container, then we can.
+  // Otherwise, we can have multiple equivalent keys only if all of the
+  // following conditions are met:
+  // - The comparator is transparent.
+  // - The lookup key type is not the same as key_type.
+  // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+  //   that we know has the same equivalence classes for all lookup types.
+  template <typename LookupKey>
+  constexpr static bool can_have_multiple_equivalent_keys() {
+    return IsMulti || (IsTransparent<key_compare>::value &&
+                       !std::is_same<LookupKey, key_type>::value &&
+                       !kIsKeyCompareStringAdapted);
+  }
+
+  enum {
+    kTargetNodeSize = TargetNodeSize,
+
+    // Upper bound for the available space for slots. This is largest for leaf
+    // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+    // 3 field_types and an enum).
+    kNodeSlotSpace =
+        TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+  };
+
+  // This is an integral type large enough to hold as many slots as will fit a
+  // node of TargetNodeSize bytes.
+  using node_count_type =
+      absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
+                           (std::numeric_limits<uint8_t>::max)()),
+                          uint16_t, uint8_t>;  // NOLINT
+
+  // The following methods are necessary for passing this struct as
+  // PolicyTraits for node_handle and/or are used within btree.
+  static value_type &element(slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  static const value_type &element(const slot_type *slot) {
+    return slot_policy::element(slot);
+  }
+  template <class... Args>
+  static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+    slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+  }
+  static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+    slot_policy::construct(alloc, slot, other);
+  }
+  static void destroy(Alloc *alloc, slot_type *slot) {
+    slot_policy::destroy(alloc, slot);
+  }
+  static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+    slot_policy::transfer(alloc, new_slot, old_slot);
+  }
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare. Note: there is no need to make a version of this adapter
+// specialized for key-compare-to functors because the upper-bound (the first
+// value greater than the input) is never an exact match.
+template <typename Compare>
+struct upper_bound_adapter {
+  explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+  template <typename K1, typename K2>
+  bool operator()(const K1 &a, const K2 &b) const {
+    // Returns true when a is not greater than b.
+    return !compare_internal::compare_result_as_less_than(comp(b, a));
+  }
+
+ private:
+  Compare comp;
+};
+
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+  V value;
+  MatchKind match;
+
+  static constexpr bool HasMatch() { return true; }
+  bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
+  SearchResult() {}
+  explicit SearchResult(V v) : value(v) {}
+  SearchResult(V v, MatchKind /*match*/) : value(v) {}
+
+  V value;
+
+  static constexpr bool HasMatch() { return false; }
+  static constexpr bool IsEq() { return false; }
+};
+
+// A node in the btree holding values. The same node type is used for both
+// internal and leaf nodes in the btree, though the nodes are allocated in
+// such a way that the children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using field_type = typename Params::node_count_type;
+  using allocator_type = typename Params::allocator_type;
+  using slot_type = typename Params::slot_type;
+  using original_key_compare = typename Params::original_key_compare;
+
+ public:
+  using params_type = Params;
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using key_compare = typename Params::key_compare;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+
+  // Btree decides whether to use linear node search as follows:
+  //   - If the comparator expresses a preference, use that.
+  //   - If the key expresses a preference, use that.
+  //   - If the key is arithmetic and the comparator is std::less or
+  //     std::greater, choose linear.
+  //   - Otherwise, choose binary.
+  // TODO(ezb): Might make sense to add condition(s) based on node-size.
+  using use_linear_search = std::integral_constant<
+      bool, has_linear_node_search_preference<original_key_compare>::value
+                ? prefers_linear_node_search<original_key_compare>::value
+            : has_linear_node_search_preference<key_type>::value
+                ? prefers_linear_node_search<key_type>::value
+                : std::is_arithmetic<key_type>::value &&
+                      (std::is_same<std::less<key_type>,
+                                    original_key_compare>::value ||
+                       std::is_same<std::greater<key_type>,
+                                    original_key_compare>::value)>;
+
+  // This class is organized by absl::container_internal::Layout as if it had
+  // the following structure:
+  //   // A pointer to the node's parent.
+  //   btree_node *parent;
+  //
+  //   // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a
+  //   // generation integer in order to check that when iterators are
+  //   // used, they haven't been invalidated already. Only the generation on
+  //   // the root is used, but we have one on each node because whether a node
+  //   // is root or not can change.
+  //   uint32_t generation;
+  //
+  //   // The position of the node in the node's parent.
+  //   field_type position;
+  //   // The index of the first populated value in `values`.
+  //   // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+  //   // logic to allow for floating storage within nodes.
+  //   field_type start;
+  //   // The index after the last populated value in `values`. Currently, this
+  //   // is the same as the count of values.
+  //   field_type finish;
+  //   // The maximum number of values the node can hold. This is an integer in
+  //   // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
+  //   // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+  //   // nodes (even though there are still kNodeSlots values in the node).
+  //   // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+  //   // to free extra bits for is_root, etc.
+  //   field_type max_count;
+  //
+  //   // The array of values. The capacity is `max_count` for leaf nodes and
+  //   // kNodeSlots for internal nodes. Only the values in
+  //   // [start, finish) have been initialized and are valid.
+  //   slot_type values[max_count];
+  //
+  //   // The array of child pointers. The keys in children[i] are all less
+  //   // than key(i). The keys in children[i + 1] are all greater than key(i).
+  //   // There are 0 children for leaf nodes and kNodeSlots + 1 children for
+  //   // internal nodes.
+  //   btree_node *children[kNodeSlots + 1];
+  //
+  // This class is only constructed by EmptyNodeType. Normally, pointers to the
+  // layout above are allocated, cast to btree_node*, and de-allocated within
+  // the btree implementation.
+  ~btree_node() = default;
+  btree_node(btree_node const &) = delete;
+  btree_node &operator=(btree_node const &) = delete;
+
+  // Public for EmptyNodeType.
+  constexpr static size_type Alignment() {
+    static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+                  "Alignment of all nodes must be equal.");
+    return InternalLayout().Alignment();
+  }
+
+ protected:
+  btree_node() = default;
+
+ private:
+  using layout_type =
+      absl::container_internal::Layout<btree_node *, uint32_t, field_type,
+                                       slot_type, btree_node *>;
+  constexpr static size_type SizeWithNSlots(size_type n) {
+    return layout_type(
+               /*parent*/ 1,
+               /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+               /*position, start, finish, max_count*/ 4,
+               /*slots*/ n,
+               /*children*/ 0)
+        .AllocSize();
+  }
+  // A lower bound for the overhead of fields other than slots in a leaf node.
+  constexpr static size_type MinimumOverhead() {
+    return SizeWithNSlots(1) - sizeof(slot_type);
+  }
+
+  // Compute how many values we can fit onto a leaf node taking into account
+  // padding.
+  constexpr static size_type NodeTargetSlots(const size_type begin,
+                                             const size_type end) {
+    return begin == end ? begin
+           : SizeWithNSlots((begin + end) / 2 + 1) >
+                   params_type::kTargetNodeSize
+               ? NodeTargetSlots(begin, (begin + end) / 2)
+               : NodeTargetSlots((begin + end) / 2 + 1, end);
+  }
+
+  enum {
+    kTargetNodeSize = params_type::kTargetNodeSize,
+    kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
+
+    // We need a minimum of 3 slots per internal node in order to perform
+    // splitting (1 value for the two nodes involved in the split and 1 value
+    // propagated to the parent as the delimiter for the split). For
+    // performance reasons, we don't allow 3 slots-per-node due to bad worst
+    // case occupancy of 1/3 (for a node, not a b-tree).
+    kMinNodeSlots = 4,
+
+    kNodeSlots =
+        kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
+
+    // The node is internal (i.e. is not a leaf node) if and only if
+    // `max_count` has this value.
+    kInternalNodeMaxCount = 0,
+  };
+
+  // Leaves can have less than kNodeSlots values.
+  constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
+    return layout_type(
+        /*parent*/ 1,
+        /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+        /*position, start, finish, max_count*/ 4,
+        /*slots*/ slot_count,
+        /*children*/ 0);
+  }
+  constexpr static layout_type InternalLayout() {
+    return layout_type(
+        /*parent*/ 1,
+        /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+        /*position, start, finish, max_count*/ 4,
+        /*slots*/ kNodeSlots,
+        /*children*/ kNodeSlots + 1);
+  }
+  constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+    return LeafLayout(slot_count).AllocSize();
+  }
+  constexpr static size_type InternalSize() {
+    return InternalLayout().AllocSize();
+  }
+
+  // N is the index of the type in the Layout definition.
+  // ElementType<N> is the Nth type in the Layout definition.
+  template <size_type N>
+  inline typename layout_type::template ElementType<N> *GetField() {
+    // We assert that we don't read from values that aren't there.
+    assert(N < 4 || is_internal());
+    return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+  }
+  template <size_type N>
+  inline const typename layout_type::template ElementType<N> *GetField() const {
+    assert(N < 4 || is_internal());
+    return InternalLayout().template Pointer<N>(
+        reinterpret_cast<const char *>(this));
+  }
+  void set_parent(btree_node *p) { *GetField<0>() = p; }
+  field_type &mutable_finish() { return GetField<2>()[2]; }
+  slot_type *slot(int i) { return &GetField<3>()[i]; }
+  slot_type *start_slot() { return slot(start()); }
+  slot_type *finish_slot() { return slot(finish()); }
+  const slot_type *slot(int i) const { return &GetField<3>()[i]; }
+  void set_position(field_type v) { GetField<2>()[0] = v; }
+  void set_start(field_type v) { GetField<2>()[1] = v; }
+  void set_finish(field_type v) { GetField<2>()[2] = v; }
+  // This method is only called by the node init methods.
+  void set_max_count(field_type v) { GetField<2>()[3] = v; }
+
+ public:
+  // Whether this is a leaf node or not. This value doesn't change after the
+  // node is created.
+  bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
+  // Whether this is an internal node or not. This value doesn't change after
+  // the node is created.
+  bool is_internal() const { return !is_leaf(); }
+
+  // Getter for the position of this node in its parent.
+  field_type position() const { return GetField<2>()[0]; }
+
+  // Getter for the offset of the first value in the `values` array.
+  field_type start() const {
+    // TODO(ezb): when floating storage is implemented, return GetField<2>()[1];
+    assert(GetField<2>()[1] == 0);
+    return 0;
+  }
+
+  // Getter for the offset after the last value in the `values` array.
+  field_type finish() const { return GetField<2>()[2]; }
+
+  // Getters for the number of values stored in this node.
+  field_type count() const {
+    assert(finish() >= start());
+    return finish() - start();
+  }
+  field_type max_count() const {
+    // Internal nodes have max_count==kInternalNodeMaxCount.
+    // Leaf nodes have max_count in [1, kNodeSlots].
+    const field_type max_count = GetField<2>()[3];
+    return max_count == field_type{kInternalNodeMaxCount}
+               ? field_type{kNodeSlots}
+               : max_count;
+  }
+
+  // Getter for the parent of this node.
+  btree_node *parent() const { return *GetField<0>(); }
+  // Getter for whether the node is the root of the tree. The parent of the
+  // root of the tree is the leftmost node in the tree which is guaranteed to
+  // be a leaf.
+  bool is_root() const { return parent()->is_leaf(); }
+  void make_root() {
+    assert(parent()->is_root());
+    set_generation(parent()->generation());
+    set_parent(parent()->parent());
+  }
+
+  // Gets the root node's generation integer, which is the one used by the
+  // tree.
+  uint32_t *get_root_generation() const {
+    assert(params_type::kEnableGenerations);
+    const btree_node *curr = this;
+    for (; !curr->is_root(); curr = curr->parent()) continue;
+    return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
+  }
+
+  // Returns the generation for iterator validation.
+  uint32_t generation() const {
+    return params_type::kEnableGenerations ? *get_root_generation() : 0;
+  }
+  // Updates generation. Should only be called on a root node or during node
+  // initialization.
+  void set_generation(uint32_t generation) {
+    if (params_type::kEnableGenerations) GetField<1>()[0] = generation;
+  }
+  // Updates the generation. We do this whenever the node is mutated.
+  void next_generation() {
+    if (params_type::kEnableGenerations) ++*get_root_generation();
+  }
+
+  // Getters for the key/value at position i in the node.
+  const key_type &key(int i) const { return params_type::key(slot(i)); }
+  reference value(int i) { return params_type::element(slot(i)); }
+  const_reference value(int i) const { return params_type::element(slot(i)); }
+
+  // Getters/setter for the child at position i in the node.
+  btree_node *child(int i) const { return GetField<4>()[i]; }
+  btree_node *start_child() const { return child(start()); }
+  btree_node *&mutable_child(int i) { return GetField<4>()[i]; }
+  void clear_child(int i) {
+    absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+  }
+  void set_child(int i, btree_node *c) {
+    absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+    mutable_child(i) = c;
+    c->set_position(i);
+  }
+  void init_child(int i, btree_node *c) {
+    set_child(i, c);
+    c->set_parent(this);
+  }
+
+  // Returns the position of the first value whose key is not less than k.
+  template <typename K>
+  SearchResult<int, is_key_compare_to::value> lower_bound(
+      const K &k, const key_compare &comp) const {
+    return use_linear_search::value ? linear_search(k, comp)
+                                    : binary_search(k, comp);
+  }
+  // Returns the position of the first value whose key is greater than k.
+  template <typename K>
+  int upper_bound(const K &k, const key_compare &comp) const {
+    auto upper_compare = upper_bound_adapter<key_compare>(comp);
+    return use_linear_search::value ? linear_search(k, upper_compare).value
+                                    : binary_search(k, upper_compare).value;
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  linear_search(const K &k, const Compare &comp) const {
+    return linear_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  template <typename K, typename Compare>
+  SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+  binary_search(const K &k, const Compare &comp) const {
+    return binary_search_impl(k, start(), finish(), comp,
+                              btree_is_key_compare_to<Compare, key_type>());
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using plain compare.
+  template <typename K, typename Compare>
+  SearchResult<int, false> linear_search_impl(
+      const K &k, int s, const int e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s < e) {
+      if (!comp(key(s), k)) {
+        break;
+      }
+      ++s;
+    }
+    return SearchResult<int, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // linear search performed using compare-to.
+  template <typename K, typename Compare>
+  SearchResult<int, true> linear_search_impl(
+      const K &k, int s, const int e, const Compare &comp,
+      std::true_type /* IsCompareTo */) const {
+    while (s < e) {
+      const absl::weak_ordering c = comp(key(s), k);
+      if (c == 0) {
+        return {s, MatchKind::kEq};
+      } else if (c > 0) {
+        break;
+      }
+      ++s;
+    }
+    return {s, MatchKind::kNe};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using plain compare.
+  template <typename K, typename Compare>
+  SearchResult<int, false> binary_search_impl(
+      const K &k, int s, int e, const Compare &comp,
+      std::false_type /* IsCompareTo */) const {
+    while (s != e) {
+      const int mid = (s + e) >> 1;
+      if (comp(key(mid), k)) {
+        s = mid + 1;
+      } else {
+        e = mid;
+      }
+    }
+    return SearchResult<int, false>{s};
+  }
+
+  // Returns the position of the first value whose key is not less than k using
+  // binary search performed using compare-to.
+  template <typename K, typename CompareTo>
+  SearchResult<int, true> binary_search_impl(
+      const K &k, int s, int e, const CompareTo &comp,
+      std::true_type /* IsCompareTo */) const {
+    if (params_type::template can_have_multiple_equivalent_keys<K>()) {
+      MatchKind exact_match = MatchKind::kNe;
+      while (s != e) {
+        const int mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else {
+          e = mid;
+          if (c == 0) {
+            // Need to return the first value whose key is not less than k,
+            // which requires continuing the binary search if there could be
+            // multiple equivalent keys.
+            exact_match = MatchKind::kEq;
+          }
+        }
+      }
+      return {s, exact_match};
+    } else {  // Can't have multiple equivalent keys.
+      while (s != e) {
+        const int mid = (s + e) >> 1;
+        const absl::weak_ordering c = comp(key(mid), k);
+        if (c < 0) {
+          s = mid + 1;
+        } else if (c > 0) {
+          e = mid;
+        } else {
+          return {mid, MatchKind::kEq};
+        }
+      }
+      return {s, MatchKind::kNe};
+    }
+  }
+
+  // Emplaces a value at position i, shifting all existing values and
+  // children at positions >= i to the right by 1.
+  template <typename... Args>
+  void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+
+  // Removes the values at positions [i, i + to_erase), shifting all existing
+  // values and children after that range to the left by to_erase. Clears all
+  // children between [i, i + to_erase).
+  void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
+
+  // Rebalances a node with its right sibling.
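+  // (`to_move` values are rotated through the delimiting key in the parent:
+  // rebalance_right_to_left moves values from `right` into `this`, while
+  // rebalance_left_to_right moves values from `this` into `right`.)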
+  void rebalance_right_to_left(int to_move, btree_node *right,
+                               allocator_type *alloc);
+  void rebalance_left_to_right(int to_move, btree_node *right,
+                               allocator_type *alloc);
+
+  // Splits a node, moving a portion of the node's values to its right sibling.
+  void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+  // Merges a node with its right sibling, moving all of the values and the
+  // delimiting key in the parent node onto itself, and deleting the src node.
+  void merge(btree_node *src, allocator_type *alloc);
+
+  // Node allocation/deletion routines.
+  void init_leaf(int max_count, btree_node *parent) {
+    set_generation(0);
+    set_parent(parent);
+    set_position(0);
+    set_start(0);
+    set_finish(0);
+    set_max_count(max_count);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        start_slot(), max_count * sizeof(slot_type));
+  }
+  void init_internal(btree_node *parent) {
+    init_leaf(kNodeSlots, parent);
+    // Set `max_count` to a sentinel value to indicate that this node is
+    // internal.
+    set_max_count(kInternalNodeMaxCount);
+    absl::container_internal::SanitizerPoisonMemoryRegion(
+        &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
+  }
+
+  static void deallocate(const size_type size, btree_node *node,
+                         allocator_type *alloc) {
+    absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
+  }
+
+  // Deletes a node and all of its children.
+  static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
+ private:
+  template <typename... Args>
+  void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+    next_generation();
+    absl::container_internal::SanitizerUnpoisonObject(slot(i));
+    params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+  }
+  void value_destroy(const field_type i, allocator_type *alloc) {
+    next_generation();
+    params_type::destroy(alloc, slot(i));
+    absl::container_internal::SanitizerPoisonObject(slot(i));
+  }
+  void value_destroy_n(const field_type i, const field_type n,
+                       allocator_type *alloc) {
+    next_generation();
+    for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+      params_type::destroy(alloc, s);
+      absl::container_internal::SanitizerPoisonObject(s);
+    }
+  }
+
+  static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+    absl::container_internal::SanitizerUnpoisonObject(dest);
+    params_type::transfer(alloc, dest, src);
+    absl::container_internal::SanitizerPoisonObject(src);
+  }
+
+  // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in
+  // `this`.
+  void transfer(const size_type dest_i, const size_type src_i,
+                btree_node *src_node, allocator_type *alloc) {
+    next_generation();
+    transfer(slot(dest_i), src_node->slot(src_i), alloc);
+  }
+
+  // Transfers `n` values starting at value `src_i` in `src_node` into the
+  // values starting at value `dest_i` in `this`.
+  void transfer_n(const size_type n, const size_type dest_i,
+                  const size_type src_i, btree_node *src_node,
+                  allocator_type *alloc) {
+    next_generation();
+    for (slot_type *src = src_node->slot(src_i), *end = src + n,
+                   *dest = slot(dest_i);
+         src != end; ++src, ++dest) {
+      transfer(dest, src, alloc);
+    }
+  }
+
+  // Same as above, except that we start at the end and work our way to the
+  // beginning.
+  void transfer_n_backward(const size_type n, const size_type dest_i,
+                           const size_type src_i, btree_node *src_node,
+                           allocator_type *alloc) {
+    next_generation();
+    for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+                   *dest = slot(dest_i + n - 1);
+         src != end; --src, --dest) {
+      transfer(dest, src, alloc);
+    }
+  }
+
+  template <typename P>
+  friend class btree;
+  template <typename N, typename R, typename P>
+  friend class btree_iterator;
+  friend class BtreeNodePeer;
+  friend struct btree_access;
+};
+
+template <typename Node, typename Reference, typename Pointer>
+class btree_iterator {
+  using key_type = typename Node::key_type;
+  using size_type = typename Node::size_type;
+  using params_type = typename Node::params_type;
+  using is_map_container = typename params_type::is_map_container;
+
+  using node_type = Node;
+  using normal_node = typename std::remove_const<Node>::type;
+  using const_node = const Node;
+  using normal_pointer = typename params_type::pointer;
+  using normal_reference = typename params_type::reference;
+  using const_pointer = typename params_type::const_pointer;
+  using const_reference = typename params_type::const_reference;
+  using slot_type = typename params_type::slot_type;
+
+  using iterator =
+      btree_iterator<normal_node, normal_reference, normal_pointer>;
+  using const_iterator =
+      btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+  // These aliases are public for std::iterator_traits.
+  using difference_type = typename Node::difference_type;
+  using value_type = typename params_type::value_type;
+  using pointer = Pointer;
+  using reference = Reference;
+  using iterator_category = std::bidirectional_iterator_tag;
+
+  btree_iterator() : btree_iterator(nullptr, -1) {}
+  explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
+  btree_iterator(Node *n, int p) : node_(n), position_(p) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    // Use `~uint32_t{}` as a sentinel value for iterator generations so it
+    // doesn't match the initial value for the actual generation.
+    generation_ = n != nullptr ? n->generation() : ~uint32_t{};
+#endif
+  }
+
+  // NOTE: this SFINAE allows for implicit conversions from iterator to
+  // const_iterator, but it specifically avoids hiding the copy constructor so
+  // that the trivial one will be used when possible.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+                    std::is_same<btree_iterator, const_iterator>::value,
+                int> = 0>
+  btree_iterator(const btree_iterator<N, R, P> other)  // NOLINT
+      : node_(other.node_), position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    generation_ = other.generation_;
+#endif
+  }
+
+  bool operator==(const iterator &other) const {
+    return node_ == other.node_ && position_ == other.position_;
+  }
+  bool operator==(const const_iterator &other) const {
+    return node_ == other.node_ && position_ == other.position_;
+  }
+  bool operator!=(const iterator &other) const {
+    return node_ != other.node_ || position_ != other.position_;
+  }
+  bool operator!=(const const_iterator &other) const {
+    return node_ != other.node_ || position_ != other.position_;
+  }
+
+  // Accessors for the key/value the iterator is pointing at.
+  reference operator*() const {
+    ABSL_HARDENING_ASSERT(node_ != nullptr);
+    ABSL_HARDENING_ASSERT(node_->start() <= position_);
+    ABSL_HARDENING_ASSERT(node_->finish() > position_);
+    assert_valid_generation();
+    return node_->value(position_);
+  }
+  pointer operator->() const { return &operator*(); }
+
+  btree_iterator &operator++() {
+    increment();
+    return *this;
+  }
+  btree_iterator &operator--() {
+    decrement();
+    return *this;
+  }
+  btree_iterator operator++(int) {
+    btree_iterator tmp = *this;
+    ++*this;
+    return tmp;
+  }
+  btree_iterator operator--(int) {
+    btree_iterator tmp = *this;
+    --*this;
+    return tmp;
+  }
+
+ private:
+  friend iterator;
+  friend const_iterator;
+  template <typename Params>
+  friend class btree;
+  template <typename Tree>
+  friend class btree_container;
+  template <typename Tree>
+  friend class btree_set_container;
+  template <typename Tree>
+  friend class btree_map_container;
+  template <typename Tree>
+  friend class btree_multiset_container;
+  template <typename TreeType, typename CheckerType>
+  friend class base_checker;
+  friend struct btree_access;
+
+  // This SFINAE allows explicit conversions from const_iterator to
+  // iterator, but also avoids hiding the copy constructor.
+  // NOTE: the const_cast is safe because this constructor is only called by
+  // non-const methods and the container owns the nodes.
+  template <typename N, typename R, typename P,
+            absl::enable_if_t<
+                std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+                    std::is_same<btree_iterator, iterator>::value,
+                int> = 0>
+  explicit btree_iterator(const btree_iterator<N, R, P> other)
+      : node_(const_cast<node_type *>(other.node_)),
+        position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    generation_ = other.generation_;
+#endif
+  }
+
+  // Increment/decrement the iterator.
+  void increment() {
+    assert_valid_generation();
+    if (node_->is_leaf() && ++position_ < node_->finish()) {
+      return;
+    }
+    increment_slow();
+  }
+  void increment_slow();
+
+  void decrement() {
+    assert_valid_generation();
+    if (node_->is_leaf() && --position_ >= node_->start()) {
+      return;
+    }
+    decrement_slow();
+  }
+  void decrement_slow();
+
+  // Updates the generation. For use internally right before we return an
+  // iterator to the user.
+  void update_generation() {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    if (node_ != nullptr) generation_ = node_->generation();
+#endif
+  }
+
+  const key_type &key() const { return node_->key(position_); }
+  decltype(std::declval<Node *>()->slot(0)) slot() {
+    return node_->slot(position_);
+  }
+
+  void assert_valid_generation() const {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    if (node_ != nullptr && node_->generation() != generation_) {
+      ABSL_INTERNAL_LOG(
+          FATAL,
+          "Attempting to use an invalidated iterator. The corresponding b-tree "
+          "container has been mutated since this iterator was constructed.");
+    }
+#endif
+  }
+
+  // The node in the tree the iterator is pointing at.
+  Node *node_;
+  // The position within the node of the tree the iterator is pointing at.
+  // NOTE: this is an int rather than a field_type because iterators can point
+  // to invalid positions (such as -1) in certain circumstances.
+  int position_;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+  // Used to check that the iterator hasn't been invalidated.
+  uint32_t generation_;
+#endif
+};
+
+template <typename Params>
+class btree {
+  using node_type = btree_node<Params>;
+  using is_key_compare_to = typename Params::is_key_compare_to;
+  using field_type = typename node_type::field_type;
+
+  // We use a static empty node for the root/leftmost/rightmost of empty btrees
+  // in order to avoid branching in begin()/end().
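+  // (For an empty tree, root_, leftmost, and rightmost all refer to the one
+  // shared EmptyNodeType instance, so begin() == end() holds without any
+  // null checks on these paths.)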
+  struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+    using field_type = typename node_type::field_type;
+    node_type *parent;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+    uint32_t generation = 0;
+#endif
+    field_type position = 0;
+    field_type start = 0;
+    field_type finish = 0;
+    // max_count must be != kInternalNodeMaxCount (so that this node is
+    // regarded as a leaf node). max_count() is never called when the tree is
+    // empty.
+    field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+#ifdef _MSC_VER
+    // MSVC has constexpr code generation bugs here.
+    EmptyNodeType() : parent(this) {}
+#else
+    constexpr EmptyNodeType(node_type *p) : parent(p) {}
+#endif
+  };
+
+  static node_type *EmptyNode() {
+#ifdef _MSC_VER
+    static EmptyNodeType *empty_node = new EmptyNodeType;
+    // This assert fails on some other construction methods.
+    assert(empty_node->parent == empty_node);
+    return empty_node;
+#else
+    static constexpr EmptyNodeType empty_node(
+        const_cast<EmptyNodeType *>(&empty_node));
+    return const_cast<EmptyNodeType *>(&empty_node);
+#endif
+  }
+
+  enum : uint32_t {
+    kNodeSlots = node_type::kNodeSlots,
+    kMinNodeValues = kNodeSlots / 2,
+  };
+
+  struct node_stats {
+    using size_type = typename Params::size_type;
+
+    node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
+
+    node_stats &operator+=(const node_stats &other) {
+      leaf_nodes += other.leaf_nodes;
+      internal_nodes += other.internal_nodes;
+      return *this;
+    }
+
+    size_type leaf_nodes;
+    size_type internal_nodes;
+  };
+
+ public:
+  using key_type = typename Params::key_type;
+  using value_type = typename Params::value_type;
+  using size_type = typename Params::size_type;
+  using difference_type = typename Params::difference_type;
+  using key_compare = typename Params::key_compare;
+  using original_key_compare = typename Params::original_key_compare;
+  using value_compare = typename Params::value_compare;
+  using allocator_type = typename Params::allocator_type;
+  using reference = typename Params::reference;
+  using const_reference = typename Params::const_reference;
+  using pointer = typename Params::pointer;
+  using const_pointer = typename Params::const_pointer;
+  using iterator =
+      typename btree_iterator<node_type, reference, pointer>::iterator;
+  using const_iterator = typename iterator::const_iterator;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+  using node_handle_type = node_handle<Params, btree<Params>, allocator_type>;
+
+  // Internal types made public for use by btree_container types.
+  using params_type = Params;
+  using slot_type = typename Params::slot_type;
+
+ private:
+  // Copies or moves (depending on the template parameter) the values in
+  // other into this btree in their order in other. This btree must be empty
+  // before this method is called. This method is used in copy construction,
+  // copy assignment, and move assignment.
+  template <typename Btree>
+  void copy_or_move_values_in_order(Btree &other);
+
+  // Validates that various assumptions/requirements are true at compile time.
+  constexpr static bool static_assert_validation();
+
+ public:
+  btree(const key_compare &comp, const allocator_type &alloc)
+      : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
+
+  btree(const btree &other) : btree(other, other.allocator()) {}
+  btree(const btree &other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    copy_or_move_values_in_order(other);
+  }
+  btree(btree &&other) noexcept
+      : root_(absl::exchange(other.root_, EmptyNode())),
+        rightmost_(std::move(other.rightmost_)),
+        size_(absl::exchange(other.size_, 0)) {
+    other.mutable_rightmost() = EmptyNode();
+  }
+  btree(btree &&other, const allocator_type &alloc)
+      : btree(other.key_comp(), alloc) {
+    if (alloc == other.allocator()) {
+      swap(other);
+    } else {
+      // Move values from `other` one at a time when allocators are different.
+      copy_or_move_values_in_order(other);
+    }
+  }
+
+  ~btree() {
+    // Put static_asserts in destructor to avoid triggering them before the
+    // type is complete.
+    static_assert(static_assert_validation(), "This call must be elided.");
+    clear();
+  }
+
+  // Assign the contents of other to *this.
+  btree &operator=(const btree &other);
+  btree &operator=(btree &&other) noexcept;
+
+  iterator begin() { return iterator(leftmost()); }
+  const_iterator begin() const { return const_iterator(leftmost()); }
+  iterator end() { return iterator(rightmost(), rightmost()->finish()); }
+  const_iterator end() const {
+    return const_iterator(rightmost(), rightmost()->finish());
+  }
+  reverse_iterator rbegin() { return reverse_iterator(end()); }
+  const_reverse_iterator rbegin() const {
+    return const_reverse_iterator(end());
+  }
+  reverse_iterator rend() { return reverse_iterator(begin()); }
+  const_reverse_iterator rend() const {
+    return const_reverse_iterator(begin());
+  }
+
+  // Finds the first element whose key is not less than `key`.
+  template <typename K>
+  iterator lower_bound(const K &key) {
+    return internal_end(internal_lower_bound(key).value);
+  }
+  template <typename K>
+  const_iterator lower_bound(const K &key) const {
+    return internal_end(internal_lower_bound(key).value);
+  }
+
+  // Finds the first element whose key is not less than `key` and also returns
+  // whether that element is equal to `key`.
+  template <typename K>
+  std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+  // Finds the first element whose key is greater than `key`.
+  template <typename K>
+  iterator upper_bound(const K &key) {
+    return internal_end(internal_upper_bound(key));
+  }
+  template <typename K>
+  const_iterator upper_bound(const K &key) const {
+    return internal_end(internal_upper_bound(key));
+  }
+
+  // Finds the range of values which compare equal to key. The first member of
+  // the returned pair is equal to lower_bound(key). The second member of the
+  // pair is equal to upper_bound(key).
+  template <typename K>
+  std::pair<iterator, iterator> equal_range(const K &key);
+  template <typename K>
+  std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+    return const_cast<btree *>(this)->equal_range(key);
+  }
+
+  // Inserts a value into the btree only if it does not already exist. The
+  // boolean return value indicates whether insertion succeeded or failed.
+  // Requirement: if `key` already exists in the btree, does not consume
+  // `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
+
+  // Inserts with hint. Checks to see if the value should be placed immediately
+  // before `position` in the tree. If so, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_unique() were made.
+  // Requirement: if `key` already exists in the btree, does not consume
+  // `args`.
+  // Requirement: `key` is never referenced after consuming `args`.
+  template <typename K, typename... Args>
+  std::pair<iterator, bool> insert_hint_unique(iterator position,
+                                               const K &key, Args &&... args);
+
+  // Insert a range of values into the btree.
+  // Note: the first overload avoids constructing a value_type if the key
+  // already exists in the btree.
+  template <typename InputIterator,
+            typename = decltype(std::declval<const key_compare &>()(
+                params_type::key(*std::declval<InputIterator>()),
+                std::declval<const key_type &>()))>
+  void insert_iterator_unique(InputIterator b, InputIterator e, int);
+  // We need the second overload for cases in which we need to construct a
+  // value_type in order to compare it with the keys already in the btree.
+  template <typename InputIterator>
+  void insert_iterator_unique(InputIterator b, InputIterator e, char);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(const key_type &key, ValueType &&v);
+
+  // Inserts a value into the btree.
+  template <typename ValueType>
+  iterator insert_multi(ValueType &&v) {
+    return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+  }
+
+  // Insert with hint. Check to see if the value should be placed immediately
+  // before position in the tree. If it does, then the insertion will take
+  // amortized constant time. If not, the insertion will take amortized
+  // logarithmic time as if a call to insert_multi(v) were made.
+  template <typename ValueType>
+  iterator insert_hint_multi(iterator position, ValueType &&v);
+
+  // Insert a range of values into the btree.
+  template <typename InputIterator>
+  void insert_iterator_multi(InputIterator b, InputIterator e);
+
+  // Erase the specified iterator from the btree. The iterator must be valid
+  // (i.e. not equal to end()). Return an iterator pointing to the node after
+  // the one that was erased (or end() if none exists).
+  // Requirement: does not read the value at `*iter`.
+  iterator erase(iterator iter);
+
+  // Erases range. Returns the number of keys erased and an iterator pointing
+  // to the element after the last erased element.
+  std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
+
+  // Finds an element with key equivalent to `key` or returns `end()` if `key`
+  // is not present.
+  template <typename K>
+  iterator find(const K &key) {
+    return internal_end(internal_find(key));
+  }
+  template <typename K>
+  const_iterator find(const K &key) const {
+    return internal_end(internal_find(key));
+  }
+
+  // Clear the btree, deleting all of the values it contains.
+  void clear();
+
+  // Swaps the contents of `this` and `other`.
+  void swap(btree &other);
+
+  const key_compare &key_comp() const noexcept {
+    return rightmost_.template get<0>();
+  }
+  template <typename K1, typename K2>
+  bool compare_keys(const K1 &a, const K2 &b) const {
+    return compare_internal::compare_result_as_less_than(key_comp()(a, b));
+  }
+
+  value_compare value_comp() const {
+    return value_compare(original_key_compare(key_comp()));
+  }
+
+  // Verifies the structure of the btree.
+  void verify() const;
+
+  // Size routines.
+  size_type size() const { return size_; }
+  size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+  bool empty() const { return size_ == 0; }
+
+  // The height of the btree. An empty tree will have height 0.
+  size_type height() const {
+    size_type h = 0;
+    if (!empty()) {
+      // Count the length of the chain from the leftmost node up to the
+      // root. We actually count from the root back around to the level below
+      // the root, but the calculation is the same because of the circularity
+      // of that traversal.
+      const node_type *n = root();
+      do {
+        ++h;
+        n = n->parent();
+      } while (n != root());
+    }
+    return h;
+  }
+
+  // The number of internal, leaf and total nodes used by the btree.
+  size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
+  size_type internal_nodes() const {
+    return internal_stats(root()).internal_nodes;
+  }
+  size_type nodes() const {
+    node_stats stats = internal_stats(root());
+    return stats.leaf_nodes + stats.internal_nodes;
+  }
+
+  // The total number of bytes used by the btree.
+  // TODO(b/169338300): update to support node_btree_*.
+  size_type bytes_used() const {
+    node_stats stats = internal_stats(root());
+    if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+      return sizeof(*this) + node_type::LeafSize(root()->max_count());
+    } else {
+      return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
+             stats.internal_nodes * node_type::InternalSize();
+    }
+  }
+
+  // The average number of bytes used per value stored in the btree assuming
+  // random insertion order.
+  static double average_bytes_per_value() {
+    // The expected number of values per node with random insertion order is
+    // the average of the maximum and minimum numbers of values per node.
+    const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0;
+    return node_type::LeafSize() / expected_values_per_node;
+  }
+
+  // The fullness of the btree. Computed as the number of elements in the btree
+  // divided by the maximum number of elements a tree with the current number
+  // of nodes could hold. A value of 1 indicates perfect space
+  // utilization. Smaller values indicate space wastage.
+  // Returns 0 for empty trees.
+  double fullness() const {
+    if (empty()) return 0.0;
+    return static_cast<double>(size()) / (nodes() * kNodeSlots);
+  }
+  // The overhead of the btree structure in bytes per node. Computed as the
+  // total number of bytes used by the btree minus the number of bytes used for
+  // storing elements divided by the number of elements.
+  // Returns 0 for empty trees.
+  double overhead() const {
+    if (empty()) return 0.0;
+    return (bytes_used() - size() * sizeof(value_type)) /
+           static_cast<double>(size());
+  }
+
+  // The allocator used by the btree.
+  allocator_type get_allocator() const { return allocator(); }
+
+ private:
+  friend struct btree_access;
+
+  // Internal accessor routines.
+  node_type *root() { return root_; }
+  const node_type *root() const { return root_; }
+  node_type *&mutable_root() noexcept { return root_; }
+  node_type *rightmost() { return rightmost_.template get<2>(); }
+  const node_type *rightmost() const { return rightmost_.template get<2>(); }
+  node_type *&mutable_rightmost() noexcept {
+    return rightmost_.template get<2>();
+  }
+  key_compare *mutable_key_comp() noexcept {
+    return &rightmost_.template get<0>();
+  }
+
+  // The leftmost node is stored as the parent of the root node.
+  node_type *leftmost() { return root()->parent(); }
+  const node_type *leftmost() const { return root()->parent(); }
+
+  // Allocator routines.
+  allocator_type *mutable_allocator() noexcept {
+    return &rightmost_.template get<1>();
+  }
+  const allocator_type &allocator() const noexcept {
+    return rightmost_.template get<1>();
+  }
+
+  // Allocates a correctly aligned node of at least size bytes using the
+  // allocator.
+  node_type *allocate(const size_type size) {
+    return reinterpret_cast<node_type *>(
+        absl::container_internal::Allocate<node_type::Alignment()>(
+            mutable_allocator(), size));
+  }
+
+  // Node creation/deletion routines.
+
+  // Node creation/deletion routines.
+  node_type *new_internal_node(node_type *parent) {
+    node_type *n = allocate(node_type::InternalSize());
+    n->init_internal(parent);
+    return n;
+  }
+  node_type *new_leaf_node(node_type *parent) {
+    node_type *n = allocate(node_type::LeafSize());
+    n->init_leaf(kNodeSlots, parent);
+    return n;
+  }
+  node_type *new_leaf_root_node(const int max_count) {
+    node_type *n = allocate(node_type::LeafSize(max_count));
+    n->init_leaf(max_count, /*parent=*/n);
+    return n;
+  }
+
+  // Deletion helper routines.
+  iterator rebalance_after_delete(iterator iter);
+
+  // Rebalances or splits the node iter points to.
+  void rebalance_or_split(iterator *iter);
+
+  // Merges the values of left, right and the delimiting key on their parent
+  // onto left, removing the delimiting key and deleting right.
+  void merge_nodes(node_type *left, node_type *right);
+
+  // Tries to merge node with its left or right sibling, and failing that,
+  // rebalance with its left or right sibling. Returns true if a merge
+  // occurred, at which point it is no longer valid to access node. Returns
+  // false if no merging took place.
+  bool try_merge_or_rebalance(iterator *iter);
+
+  // Tries to shrink the height of the tree by 1.
+  void try_shrink();
+
+  iterator internal_end(iterator iter) {
+    return iter.node_ != nullptr ? iter : end();
+  }
+  const_iterator internal_end(const_iterator iter) const {
+    return iter.node_ != nullptr ? iter : end();
+  }
+
+  // Emplaces a value into the btree immediately before iter. Requires that
+  // key(v) <= iter.key() and (--iter).key() <= key(v).
+  template <typename... Args>
+  iterator internal_emplace(iterator iter, Args &&... args);
+
+  // Returns an iterator pointing to the first value >= the value "iter" is
+  // pointing at. Note that "iter" might be pointing to an invalid location
+  // such as iter.position_ == iter.node_->finish(). This routine simply moves
+  // iter up in the tree to a valid location. Requires: iter.node_ is non-null.
+  template <typename IterType>
+  static IterType internal_last(IterType iter);
+
+  // Returns an iterator pointing to the leaf position at which key would
+  // reside in the tree, unless there is an exact match - in which case, the
+  // result may not be on a leaf. When there's a three-way comparator, we can
+  // return whether there was an exact match. This allows the caller to avoid
+  // a subsequent comparison to determine if an exact match was made, which is
+  // important for keys with expensive comparison, such as strings.
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_locate(
+      const K &key) const;
+
+  // Internal routine which implements lower_bound().
+  template <typename K>
+  SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+      const K &key) const;
+
+  // Internal routine which implements upper_bound().
+  template <typename K>
+  iterator internal_upper_bound(const K &key) const;
+
+  // Internal routine which implements find().
+  template <typename K>
+  iterator internal_find(const K &key) const;
+
+  // Verifies the tree structure of node.
+  int internal_verify(const node_type *node, const key_type *lo,
+                      const key_type *hi) const;
+
+  node_stats internal_stats(const node_type *node) const {
+    // The root can be a static empty node.
+    if (node == nullptr || (node == root() && empty())) {
+      return node_stats(0, 0);
+    }
+    if (node->is_leaf()) {
+      return node_stats(1, 0);
+    }
+    node_stats res(0, 1);
+    for (int i = node->start(); i <= node->finish(); ++i) {
+      res += internal_stats(node->child(i));
+    }
+    return res;
+  }
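internal_stats() above is a plain recursive walk; here is the same shape on a toy N-ary tree, for illustration only (Node, Stats, and CollectStats are stand-ins invented here, not names from this patch):

    #include <vector>

    struct Node {
      std::vector<Node*> children;  // empty for a leaf
    };

    struct Stats {
      long leaf_nodes = 0;
      long internal_nodes = 0;
    };

    // Mirrors internal_stats(): a leaf contributes (1, 0); an internal node
    // contributes (0, 1) plus the totals of all of its children.
    Stats CollectStats(const Node* n) {
      if (n->children.empty()) return {1, 0};
      Stats s{0, 1};
      for (const Node* c : n->children) {
        Stats cs = CollectStats(c);
        s.leaf_nodes += cs.leaf_nodes;
        s.internal_nodes += cs.internal_nodes;
      }
      return s;
    }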
+
+  node_type *root_;
+
+  // A pointer to the rightmost node. Note that the leftmost node is stored as
+  // the root's parent. We use compressed tuple in order to save space because
+  // key_compare and allocator_type are usually empty.
+  absl::container_internal::CompressedTuple<key_compare, allocator_type,
+                                            node_type *>
+      rightmost_;
+
+  // Number of values.
+  size_type size_;
+};
+
+////
+// btree_node methods
+template <typename P>
+template <typename... Args>
+inline void btree_node<P>