
style: 🎨 format grpc

tags/0.1.0
DragonAura 2 years ago
parent commit 217d87aeed
100 changed files with 26258 additions and 23107 deletions
1. +129 -126  CAPI/cpp/grpc/include/absl/algorithm/algorithm.h
2. +1564 -1715  CAPI/cpp/grpc/include/absl/algorithm/container.h
3. +22 -23  CAPI/cpp/grpc/include/absl/base/attributes.h
4. +186 -171  CAPI/cpp/grpc/include/absl/base/call_once.h
5. +75 -76  CAPI/cpp/grpc/include/absl/base/casts.h
6. +18 -23  CAPI/cpp/grpc/include/absl/base/config.h
7. +8 -6  CAPI/cpp/grpc/include/absl/base/const_init.h
8. +132 -116  CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h
9. +161 -136  CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h
10. +12 -10  CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h
11. +89 -80  CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h
12. +65 -57  CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h
13. +93 -75  CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h
14. +398 -208  CAPI/cpp/grpc/include/absl/base/internal/endian.h
15. +31 -19  CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h
16. +1286 -1050  CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h
17. +3 -3  CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h
18. +24 -20  CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h
19. +32 -27  CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h
20. +14 -11  CAPI/cpp/grpc/include/absl/base/internal/identity.h
21. +18 -18  CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h
22. +20 -17  CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h
23. +194 -168  CAPI/cpp/grpc/include/absl/base/internal/invoke.h
24. +68 -64  CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h
25. +119 -101  CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h
26. +67 -49  CAPI/cpp/grpc/include/absl/base/internal/prefetch.h
27. +129 -128  CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h
28. +34 -31  CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h
29. +22 -19  CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h
30. +232 -202  CAPI/cpp/grpc/include/absl/base/internal/spinlock.h
31. +50 -48  CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h
32. +15 -13  CAPI/cpp/grpc/include/absl/base/internal/strerror.h
33. +24 -22  CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h
34. +35 -33  CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h
35. +167 -160  CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h
36. +46 -44  CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h
37. +50 -36  CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h
38. +46 -41  CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h
39. +147 -142  CAPI/cpp/grpc/include/absl/base/log_severity.h
40. +33 -26  CAPI/cpp/grpc/include/absl/base/macros.h
41. +22 -11  CAPI/cpp/grpc/include/absl/base/optimization.h
42. +0 -3  CAPI/cpp/grpc/include/absl/base/options.h
43. +29 -25  CAPI/cpp/grpc/include/absl/base/thread_annotations.h
44. +57 -51  CAPI/cpp/grpc/include/absl/cleanup/cleanup.h
45. +89 -71  CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h
46. +812 -794  CAPI/cpp/grpc/include/absl/container/btree_map.h
47. +760 -737  CAPI/cpp/grpc/include/absl/container/btree_set.h
48. +182 -133  CAPI/cpp/grpc/include/absl/container/btree_test.h
49. +586 -462  CAPI/cpp/grpc/include/absl/container/fixed_array.h
50. +588 -567  CAPI/cpp/grpc/include/absl/container/flat_hash_map.h
51. +484 -467  CAPI/cpp/grpc/include/absl/container/flat_hash_set.h
52. +909 -791  CAPI/cpp/grpc/include/absl/container/inlined_vector.h
53. +3292 -2727  CAPI/cpp/grpc/include/absl/container/internal/btree.h
54. +831 -667  CAPI/cpp/grpc/include/absl/container/internal/btree_container.h
55. +232 -181  CAPI/cpp/grpc/include/absl/container/internal/common.h
56. +276 -227  CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h
57. +445 -388  CAPI/cpp/grpc/include/absl/container/internal/container_memory.h
58. +112 -90  CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h
59. +134 -102  CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h
60. +170 -143  CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h
61. +193 -137  CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h
62. +195 -177  CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h
63. +73 -61  CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h
64. +59 -49  CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h
65. +271 -217  CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h
66. +1061 -903  CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h
67. +609 -543  CAPI/cpp/grpc/include/absl/container/internal/layout.h
68. +49 -36  CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h
69. +190 -170  CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h
70. +2492 -2109  CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h
71. +315 -249  CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h
72. +73 -50  CAPI/cpp/grpc/include/absl/container/internal/tracked.h
73. +507 -455  CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h
74. +91 -83  CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h
75. +57 -54  CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h
76. +326 -308  CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h
77. +511 -456  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h
78. +62 -59  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h
79. +57 -53  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h
80. +204 -194  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h
81. +563 -551  CAPI/cpp/grpc/include/absl/container/node_hash_map.h
82. +464 -451  CAPI/cpp/grpc/include/absl/container/node_hash_set.h
83. +65 -62  CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h
84. +10 -8  CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h
85. +11 -9  CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h
86. +94 -87  CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h
87. +35 -40  CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h
88. +16 -14  CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h
89. +10 -10  CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h
90. +85 -87  CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h
91. +112 -95  CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h
92. +88 -85  CAPI/cpp/grpc/include/absl/debugging/leak_check.h
93. +178 -186  CAPI/cpp/grpc/include/absl/debugging/stacktrace.h
94. +39 -38  CAPI/cpp/grpc/include/absl/debugging/symbolize.h
95. +166 -156  CAPI/cpp/grpc/include/absl/flags/commandlineflag.h
96. +15 -15  CAPI/cpp/grpc/include/absl/flags/config.h
97. +21 -17  CAPI/cpp/grpc/include/absl/flags/declare.h
98. +120 -107  CAPI/cpp/grpc/include/absl/flags/flag.h
99. +43 -38  CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h
100. +860 -737  CAPI/cpp/grpc/include/absl/flags/internal/flag.h

+ 129
- 126
CAPI/cpp/grpc/include/absl/algorithm/algorithm.h

@@ -28,132 +28,135 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

namespace algorithm_internal {

// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
struct EqualTo {
template <typename T, typename U>
bool operator()(const T& a, const U& b) const {
return a == b;
}
};

template <typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred pred, std::input_iterator_tag,
std::input_iterator_tag) {
while (true) {
if (first1 == last1) return first2 == last2;
if (first2 == last2) return false;
if (!pred(*first1, *first2)) return false;
++first1;
++first2;
}
}

template <typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred&& pred, std::random_access_iterator_tag,
std::random_access_iterator_tag) {
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2, std::forward<Pred>(pred));
}

// When we are using our own internal predicate that just applies operator==, we
// forward to the non-predicate form of std::equal. This enables an optimization
// in libstdc++ that can result in std::memcmp being used for integer types.
template <typename InputIter1, typename InputIter2>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, algorithm_internal::EqualTo /* unused */,
std::random_access_iterator_tag,
std::random_access_iterator_tag) {
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2);
}

template <typename It>
It RotateImpl(It first, It middle, It last, std::true_type) {
return std::rotate(first, middle, last);
}

template <typename It>
It RotateImpl(It first, It middle, It last, std::false_type) {
std::rotate(first, middle, last);
return std::next(first, std::distance(middle, last));
}

} // namespace algorithm_internal

// equal()
//
// Compares the equality of two ranges specified by pairs of iterators, using
// the given predicate, returning true iff for each corresponding iterator i1
// and i2 in the first and second range respectively, pred(*i1, *i2) == true
//
// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
// then the predicate is never invoked and the function returns false.
//
// This is a C++11-compatible implementation of C++14 `std::equal`. See
// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
template <typename InputIter1, typename InputIter2, typename Pred>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2, Pred&& pred) {
return algorithm_internal::EqualImpl(
first1, last1, first2, last2, std::forward<Pred>(pred),
typename std::iterator_traits<InputIter1>::iterator_category{},
typename std::iterator_traits<InputIter2>::iterator_category{});
}

// Overload of equal() that performs comparison of two ranges specified by pairs
// of iterators using operator==.
template <typename InputIter1, typename InputIter2>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2,
InputIter2 last2) {
return absl::equal(first1, last1, first2, last2,
algorithm_internal::EqualTo{});
}

// linear_search()
//
// Performs a linear search for `value` using the iterator `first` up to
// but not including `last`, returning true if [`first`, `last`) contains an
// element equal to `value`.
//
// A linear search is of O(n) complexity which is guaranteed to make at most
// n = (`last` - `first`) comparisons. A linear search over short containers
// may be faster than a binary search, even when the container is sorted.
template <typename InputIterator, typename EqualityComparable>
bool linear_search(InputIterator first, InputIterator last,
const EqualityComparable& value) {
return std::find(first, last, value) != last;
}

// rotate()
//
// Performs a left rotation on a range of elements (`first`, `last`) such that
// `middle` is now the first element. `rotate()` returns an iterator pointing to
// the first element before rotation. This function is exactly the same as
// `std::rotate`, but fixes a bug in gcc
// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
//
// The complexity of this algorithm is the same as that of `std::rotate`, but if
// `ForwardIterator` is not a random-access iterator, then `absl::rotate`
// performs an additional pass over the range to construct the return value.
template <typename ForwardIterator>
ForwardIterator rotate(ForwardIterator first, ForwardIterator middle,
ForwardIterator last) {
return algorithm_internal::RotateImpl(
first, middle, last,
std::is_same<decltype(std::rotate(first, middle, last)),
ForwardIterator>());
}

ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN

namespace algorithm_internal
{

// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`.
struct EqualTo
{
template<typename T, typename U>
bool operator()(const T& a, const U& b) const
{
return a == b;
}
};

template<typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, std::input_iterator_tag, std::input_iterator_tag)
{
while (true)
{
if (first1 == last1)
return first2 == last2;
if (first2 == last2)
return false;
if (!pred(*first1, *first2))
return false;
++first1;
++first2;
}
}

template<typename InputIter1, typename InputIter2, typename Pred>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, std::random_access_iterator_tag)
{
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2, std::forward<Pred>(pred));
}

// When we are using our own internal predicate that just applies operator==, we
// forward to the non-predicate form of std::equal. This enables an optimization
// in libstdc++ that can result in std::memcmp being used for integer types.
template<typename InputIter1, typename InputIter2>
bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, algorithm_internal::EqualTo /* unused */, std::random_access_iterator_tag, std::random_access_iterator_tag)
{
return (last1 - first1 == last2 - first2) &&
std::equal(first1, last1, first2);
}

template<typename It>
It RotateImpl(It first, It middle, It last, std::true_type)
{
return std::rotate(first, middle, last);
}

template<typename It>
It RotateImpl(It first, It middle, It last, std::false_type)
{
std::rotate(first, middle, last);
return std::next(first, std::distance(middle, last));
}

} // namespace algorithm_internal

// equal()
//
// Compares the equality of two ranges specified by pairs of iterators, using
// the given predicate, returning true iff for each corresponding iterator i1
// and i2 in the first and second range respectively, pred(*i1, *i2) == true
//
// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`)
// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are
// both random-access iterators, and `last1` - `first1` != `last2` - `first2`,
// then the predicate is never invoked and the function returns false.
//
// This is a C++11-compatible implementation of C++14 `std::equal`. See
// https://en.cppreference.com/w/cpp/algorithm/equal for more information.
template<typename InputIter1, typename InputIter2, typename Pred>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred)
{
return algorithm_internal::EqualImpl(
first1, last1, first2, last2, std::forward<Pred>(pred), typename std::iterator_traits<InputIter1>::iterator_category{}, typename std::iterator_traits<InputIter2>::iterator_category{}
);
}

// Overload of equal() that performs comparison of two ranges specified by pairs
// of iterators using operator==.
template<typename InputIter1, typename InputIter2>
bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2)
{
return absl::equal(first1, last1, first2, last2, algorithm_internal::EqualTo{});
}

// linear_search()
//
// Performs a linear search for `value` using the iterator `first` up to
// but not including `last`, returning true if [`first`, `last`) contains an
// element equal to `value`.
//
// A linear search is of O(n) complexity which is guaranteed to make at most
// n = (`last` - `first`) comparisons. A linear search over short containers
// may be faster than a binary search, even when the container is sorted.
template<typename InputIterator, typename EqualityComparable>
bool linear_search(InputIterator first, InputIterator last, const EqualityComparable& value)
{
return std::find(first, last, value) != last;
}

// rotate()
//
// Performs a left rotation on a range of elements (`first`, `last`) such that
// `middle` is now the first element. `rotate()` returns an iterator pointing to
// the first element before rotation. This function is exactly the same as
// `std::rotate`, but fixes a bug in gcc
// <= 4.9 where `std::rotate` returns `void` instead of an iterator.
//
// The complexity of this algorithm is the same as that of `std::rotate`, but if
// `ForwardIterator` is not a random-access iterator, then `absl::rotate`
// performs an additional pass over the range to construct the return value.
template<typename ForwardIterator>
ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last)
{
return algorithm_internal::RotateImpl(
first, middle, last, std::is_same<decltype(std::rotate(first, middle, last)), ForwardIterator>()
);
}

ABSL_NAMESPACE_END
}  // namespace absl


#endif  // ABSL_ALGORITHM_ALGORITHM_H_
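
For orientation, here is a minimal usage sketch of the three algorithms declared in this header; it is not part of the commit, and the container contents and printed values are illustrative only.

    #include <iostream>
    #include <list>
    #include <vector>
    #include "absl/algorithm/algorithm.h"

    int main() {
        std::vector<int> v = {1, 2, 3, 4};
        std::list<int> l = {1, 2, 3, 4};

        // absl::equal compares range lengths first when both iterators are
        // random-access; the list iterator here forces the element-wise path.
        std::cout << absl::equal(v.begin(), v.end(), l.begin(), l.end()) << "\n";  // 1

        // linear_search is a thin wrapper over std::find that returns a bool.
        std::cout << absl::linear_search(v.begin(), v.end(), 3) << "\n";  // 1

        // rotate returns an iterator to the element that was first before the
        // rotation; afterwards v == {3, 4, 1, 2}.
        auto it = absl::rotate(v.begin(), v.begin() + 2, v.end());
        std::cout << *it << "\n";  // 1
        return 0;
    }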

+ 1564
- 1715
CAPI/cpp/grpc/include/absl/algorithm/container.h
File diff suppressed because it is too large


+ 22
- 23
CAPI/cpp/grpc/include/absl/base/attributes.h

@@ -85,9 +85,9 @@
// should be counted from two, not one." // should be counted from two, not one."
#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) #if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__(__printf__, string_index, first_to_check)))
__attribute__((__format__(__printf__, string_index, first_to_check)))
#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__(__scanf__, string_index, first_to_check)))
__attribute__((__format__(__scanf__, string_index, first_to_check)))
#else #else
#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
@@ -122,7 +122,7 @@
#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) #elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
#define ABSL_ATTRIBUTE_NO_TAIL_CALL \ #define ABSL_ATTRIBUTE_NO_TAIL_CALL \
__attribute__((optimize("no-optimize-sibling-calls")))
__attribute__((optimize("no-optimize-sibling-calls")))
#else #else
#define ABSL_ATTRIBUTE_NO_TAIL_CALL #define ABSL_ATTRIBUTE_NO_TAIL_CALL
#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
@@ -136,9 +136,8 @@
// for further information. // for further information.
// The MinGW compiler doesn't complain about the weak attribute until the link // The MinGW compiler doesn't complain about the weak attribute until the link
// step, presumably because Windows doesn't use ELF binaries. // step, presumably because Windows doesn't use ELF binaries.
#if (ABSL_HAVE_ATTRIBUTE(weak) || \
(defined(__GNUC__) && !defined(__clang__))) && \
(!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
#if (ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))) && \
(!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
!defined(__MINGW32__) !defined(__MINGW32__)
#undef ABSL_ATTRIBUTE_WEAK #undef ABSL_ATTRIBUTE_WEAK
#define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
@@ -253,10 +252,10 @@
// https://gcc.gnu.org/gcc-4.9/changes.html // https://gcc.gnu.org/gcc-4.9/changes.html
#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) #if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined)
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
__attribute__((no_sanitize_undefined))
__attribute__((no_sanitize_undefined))
#elif ABSL_HAVE_ATTRIBUTE(no_sanitize) #elif ABSL_HAVE_ATTRIBUTE(no_sanitize)
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
__attribute__((no_sanitize("undefined")))
__attribute__((no_sanitize("undefined")))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
#endif #endif
@@ -277,7 +276,7 @@
// See https://clang.llvm.org/docs/SafeStack.html for details. // See https://clang.llvm.org/docs/SafeStack.html for details.
#if ABSL_HAVE_ATTRIBUTE(no_sanitize) #if ABSL_HAVE_ATTRIBUTE(no_sanitize)
#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
__attribute__((no_sanitize("safe-stack")))
__attribute__((no_sanitize("safe-stack")))
#else #else
#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
#endif #endif
@@ -297,8 +296,7 @@
// a prerequisite. Labeled sections are not supported on Darwin/iOS. // a prerequisite. Labeled sections are not supported on Darwin/iOS.
#ifdef ABSL_HAVE_ATTRIBUTE_SECTION #ifdef ABSL_HAVE_ATTRIBUTE_SECTION
#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set #error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
#elif (ABSL_HAVE_ATTRIBUTE(section) || \
(defined(__GNUC__) && !defined(__clang__))) && \
#elif (ABSL_HAVE_ATTRIBUTE(section) || (defined(__GNUC__) && !defined(__clang__))) && \
!defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
#define ABSL_HAVE_ATTRIBUTE_SECTION 1 #define ABSL_HAVE_ATTRIBUTE_SECTION 1


@@ -312,7 +310,7 @@
// //
#ifndef ABSL_ATTRIBUTE_SECTION #ifndef ABSL_ATTRIBUTE_SECTION
#define ABSL_ATTRIBUTE_SECTION(name) \ #define ABSL_ATTRIBUTE_SECTION(name) \
__attribute__((section(#name))) __attribute__((noinline))
__attribute__((section(#name))) __attribute__((noinline))
#endif #endif


// ABSL_ATTRIBUTE_SECTION_VARIABLE // ABSL_ATTRIBUTE_SECTION_VARIABLE
@@ -341,9 +339,9 @@
// a no-op on ELF but not on Mach-O. // a no-op on ELF but not on Mach-O.
// //
#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
#endif #endif
#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
@@ -359,9 +357,9 @@
// link. // link.
// //
#define ABSL_ATTRIBUTE_SECTION_START(name) \ #define ABSL_ATTRIBUTE_SECTION_START(name) \
(reinterpret_cast<void *>(__start_##name))
(reinterpret_cast<void*>(__start_##name))
#define ABSL_ATTRIBUTE_SECTION_STOP(name) \ #define ABSL_ATTRIBUTE_SECTION_STOP(name) \
(reinterpret_cast<void *>(__stop_##name))
(reinterpret_cast<void*>(__stop_##name))


#else // !ABSL_HAVE_ATTRIBUTE_SECTION #else // !ABSL_HAVE_ATTRIBUTE_SECTION


@@ -373,8 +371,8 @@
#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))


#endif // ABSL_ATTRIBUTE_SECTION #endif // ABSL_ATTRIBUTE_SECTION


@@ -385,7 +383,7 @@
(defined(__GNUC__) && !defined(__clang__)) (defined(__GNUC__) && !defined(__clang__))
#if defined(__i386__) #if defined(__i386__)
#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
__attribute__((force_align_arg_pointer))
__attribute__((force_align_arg_pointer))
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
#elif defined(__x86_64__) #elif defined(__x86_64__)
#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
@@ -505,7 +503,7 @@
#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
#define ABSL_XRAY_LOG_ARGS(N) \ #define ABSL_XRAY_LOG_ARGS(N) \
[[clang::xray_always_instrument, clang::xray_log_args(N)]]
[[clang::xray_always_instrument, clang::xray_log_args(N)]]
#else #else
#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
#endif #endif
@@ -639,8 +637,9 @@
#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] #define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
#else #else
#define ABSL_FALLTHROUGH_INTENDED \ #define ABSL_FALLTHROUGH_INTENDED \
do { \
} while (0)
do \
{ \
} while (0)
#endif


// ABSL_DEPRECATED()

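As context for the attribute macros whose definitions are reflowed above, a small compilable sketch of how they are typically applied; the function names (LogF, InitHook) are invented for the example and not taken from the commit.

    #include <cstdarg>
    #include <cstdio>
    #include "absl/base/attributes.h"

    // Ask the compiler to type-check callers' format strings like printf:
    // argument 1 is the format string, variadic checking starts at argument 2.
    void LogF(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);

    void LogF(const char* format, ...) {
        va_list args;
        va_start(args, format);
        std::vfprintf(stderr, format, args);
        va_end(args);
    }

    // Weak symbol: another translation unit may supply a strong definition that
    // replaces this one at link time (the macro expands to nothing on toolchains
    // without the attribute).
    ABSL_ATTRIBUTE_WEAK void InitHook() {}

    int main() {
        LogF("%s: %d\n", "answer", 42);
        InitHook();
        return 0;
    }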

+ 186
- 171
CAPI/cpp/grpc/include/absl/base/call_once.h

@@ -40,180 +40,195 @@
#include "absl/base/optimization.h" #include "absl/base/optimization.h"
#include "absl/base/port.h" #include "absl/base/port.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

class once_flag;

namespace base_internal {
std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
} // namespace base_internal

// call_once()
//
// For all invocations using a given `once_flag`, invokes a given `fn` exactly
// once across all threads. The first call to `call_once()` with a particular
// `once_flag` argument (that does not throw an exception) will run the
// specified function with the provided `args`; other calls with the same
// `once_flag` argument will not run the function, but will wait
// for the provided function to finish running (if it is still running).
//
// This mechanism provides a safe, simple, and fast mechanism for one-time
// initialization in a multi-threaded process.
//
// Example:
//
// class MyInitClass {
// public:
// ...
// mutable absl::once_flag once_;
//
// MyInitClass* init() const {
// absl::call_once(once_, &MyInitClass::Init, this);
// return ptr_;
// }
//
template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);

// once_flag
//
// Objects of this type are used to distinguish calls to `call_once()` and
// ensure the provided function is only invoked once across all threads. This
// type is not copyable or movable. However, it has a `constexpr`
// constructor, and is safe to use as a namespace-scoped global variable.
class once_flag {
public:
constexpr once_flag() : control_(0) {}
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;

private:
friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
std::atomic<uint32_t> control_;
};

//------------------------------------------------------------------------------
// End of public interfaces.
// Implementation details follow.
//------------------------------------------------------------------------------

namespace base_internal {

// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
// initialize entities used by the scheduler implementation.
template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);

// Disables scheduling while on stack when scheduling mode is non-cooperative.
// No effect for cooperative scheduling modes.
class SchedulingHelper {
public:
explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
}
}

~SchedulingHelper() {
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
}
}

private:
base_internal::SchedulingMode mode_;
bool guard_result_;
};

// Bit patterns for call_once state machine values. Internal implementation
// detail, not for use by clients.
//
// The bit patterns are arbitrarily chosen from unlikely values, to aid in
// debugging. However, kOnceInit must be 0, so that a zero-initialized
// once_flag will be valid for immediate use.
enum {
kOnceInit = 0,
kOnceRunning = 0x65C2937B,
kOnceWaiter = 0x05A308D2,
// A very small constant is chosen for kOnceDone so that it fit in a single
// compare with immediate instruction for most common ISAs. This is verified
// for x86, POWER and ARM.
kOnceDone = 221, // Random Number
};

template <typename Callable, typename... Args>
ABSL_ATTRIBUTE_NOINLINE
void CallOnceImpl(std::atomic<uint32_t>* control,
base_internal::SchedulingMode scheduling_mode, Callable&& fn,
Args&&... args) {
namespace absl
{
ABSL_NAMESPACE_BEGIN

class once_flag;

namespace base_internal
{
std::atomic<uint32_t>* ControlWord(absl::once_flag* flag);
} // namespace base_internal

// call_once()
//
// For all invocations using a given `once_flag`, invokes a given `fn` exactly
// once across all threads. The first call to `call_once()` with a particular
// `once_flag` argument (that does not throw an exception) will run the
// specified function with the provided `args`; other calls with the same
// `once_flag` argument will not run the function, but will wait
// for the provided function to finish running (if it is still running).
//
// This mechanism provides a safe, simple, and fast mechanism for one-time
// initialization in a multi-threaded process.
//
// Example:
//
// class MyInitClass {
// public:
// ...
// mutable absl::once_flag once_;
//
// MyInitClass* init() const {
// absl::call_once(once_, &MyInitClass::Init, this);
// return ptr_;
// }
//
template<typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);

// once_flag
//
// Objects of this type are used to distinguish calls to `call_once()` and
// ensure the provided function is only invoked once across all threads. This
// type is not copyable or movable. However, it has a `constexpr`
// constructor, and is safe to use as a namespace-scoped global variable.
class once_flag
{
public:
constexpr once_flag() :
control_(0)
{
}
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;

private:
friend std::atomic<uint32_t>* base_internal::ControlWord(once_flag* flag);
std::atomic<uint32_t> control_;
};

//------------------------------------------------------------------------------
// End of public interfaces.
// Implementation details follow.
//------------------------------------------------------------------------------

namespace base_internal
{

// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
// initialize entities used by the scheduler implementation.
template<typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args);

// Disables scheduling while on stack when scheduling mode is non-cooperative.
// No effect for cooperative scheduling modes.
class SchedulingHelper
{
public:
explicit SchedulingHelper(base_internal::SchedulingMode mode) :
mode_(mode)
{
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY)
{
guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
}
}

~SchedulingHelper()
{
if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY)
{
base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
}
}

private:
base_internal::SchedulingMode mode_;
bool guard_result_;
};

// Bit patterns for call_once state machine values. Internal implementation
// detail, not for use by clients.
//
// The bit patterns are arbitrarily chosen from unlikely values, to aid in
// debugging. However, kOnceInit must be 0, so that a zero-initialized
// once_flag will be valid for immediate use.
enum
{
kOnceInit = 0,
kOnceRunning = 0x65C2937B,
kOnceWaiter = 0x05A308D2,
// A very small constant is chosen for kOnceDone so that it fit in a single
// compare with immediate instruction for most common ISAs. This is verified
// for x86, POWER and ARM.
kOnceDone = 221, // Random Number
};

template<typename Callable, typename... Args>
ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(std::atomic<uint32_t>* control, base_internal::SchedulingMode scheduling_mode, Callable&& fn, Args&&... args)
{
#ifndef NDEBUG
{
uint32_t old_control = control->load(std::memory_order_relaxed);
if (old_control != kOnceInit &&
old_control != kOnceRunning &&
old_control != kOnceWaiter &&
old_control != kOnceDone) {
ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
static_cast<unsigned long>(old_control)); // NOLINT
}
}
{
uint32_t old_control = control->load(std::memory_order_relaxed);
if (old_control != kOnceInit &&
old_control != kOnceRunning &&
old_control != kOnceWaiter &&
old_control != kOnceDone)
{
ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
static_cast<unsigned long>(old_control)); // NOLINT
}
}
#endif  // NDEBUG
static const base_internal::SpinLockWaitTransition trans[] = {
{kOnceInit, kOnceRunning, true},
{kOnceRunning, kOnceWaiter, false},
{kOnceDone, kOnceDone, true}};

// Must do this before potentially modifying control word's state.
base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
// Short circuit the simplest case to avoid procedure call overhead.
// The base_internal::SpinLockWait() call returns either kOnceInit or
// kOnceDone. If it returns kOnceDone, it must have loaded the control word
// with std::memory_order_acquire and seen a value of kOnceDone.
uint32_t old_control = kOnceInit;
if (control->compare_exchange_strong(old_control, kOnceRunning,
std::memory_order_relaxed) ||
base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
scheduling_mode) == kOnceInit) {
base_internal::invoke(std::forward<Callable>(fn),
std::forward<Args>(args)...);
old_control =
control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter) {
base_internal::SpinLockWake(control, true);
static const base_internal::SpinLockWaitTransition trans[] = {
{kOnceInit, kOnceRunning, true},
{kOnceRunning, kOnceWaiter, false},
{kOnceDone, kOnceDone, true}};

// Must do this before potentially modifying control word's state.
base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
// Short circuit the simplest case to avoid procedure call overhead.
// The base_internal::SpinLockWait() call returns either kOnceInit or
// kOnceDone. If it returns kOnceDone, it must have loaded the control word
// with std::memory_order_acquire and seen a value of kOnceDone.
uint32_t old_control = kOnceInit;
if (control->compare_exchange_strong(old_control, kOnceRunning, std::memory_order_relaxed) ||
base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, scheduling_mode) == kOnceInit)
{
base_internal::invoke(std::forward<Callable>(fn), std::forward<Args>(args)...);
old_control =
control->exchange(base_internal::kOnceDone, std::memory_order_release);
if (old_control == base_internal::kOnceWaiter)
{
base_internal::SpinLockWake(control, true);
}
} // else *control is already kOnceDone
}

inline std::atomic<uint32_t>* ControlWord(once_flag* flag)
{
return &flag->control_;
}

template<typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args)
{
std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone))
{
base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, std::forward<Callable>(fn), std::forward<Args>(args)...);
}
}

} // namespace base_internal

template<typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args)
{
std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone))
{
base_internal::CallOnceImpl(
once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, std::forward<Callable>(fn), std::forward<Args>(args)...
);
}
} }
} // else *control is already kOnceDone
}

inline std::atomic<uint32_t>* ControlWord(once_flag* flag) {
return &flag->control_;
}

template <typename Callable, typename... Args>
void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) {
std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
std::forward<Callable>(fn),
std::forward<Args>(args)...);
}
}

} // namespace base_internal

template <typename Callable, typename... Args>
void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
uint32_t s = once->load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
base_internal::CallOnceImpl(
once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
std::forward<Callable>(fn), std::forward<Args>(args)...);
}
}

ABSL_NAMESPACE_END

ABSL_NAMESPACE_END
}  // namespace absl


#endif  // ABSL_BASE_CALL_ONCE_H_
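
A compilable sketch of the lazy-initialization pattern this header documents; the GetValue/InitValue names and the int target are illustrative, not taken from the commit.

    #include <iostream>
    #include "absl/base/call_once.h"

    namespace {

    absl::once_flag init_once;   // zero-initialized, i.e. kOnceInit: ready to use
    int* lazy_value = nullptr;

    void InitValue() {
        // Runs exactly once across all threads; concurrent callers of GetValue()
        // block until it has finished.
        lazy_value = new int(42);
    }

    int GetValue() {
        absl::call_once(init_once, InitValue);
        return *lazy_value;
    }

    }  // namespace

    int main() {
        std::cout << GetValue() << "\n";  // 42
        return 0;
    }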

+ 75
- 76
CAPI/cpp/grpc/include/absl/base/casts.h

@@ -31,68 +31,70 @@


#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#include <bit>  // For std::bit_cast.
#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L


#include "absl/base/internal/identity.h" #include "absl/base/internal/identity.h"
#include "absl/base/macros.h" #include "absl/base/macros.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


// implicit_cast()
//
// Performs an implicit conversion between types following the language
// rules for implicit conversion; if an implicit conversion is otherwise
// allowed by the language in the given context, this function performs such an
// implicit conversion.
//
// Example:
//
// // If the context allows implicit conversion:
// From from;
// To to = from;
//
// // Such code can be replaced by:
// implicit_cast<To>(from);
//
// An `implicit_cast()` may also be used to annotate numeric type conversions
// that, although safe, may produce compiler warnings (such as `long` to `int`).
// Additionally, an `implicit_cast()` is also useful within return statements to
// indicate a specific implicit conversion is being undertaken.
//
// Example:
//
// return implicit_cast<double>(size_in_bytes) / capacity_;
//
// Annotating code with `implicit_cast()` allows you to explicitly select
// particular overloads and template instantiations, while providing a safer
// cast than `reinterpret_cast()` or `static_cast()`.
//
// Additionally, an `implicit_cast()` can be used to allow upcasting within a
// type hierarchy where incorrect use of `static_cast()` could accidentally
// allow downcasting.
//
// Finally, an `implicit_cast()` can be used to perform implicit conversions
// from unrelated types that otherwise couldn't be implicitly cast directly;
// C++ will normally only implicitly cast "one step" in such conversions.
//
// That is, if C is a type which can be implicitly converted to B, with B being
// a type that can be implicitly converted to A, an `implicit_cast()` can be
// used to convert C to B (which the compiler can then implicitly convert to A
// using language rules).
//
// Example:
//
// // Assume an object C is convertible to B, which is implicitly convertible
// // to A
// A a = implicit_cast<B>(C);
//
// Such implicit cast chaining may be useful within template logic.
template <typename To>
constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
return to;
}
// implicit_cast()
//
// Performs an implicit conversion between types following the language
// rules for implicit conversion; if an implicit conversion is otherwise
// allowed by the language in the given context, this function performs such an
// implicit conversion.
//
// Example:
//
// // If the context allows implicit conversion:
// From from;
// To to = from;
//
// // Such code can be replaced by:
// implicit_cast<To>(from);
//
// An `implicit_cast()` may also be used to annotate numeric type conversions
// that, although safe, may produce compiler warnings (such as `long` to `int`).
// Additionally, an `implicit_cast()` is also useful within return statements to
// indicate a specific implicit conversion is being undertaken.
//
// Example:
//
// return implicit_cast<double>(size_in_bytes) / capacity_;
//
// Annotating code with `implicit_cast()` allows you to explicitly select
// particular overloads and template instantiations, while providing a safer
// cast than `reinterpret_cast()` or `static_cast()`.
//
// Additionally, an `implicit_cast()` can be used to allow upcasting within a
// type hierarchy where incorrect use of `static_cast()` could accidentally
// allow downcasting.
//
// Finally, an `implicit_cast()` can be used to perform implicit conversions
// from unrelated types that otherwise couldn't be implicitly cast directly;
// C++ will normally only implicitly cast "one step" in such conversions.
//
// That is, if C is a type which can be implicitly converted to B, with B being
// a type that can be implicitly converted to A, an `implicit_cast()` can be
// used to convert C to B (which the compiler can then implicitly convert to A
// using language rules).
//
// Example:
//
// // Assume an object C is convertible to B, which is implicitly convertible
// // to A
// A a = implicit_cast<B>(C);
//
// Such implicit cast chaining may be useful within template logic.
template<typename To>
constexpr To implicit_cast(typename absl::internal::identity_t<To> to)
{
return to;
}


// bit_cast()
//
@@ -145,36 +147,33 @@ constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
// `std::bit_cast`.
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L


using std::bit_cast;
using std::bit_cast;


#else  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L


template <typename Dest, typename Source,
typename std::enable_if<
sizeof(Dest) == sizeof(Source) &&
type_traits_internal::is_trivially_copyable<Source>::value &&
type_traits_internal::is_trivially_copyable<Dest>::value
template<typename Dest, typename Source, typename std::enable_if<sizeof(Dest) == sizeof(Source) && type_traits_internal::is_trivially_copyable<Source>::value && type_traits_internal::is_trivially_copyable<Dest>::value
#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
&& std::is_default_constructible<Dest>::value
&& std::is_default_constructible<Dest>::value
#endif  // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
,
int>::type = 0>
,
int>::type = 0>
#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline constexpr Dest bit_cast(const Source& source) {
return __builtin_bit_cast(Dest, source);
}
#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline Dest bit_cast(const Source& source) {
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)),
static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
inline constexpr Dest bit_cast(const Source& source)
{
return __builtin_bit_cast(Dest, source);
}
#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline Dest bit_cast(const Source& source)
{
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)), static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
#endif  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)


#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
}  // namespace absl


#endif  // ABSL_BASE_CASTS_H_
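
A short sketch of the two casts this header provides, with illustrative values (not taken from the commit); it assumes a platform where float and uint32_t are both 4 bytes, as bit_cast requires.

    #include <cstdint>
    #include <iostream>
    #include "absl/base/casts.h"

    int main() {
        // implicit_cast documents a conversion the language would do anyway,
        // e.g. a safe narrowing that might otherwise draw a compiler warning.
        std::int64_t big = 7;
        int small = absl::implicit_cast<int>(big);

        // bit_cast reinterprets the object representation of one trivially
        // copyable type as another type of the same size.
        float f = 1.0f;
        std::uint32_t bits = absl::bit_cast<std::uint32_t>(f);

        std::cout << small << " " << std::hex << bits << "\n";  // 7 3f800000
        return 0;
    }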

+ 18
- 23
CAPI/cpp/grpc/include/absl/base/config.h

@@ -151,18 +151,12 @@
#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1


#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ #define ABSL_INTERNAL_INLINE_NAMESPACE_STR \
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME)
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME)


static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0',
"options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"not be empty.");
static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' ||
ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0',
"options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"be changed to a new, unique identifier name.");
static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"not be empty.");
static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
"be changed to a new, unique identifier name.");


#endif #endif


@@ -171,14 +165,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_NAMESPACE_END #define ABSL_NAMESPACE_END
#define ABSL_INTERNAL_C_SYMBOL(x) x #define ABSL_INTERNAL_C_SYMBOL(x) x
#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
#define ABSL_NAMESPACE_BEGIN \
inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
#define ABSL_NAMESPACE_BEGIN \
inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME \
{
#define ABSL_NAMESPACE_END } #define ABSL_NAMESPACE_END }
#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v #define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ #define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
#define ABSL_INTERNAL_C_SYMBOL(x) \ #define ABSL_INTERNAL_C_SYMBOL(x) \
ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
#else #else
#error options.h is misconfigured. #error options.h is misconfigured.
#endif #endif
@@ -212,14 +207,14 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
#if defined(__GNUC__) && defined(__GNUC_MINOR__) #if defined(__GNUC__) && defined(__GNUC_MINOR__)
#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \
(__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
(__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
#else #else
#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0
#endif #endif


#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) #if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__)
#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \
(__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
(__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
#else #else
#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
#endif #endif
@@ -336,8 +331,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#ifdef ABSL_HAVE_INTRINSIC_INT128 #ifdef ABSL_HAVE_INTRINSIC_INT128
#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set #error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
#elif defined(__SIZEOF_INT128__) #elif defined(__SIZEOF_INT128__)
#if (defined(__clang__) && !defined(_WIN32)) || \
(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
#if (defined(__clang__) && !defined(_WIN32)) || \
(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
(defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
#define ABSL_HAVE_INTRINSIC_INT128 1 #define ABSL_HAVE_INTRINSIC_INT128 1
#elif defined(__CUDACC__) #elif defined(__CUDACC__)
@@ -511,8 +506,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." #error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
#endif #endif


#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define ABSL_IS_LITTLE_ENDIAN 1 #define ABSL_IS_LITTLE_ENDIAN 1
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -721,8 +715,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_INTERNAL_MANGLED_NS "absl" #define ABSL_INTERNAL_MANGLED_NS "absl"
#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5"
#else #else
#define ABSL_INTERNAL_MANGLED_NS \
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl"
#define ABSL_INTERNAL_MANGLED_NS \
ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) \
"@absl"
#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6"
#endif #endif
#endif #endif
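
For context on the ABSL_NAMESPACE_BEGIN/END macros adjusted above: every Abseil header, including the ones reformatted in this commit, wraps its declarations in them. A schematic use, with MyThing as a placeholder name:

    #include "absl/base/config.h"

    namespace absl {
    ABSL_NAMESPACE_BEGIN   // opens an inline namespace (name set in options.h),
                           // or expands to nothing when that option is disabled
    struct MyThing {};
    ABSL_NAMESPACE_END
    }  // namespace absl

    int main() {
        // Callers never spell the inline namespace; the plain name resolves to it.
        absl::MyThing thing;
        (void)thing;
        return 0;
    }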


+ 8
- 6
CAPI/cpp/grpc/include/absl/base/const_init.h

@@ -63,14 +63,16 @@
// The absl::kConstInit tag should only be used to define objects with static
// or thread_local storage duration.


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


enum ConstInitType {
kConstInit,
};
enum ConstInitType
{
kConstInit,
};


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
}  // namespace absl


#endif  // ABSL_BASE_CONST_INIT_H_
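
A brief sketch of the kConstInit tag in use with its canonical consumer, absl::Mutex; this assumes the bundled Abseil also ships absl/synchronization/mutex.h (not among the files listed above), and g_config_mu is a made-up name.

    #include "absl/base/attributes.h"
    #include "absl/base/const_init.h"
    #include "absl/synchronization/mutex.h"

    // Constant-initialized at compile time, so it is safe to use from other
    // globals' initializers regardless of initialization order.
    ABSL_CONST_INIT absl::Mutex g_config_mu(absl::kConstInit);

    int main() {
        g_config_mu.Lock();
        g_config_mu.Unlock();
        return 0;
    }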

+ 132
- 116
CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h

@@ -90,12 +90,14 @@


// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. // Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
ABSL_INTERNAL_ANNOTALYSIS_ENABLED
ABSL_INTERNAL_ANNOTALYSIS_ENABLED


#endif // ABSL_HAVE_THREAD_SANITIZER #endif // ABSL_HAVE_THREAD_SANITIZER


#ifdef __cplusplus #ifdef __cplusplus
#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
#define ABSL_INTERNAL_BEGIN_EXTERN_C \
extern "C" \
{
#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" #define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F #define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
#define ABSL_INTERNAL_STATIC_INLINE inline #define ABSL_INTERNAL_STATIC_INLINE inline
@@ -123,29 +125,30 @@
// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
// point where `pointer` has been allocated, preferably close to the point // point where `pointer` has been allocated, preferably close to the point
// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. // where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)


// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to // Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
// the memory range [`address`, `address`+`size`). // the memory range [`address`, `address`+`size`).
#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, address, size, description)
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, address, size, description)


// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. // Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
// This annotation could be useful if you want to skip expensive race analysis // This annotation could be useful if you want to skip expensive race analysis
// during some period of program execution, e.g. during initialization. // during some period of program execution, e.g. during initialization.
#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
(__FILE__, __LINE__, enable)
#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
(__FILE__, __LINE__, enable)


// ------------------------------------------------------------- // -------------------------------------------------------------
// Annotations useful for debugging. // Annotations useful for debugging.


// Report the current thread `name` to a race detector. // Report the current thread `name` to a race detector.
#define ABSL_ANNOTATE_THREAD_NAME(name) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
#define ABSL_ANNOTATE_THREAD_NAME(name) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \
(__FILE__, __LINE__, name)


// ------------------------------------------------------------- // -------------------------------------------------------------
// Annotations useful when implementing locks. They are not normally needed by // Annotations useful when implementing locks. They are not normally needed by
@@ -153,66 +156,62 @@
// object. // object.


// Report that a lock has been created at address `lock`. // Report that a lock has been created at address `lock`.
#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \
(__FILE__, __LINE__, lock)


// Report that a linker initialized lock has been created at address `lock`.
#ifdef ABSL_HAVE_THREAD_SANITIZER
#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock)
#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock)
#else
#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_ANNOTATE_RWLOCK_CREATE(lock)
ABSL_ANNOTATE_RWLOCK_CREATE(lock)
#endif


// Report that the lock at address `lock` is about to be destroyed.
#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \
(__FILE__, __LINE__, lock)


// Report that the lock at address `lock` has been acquired.
// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
(__FILE__, __LINE__, lock, is_w)
#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
(__FILE__, __LINE__, lock, is_w)


// Report that the lock at address `lock` is about to be released.
// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
(__FILE__, __LINE__, lock, is_w)
#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
(__FILE__, __LINE__, lock, is_w)
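For illustration, a toy wrapper (entirely hypothetical, not from this header) showing where the lock annotations above are usually placed:

#include <mutex>
#include "absl/base/dynamic_annotations.h"

class AnnotatedMutex {  // hypothetical wrapper around a plain std::mutex
 public:
  AnnotatedMutex() { ABSL_ANNOTATE_RWLOCK_CREATE(this); }
  ~AnnotatedMutex() { ABSL_ANNOTATE_RWLOCK_DESTROY(this); }
  void Lock() {
    mu_.lock();
    ABSL_ANNOTATE_RWLOCK_ACQUIRED(this, 1);  // 1 = writer (exclusive) lock
  }
  void Unlock() {
    ABSL_ANNOTATE_RWLOCK_RELEASED(this, 1);  // annotate before actually releasing
    mu_.unlock();
  }
 private:
  std::mutex mu_;
};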


// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var##_annotator { \
public: \
static_var##_annotator() { \
ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
#static_var ": " description); \
} \
}; \
static static_var##_annotator the##static_var##_annotator; \
} // namespace
#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace \
{ \
class static_var##_annotator \
{ \
public: \
static_var##_annotator() \
{ \
ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \
} \
}; \
static static_var##_annotator the##static_var##_annotator; \
} // namespace
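Usage is a single line at namespace scope in a .cc file; the flag here is hypothetical:

#include "absl/base/dynamic_annotations.h"

// Hypothetical flag that several threads may set without synchronization.
static bool g_feature_probed = false;
ABSL_ANNOTATE_BENIGN_RACE_STATIC(g_feature_probed, "idempotent feature probe");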


// Function prototypes of annotations provided by the compiler-based sanitizer
// implementation.
ABSL_INTERNAL_BEGIN_EXTERN_C
void AnnotateRWLockCreate(const char* file, int line,
const volatile void* lock);
void AnnotateRWLockCreateStatic(const char* file, int line,
const volatile void* lock);
void AnnotateRWLockDestroy(const char* file, int line,
const volatile void* lock);
void AnnotateRWLockAcquired(const char* file, int line,
const volatile void* lock, long is_w); // NOLINT
void AnnotateRWLockReleased(const char* file, int line,
const volatile void* lock, long is_w); // NOLINT
void AnnotateBenignRace(const char* file, int line,
const volatile void* address, const char* description);
void AnnotateBenignRaceSized(const char* file, int line,
const volatile void* address, size_t size,
const char* description);
void AnnotateRWLockCreate(const char* file, int line, const volatile void* lock);
void AnnotateRWLockCreateStatic(const char* file, int line, const volatile void* lock);
void AnnotateRWLockDestroy(const char* file, int line, const volatile void* lock);
void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w); // NOLINT
void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w); // NOLINT
void AnnotateBenignRace(const char* file, int line, const volatile void* address, const char* description);
void AnnotateBenignRaceSized(const char* file, int line, const volatile void* address, size_t size, const char* description);
void AnnotateThreadName(const char* file, int line, const char* name);
void AnnotateEnableRaceDetection(const char* file, int line, int enable);
ABSL_INTERNAL_END_EXTERN_C
@@ -240,25 +239,27 @@ ABSL_INTERNAL_END_EXTERN_C
#include <sanitizer/msan_interface.h>


#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
__msan_unpoison(address, size)
__msan_unpoison(address, size)


#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
__msan_allocated_memory(address, size)
__msan_allocated_memory(address, size)
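Under MemorySanitizer these map to __msan_unpoison / __msan_allocated_memory; a sketch of the common pattern, with a hypothetical fill routine MSan cannot see into:

#include <cstddef>
#include "absl/base/dynamic_annotations.h"

size_t FillFromDevice(void* dst, size_t len);  // hypothetical: fills dst outside MSan's view

void ReadSample() {
  char buf[256];
  size_t n = FillFromDevice(buf, sizeof(buf));
  // Tell MSan the first n bytes now hold defined data.
  ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf, n);
}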


#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)


// TODO(rogeeff): remove this branch
#ifdef ABSL_HAVE_THREAD_SANITIZER
#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
do \
{ \
(void)(address); \
(void)(size); \
} while (0)
#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
do \
{ \
(void)(address); \
(void)(size); \
} while (0)
#else


#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
@@ -274,9 +275,9 @@ ABSL_INTERNAL_END_EXTERN_C
#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)


#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
__attribute((exclusive_lock_function("*")))
__attribute((exclusive_lock_function("*")))
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
__attribute((unlock_function("*")))
__attribute((unlock_function("*")))


#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)


@@ -297,22 +298,21 @@ ABSL_INTERNAL_END_EXTERN_C
// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
// reads, while still checking other reads and all writes.
// See also ABSL_ANNOTATE_UNPROTECTED_READ.
#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
(__FILE__, __LINE__)
#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
(__FILE__, __LINE__)


// Stop ignoring reads.
#define ABSL_ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
(__FILE__, __LINE__)
#define ABSL_ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
(__FILE__, __LINE__)


// Function prototypes of annotations provided by the compiler-based sanitizer
// implementation.
ABSL_INTERNAL_BEGIN_EXTERN_C
void AnnotateIgnoreReadsBegin(const char* file, int line)
ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE;
void AnnotateIgnoreReadsEnd(const char* file,
int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
void AnnotateIgnoreReadsEnd(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
ABSL_INTERNAL_END_EXTERN_C


#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
@@ -324,23 +324,31 @@ ABSL_INTERNAL_END_EXTERN_C
// TODO(delesley) -- The exclusive lock here ignores writes as well, but
// allows IGNORE_READS_AND_WRITES to work properly.


#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED( \
ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
()
#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED( \
ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin) \
) \
()


#define ABSL_ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED( \
ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
()
#define ABSL_ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED( \
ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd) \
) \
()


ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
AbslInternalAnnotateIgnoreReadsBegin)()
ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
AbslInternalAnnotateIgnoreReadsBegin
)()
ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE
{
}


ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
AbslInternalAnnotateIgnoreReadsEnd)()
ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
AbslInternalAnnotateIgnoreReadsEnd
)()
ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE
{
}


#else


@@ -355,12 +363,14 @@ ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1


// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \
(__FILE__, __LINE__)


// Stop ignoring writes.
#define ABSL_ANNOTATE_IGNORE_WRITES_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
#define ABSL_ANNOTATE_IGNORE_WRITES_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \
(__FILE__, __LINE__)


// Function prototypes of annotations provided by the compiler-based sanitizer
// implementation.
@@ -391,37 +401,42 @@ ABSL_INTERNAL_END_EXTERN_C


// Start ignoring all memory accesses (both reads and writes).
#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \
ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)
do \
{ \
ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \
ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)


// Stop ignoring both reads and writes.
#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ABSL_ANNOTATE_IGNORE_WRITES_END(); \
ABSL_ANNOTATE_IGNORE_READS_END(); \
} while (0)
do \
{ \
ABSL_ANNOTATE_IGNORE_WRITES_END(); \
ABSL_ANNOTATE_IGNORE_READS_END(); \
} while (0)
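A sketch of bracketing an intentionally racy region with the composite begin/end macros (the snapshot function is hypothetical):

#include "absl/base/dynamic_annotations.h"

void CopyStatsUnlocked();  // hypothetical: intentionally racy snapshot copy

void SampleStats() {
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
  CopyStatsUnlocked();  // accesses in here are not reported by the race detector
  ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
}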


#ifdef __cplusplus
// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \
absl::base_internal::AnnotateUnprotectedRead(x)

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

template <typename T>
inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT
ABSL_ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ABSL_ANNOTATE_IGNORE_READS_END();
return res;
}

} // namespace base_internal
ABSL_NAMESPACE_END
absl::base_internal::AnnotateUnprotectedRead(x)

namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

template<typename T>
inline T AnnotateUnprotectedRead(const volatile T& x)
{ // NOLINT
ABSL_ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
ABSL_ANNOTATE_IGNORE_READS_END();
return res;
}

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif
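A minimal sketch of the helper above (the flag is hypothetical): the annotated read is excluded from race reports while writes to the same variable stay checked.

#include "absl/base/dynamic_annotations.h"

// Hypothetical flag written by another thread without synchronization.
extern bool g_done;

bool ShouldStop() {
  return ABSL_ANNOTATE_UNPROTECTED_READ(g_done);
}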


@@ -443,11 +458,12 @@ ABSL_NAMESPACE_END
#include <sanitizer/common_interface_defs.h>


#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
__sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
__sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
struct { \
alignas(8) char x[8]; \
} name
struct \
{ \
alignas(8) char x[8]; \
} name
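A sketch of how a container-like type might use these two macros (all names are hypothetical): the redzone member gives ASan bytes to poison, and the container annotation marks which part of the backing array is currently live.

#include <cstddef>
#include "absl/base/dynamic_annotations.h"

// Hypothetical fixed-capacity buffer padded with an ASan redzone member.
struct TinyBuf {
  char storage[64];
  ABSL_ADDRESS_SANITIZER_REDZONE(redzone);
};

// Inform ASan that [beg + old_size, beg + new_size) is now in use.
void OnResize(char* beg, char* end, std::size_t old_size, std::size_t new_size) {
  ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, beg + old_size, beg + new_size);
}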


#else




+161 -136  CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h

@@ -35,12 +35,14 @@
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


template <typename T>
class AtomicHook;
template<typename T>
class AtomicHook;


// To workaround AtomicHook not being constant-initializable on some platforms,
// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
@@ -51,150 +53,173 @@ class AtomicHook;
#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
#endif


// `AtomicHook` is a helper class, templatized on a raw function pointer type,
// for implementing Abseil customization hooks. It is a callable object that
// dispatches to the registered hook. Objects of type `AtomicHook` must have
// static or thread storage duration.
//
// A default constructed object performs a no-op (and returns a default
// constructed object) if no hook has been registered.
//
// Hooks can be pre-registered via constant initialization, for example:
//
// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
// my_hook(DefaultAction);
//
// and then changed at runtime via a call to `Store()`.
//
// Reads and writes guarantee memory_order_acquire/memory_order_release
// semantics.
template <typename ReturnType, typename... Args>
class AtomicHook<ReturnType (*)(Args...)> {
public:
using FnPtr = ReturnType (*)(Args...);

// Constructs an object that by default performs a no-op (and
// returns a default constructed object) when no hook as been registered.
constexpr AtomicHook() : AtomicHook(DummyFunction) {}

// Constructs an object that by default dispatches to/returns the
// pre-registered default_fn when no hook has been registered at runtime.
// `AtomicHook` is a helper class, templatized on a raw function pointer type,
// for implementing Abseil customization hooks. It is a callable object that
// dispatches to the registered hook. Objects of type `AtomicHook` must have
// static or thread storage duration.
//
// A default constructed object performs a no-op (and returns a default
// constructed object) if no hook has been registered.
//
// Hooks can be pre-registered via constant initialization, for example:
//
// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
// my_hook(DefaultAction);
//
// and then changed at runtime via a call to `Store()`.
//
// Reads and writes guarantee memory_order_acquire/memory_order_release
// semantics.
template<typename ReturnType, typename... Args>
class AtomicHook<ReturnType (*)(Args...)>
{
public:
using FnPtr = ReturnType (*)(Args...);

// Constructs an object that by default performs a no-op (and
// returns a default constructed object) when no hook as been registered.
constexpr AtomicHook() :
AtomicHook(DummyFunction)
{
}

// Constructs an object that by default dispatches to/returns the
// pre-registered default_fn when no hook has been registered at runtime.
#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(default_fn), default_fn_(default_fn) {}
explicit constexpr AtomicHook(FnPtr default_fn) :
hook_(default_fn),
default_fn_(default_fn)
{
}
#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(kUninitialized), default_fn_(default_fn) {}
explicit constexpr AtomicHook(FnPtr default_fn) :
hook_(kUninitialized),
default_fn_(default_fn)
{
}
#else
// As of January 2020, on all known versions of MSVC this constructor runs in
// the global constructor sequence. If `Store()` is called by a dynamic
// initializer, we want to preserve the value, even if this constructor runs
// after the call to `Store()`. If not, `hook_` will be
// zero-initialized by the linker and we have no need to set it.
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
explicit constexpr AtomicHook(FnPtr default_fn)
: /* hook_(deliberately omitted), */ default_fn_(default_fn) {
static_assert(kUninitialized == 0, "here we rely on zero-initialization");
}
// As of January 2020, on all known versions of MSVC this constructor runs in
// the global constructor sequence. If `Store()` is called by a dynamic
// initializer, we want to preserve the value, even if this constructor runs
// after the call to `Store()`. If not, `hook_` will be
// zero-initialized by the linker and we have no need to set it.
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
explicit constexpr AtomicHook(FnPtr default_fn) :
/* hook_(deliberately omitted), */ default_fn_(default_fn)
{
static_assert(kUninitialized == 0, "here we rely on zero-initialization");
}
#endif


// Stores the provided function pointer as the value for this hook.
//
// This is intended to be called once. Multiple calls are legal only if the
// same function pointer is provided for each call. The store is implemented
// as a memory_order_release operation, and read accesses are implemented as
// memory_order_acquire.
void Store(FnPtr fn) {
bool success = DoStore(fn);
static_cast<void>(success);
assert(success);
}

// Invokes the registered callback. If no callback has yet been registered, a
// default-constructed object of the appropriate type is returned instead.
template <typename... CallArgs>
ReturnType operator()(CallArgs&&... args) const {
return DoLoad()(std::forward<CallArgs>(args)...);
}

// Returns the registered callback, or nullptr if none has been registered.
// Useful if client code needs to conditionalize behavior based on whether a
// callback was registered.
//
// Note that atomic_hook.Load()() and atomic_hook() have different semantics:
// operator()() will perform a no-op if no callback was registered, while
// Load()() will dereference a null function pointer. Prefer operator()() to
// Load()() unless you must conditionalize behavior on whether a hook was
// registered.
FnPtr Load() const {
FnPtr ptr = DoLoad();
return (ptr == DummyFunction) ? nullptr : ptr;
}

private:
static ReturnType DummyFunction(Args...) {
return ReturnType();
}

// Current versions of MSVC (as of September 2017) have a broken
// implementation of std::atomic<T*>: Its constructor attempts to do the
// equivalent of a reinterpret_cast in a constexpr context, which is not
// allowed.
//
// This causes an issue when building with LLVM under Windows. To avoid this,
// we use a less-efficient, intptr_t-based implementation on Windows.
// Stores the provided function pointer as the value for this hook.
//
// This is intended to be called once. Multiple calls are legal only if the
// same function pointer is provided for each call. The store is implemented
// as a memory_order_release operation, and read accesses are implemented as
// memory_order_acquire.
void Store(FnPtr fn)
{
bool success = DoStore(fn);
static_cast<void>(success);
assert(success);
}

// Invokes the registered callback. If no callback has yet been registered, a
// default-constructed object of the appropriate type is returned instead.
template<typename... CallArgs>
ReturnType operator()(CallArgs&&... args) const
{
return DoLoad()(std::forward<CallArgs>(args)...);
}

// Returns the registered callback, or nullptr if none has been registered.
// Useful if client code needs to conditionalize behavior based on whether a
// callback was registered.
//
// Note that atomic_hook.Load()() and atomic_hook() have different semantics:
// operator()() will perform a no-op if no callback was registered, while
// Load()() will dereference a null function pointer. Prefer operator()() to
// Load()() unless you must conditionalize behavior on whether a hook was
// registered.
FnPtr Load() const
{
FnPtr ptr = DoLoad();
return (ptr == DummyFunction) ? nullptr : ptr;
}

private:
static ReturnType DummyFunction(Args...)
{
return ReturnType();
}

// Current versions of MSVC (as of September 2017) have a broken
// implementation of std::atomic<T*>: Its constructor attempts to do the
// equivalent of a reinterpret_cast in a constexpr context, which is not
// allowed.
//
// This causes an issue when building with LLVM under Windows. To avoid this,
// we use a less-efficient, intptr_t-based implementation on Windows.
#if ABSL_HAVE_WORKING_ATOMIC_POINTER
// Return the stored value, or DummyFunction if no value has been stored.
FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }

// Store the given value. Returns false if a different value was already
// stored to this object.
bool DoStore(FnPtr fn) {
assert(fn);
FnPtr expected = default_fn_;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == fn);
return store_succeeded || same_value_already_stored;
}

std::atomic<FnPtr> hook_;
// Return the stored value, or DummyFunction if no value has been stored.
FnPtr DoLoad() const
{
return hook_.load(std::memory_order_acquire);
}

// Store the given value. Returns false if a different value was already
// stored to this object.
bool DoStore(FnPtr fn)
{
assert(fn);
FnPtr expected = default_fn_;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, fn, std::memory_order_acq_rel, std::memory_order_acquire
);
const bool same_value_already_stored = (expected == fn);
return store_succeeded || same_value_already_stored;
}

std::atomic<FnPtr> hook_;
#else // !ABSL_HAVE_WORKING_ATOMIC_POINTER
// Use a sentinel value unlikely to be the address of an actual function.
static constexpr intptr_t kUninitialized = 0;

static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
"intptr_t can't contain a function pointer");

FnPtr DoLoad() const {
const intptr_t value = hook_.load(std::memory_order_acquire);
if (value == kUninitialized) {
return default_fn_;
}
return reinterpret_cast<FnPtr>(value);
}

bool DoStore(FnPtr fn) {
assert(fn);
const auto value = reinterpret_cast<intptr_t>(fn);
intptr_t expected = kUninitialized;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == value);
return store_succeeded || same_value_already_stored;
}

std::atomic<intptr_t> hook_;
// Use a sentinel value unlikely to be the address of an actual function.
static constexpr intptr_t kUninitialized = 0;

static_assert(sizeof(intptr_t) >= sizeof(FnPtr), "intptr_t can't contain a function pointer");

FnPtr DoLoad() const
{
const intptr_t value = hook_.load(std::memory_order_acquire);
if (value == kUninitialized)
{
return default_fn_;
}
return reinterpret_cast<FnPtr>(value);
}

bool DoStore(FnPtr fn)
{
assert(fn);
const auto value = reinterpret_cast<intptr_t>(fn);
intptr_t expected = kUninitialized;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, value, std::memory_order_acq_rel, std::memory_order_acquire
);
const bool same_value_already_stored = (expected == value);
return store_succeeded || same_value_already_stored;
}

std::atomic<intptr_t> hook_;
#endif


const FnPtr default_fn_;
};
const FnPtr default_fn_;
};


#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
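A rough sketch of how AtomicHook is typically used: pre-registered with a default via constant initialization, then overridden at runtime with Store(). The hook name and handlers below are hypothetical, and this is an internal Abseil API.

#include <cstdio>
#include "absl/base/internal/atomic_hook.h"

namespace {
void DefaultReport(const char*) {}  // no-op default
void VerboseReport(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static
    absl::base_internal::AtomicHook<void (*)(const char*)> report_hook(DefaultReport);
}  // namespace

void EnableVerboseReports() { report_hook.Store(VerboseReport); }
void Report(const char* msg) { report_hook(msg); }  // dispatches to whichever handler is registered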

+12 -10  CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h

@@ -17,18 +17,20 @@


#include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/atomic_hook.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace atomic_hook_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace atomic_hook_internal
{


using VoidF = void (*)();
extern absl::base_internal::AtomicHook<VoidF> func;
extern int default_func_calls;
void DefaultFunc();
void RegisterFunc(VoidF func);
using VoidF = void (*)();
extern absl::base_internal::AtomicHook<VoidF> func;
extern int default_func_calls;
void DefaultFunc();
void RegisterFunc(VoidF func);


} // namespace atomic_hook_internal
ABSL_NAMESPACE_END
} // namespace atomic_hook_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_

+89 -80  CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h

@@ -49,111 +49,120 @@
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/base/internal/unscaledcycleclock.h" #include "absl/base/internal/unscaledcycleclock.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

using CycleClockSourceFunc = int64_t (*)();

// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
class CycleClock {
public:
// CycleClock::Now()
//
// Returns the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();

// CycleClock::Frequency()
//
// Returns the amount by which `CycleClock::Now()` increases per second. Note
// that this value may not necessarily match the core CPU clock frequency.
static double Frequency();

private:
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

using CycleClockSourceFunc = int64_t (*)();

// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
class CycleClock
{
public:
// CycleClock::Now()
//
// Returns the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();

// CycleClock::Frequency()
//
// Returns the amount by which `CycleClock::Now()` increases per second. Note
// that this value may not necessarily match the core CPU clock frequency.
static double Frequency();

private:
#if ABSL_USE_UNSCALED_CYCLECLOCK
static CycleClockSourceFunc LoadCycleClockSource();
static CycleClockSourceFunc LoadCycleClockSource();


#ifdef NDEBUG
#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
// Not debug mode and the UnscaledCycleClock frequency is the CPU
// frequency. Scale the CycleClock to prevent overflow if someone
// tries to represent the time as cycles since the Unix epoch.
static constexpr int32_t kShift = 1;
// Not debug mode and the UnscaledCycleClock frequency is the CPU
// frequency. Scale the CycleClock to prevent overflow if someone
// tries to represent the time as cycles since the Unix epoch.
static constexpr int32_t kShift = 1;
#else
// Not debug mode and the UnscaledCycleClock isn't operating at the
// raw CPU frequency. There is no need to do any scaling, so don't
// needlessly sacrifice precision.
static constexpr int32_t kShift = 0;
// Not debug mode and the UnscaledCycleClock isn't operating at the
// raw CPU frequency. There is no need to do any scaling, so don't
// needlessly sacrifice precision.
static constexpr int32_t kShift = 0;
#endif
#else // NDEBUG
// In debug mode use a different shift to discourage depending on a
// particular shift value.
static constexpr int32_t kShift = 2;
// In debug mode use a different shift to discourage depending on a
// particular shift value.
static constexpr int32_t kShift = 2;
#endif // NDEBUG


static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
#endif // ABSL_USE_UNSCALED_CYCLECLOC


CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;

friend class CycleClockSource;
};

class CycleClockSource {
private:
// CycleClockSource::Register()
//
// Register a function that provides an alternate source for the unscaled CPU
// cycle count value. The source function must be async signal safe, must not
// call CycleClock::Now(), and must have a frequency that matches that of the
// unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
// the default source.
static void Register(CycleClockSourceFunc source);
};
CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;

friend class CycleClockSource;
};

class CycleClockSource
{
private:
// CycleClockSource::Register()
//
// Register a function that provides an alternate source for the unscaled CPU
// cycle count value. The source function must be async signal safe, must not
// call CycleClock::Now(), and must have a frequency that matches that of the
// unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
// the default source.
static void Register(CycleClockSourceFunc source);
};


#if ABSL_USE_UNSCALED_CYCLECLOCK


inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
inline CycleClockSourceFunc CycleClock::LoadCycleClockSource()
{
#if !defined(__x86_64__)
// Optimize for the common case (no callback) by first doing a relaxed load;
// this is significantly faster on non-x86 platforms.
if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
return nullptr;
}
// Optimize for the common case (no callback) by first doing a relaxed load;
// this is significantly faster on non-x86 platforms.
if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr)
{
return nullptr;
}
#endif // !defined(__x86_64__)


// This corresponds to the store(std::memory_order_release) in
// CycleClockSource::Register, and makes sure that any updates made prior to
// registering the callback are visible to this thread before the callback
// is invoked.
return cycle_clock_source_.load(std::memory_order_acquire);
}
// This corresponds to the store(std::memory_order_release) in
// CycleClockSource::Register, and makes sure that any updates made prior to
// registering the callback are visible to this thread before the callback
// is invoked.
return cycle_clock_source_.load(std::memory_order_acquire);
}


// Accessing globals in inlined code in Window DLLs is problematic.
#ifndef _WIN32
inline int64_t CycleClock::Now() {
auto fn = LoadCycleClockSource();
if (fn == nullptr) {
return base_internal::UnscaledCycleClock::Now() >> kShift;
}
return fn() >> kShift;
}
inline int64_t CycleClock::Now()
{
auto fn = LoadCycleClockSource();
if (fn == nullptr)
{
return base_internal::UnscaledCycleClock::Now() >> kShift;
}
return fn() >> kShift;
}
#endif


inline double CycleClock::Frequency() {
return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
}
inline double CycleClock::Frequency()
{
return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
}


#endif // ABSL_USE_UNSCALED_CYCLECLOCK


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
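A sketch of timing a region with this internal clock (the workload function is hypothetical); Frequency() converts the tick delta back to seconds.

#include <cstdint>
#include "absl/base/internal/cycleclock.h"

void DoWork();  // hypothetical workload

double MeasureSeconds() {
  const int64_t start = absl::base_internal::CycleClock::Now();
  DoWork();
  const int64_t ticks = absl::base_internal::CycleClock::Now() - start;
  // Frequency() is ticks per second, so the ratio is elapsed wall time in seconds.
  return static_cast<double>(ticks) / absl::base_internal::CycleClock::Frequency();
}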

+65 -57  CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h

@@ -65,14 +65,16 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
#define SYS_mmap2 __NR_mmap2
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// Platform specific logic extracted from
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off64_t offset) noexcept {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

// Platform specific logic extracted from
// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off64_t offset) noexcept
{
#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
defined(__m68k__) || defined(__sh__) || \
(defined(__hppa__) && !defined(__LP64__)) || \
@@ -81,37 +83,39 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
(defined(__riscv) && __riscv_xlen == 32) || \
(defined(__s390__) && !defined(__s390x__)) || \
(defined(__sparc__) && !defined(__arch64__))
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0) {
// On these architectures, implement mmap with mmap2.
static int pagesize = 0;
if (pagesize == 0)
{
#if defined(__wasm__) || defined(__asmjs__) #if defined(__wasm__) || defined(__asmjs__)
pagesize = getpagesize();
pagesize = getpagesize();
#else #else
pagesize = sysconf(_SC_PAGESIZE);
pagesize = sysconf(_SC_PAGESIZE);
#endif #endif
}
if (offset < 0 || offset % pagesize != 0) {
errno = EINVAL;
return MAP_FAILED;
}
}
if (offset < 0 || offset % pagesize != 0)
{
errno = EINVAL;
return MAP_FAILED;
}
#ifdef __BIONIC__
// SYS_mmap2 has problems on Android API level <= 16.
// Workaround by invoking __mmap2() instead.
return __mmap2(start, length, prot, flags, fd, offset / pagesize);
// SYS_mmap2 has problems on Android API level <= 16.
// Workaround by invoking __mmap2() instead.
return __mmap2(start, length, prot, flags, fd, offset / pagesize);
#else
return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd,
static_cast<off_t>(offset / pagesize)));
return reinterpret_cast<void*>(
syscall(SYS_mmap2, start, length, prot, flags, fd, static_cast<off_t>(offset / pagesize))
);
#endif
#elif defined(__s390x__)
// On s390x, mmap() arguments are passed in memory.
unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
static_cast<unsigned long>(length), // NOLINT
static_cast<unsigned long>(prot), // NOLINT
static_cast<unsigned long>(flags), // NOLINT
static_cast<unsigned long>(fd), // NOLINT
static_cast<unsigned long>(offset)}; // NOLINT
return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
// On s390x, mmap() arguments are passed in memory.
unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
static_cast<unsigned long>(length), // NOLINT
static_cast<unsigned long>(prot), // NOLINT
static_cast<unsigned long>(flags), // NOLINT
static_cast<unsigned long>(fd), // NOLINT
static_cast<unsigned long>(offset)}; // NOLINT
return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
#elif defined(__x86_64__)
// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
@@ -120,24 +124,25 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
// to an integer of a different size. We also need to make sure __off64_t
// isn't truncated to 32-bits under x32.
#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
return reinterpret_cast<void*>(
syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
return reinterpret_cast<void*>(
syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset))
);
#undef MMAP_SYSCALL_ARG
#else // Remaining 64-bit aritectures.
static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
return reinterpret_cast<void*>(
syscall(SYS_mmap, start, length, prot, flags, fd, offset));
static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
return reinterpret_cast<void*>(
syscall(SYS_mmap, start, length, prot, flags, fd, offset)
);
#endif
}
}


inline int DirectMunmap(void* start, size_t length) {
return static_cast<int>(syscall(SYS_munmap, start, length));
}
inline int DirectMunmap(void* start, size_t length)
{
return static_cast<int>(syscall(SYS_munmap, start, length));
}


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#else // !__linux__
@@ -145,21 +150,24 @@ ABSL_NAMESPACE_END
// For non-linux platforms where we have mmap, just dispatch directly to the
// actual mmap()/munmap() methods.


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return mmap(start, length, prot, flags, fd, offset);
}
inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off_t offset)
{
return mmap(start, length, prot, flags, fd, offset);
}


inline int DirectMunmap(void* start, size_t length) {
return munmap(start, length);
}
inline int DirectMunmap(void* start, size_t length)
{
return munmap(start, length);
}


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // __linux__
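A sketch of mapping and releasing one anonymous page through these wrappers (internal API; the 4096-byte page size and POSIX flags here are assumptions for illustration):

#include <sys/mman.h>
#include "absl/base/internal/direct_mmap.h"

void* MapOnePage() {
  void* p = absl::base_internal::DirectMmap(
      nullptr, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;
}

void UnmapOnePage(void* p) {
  if (p != nullptr) absl::base_internal::DirectMunmap(p, 4096);
}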


+93 -75  CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h

@@ -82,10 +82,10 @@


// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
ABSL_INTERNAL_ANNOTALYSIS_ENABLED
ABSL_INTERNAL_ANNOTALYSIS_ENABLED
#endif


// Memory annotations are also made available to LLVM's Memory Sanitizer
@@ -98,7 +98,9 @@
#endif


#ifdef __cplusplus
#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
#define ABSL_INTERNAL_BEGIN_EXTERN_C \
extern "C" \
{
#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
#define ABSL_INTERNAL_STATIC_INLINE inline
@@ -123,29 +125,30 @@
// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
// point where `pointer` has been allocated, preferably close to the point // point where `pointer` has been allocated, preferably close to the point
// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. // where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
#define ANNOTATE_BENIGN_RACE(pointer, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
#define ANNOTATE_BENIGN_RACE(pointer, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)


// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
// the memory range [`address`, `address`+`size`).
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, address, size, description)
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
(__FILE__, __LINE__, address, size, description)


// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
// This annotation could be useful if you want to skip expensive race analysis
// during some period of program execution, e.g. during initialization.
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
(__FILE__, __LINE__, enable)
#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
(__FILE__, __LINE__, enable)


// -------------------------------------------------------------
// Annotations useful for debugging.


// Report the current thread `name` to a race detector.
#define ANNOTATE_THREAD_NAME(name) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
#define ANNOTATE_THREAD_NAME(name) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \
(__FILE__, __LINE__, name)


// -------------------------------------------------------------
// Annotations useful when implementing locks. They are not normally needed by
@@ -153,46 +156,50 @@
// object.


// Report that a lock has been created at address `lock`.
#define ANNOTATE_RWLOCK_CREATE(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
#define ANNOTATE_RWLOCK_CREATE(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \
(__FILE__, __LINE__, lock)


// Report that a linker initialized lock has been created at address `lock`.
#ifdef ABSL_HAVE_THREAD_SANITIZER
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock)
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
(__FILE__, __LINE__, lock)
#else
#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
#endif


// Report that the lock at address `lock` is about to be destroyed.
#define ANNOTATE_RWLOCK_DESTROY(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
#define ANNOTATE_RWLOCK_DESTROY(lock) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \
(__FILE__, __LINE__, lock)


// Report that the lock at address `lock` has been acquired.
// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
(__FILE__, __LINE__, lock, is_w)
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
(__FILE__, __LINE__, lock, is_w)


// Report that the lock at address `lock` is about to be released.
// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
(__FILE__, __LINE__, lock, is_w)
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
(__FILE__, __LINE__, lock, is_w)


// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var##_annotator { \
public: \
static_var##_annotator() { \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
#static_var ": " description); \
} \
}; \
static static_var##_annotator the##static_var##_annotator; \
} // namespace
#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace \
{ \
class static_var##_annotator \
{ \
public: \
static_var##_annotator() \
{ \
ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \
} \
}; \
static static_var##_annotator the##static_var##_annotator; \
} // namespace


#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0


@@ -217,24 +224,26 @@
#include <sanitizer/msan_interface.h>


#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
__msan_unpoison(address, size)
__msan_unpoison(address, size)


#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
__msan_allocated_memory(address, size)
__msan_allocated_memory(address, size)


#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0


#if DYNAMIC_ANNOTATIONS_ENABLED == 1
#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
do \
{ \
(void)(address); \
(void)(size); \
} while (0)
#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
do \
{ \
(void)(address); \
(void)(size); \
} while (0)
#else
#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
@@ -248,9 +257,9 @@
#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)


#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
__attribute((exclusive_lock_function("*")))
__attribute((exclusive_lock_function("*")))
#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
__attribute((unlock_function("*")))
__attribute((unlock_function("*")))


#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)


@@ -268,12 +277,14 @@
// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
// reads, while still checking other reads and all writes.
// See also ANNOTATE_UNPROTECTED_READ.
#define ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
(__FILE__, __LINE__)


// Stop ignoring reads.
#define ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
(__FILE__, __LINE__)


#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)


@@ -284,11 +295,13 @@
// TODO(delesley) -- The exclusive lock here ignores writes as well, but
// allows IGNORE_READS_AND_WRITES to work properly.


#define ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
#define ANNOTATE_IGNORE_READS_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin) \
()


#define ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
#define ANNOTATE_IGNORE_READS_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd) \
()


#else


@@ -303,12 +316,14 @@
#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1


// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \
(__FILE__, __LINE__)


// Stop ignoring writes.
#define ANNOTATE_IGNORE_WRITES_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
#define ANNOTATE_IGNORE_WRITES_END() \
ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \
(__FILE__, __LINE__)


#else


@@ -332,22 +347,24 @@


// Start ignoring all memory accesses (both reads and writes).
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do { \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)
do \
{ \
ANNOTATE_IGNORE_READS_BEGIN(); \
ANNOTATE_IGNORE_WRITES_BEGIN(); \
} while (0)


// Stop ignoring both reads and writes.
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do { \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
} while (0)
do \
{ \
ANNOTATE_IGNORE_WRITES_END(); \
ANNOTATE_IGNORE_READS_END(); \
} while (0)


#ifdef __cplusplus
// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
#define ANNOTATE_UNPROTECTED_READ(x) \
absl::base_internal::AnnotateUnprotectedRead(x)
absl::base_internal::AnnotateUnprotectedRead(x)


#endif


@@ -369,11 +386,12 @@
#include <sanitizer/common_interface_defs.h>


#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
__sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
#define ADDRESS_SANITIZER_REDZONE(name) \
struct { \
char x[8] __attribute__((aligned(8))); \
} name
__sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
#define ADDRESS_SANITIZER_REDZONE(name) \
struct \
{ \
char x[8] __attribute__((aligned(8))); \
} name


#else




+398 -208  CAPI/cpp/grpc/include/absl/base/internal/endian.h

@@ -24,66 +24,77 @@
#include "absl/base/internal/unaligned_access.h" #include "absl/base/internal/unaligned_access.h"
#include "absl/base/port.h" #include "absl/base/port.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


inline uint64_t gbswap_64(uint64_t host_int) {
inline uint64_t gbswap_64(uint64_t host_int)
{
#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
return __builtin_bswap64(host_int);
return __builtin_bswap64(host_int);
#elif defined(_MSC_VER)
return _byteswap_uint64(host_int);
return _byteswap_uint64(host_int);
#else
return (((host_int & uint64_t{0xFF}) << 56) |
((host_int & uint64_t{0xFF00}) << 40) |
((host_int & uint64_t{0xFF0000}) << 24) |
((host_int & uint64_t{0xFF000000}) << 8) |
((host_int & uint64_t{0xFF00000000}) >> 8) |
((host_int & uint64_t{0xFF0000000000}) >> 24) |
((host_int & uint64_t{0xFF000000000000}) >> 40) |
((host_int & uint64_t{0xFF00000000000000}) >> 56));
return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | ((host_int & uint64_t{0xFF0000}) << 24) | ((host_int & uint64_t{0xFF000000}) << 8) | ((host_int & uint64_t{0xFF00000000}) >> 8) | ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif
}
}


inline uint32_t gbswap_32(uint32_t host_int) {
inline uint32_t gbswap_32(uint32_t host_int)
{
#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
return __builtin_bswap32(host_int);
return __builtin_bswap32(host_int);
#elif defined(_MSC_VER)
return _byteswap_ulong(host_int);
return _byteswap_ulong(host_int);
#else
return (((host_int & uint32_t{0xFF}) << 24) |
((host_int & uint32_t{0xFF00}) << 8) |
((host_int & uint32_t{0xFF0000}) >> 8) |
((host_int & uint32_t{0xFF000000}) >> 24));
return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | ((host_int & uint32_t{0xFF0000}) >> 8) | ((host_int & uint32_t{0xFF000000}) >> 24));
#endif
}
}


inline uint16_t gbswap_16(uint16_t host_int) {
inline uint16_t gbswap_16(uint16_t host_int)
{
#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
return __builtin_bswap16(host_int);
return __builtin_bswap16(host_int);
#elif defined(_MSC_VER)
return _byteswap_ushort(host_int);
return _byteswap_ushort(host_int);
#else
return (((host_int & uint16_t{0xFF}) << 8) |
((host_int & uint16_t{0xFF00}) >> 8));
return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
}


#ifdef ABSL_IS_LITTLE_ENDIAN


// Portable definitions for htonl (host-to-network) and friends on little-endian
// architectures.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
// Portable definitions for htonl (host-to-network) and friends on little-endian
// architectures.
inline uint16_t ghtons(uint16_t x)
{
return gbswap_16(x);
}
inline uint32_t ghtonl(uint32_t x)
{
return gbswap_32(x);
}
inline uint64_t ghtonll(uint64_t x)
{
return gbswap_64(x);
}


#elif defined ABSL_IS_BIG_ENDIAN


// Portable definitions for htonl (host-to-network) etc on big-endian
// architectures. These definitions are simpler since the host byte order is the
// same as network byte order.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }
// Portable definitions for htonl (host-to-network) etc on big-endian
// architectures. These definitions are simpler since the host byte order is the
// same as network byte order.
inline uint16_t ghtons(uint16_t x)
{
return x;
}
inline uint32_t ghtonl(uint32_t x)
{
return x;
}
inline uint64_t ghtonll(uint64_t x)
{
return x;
}


#else
#error \
@@ -91,192 +102,371 @@ inline uint64_t ghtonll(uint64_t x) { return x; }
"ABSL_IS_LITTLE_ENDIAN must be defined" "ABSL_IS_LITTLE_ENDIAN must be defined"
#endif // byte order #endif // byte order


inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }

// Utilities to convert numbers between the current hosts's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian {
inline uint16_t gntohs(uint16_t x)
{
return ghtons(x);
}
inline uint32_t gntohl(uint32_t x)
{
return ghtonl(x);
}
inline uint64_t gntohll(uint64_t x)
{
return ghtonll(x);
}

// Utilities to convert numbers between the current hosts's native byte
// order and little-endian byte order
//
// Load/Store methods are alignment safe
namespace little_endian
{
// Conversion functions.
#ifdef ABSL_IS_LITTLE_ENDIAN


inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return true; }
inline uint16_t FromHost16(uint16_t x)
{
return x;
}
inline uint16_t ToHost16(uint16_t x)
{
return x;
}

inline uint32_t FromHost32(uint32_t x)
{
return x;
}
inline uint32_t ToHost32(uint32_t x)
{
return x;
}

inline uint64_t FromHost64(uint64_t x)
{
return x;
}
inline uint64_t ToHost64(uint64_t x)
{
return x;
}

inline constexpr bool IsLittleEndian()
{
return true;
}


#elif defined ABSL_IS_BIG_ENDIAN


inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return false; }
inline uint16_t FromHost16(uint16_t x)
{
return gbswap_16(x);
}
inline uint16_t ToHost16(uint16_t x)
{
return gbswap_16(x);
}

inline uint32_t FromHost32(uint32_t x)
{
return gbswap_32(x);
}
inline uint32_t ToHost32(uint32_t x)
{
return gbswap_32(x);
}

inline uint64_t FromHost64(uint64_t x)
{
return gbswap_64(x);
}
inline uint64_t ToHost64(uint64_t x)
{
return gbswap_64(x);
}

inline constexpr bool IsLittleEndian()
{
return false;
}


#endif /* ENDIAN */


inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }

inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}

// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

} // namespace little_endian

// Utilities to convert numbers between the current hosts's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian {
inline uint8_t FromHost(uint8_t x)
{
return x;
}
inline uint16_t FromHost(uint16_t x)
{
return FromHost16(x);
}
inline uint32_t FromHost(uint32_t x)
{
return FromHost32(x);
}
inline uint64_t FromHost(uint64_t x)
{
return FromHost64(x);
}
inline uint8_t ToHost(uint8_t x)
{
return x;
}
inline uint16_t ToHost(uint16_t x)
{
return ToHost16(x);
}
inline uint32_t ToHost(uint32_t x)
{
return ToHost32(x);
}
inline uint64_t ToHost(uint64_t x)
{
return ToHost64(x);
}

inline int8_t FromHost(int8_t x)
{
return x;
}
inline int16_t FromHost(int16_t x)
{
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x)
{
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x)
{
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x)
{
return x;
}
inline int16_t ToHost(int16_t x)
{
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x)
{
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x)
{
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}

// Functions to do unaligned loads and stores in little-endian order.
inline uint16_t Load16(const void* p)
{
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void* p, uint16_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void* p)
{
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void* p, uint32_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void* p)
{
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void* p, uint64_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

} // namespace little_endian

// Utilities to convert numbers between the current hosts's native byte
// order and big-endian byte order (same as network byte order)
//
// Load/Store methods are alignment safe
namespace big_endian
{
#ifdef ABSL_IS_LITTLE_ENDIAN


inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }

inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }

inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }

inline constexpr bool IsLittleEndian() { return true; }
inline uint16_t FromHost16(uint16_t x)
{
return gbswap_16(x);
}
inline uint16_t ToHost16(uint16_t x)
{
return gbswap_16(x);
}

inline uint32_t FromHost32(uint32_t x)
{
return gbswap_32(x);
}
inline uint32_t ToHost32(uint32_t x)
{
return gbswap_32(x);
}

inline uint64_t FromHost64(uint64_t x)
{
return gbswap_64(x);
}
inline uint64_t ToHost64(uint64_t x)
{
return gbswap_64(x);
}

inline constexpr bool IsLittleEndian()
{
return true;
}


#elif defined ABSL_IS_BIG_ENDIAN


inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }

inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }

inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }

inline constexpr bool IsLittleEndian() { return false; }
inline uint16_t FromHost16(uint16_t x)
{
return x;
}
inline uint16_t ToHost16(uint16_t x)
{
return x;
}

inline uint32_t FromHost32(uint32_t x)
{
return x;
}
inline uint32_t ToHost32(uint32_t x)
{
return x;
}

inline uint64_t FromHost64(uint64_t x)
{
return x;
}
inline uint64_t ToHost64(uint64_t x)
{
return x;
}

inline constexpr bool IsLittleEndian()
{
return false;
}


#endif /* ENDIAN */


inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }

inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}

// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void *p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void *p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void *p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void *p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void *p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void *p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

} // namespace big_endian

ABSL_NAMESPACE_END
inline uint8_t FromHost(uint8_t x)
{
return x;
}
inline uint16_t FromHost(uint16_t x)
{
return FromHost16(x);
}
inline uint32_t FromHost(uint32_t x)
{
return FromHost32(x);
}
inline uint64_t FromHost(uint64_t x)
{
return FromHost64(x);
}
inline uint8_t ToHost(uint8_t x)
{
return x;
}
inline uint16_t ToHost(uint16_t x)
{
return ToHost16(x);
}
inline uint32_t ToHost(uint32_t x)
{
return ToHost32(x);
}
inline uint64_t ToHost(uint64_t x)
{
return ToHost64(x);
}

inline int8_t FromHost(int8_t x)
{
return x;
}
inline int16_t FromHost(int16_t x)
{
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x)
{
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x)
{
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x)
{
return x;
}
inline int16_t ToHost(int16_t x)
{
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x)
{
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x)
{
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}

// Functions to do unaligned loads and stores in big-endian order.
inline uint16_t Load16(const void* p)
{
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}

inline void Store16(void* p, uint16_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}

inline uint32_t Load32(const void* p)
{
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}

inline void Store32(void* p, uint32_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}

inline uint64_t Load64(const void* p)
{
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}

inline void Store64(void* p, uint64_t v)
{
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}

} // namespace big_endian

ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
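
A brief usage sketch of the little_endian Load/Store helpers whose formatting changes above (the buffer and constant are illustrative):

#include <cstdint>
#include "absl/base/internal/endian.h"

// Round-trip a 32-bit value through a byte buffer in little-endian order.
inline uint32_t RoundTrip32() {
  char buf[4];
  absl::little_endian::Store32(buf, 0x12345678u);  // alignment-safe store
  return absl::little_endian::Load32(buf);         // 0x12345678 on any host
}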

+ 31
- 19
CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h

@@ -19,25 +19,37 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// `ErrnoSaver` captures the value of `errno` upon construction and restores it
// upon deletion. It is used in low-level code and must be super fast. Do not
// add instrumentation, even in debug modes.
class ErrnoSaver {
public:
ErrnoSaver() : saved_errno_(errno) {}
~ErrnoSaver() { errno = saved_errno_; }
int operator()() const { return saved_errno_; }

private:
const int saved_errno_;
};

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

// `ErrnoSaver` captures the value of `errno` upon construction and restores it
// upon deletion. It is used in low-level code and must be super fast. Do not
// add instrumentation, even in debug modes.
class ErrnoSaver
{
public:
ErrnoSaver() :
saved_errno_(errno)
{
}
~ErrnoSaver()
{
errno = saved_errno_;
}
int operator()() const
{
return saved_errno_;
}

private:
const int saved_errno_;
};

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
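
For reference, a small sketch of how ErrnoSaver is typically used (the surrounding function is hypothetical):

#include <cerrno>
#include "absl/base/internal/errno_saver.h"

void LowLevelHelper() {
  absl::base_internal::ErrnoSaver saver;  // captures errno on construction
  // ... calls here may clobber errno ...
}  // destructor restores the saved errno for the caller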

+ 1286
- 1050
CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h
File diff suppressed because it is too large


+ 3
- 3
CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h

@@ -26,16 +26,16 @@
#ifdef ABSL_HAVE_EXCEPTIONS


#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_THROW(expr, exception_t)
EXPECT_THROW(expr, exception_t)


#elif defined(__ANDROID__)
// Android asserts do not log anywhere that gtest can currently inspect.
// So we expect exit, but cannot match the message.
#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_DEATH(expr, ".*")
EXPECT_DEATH(expr, ".*")
#else
#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
EXPECT_DEATH_IF_SUPPORTED(expr, text)
EXPECT_DEATH_IF_SUPPORTED(expr, text)


#endif




+ 24
- 20
CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h

@@ -19,32 +19,36 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

template <typename Type>
struct FastTypeTag {
constexpr static char dummy_var = 0;
};
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

template<typename Type>
struct FastTypeTag
{
constexpr static char dummy_var = 0;
};


#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename Type>
constexpr char FastTypeTag<Type>::dummy_var;
template<typename Type>
constexpr char FastTypeTag<Type>::dummy_var;
#endif


// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
// passed-in type. These are meant to be good match for keys into maps or
// straight up comparisons.
using FastTypeIdType = const void*;
// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
// passed-in type. These are meant to be good match for keys into maps or
// straight up comparisons.
using FastTypeIdType = const void*;


template <typename Type>
constexpr inline FastTypeIdType FastTypeId() {
return &FastTypeTag<Type>::dummy_var;
}
template<typename Type>
constexpr inline FastTypeIdType FastTypeId()
{
return &FastTypeTag<Type>::dummy_var;
}


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
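
A minimal sketch of FastTypeId in use, matching the declarations above (the demo function is illustrative):

#include "absl/base/internal/fast_type_id.h"

// Each distinct type yields a distinct pointer-valued id, usable as a cheap map key.
inline bool FastTypeIdDemo() {
  auto a = absl::base_internal::FastTypeId<int>();
  auto b = absl::base_internal::FastTypeId<int>();
  auto c = absl::base_internal::FastTypeId<double>();
  return a == b && a != c;  // true
}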

+ 32
- 27
CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h

@@ -19,33 +19,38 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// Arbitrary value with high bits set. Xor'ing with it is unlikely
// to map one valid pointer to another valid pointer.
constexpr uintptr_t HideMask() {
return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
}

// Hide a pointer from the leak checker. For internal use only.
// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
// and all objects reachable from ptr to be ignored by the leak checker.
template <class T>
inline uintptr_t HidePtr(T* ptr) {
return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
}

// Return a pointer that has been hidden from the leak checker.
// For internal use only.
template <class T>
inline T* UnhidePtr(uintptr_t hidden) {
return reinterpret_cast<T*>(hidden ^ HideMask());
}

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

// Arbitrary value with high bits set. Xor'ing with it is unlikely
// to map one valid pointer to another valid pointer.
constexpr uintptr_t HideMask()
{
return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
}

// Hide a pointer from the leak checker. For internal use only.
// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
// and all objects reachable from ptr to be ignored by the leak checker.
template<class T>
inline uintptr_t HidePtr(T* ptr)
{
return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
}

// Return a pointer that has been hidden from the leak checker.
// For internal use only.
template<class T>
inline T* UnhidePtr(uintptr_t hidden)
{
return reinterpret_cast<T*>(hidden ^ HideMask());
}

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
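
A usage sketch of HidePtr/UnhidePtr as declared above (Node is a hypothetical type):

#include <cstdint>
#include "absl/base/internal/hide_ptr.h"

struct Node { int value; };  // hypothetical

inline uintptr_t Stash(Node* n) {
  return absl::base_internal::HidePtr(n);  // leak checker cannot follow this value
}
inline Node* Unstash(uintptr_t hidden) {
  return absl::base_internal::UnhidePtr<Node>(hidden);
}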

+ 14
- 11
CAPI/cpp/grpc/include/absl/base/internal/identity.h

@@ -18,20 +18,23 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace internal
{


template <typename T>
struct identity {
typedef T type;
};
template<typename T>
struct identity
{
typedef T type;
};


template <typename T>
using identity_t = typename identity<T>::type;
template<typename T>
using identity_t = typename identity<T>::type;


} // namespace internal
ABSL_NAMESPACE_END
} // namespace internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_IDENTITY_H_
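
For context, a tiny sketch of what identity/identity_t provide (the alias name is illustrative):

#include <type_traits>
#include "absl/base/internal/identity.h"

// identity_t<T> is simply T, spelled through a dependent type, which keeps a
// declaration's type exactly as written (handy for pointer/reference types).
using FnPtr = absl::internal::identity_t<void (*)()>;
static_assert(std::is_same<FnPtr, void (*)()>::value, "identity_t is transparent");
static_assert(std::is_same<absl::internal::identity_t<int&>, int&>::value, "");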

+ 18
- 18
CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h

@@ -68,15 +68,15 @@
// types, etc.. // types, etc..
#if defined(__clang__) #if defined(__clang__)
#define ABSL_INTERNAL_EXTERN_DECL(type, name) \ #define ABSL_INTERNAL_EXTERN_DECL(type, name) \
extern const ::absl::internal::identity_t<type> name;
extern const ::absl::internal::identity_t<type> name;
#else // Otherwise, just define the macro to do nothing.
#define ABSL_INTERNAL_EXTERN_DECL(type, name)
#endif // defined(__clang__)


// See above comment at top of file for details.
#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
ABSL_INTERNAL_EXTERN_DECL(type, name) \
inline constexpr ::absl::internal::identity_t<type> name = init
ABSL_INTERNAL_EXTERN_DECL(type, name) \
inline constexpr ::absl::internal::identity_t<type> name = init


#else


@@ -86,21 +86,21 @@
// identity_t is used here so that the const and name are in the
// appropriate place for pointer types, reference types, function pointer
// types, etc..
#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
template <class /*AbslInternalDummy*/ = void> \
struct AbslInternalInlineVariableHolder##name { \
static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
}; \
\
template <class AbslInternalDummy> \
constexpr ::absl::internal::identity_t<var_type> \
AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
\
static constexpr const ::absl::internal::identity_t<var_type>& \
name = /* NOLINT */ \
AbslInternalInlineVariableHolder##name<>::kInstance; \
static_assert(sizeof(void (*)(decltype(name))) != 0, \
"Silence unused variable warnings.")
#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \
template<class /*AbslInternalDummy*/ = void> \
struct AbslInternalInlineVariableHolder##name \
{ \
static constexpr ::absl::internal::identity_t<var_type> kInstance = init; \
}; \
\
template<class AbslInternalDummy> \
constexpr ::absl::internal::identity_t<var_type> \
AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance; \
\
static constexpr const ::absl::internal::identity_t<var_type>& \
name = /* NOLINT */ \
AbslInternalInlineVariableHolder##name<>::kInstance; \
static_assert(sizeof(void (*)(decltype(name))) != 0, "Silence unused variable warnings.")


#endif // __cpp_inline_variables
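
A short usage sketch of the macro defined above (constant names are illustrative; the testing header further below shows the same pattern):

#include "absl/base/internal/inline_variable.h"

// Declares ODR-safe constants that work with or without C++17 inline variables.
ABSL_INTERNAL_INLINE_CONSTEXPR(int, kAnswer, 42);
ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), kNoopHandler, nullptr);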




+ 20
- 17
CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h

@@ -17,30 +17,33 @@


#include "absl/base/internal/inline_variable.h" #include "absl/base/internal/inline_variable.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inline_variable_testing_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace inline_variable_testing_internal
{


struct Foo {
int value = 5;
};
struct Foo
{
int value = 5;
};


ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});


ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);


ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), inline_variable_fun_ptr, nullptr);


const Foo& get_foo_a();
const Foo& get_foo_b();
const Foo& get_foo_a();
const Foo& get_foo_b();


const int& get_int_a();
const int& get_int_b();
const int& get_int_a();
const int& get_int_b();


} // namespace inline_variable_testing_internal
ABSL_NAMESPACE_END
} // namespace inline_variable_testing_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_

+ 194
- 168
CAPI/cpp/grpc/include/absl/base/internal/invoke.h

@@ -43,16 +43,18 @@


#include <functional>


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

using std::invoke;
using std::invoke_result_t;
using std::is_invocable_r;

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

using std::invoke;
using std::invoke_result_t;
using std::is_invocable_r;

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
@@ -66,42 +68,48 @@ ABSL_NAMESPACE_END
// The following code is internal implementation detail. See the comment at the
// top of this file for the API documentation.


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// The five classes below each implement one of the clauses from the definition
// of INVOKE. The inner class template Accept<F, Args...> checks whether the
// clause is applicable; static function template Invoke(f, args...) does the
// invocation.
//
// By separating the clause selection logic from invocation we make sure that
// Invoke() does exactly what the standard says.

template <typename Derived>
struct StrippedAccept {
template <typename... Args>
struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
typename std::remove_reference<Args>::type>::type...> {};
};

// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
// and t1 is an object of type T or a reference to an object of type T or a
// reference to an object of a type derived from T.
struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
template <typename... Args>
struct AcceptImpl : std::false_type {};

template <typename MemFunType, typename C, typename Obj, typename... Args>
struct AcceptImpl<MemFunType C::*, Obj, Args...>
: std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
absl::is_function<MemFunType>::value> {
};

template <typename MemFun, typename Obj, typename... Args>
static decltype((std::declval<Obj>().*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

// The five classes below each implement one of the clauses from the definition
// of INVOKE. The inner class template Accept<F, Args...> checks whether the
// clause is applicable; static function template Invoke(f, args...) does the
// invocation.
//
// By separating the clause selection logic from invocation we make sure that
// Invoke() does exactly what the standard says.

template<typename Derived>
struct StrippedAccept
{
template<typename... Args>
struct Accept : Derived::template AcceptImpl<typename std::remove_cv<typename std::remove_reference<Args>::type>::type...>
{
};
};

// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
// and t1 is an object of type T or a reference to an object of type T or a
// reference to an object of a type derived from T.
struct MemFunAndRef : StrippedAccept<MemFunAndRef>
{
template<typename... Args>
struct AcceptImpl : std::false_type
{
};

template<typename MemFunType, typename C, typename Obj, typename... Args>
struct AcceptImpl<MemFunType C::*, Obj, Args...> : std::integral_constant<bool, std::is_base_of<C, Obj>::value && absl::is_function<MemFunType>::value>
{
};

template<typename MemFun, typename Obj, typename... Args>
static decltype((std::declval<Obj>().*std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args)
{
// Ignore bogus GCC warnings on this line.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
@@ -109,131 +117,149 @@ struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
return (std::forward<Obj>(obj).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
return (std::forward<Obj>(obj).*std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
#pragma GCC diagnostic pop
#endif
}
};

// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
// class T and t1 is not one of the types described in the previous item.
struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
template <typename... Args>
struct AcceptImpl : std::false_type {};

template <typename MemFunType, typename C, typename Ptr, typename... Args>
struct AcceptImpl<MemFunType C::*, Ptr, Args...>
: std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
absl::is_function<MemFunType>::value> {
};

template <typename MemFun, typename Ptr, typename... Args>
static decltype(((*std::declval<Ptr>()).*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
return ((*std::forward<Ptr>(ptr)).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
}
};

// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
// an object of type T or a reference to an object of type T or a reference
// to an object of a type derived from T.
struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
template <typename... Args>
struct AcceptImpl : std::false_type {};

template <typename R, typename C, typename Obj>
struct AcceptImpl<R C::*, Obj>
: std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
!absl::is_function<R>::value> {};

template <typename DataMem, typename Ref>
static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ref&& ref) {
return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
}
};

// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
// is not one of the types described in the previous item.
struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
template <typename... Args>
struct AcceptImpl : std::false_type {};

template <typename R, typename C, typename Ptr>
struct AcceptImpl<R C::*, Ptr>
: std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
!absl::is_function<R>::value> {};

template <typename DataMem, typename Ptr>
static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ptr&& ptr) {
return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
}
};

// f(t1, t2, ..., tN) in all other cases.
struct Callable {
// Callable doesn't have Accept because it's the last clause that gets picked
// when none of the previous clauses are applicable.
template <typename F, typename... Args>
static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
F&& f, Args&&... args) {
return std::forward<F>(f)(std::forward<Args>(args)...);
}
};

// Resolves to the first matching clause.
template <typename... Args>
struct Invoker {
typedef typename std::conditional<
MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
typename std::conditional<
MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
typename std::conditional<
DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
DataMemAndPtr, Callable>::type>::type>::
type>::type type;
};

// The result type of Invoke<F, Args...>.
template <typename F, typename... Args>
using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...));

// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard.
template <typename F, typename... Args>
invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}

template <typename AlwaysVoid, typename, typename, typename...>
struct IsInvocableRImpl : std::false_type {};

template <typename R, typename F, typename... Args>
struct IsInvocableRImpl<
absl::void_t<absl::base_internal::invoke_result_t<F, Args...> >, R, F,
Args...>
: std::integral_constant<
bool,
std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>,
R>::value ||
std::is_void<R>::value> {};

// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
// and either the return type is convertible to `R`, or `R` is void.
// C++11-compatible version of `std::is_invocable_r`.
template <typename R, typename F, typename... Args>
using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;

} // namespace base_internal
ABSL_NAMESPACE_END
}
};

// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
// class T and t1 is not one of the types described in the previous item.
struct MemFunAndPtr : StrippedAccept<MemFunAndPtr>
{
template<typename... Args>
struct AcceptImpl : std::false_type
{
};

template<typename MemFunType, typename C, typename Ptr, typename... Args>
struct AcceptImpl<MemFunType C::*, Ptr, Args...> : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value && absl::is_function<MemFunType>::value>
{
};

template<typename MemFun, typename Ptr, typename... Args>
static decltype(((*std::declval<Ptr>()).*std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args)
{
return ((*std::forward<Ptr>(ptr)).*std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
}
};

// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
// an object of type T or a reference to an object of type T or a reference
// to an object of a type derived from T.
struct DataMemAndRef : StrippedAccept<DataMemAndRef>
{
template<typename... Args>
struct AcceptImpl : std::false_type
{
};

template<typename R, typename C, typename Obj>
struct AcceptImpl<R C::*, Obj> : std::integral_constant<bool, std::is_base_of<C, Obj>::value && !absl::is_function<R>::value>
{
};

template<typename DataMem, typename Ref>
static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ref&& ref
)
{
return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
}
};

// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
// is not one of the types described in the previous item.
struct DataMemAndPtr : StrippedAccept<DataMemAndPtr>
{
template<typename... Args>
struct AcceptImpl : std::false_type
{
};

template<typename R, typename C, typename Ptr>
struct AcceptImpl<R C::*, Ptr> : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value && !absl::is_function<R>::value>
{
};

template<typename DataMem, typename Ptr>
static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
DataMem&& data_mem, Ptr&& ptr
)
{
return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
}
};

// f(t1, t2, ..., tN) in all other cases.
struct Callable
{
// Callable doesn't have Accept because it's the last clause that gets picked
// when none of the previous clauses are applicable.
template<typename F, typename... Args>
static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
F&& f, Args&&... args
)
{
return std::forward<F>(f)(std::forward<Args>(args)...);
}
};

// Resolves to the first matching clause.
template<typename... Args>
struct Invoker
{
typedef typename std::conditional<
MemFunAndRef::Accept<Args...>::value,
MemFunAndRef,
typename std::conditional<
MemFunAndPtr::Accept<Args...>::value,
MemFunAndPtr,
typename std::conditional<
DataMemAndRef::Accept<Args...>::value,
DataMemAndRef,
typename std::conditional<DataMemAndPtr::Accept<Args...>::value, DataMemAndPtr, Callable>::type>::type>::
type>::type type;
};

// The result type of Invoke<F, Args...>.
template<typename F, typename... Args>
using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
std::declval<F>(), std::declval<Args>()...
));

// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
// [func.require] of the C++ standard.
template<typename F, typename... Args>
invoke_result_t<F, Args...> invoke(F&& f, Args&&... args)
{
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f), std::forward<Args>(args)...);
}

template<typename AlwaysVoid, typename, typename, typename...>
struct IsInvocableRImpl : std::false_type
{
};

template<typename R, typename F, typename... Args>
struct IsInvocableRImpl<
absl::void_t<absl::base_internal::invoke_result_t<F, Args...>>,
R,
F,
Args...> : std::integral_constant<bool, std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>, R>::value || std::is_void<R>::value>
{
};

// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
// and either the return type is convertible to `R`, or `R` is void.
// C++11-compatible version of `std::is_invocable_r`.
template<typename R, typename F, typename... Args>
using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
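
A compact sketch of base_internal::invoke covering the member-function, data-member and plain-callable clauses described above (Adder and the values are illustrative):

#include "absl/base/internal/invoke.h"

struct Adder {
  int base;
  int Add(int x) const { return base + x; }
};

inline int InvokeDemo() {
  Adder a{40};
  int r1 = absl::base_internal::invoke(&Adder::Add, a, 2);               // 42
  int r2 = absl::base_internal::invoke(&Adder::base, a);                 // 40
  int r3 = absl::base_internal::invoke([](int x) { return x + 2; }, 40); // 42
  return r1 + r2 + r3;
}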


+ 68
- 64
CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h

@@ -54,73 +54,77 @@


#include "absl/base/port.h" #include "absl/base/port.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class LowLevelAlloc {
public:
struct Arena; // an arena from which memory may be allocated

// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
// for Alloc() call the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
static void *AllocWithArena(size_t request, Arena *arena)
ABSL_ATTRIBUTE_SECTION(malloc_hook);

// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);

// ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
// are to put all callers of MallocHook::Invoke* in this module
// into special section,
// so that MallocHook::GetCallerStackTrace can function accurately.

// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be ored into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

class LowLevelAlloc
{
public:
struct Arena; // an arena from which memory may be allocated

// Returns a pointer to a block of at least "request" bytes
// that have been newly allocated from the specific arena.
// for Alloc() call the DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void* Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
static void* AllocWithArena(size_t request, Arena* arena)
ABSL_ATTRIBUTE_SECTION(malloc_hook);

// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void* s) ABSL_ATTRIBUTE_SECTION(malloc_hook);

// ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
// are to put all callers of MallocHook::Invoke* in this module
// into special section,
// so that MallocHook::GetCallerStackTrace can function accurately.

// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be ored into flags:
enum
{
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,


#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Make calls to Alloc(), Free() be async-signal-safe. Not set in
// DefaultArena(). Not supported on all platforms.
kAsyncSignalSafe = 0x0002,
// Make calls to Alloc(), Free() be async-signal-safe. Not set in
// DefaultArena(). Not supported on all platforms.
kAsyncSignalSafe = 0x0002,
#endif
};
// Construct a new arena. The allocation of the underlying metadata honors
// the provided flags. For example, the call NewArena(kAsyncSignalSafe)
// is itself async-signal-safe, as well as generatating an arena that provides
// async-signal-safe Alloc/Free.
static Arena *NewArena(int32_t flags);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
} // namespace base_internal
ABSL_NAMESPACE_END
};
// Construct a new arena. The allocation of the underlying metadata honors
// the provided flags. For example, the call NewArena(kAsyncSignalSafe)
// is itself async-signal-safe, as well as generatating an arena that provides
// async-signal-safe Alloc/Free.
static Arena* NewArena(int32_t flags);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena* arena);
// The default arena that always exists.
static Arena* DefaultArena();
private:
LowLevelAlloc(); // no instances
};
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
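
A brief sketch of the LowLevelAlloc interface declared above (the size and the flag value are illustrative):

#include "absl/base/internal/low_level_alloc.h"

inline void ArenaDemo() {
  using absl::base_internal::LowLevelAlloc;
  LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(0);  // no special flags
  void* block = LowLevelAlloc::AllocWithArena(64, arena);
  LowLevelAlloc::Free(block);           // returns the space to its arena
  LowLevelAlloc::DeleteArena(arena);    // succeeds: no live blocks remain
}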

+ 119
- 101
CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h

@@ -28,107 +28,125 @@
extern "C" bool __google_disable_rescheduling(void); extern "C" bool __google_disable_rescheduling(void);
extern "C" void __google_enable_rescheduling(bool disable_result); extern "C" void __google_enable_rescheduling(bool disable_result);


namespace absl {
ABSL_NAMESPACE_BEGIN
class CondVar;
class Mutex;

namespace synchronization_internal {
int MutexDelay(int32_t c, int mode);
} // namespace synchronization_internal

namespace base_internal {

class SchedulingHelper; // To allow use of SchedulingGuard.
class SpinLock; // To allow use of SchedulingGuard.

// SchedulingGuard
// Provides guard semantics that may be used to disable cooperative rescheduling
// of the calling thread within specific program blocks. This is used to
// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
// scheduling depends on.
//
// Domain implementations capable of rescheduling in reaction to involuntary
// kernel thread actions (e.g blocking due to a pagefault or syscall) must
// guarantee that an annotated thread is not allowed to (cooperatively)
// reschedule until the annotated region is complete.
//
// It is an error to attempt to use a cooperatively scheduled resource (e.g.
// Mutex) within a rescheduling-disabled region.
//
// All methods are async-signal safe.
class SchedulingGuard {
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;

private:
// Disable cooperative rescheduling of the calling thread. It may still
// initiate scheduling operations (e.g. wake-ups), however, it may not itself
// reschedule. Nestable. The returned result is opaque, clients should not
// attempt to interpret it.
// REQUIRES: Result must be passed to a pairing EnableScheduling().
static bool DisableRescheduling();

// Marks the end of a rescheduling disabled region, previously started by
// DisableRescheduling().
// REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
static void EnableRescheduling(bool disable_result);

// A scoped helper for {Disable, Enable}Rescheduling().
// REQUIRES: destructor must run in same thread as constructor.
struct ScopedDisable {
ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }

bool disabled;
};

// A scoped helper to enable rescheduling temporarily.
// REQUIRES: destructor must run in same thread as constructor.
class ScopedEnable {
public:
ScopedEnable();
~ScopedEnable();

private:
int scheduling_disabled_depth_;
};

// Access to SchedulingGuard is explicitly permitted.
friend class absl::CondVar;
friend class absl::Mutex;
friend class SchedulingHelper;
friend class SpinLock;
friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
};

//------------------------------------------------------------------------------
// End of public interfaces.
//------------------------------------------------------------------------------

inline bool SchedulingGuard::ReschedulingIsAllowed() {
return false;
}

inline bool SchedulingGuard::DisableRescheduling() {
return false;
}

inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
return;
}

inline SchedulingGuard::ScopedEnable::ScopedEnable()
: scheduling_disabled_depth_(0) {}
inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
}

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
class CondVar;
class Mutex;

namespace synchronization_internal
{
int MutexDelay(int32_t c, int mode);
} // namespace synchronization_internal

namespace base_internal
{

class SchedulingHelper; // To allow use of SchedulingGuard.
class SpinLock; // To allow use of SchedulingGuard.

// SchedulingGuard
// Provides guard semantics that may be used to disable cooperative rescheduling
// of the calling thread within specific program blocks. This is used to
// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
// scheduling depends on.
//
// Domain implementations capable of rescheduling in reaction to involuntary
// kernel thread actions (e.g blocking due to a pagefault or syscall) must
// guarantee that an annotated thread is not allowed to (cooperatively)
// reschedule until the annotated region is complete.
//
// It is an error to attempt to use a cooperatively scheduled resource (e.g.
// Mutex) within a rescheduling-disabled region.
//
// All methods are async-signal safe.
class SchedulingGuard
{
public:
// Returns true iff the calling thread may be cooperatively rescheduled.
static bool ReschedulingIsAllowed();
SchedulingGuard(const SchedulingGuard&) = delete;
SchedulingGuard& operator=(const SchedulingGuard&) = delete;

private:
// Disable cooperative rescheduling of the calling thread. It may still
// initiate scheduling operations (e.g. wake-ups), however, it may not itself
// reschedule. Nestable. The returned result is opaque, clients should not
// attempt to interpret it.
// REQUIRES: Result must be passed to a pairing EnableScheduling().
static bool DisableRescheduling();

// Marks the end of a rescheduling disabled region, previously started by
// DisableRescheduling().
// REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
static void EnableRescheduling(bool disable_result);

// A scoped helper for {Disable, Enable}Rescheduling().
// REQUIRES: destructor must run in same thread as constructor.
struct ScopedDisable
{
ScopedDisable()
{
disabled = SchedulingGuard::DisableRescheduling();
}
~ScopedDisable()
{
SchedulingGuard::EnableRescheduling(disabled);
}

bool disabled;
};

// A scoped helper to enable rescheduling temporarily.
// REQUIRES: destructor must run in same thread as constructor.
class ScopedEnable
{
public:
ScopedEnable();
~ScopedEnable();

private:
int scheduling_disabled_depth_;
};

// Access to SchedulingGuard is explicitly permitted.
friend class absl::CondVar;
friend class absl::Mutex;
friend class SchedulingHelper;
friend class SpinLock;
friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
};

//------------------------------------------------------------------------------
// End of public interfaces.
//------------------------------------------------------------------------------

inline bool SchedulingGuard::ReschedulingIsAllowed()
{
return false;
}

inline bool SchedulingGuard::DisableRescheduling()
{
return false;
}

inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */)
{
return;
}

inline SchedulingGuard::ScopedEnable::ScopedEnable() :
scheduling_disabled_depth_(0)
{
}
inline SchedulingGuard::ScopedEnable::~ScopedEnable()
{
ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
}

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
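
The scoped helpers above are private and usable only from the listed friend classes (e.g. SpinLock); purely as an illustration of the pattern they implement:

// Sketch only: ScopedDisable is private, so this compiles only inside a friend.
void FriendOnlySlowPath() {
  absl::base_internal::SchedulingGuard::ScopedDisable no_resched;
  // ... spin or block without being cooperatively rescheduled ...
}  // rescheduling is re-enabled when no_resched is destroyed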

+ 67
- 49
CAPI/cpp/grpc/include/absl/base/internal/prefetch.h

@@ -68,71 +68,89 @@
//
// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
//
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


void PrefetchT0(const void* addr);
void PrefetchT1(const void* addr);
void PrefetchT2(const void* addr);
void PrefetchNta(const void* addr);
void PrefetchT0(const void* addr);
void PrefetchT1(const void* addr);
void PrefetchT2(const void* addr);
void PrefetchNta(const void* addr);


// Implementation details follow.
// Implementation details follow.


#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)


#define ABSL_INTERNAL_HAVE_PREFETCH 1


// See __builtin_prefetch:
// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
//
// These functions speculatively load for read only. This is
// safe for all currently supported platforms. However, prefetch for
// store may have problems depending on the target platform.
//
inline void PrefetchT0(const void* addr) {
// Note: this uses prefetcht0 on Intel.
__builtin_prefetch(addr, 0, 3);
}
inline void PrefetchT1(const void* addr) {
// Note: this uses prefetcht1 on Intel.
__builtin_prefetch(addr, 0, 2);
}
inline void PrefetchT2(const void* addr) {
// Note: this uses prefetcht2 on Intel.
__builtin_prefetch(addr, 0, 1);
}
inline void PrefetchNta(const void* addr) {
// Note: this uses prefetchtnta on Intel.
__builtin_prefetch(addr, 0, 0);
}
// See __builtin_prefetch:
// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
//
// These functions speculatively load for read only. This is
// safe for all currently supported platforms. However, prefetch for
// store may have problems depending on the target platform.
//
inline void PrefetchT0(const void* addr)
{
// Note: this uses prefetcht0 on Intel.
__builtin_prefetch(addr, 0, 3);
}
inline void PrefetchT1(const void* addr)
{
// Note: this uses prefetcht1 on Intel.
__builtin_prefetch(addr, 0, 2);
}
inline void PrefetchT2(const void* addr)
{
// Note: this uses prefetcht2 on Intel.
__builtin_prefetch(addr, 0, 1);
}
inline void PrefetchNta(const void* addr)
{
// Note: this uses prefetchtnta on Intel.
__builtin_prefetch(addr, 0, 0);
}


#elif defined(ABSL_INTERNAL_HAVE_SSE)


#define ABSL_INTERNAL_HAVE_PREFETCH 1


inline void PrefetchT0(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
}
inline void PrefetchT1(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
}
inline void PrefetchT2(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
}
inline void PrefetchNta(const void* addr) {
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
}
inline void PrefetchT0(const void* addr)
{
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
}
inline void PrefetchT1(const void* addr)
{
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
}
inline void PrefetchT2(const void* addr)
{
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
}
inline void PrefetchNta(const void* addr)
{
_mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
}


#else
inline void PrefetchT0(const void*) {}
inline void PrefetchT1(const void*) {}
inline void PrefetchT2(const void*) {}
inline void PrefetchNta(const void*) {}
inline void PrefetchT0(const void*)
{
}
inline void PrefetchT1(const void*)
{
}
inline void PrefetchT2(const void*)
{
}
inline void PrefetchNta(const void*)
{
}
#endif


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_PREFETCH_H_
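
A small usage sketch of the prefetch hints declared above (the stride of 8 elements is an arbitrary illustrative choice; the hint may be a no-op on platforms without prefetch support):

#include "absl/base/internal/prefetch.h"

inline long SumWithPrefetch(const long* data, int n) {
  long sum = 0;
  for (int i = 0; i < n; ++i) {
    if (i + 8 < n) absl::base_internal::PrefetchT0(&data[i + 8]);  // warm the cache
    sum += data[i];
  }
  return sum;
}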

+ 129
- 128
CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h

@@ -41,27 +41,27 @@
// This will print an almost standard log line like this to stderr only:
// E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file


#define ABSL_RAW_LOG(severity, ...) \
do { \
constexpr const char* absl_raw_logging_internal_basename = \
::absl::raw_logging_internal::Basename(__FILE__, \
sizeof(__FILE__) - 1); \
::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_basename, \
__LINE__, __VA_ARGS__); \
} while (0)
#define ABSL_RAW_LOG(severity, ...) \
do \
{ \
constexpr const char* absl_raw_logging_internal_basename = \
::absl::raw_logging_internal::Basename(__FILE__, sizeof(__FILE__) - 1); \
::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, absl_raw_logging_internal_basename, __LINE__, __VA_ARGS__); \
} while (0)


// Similar to CHECK(condition) << message, but for low-level modules:
// we use only ABSL_RAW_LOG that does not allocate memory.
// We do not want to provide args list here to encourage this usage:
//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
// so that the args are not computed when not needed.
#define ABSL_RAW_CHECK(condition, message) \
do { \
if (ABSL_PREDICT_FALSE(!(condition))) { \
ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
} \
} while (0)
#define ABSL_RAW_CHECK(condition, message) \
do \
{ \
if (ABSL_PREDICT_FALSE(!(condition))) \
{ \
ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
} \
} while (0)


// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
// except that if the richer log library is linked into the binary, we dispatch
@@ -72,125 +72,126 @@
//
// The API is a subset of the above: each macro only takes two arguments.  Use
// StrCat if you need to build a richer message.
#define ABSL_INTERNAL_LOG(severity, message) \
do { \
constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
::absl::raw_logging_internal::internal_log_function( \
ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_filename, __LINE__, message); \
if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
ABSL_INTERNAL_UNREACHABLE; \
} while (0)

#define ABSL_INTERNAL_CHECK(condition, message) \
do { \
if (ABSL_PREDICT_FALSE(!(condition))) { \
std::string death_message = "Check " #condition " failed: "; \
death_message += std::string(message); \
ABSL_INTERNAL_LOG(FATAL, death_message); \
} \
} while (0)
#define ABSL_INTERNAL_LOG(severity, message) \
do \
{ \
constexpr const char* absl_raw_logging_internal_filename = __FILE__; \
::absl::raw_logging_internal::internal_log_function( \
ABSL_RAW_LOGGING_INTERNAL_##severity, \
absl_raw_logging_internal_filename, \
__LINE__, \
message \
); \
if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \
ABSL_INTERNAL_UNREACHABLE; \
} while (0)

#define ABSL_INTERNAL_CHECK(condition, message) \
do \
{ \
if (ABSL_PREDICT_FALSE(!(condition))) \
{ \
std::string death_message = "Check " #condition " failed: "; \
death_message += std::string(message); \
ABSL_INTERNAL_LOG(FATAL, death_message); \
} \
} while (0)


#define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo
#define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning
#define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError
#define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal
#define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \
::absl::NormalizeLogSeverity(severity)

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace raw_logging_internal {

// Helper function to implement ABSL_RAW_LOG
// Logs format... at "severity" level, reporting it
// as called from file:line.
// This does not allocate memory or acquire locks.
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);

// Writes the provided buffer directly to stderr, in a signal-safe, low-level
// manner.
void AsyncSignalSafeWriteToStderr(const char* s, size_t len);

// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
// the end of the string; the second parameter is the length of the string.
constexpr const char* Basename(const char* fname, int offset) {
return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
? fname + offset
: Basename(fname, offset - 1);
}

// For testing only.
// Returns true if raw logging is fully supported. When it is not
// fully supported, no messages will be emitted, but a log at FATAL
// severity will cause an abort.
//
// TODO(gfalcon): Come up with a better name for this method.
bool RawLoggingFullySupported();

// Function type for a raw_logging customization hook for suppressing messages
// by severity, and for writing custom prefixes on non-suppressed messages.
//
// The installed hook is called for every raw log invocation. The message will
// be logged to stderr only if the hook returns true. FATAL errors will cause
// the process to abort, even if writing to stderr is suppressed. The hook is
// also provided with an output buffer, where it can write a custom log message
// prefix.
//
// The raw_logging system does not allocate memory or grab locks. User-provided
// hooks must avoid these operations, and must not throw exceptions.
//
// 'severity' is the severity level of the message being written.
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the
// hook writes a prefix, it must increment *buf and decrement *buf_size
// accordingly.
using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity,
const char* file, int line, char** buf,
int* buf_size);

// Function type for a raw_logging customization hook called to abort a process
// when a FATAL message is logged. If the provided AbortHook() returns, the
// logging system will call abort().
//
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// The NUL-terminated logged message lives in the buffer between 'buf_start'
// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
// buffer (as written by the LogFilterAndPrefixHook.)
//
// The lifetime of the filename and message buffers will not end while the
// process remains alive.
using AbortHook = void (*)(const char* file, int line, const char* buf_start,
const char* prefix_end, const char* buf_end);

// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
//
// TODO(gfalcon): When string_view no longer depends on base, change this
// interface to take its message as a string_view instead.
using InternalLogFunction = void (*)(absl::LogSeverity severity,
const char* file, int line,
const std::string& message);

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;

// Registers hooks of the above types. Only a single hook of each type may be
// registered. It is an error to call these functions multiple times with
// different input arguments.
//
// These functions are safe to call at any point during initialization; they do
// not block or malloc, and are async-signal safe.
void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);

} // namespace raw_logging_internal
ABSL_NAMESPACE_END
::absl::NormalizeLogSeverity(severity)

namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace raw_logging_internal
{

// Helper function to implement ABSL_RAW_LOG
// Logs format... at "severity" level, reporting it
// as called from file:line.
// This does not allocate memory or acquire locks.
void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);

// Writes the provided buffer directly to stderr, in a signal-safe, low-level
// manner.
void AsyncSignalSafeWriteToStderr(const char* s, size_t len);

// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
// the end of the string; the second parameter is the length of the string.
constexpr const char* Basename(const char* fname, int offset)
{
return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' ? fname + offset : Basename(fname, offset - 1);
}

// For testing only.
// Returns true if raw logging is fully supported. When it is not
// fully supported, no messages will be emitted, but a log at FATAL
// severity will cause an abort.
//
// TODO(gfalcon): Come up with a better name for this method.
bool RawLoggingFullySupported();

// Function type for a raw_logging customization hook for suppressing messages
// by severity, and for writing custom prefixes on non-suppressed messages.
//
// The installed hook is called for every raw log invocation. The message will
// be logged to stderr only if the hook returns true. FATAL errors will cause
// the process to abort, even if writing to stderr is suppressed. The hook is
// also provided with an output buffer, where it can write a custom log message
// prefix.
//
// The raw_logging system does not allocate memory or grab locks. User-provided
// hooks must avoid these operations, and must not throw exceptions.
//
// 'severity' is the severity level of the message being written.
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the
// hook writes a prefix, it must increment *buf and decrement *buf_size
// accordingly.
using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, int line, char** buf, int* buf_size);

// Function type for a raw_logging customization hook called to abort a process
// when a FATAL message is logged. If the provided AbortHook() returns, the
// logging system will call abort().
//
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
// The NUL-terminated logged message lives in the buffer between 'buf_start'
// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
// buffer (as written by the LogFilterAndPrefixHook.)
//
// The lifetime of the filename and message buffers will not end while the
// process remains alive.
using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end);

// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
//
// TODO(gfalcon): When string_view no longer depends on base, change this
// interface to take its message as a string_view instead.
using InternalLogFunction = void (*)(absl::LogSeverity severity, const char* file, int line, const std::string& message);

ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
InternalLogFunction>
internal_log_function;

// Registers hooks of the above types. Only a single hook of each type may be
// registered. It is an error to call these functions multiple times with
// different input arguments.
//
// These functions are safe to call at any point during initialization; they do
// not block or malloc, and are async-signal safe.
void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);

} // namespace raw_logging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
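A minimal usage sketch of the macros declared above (not part of this commit; the function, the POSIX mkdir call, and the message text are illustrative assumptions):

#include <cerrno>
#include <sys/stat.h>

#include "absl/base/internal/raw_logging.h"

// Early-initialization code where the richer logging library may not be
// linked yet: ABSL_RAW_CHECK aborts on a violated precondition, and
// ABSL_RAW_LOG writes directly to stderr without allocating memory.
void InitScratchDir(const char* path)
{
    ABSL_RAW_CHECK(path != nullptr, "path must not be null");
    if (mkdir(path, 0700) != 0 && errno != EEXIST)
    {
        ABSL_RAW_LOG(WARNING, "mkdir(%s) failed with errno %d", path, errno);
    }
}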

+34 -31 CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h

@@ -20,39 +20,42 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// Used to describe how a thread may be scheduled. Typically associated with
// the declaration of a resource supporting synchronized access.
//
// SCHEDULE_COOPERATIVE_AND_KERNEL:
// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
// reschedule (using base::scheduling semantics); allowing other cooperative
// threads to proceed.
//
// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
// Specifies that no cooperative scheduling semantics may be used, even if the
// current thread is itself cooperatively scheduled. This means that
// cooperative threads will NOT allow other cooperative threads to execute in
// their place while waiting for a resource of this type. Host operating system
// semantics (e.g. a futex) may still be used.
//
// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
// base::scheduling (e.g. the implementation of a Scheduler) may depend.
//
// NOTE: Cooperative resources may not be nested below non-cooperative ones.
// This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
enum SchedulingMode {
SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
};
// Used to describe how a thread may be scheduled. Typically associated with
// the declaration of a resource supporting synchronized access.
//
// SCHEDULE_COOPERATIVE_AND_KERNEL:
// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
// reschedule (using base::scheduling semantics); allowing other cooperative
// threads to proceed.
//
// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
// Specifies that no cooperative scheduling semantics may be used, even if the
// current thread is itself cooperatively scheduled. This means that
// cooperative threads will NOT allow other cooperative threads to execute in
// their place while waiting for a resource of this type. Host operating system
// semantics (e.g. a futex) may still be used.
//
// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which
// base::scheduling (e.g. the implementation of a Scheduler) may depend.
//
// NOTE: Cooperative resources may not be nested below non-cooperative ones.
// This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
enum SchedulingMode
{
SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS.
SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling.
};


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
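The enum above is consumed by the low-level synchronization primitives later in this diff; a hedged sketch of the common pattern (not from this commit; the variable name is an assumption, and the const-init SpinLock constructor it uses appears in spinlock.h below):

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"

// A global lock for code the scheduler itself may depend on, so cooperative
// (fiber-style) rescheduling must not be used while waiting on it.
ABSL_CONST_INIT static absl::base_internal::SpinLock g_low_level_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);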

+22 -19 CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h

@@ -21,25 +21,28 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ScopedSetEnv {
public:
ScopedSetEnv(const char* var_name, const char* new_value);
~ScopedSetEnv();

private:
std::string var_name_;
std::string old_value_;

// True if the environment variable was initially not set.
bool was_unset_;
};

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

class ScopedSetEnv
{
public:
ScopedSetEnv(const char* var_name, const char* new_value);
~ScopedSetEnv();

private:
std::string var_name_;
std::string old_value_;

// True if the environment variable was initially not set.
bool was_unset_;
};

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
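A minimal usage sketch of the test helper above (not part of this commit; the variable name and value are illustrative):

#include "absl/base/internal/scoped_set_env.h"

void RunWithFakeTmpdir()
{
    // TMPDIR is overridden only for this scope; the destructor restores the
    // previous value, or unsets the variable if it was not set before.
    absl::base_internal::ScopedSetEnv scoped_env("TMPDIR", "/tmp/fake-tmpdir");
    // ... exercise code under test that reads TMPDIR ...
}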

+232 -202 CAPI/cpp/grpc/include/absl/base/internal/spinlock.h

@@ -45,212 +45,242 @@
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

class ABSL_LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}

// Constructors that allow non-cooperative spinlocks to be created for use
// inside thread schedulers. Normal clients should not use these.
explicit SpinLock(base_internal::SchedulingMode mode);

// Constructor for global SpinLock instances. See absl/base/const_init.h.
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
: lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}

// For global SpinLock instances prefer trivial destructor when possible.
// Default but non-trivial destructor in some build configurations causes an
// extra static initializer.
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

class ABSL_LOCKABLE SpinLock
{
public:
SpinLock() :
lockword_(kSpinLockCooperative)
{
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
}

// Constructors that allow non-cooperative spinlocks to be created for use
// inside thread schedulers. Normal clients should not use these.
explicit SpinLock(base_internal::SchedulingMode mode);

// Constructor for global SpinLock instances. See absl/base/const_init.h.
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) :
lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0)
{
}

// For global SpinLock instances prefer trivial destructor when possible.
// Default but non-trivial destructor in some build configurations causes an
// extra static initializer.
#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
~SpinLock()
{
ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static);
}
#else
~SpinLock() = default;
~SpinLock() = default;
#endif


// Acquire this SpinLock.
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
if (!TryLockImpl()) {
SlowLock();
}
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}

// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
0);
return res;
}

// Release this SpinLock, which must be held by the calling thread.
inline void Unlock() ABSL_UNLOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
std::memory_order_release);

if ((lock_value & kSpinLockDisabledScheduling) != 0) {
base_internal::SchedulingGuard::EnableRescheduling(true);
}
if ((lock_value & kWaitTimeMask) != 0) {
// Collect contentionz profile info, and speed the wakeup of any waiter.
// The wait_cycles value indicates how long this thread spent waiting
// for the lock.
SlowUnlock(lock_value);
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}

// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}

// Return immediately if this thread holds the SpinLock exclusively.
// Otherwise, report an error by crashing with a diagnostic.
inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
if (!IsHeld()) {
ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
}
}

protected:
// These should not be exported except for testing.

// Store number of cycles between wait_start_time and wait_end_time in a
// lock value.
static uint32_t EncodeWaitCycles(int64_t wait_start_time,
int64_t wait_end_time);

// Extract number of wait cycles in a lock value.
static uint64_t DecodeWaitCycles(uint32_t lock_value);

// Provide access to protected method above. Use for testing only.
friend struct SpinLockTest;

private:
// lockword_ is used to store the following:
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether the current lock holder disabled scheduling when
// acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
// This is set by the lock holder to indicate how long it waited on
// the lock before eventually acquiring it. The number of cycles is
// encoded as a 29-bit unsigned int, or in the case that the current
// holder did not wait but another waiter is queued, the LSB
// (kSpinLockSleeper) is set. The implementation does not explicitly
// track the number of queued waiters beyond this. It must always be
// assumed that waiters may exist if the current holder was required to
// queue.
//
// Invariant: if the lock is not held, the value is either 0 or
// kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
static constexpr uint32_t kSpinLockSleeper = 8;
// Includes kSpinLockSleeper.
static constexpr uint32_t kWaitTimeMask =
~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

// Returns true if the provided scheduling mode is cooperative.
static constexpr bool IsCooperative(
base_internal::SchedulingMode scheduling_mode) {
return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
}

uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
void SlowLock() ABSL_ATTRIBUTE_COLD;
void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
uint32_t SpinLoop();

inline bool TryLockImpl() {
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
}

std::atomic<uint32_t> lockword_;

SpinLock(const SpinLock&) = delete;
SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }

SpinLockHolder(const SpinLockHolder&) = delete;
SpinLockHolder& operator=(const SpinLockHolder&) = delete;

private:
SpinLock* lock_;
};

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended. The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles. This is thread-safe, but only a single
// profiler can be registered. It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
int64_t wait_cycles));

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
uint32_t wait_cycles) {
if ((lock_value & kSpinLockHeld) != 0) {
return lock_value;
}

uint32_t sched_disabled_bit = 0;
if ((lock_value & kSpinLockCooperative) == 0) {
// For non-cooperative locks we must make sure we mark ourselves as
// non-reschedulable before we attempt to CompareAndSwap.
if (base_internal::SchedulingGuard::DisableRescheduling()) {
sched_disabled_bit = kSpinLockDisabledScheduling;
}
}

if (!lockword_.compare_exchange_strong(
lock_value,
kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
std::memory_order_acquire, std::memory_order_relaxed)) {
base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
}

return lock_value;
}

} // namespace base_internal
ABSL_NAMESPACE_END
// Acquire this SpinLock.
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION()
{
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
if (!TryLockImpl())
{
SlowLock();
}
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}

// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true)
{
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), 0
);
return res;
}

// Release this SpinLock, which must be held by the calling thread.
inline void Unlock() ABSL_UNLOCK_FUNCTION()
{
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, std::memory_order_release);

if ((lock_value & kSpinLockDisabledScheduling) != 0)
{
base_internal::SchedulingGuard::EnableRescheduling(true);
}
if ((lock_value & kWaitTimeMask) != 0)
{
// Collect contentionz profile info, and speed the wakeup of any waiter.
// The wait_cycles value indicates how long this thread spent waiting
// for the lock.
SlowUnlock(lock_value);
}
ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
}

// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const
{
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}

// Return immediately if this thread holds the SpinLock exclusively.
// Otherwise, report an error by crashing with a diagnostic.
inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK()
{
if (!IsHeld())
{
ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
}
}

protected:
// These should not be exported except for testing.

// Store number of cycles between wait_start_time and wait_end_time in a
// lock value.
static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time);

// Extract number of wait cycles in a lock value.
static uint64_t DecodeWaitCycles(uint32_t lock_value);

// Provide access to protected method above. Use for testing only.
friend struct SpinLockTest;

private:
// lockword_ is used to store the following:
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether the current lock holder disabled scheduling when
// acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
// This is set by the lock holder to indicate how long it waited on
// the lock before eventually acquiring it. The number of cycles is
// encoded as a 29-bit unsigned int, or in the case that the current
// holder did not wait but another waiter is queued, the LSB
// (kSpinLockSleeper) is set. The implementation does not explicitly
// track the number of queued waiters beyond this. It must always be
// assumed that waiters may exist if the current holder was required to
// queue.
//
// Invariant: if the lock is not held, the value is either 0 or
// kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
static constexpr uint32_t kSpinLockSleeper = 8;
// Includes kSpinLockSleeper.
static constexpr uint32_t kWaitTimeMask =
~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);

// Returns true if the provided scheduling mode is cooperative.
static constexpr bool IsCooperative(
base_internal::SchedulingMode scheduling_mode
)
{
return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
}

uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
void SlowLock() ABSL_ATTRIBUTE_COLD;
void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
uint32_t SpinLoop();

inline bool TryLockImpl()
{
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
}

std::atomic<uint32_t> lockword_;

SpinLock(const SpinLock&) = delete;
SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class ABSL_SCOPED_LOCKABLE SpinLockHolder
{
public:
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) :
lock_(l)
{
l->Lock();
}
inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION()
{
lock_->Unlock();
}

SpinLockHolder(const SpinLockHolder&) = delete;
SpinLockHolder& operator=(const SpinLockHolder&) = delete;

private:
SpinLock* lock_;
};

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended. The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles. This is thread-safe, but only a single
// profiler can be registered. It is an error to call this function multiple
// times with different arguments.
void RegisterSpinLockProfiler(void (*fn)(const void* lock, int64_t wait_cycles));

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, uint32_t wait_cycles)
{
if ((lock_value & kSpinLockHeld) != 0)
{
return lock_value;
}

uint32_t sched_disabled_bit = 0;
if ((lock_value & kSpinLockCooperative) == 0)
{
// For non-cooperative locks we must make sure we mark ourselves as
// non-reschedulable before we attempt to CompareAndSwap.
if (base_internal::SchedulingGuard::DisableRescheduling())
{
sched_disabled_bit = kSpinLockDisabledScheduling;
}
}

if (!lockword_.compare_exchange_strong(
lock_value,
kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
std::memory_order_acquire,
std::memory_order_relaxed
))
{
base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
}

return lock_value;
}

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_
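A minimal usage sketch of SpinLock and SpinLockHolder as declared above (not part of this commit; the counter class is an illustrative assumption):

#include <cstdint>

#include "absl/base/internal/spinlock.h"
#include "absl/base/thread_annotations.h"

class HitCounter
{
public:
    void Add(int64_t n)
    {
        absl::base_internal::SpinLockHolder l(&lock_); // released on scope exit
        hits_ += n;
    }
    int64_t Get() const
    {
        absl::base_internal::SpinLockHolder l(&lock_);
        return hits_;
    }

private:
    mutable absl::base_internal::SpinLock lock_;
    int64_t hits_ ABSL_GUARDED_BY(lock_) = 0;
};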

+50 -48 CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h

@@ -23,47 +23,47 @@


#include "absl/base/internal/scheduling_mode.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// SpinLockWait() waits until it can perform one of several transitions from
// "from" to "to". It returns when it performs a transition where done==true.
struct SpinLockWaitTransition {
uint32_t from;
uint32_t to;
bool done;
};
// SpinLockWait() waits until it can perform one of several transitions from
// "from" to "to". It returns when it performs a transition where done==true.
struct SpinLockWaitTransition
{
uint32_t from;
uint32_t to;
bool done;
};


// Wait until *w can transition from trans[i].from to trans[i].to for some i
// satisfying 0<=i<n && trans[i].done, atomically make the transition,
// then return the old value of *w. Make any other atomic transitions
// where !trans[i].done, but continue waiting.
//
// Wakeups for threads blocked on SpinLockWait do not respect priorities.
uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
const SpinLockWaitTransition trans[],
SchedulingMode scheduling_mode);
// Wait until *w can transition from trans[i].from to trans[i].to for some i
// satisfying 0<=i<n && trans[i].done, atomically make the transition,
// then return the old value of *w. Make any other atomic transitions
// where !trans[i].done, but continue waiting.
//
// Wakeups for threads blocked on SpinLockWait do not respect priorities.
uint32_t SpinLockWait(std::atomic<uint32_t>* w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode);


// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
// is true, wake all such threads. On some systems, this may be a no-op; on
// those systems, threads calling SpinLockDelay() will always wake eventually
// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t> *w, bool all);
// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
// is true, wake all such threads. On some systems, this may be a no-op; on
// those systems, threads calling SpinLockDelay() will always wake eventually
// even if SpinLockWake() is never called.
void SpinLockWake(std::atomic<uint32_t>* w, bool all);


// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
base_internal::SchedulingMode scheduling_mode);
// Wait for an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a call to SpinLockWake(w).
void SpinLockDelay(std::atomic<uint32_t>* w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode);


// Helper used by AbslInternalSpinLockDelay.
// Returns a suggested delay in nanoseconds for iteration number "loop".
int SpinLockSuggestedDelayNS(int loop);
// Helper used by AbslInternalSpinLockDelay.
// Returns a suggested delay in nanoseconds for iteration number "loop".
int SpinLockSuggestedDelayNS(int loop);


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


// In some build configurations we pass --detect-odr-violations to the
@@ -72,24 +72,26 @@ ABSL_NAMESPACE_END
// --detect-odr-violations ignores symbols not mangled with C++ names.
// By changing our extension points to be extern "C", we dodge this
// check.
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
bool all);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode);
extern "C"
{
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t>* w, bool all);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
std::atomic<uint32_t>* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode
);
}


inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
bool all) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t>* w, bool all)
{
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)
(w, all);
}


inline void absl::base_internal::SpinLockDelay(
std::atomic<uint32_t> *w, uint32_t value, int loop,
absl::base_internal::SchedulingMode scheduling_mode) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
(w, value, loop, scheduling_mode);
std::atomic<uint32_t>* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode
)
{
ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
(w, value, loop, scheduling_mode);
}


#endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
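For orientation only, a hedged sketch of how the wait/wake primitives above could drive a simple two-state flag (not from this commit; the flag encoding and transition table are illustrative assumptions, and real callers are the SpinLock implementation itself):

#include <atomic>
#include <cstdint>

#include "absl/base/internal/spinlock_wait.h"

std::atomic<uint32_t> flag{0}; // 0 = free, 1 = busy (illustrative encoding)

void AcquireFlag()
{
    static const absl::base_internal::SpinLockWaitTransition trans[] = {
        {0, 1, true}, // done once the word moves from free to busy
    };
    absl::base_internal::SpinLockWait(&flag, 1, trans, absl::base_internal::SCHEDULE_KERNEL_ONLY);
}

void ReleaseFlag()
{
    flag.store(0, std::memory_order_release);
    absl::base_internal::SpinLockWake(&flag, false); // wake one waiter, if any
}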

+15 -13 CAPI/cpp/grpc/include/absl/base/internal/strerror.h

@@ -19,21 +19,23 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// A portable and thread-safe alternative to C89's `strerror`.
//
// The C89 specification of `strerror` is not suitable for use in a
// multi-threaded application as the returned string may be changed by calls to
// `strerror` from another thread. The many non-stdlib alternatives differ
// enough in their names, availability, and semantics to justify this wrapper
// around them. `errno` will not be modified by a call to `absl::StrError`.
std::string StrError(int errnum);
// A portable and thread-safe alternative to C89's `strerror`.
//
// The C89 specification of `strerror` is not suitable for use in a
// multi-threaded application as the returned string may be changed by calls to
// `strerror` from another thread. The many non-stdlib alternatives differ
// enough in their names, availability, and semantics to justify this wrapper
// around them. `errno` will not be modified by a call to `absl::StrError`.
std::string StrError(int errnum);


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_STRERROR_H_
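A minimal usage sketch of StrError (not part of this commit; the file-opening helper is illustrative):

#include <cerrno>
#include <cstdio>
#include <string>

#include "absl/base/internal/strerror.h"

void ReportOpenFailure(const char* filename)
{
    std::FILE* f = std::fopen(filename, "r");
    if (f == nullptr)
    {
        // Unlike strerror(), this call is thread-safe and leaves errno untouched.
        std::string msg = absl::base_internal::StrError(errno);
        std::fprintf(stderr, "open %s failed: %s\n", filename, msg.c_str());
        return;
    }
    std::fclose(f);
}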

+24 -22 CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h

@@ -33,17 +33,19 @@
#include "absl/base/config.h"
#include "absl/base/port.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// Nominal core processor cycles per second of each processor. This is _not_
// necessarily the frequency of the CycleClock counter (see cycleclock.h)
// Thread-safe.
double NominalCPUFrequency();
// Nominal core processor cycles per second of each processor. This is _not_
// necessarily the frequency of the CycleClock counter (see cycleclock.h)
// Thread-safe.
double NominalCPUFrequency();


// Number of logical processors (hyperthreads) in system. Thread-safe.
int NumCPUs();
// Number of logical processors (hyperthreads) in system. Thread-safe.
int NumCPUs();


// Return the thread id of the current thread, as told by the system.
// No two currently-live threads implemented by the OS shall have the same ID.
@@ -53,22 +55,22 @@ int NumCPUs();
// On Linux, you may send a signal to the resulting ID with kill().  However,
// it is recommended for portability that you use pthread_kill() instead.
#ifdef _WIN32
// On Windows, process id and thread id are of the same type according to the
// return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
// 32-bit type.
using pid_t = uint32_t;
// On Windows, process id and thread id are of the same type according to the
// return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
// 32-bit type.
using pid_t = uint32_t;
#endif
pid_t GetTID();
pid_t GetTID();


// Like GetTID(), but caches the result in thread-local storage in order
// to avoid unnecessary system calls. Note that there are some cases where
// one must call through to GetTID directly, which is why this exists as a
// separate function. For example, GetCachedTID() is not safe to call in
// an asynchronous signal-handling context nor right after a call to fork().
pid_t GetCachedTID();
// Like GetTID(), but caches the result in thread-local storage in order
// to avoid unnecessary system calls. Note that there are some cases where
// one must call through to GetTID directly, which is why this exists as a
// separate function. For example, GetCachedTID() is not safe to call in
// an asynchronous signal-handling context nor right after a call to fork().
pid_t GetCachedTID();


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_BASE_INTERNAL_SYSINFO_H_
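A minimal usage sketch of the declarations above (not part of this commit; the reporting function is illustrative):

#include <cstdio>

#include "absl/base/internal/sysinfo.h"

void LogHostInfo()
{
    std::printf("logical CPUs: %d\n", absl::base_internal::NumCPUs());
    std::printf("nominal CPU frequency: %.0f Hz\n", absl::base_internal::NominalCPUFrequency());
    std::printf("current thread id: %ld\n", static_cast<long>(absl::base_internal::GetTID()));
}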

+35 -33 CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h

@@ -39,9 +39,9 @@
#define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_


#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif


// GUARDED_BY()
@@ -101,10 +101,10 @@
//   Mutex m1_;
//   Mutex m2_ ACQUIRED_AFTER(m1_);
#define ACQUIRED_AFTER(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))


#define ACQUIRED_BEFORE(...) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))


// EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED()
//
@@ -130,10 +130,10 @@
//   void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... }
//   void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__))


#define SHARED_LOCKS_REQUIRED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__))


// LOCKS_EXCLUDED()
//
@@ -141,7 +141,7 @@
// cannot be held when calling this function (as Abseil's `Mutex` locks are
// non-reentrant).
#define LOCKS_EXCLUDED(...) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))


// LOCK_RETURNED()
//
@@ -149,13 +149,13 @@
// a public getter method that returns a pointer to a private mutex should
// be annotated with LOCK_RETURNED.
#define LOCK_RETURNED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))


// LOCKABLE
//
// Documents if a class/type is a lockable type (such as the `Mutex` class).
#define LOCKABLE \
THREAD_ANNOTATION_ATTRIBUTE__(lockable)
THREAD_ANNOTATION_ATTRIBUTE__(lockable)


// SCOPED_LOCKABLE
//
@@ -165,28 +165,28 @@
// arguments; the analysis will assume that the destructor unlocks whatever the
// constructor locked.
#define SCOPED_LOCKABLE \
THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)


// EXCLUSIVE_LOCK_FUNCTION()
//
// Documents functions that acquire a lock in the body of a function, and do
// not release it.
#define EXCLUSIVE_LOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))


// SHARED_LOCK_FUNCTION()
//
// Documents functions that acquire a shared (reader) lock in the body of a
// function, and do not release it.
#define SHARED_LOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__))


// UNLOCK_FUNCTION()
//
// Documents functions that expect a lock to be held on entry to the function,
// and release it in the body of the function.
#define UNLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))


// EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION()
//
@@ -197,20 +197,20 @@
// argument specifies the mutex that is locked on success. If unspecified, this
// mutex is assumed to be `this`.
#define EXCLUSIVE_TRYLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__))


#define SHARED_TRYLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__))


// ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK()
//
// Documents functions that dynamically check to see if a lock is held, and fail
// if it is not held.
#define ASSERT_EXCLUSIVE_LOCK(...) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__))


#define ASSERT_SHARED_LOCK(...) \
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))
THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__))


// NO_THREAD_SAFETY_ANALYSIS
//
@@ -218,7 +218,7 @@
// This annotation is used to mark functions that are known to be correct, but
// the locking behavior is more complicated than the analyzer can handle.
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)


//------------------------------------------------------------------------------
// Tool-Supplied Annotations
@@ -239,7 +239,7 @@
// that are incorrect and need to be fixed. It is used by automated tools to
// avoid breaking the build when the analysis is updated.
// Code owners are expected to eventually fix the routine.
#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS
#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS


// Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY
// annotation that needs to be fixed, because it is producing thread safety
@@ -251,20 +251,22 @@
// but the compiler cannot confirm that.
#define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x)



namespace thread_safety_analysis {

// Takes a reference to a guarded data member, and returns an unguarded
// reference.
template <typename T>
inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS {
return v;
}

template <typename T>
inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS {
return v;
}
namespace thread_safety_analysis
{

// Takes a reference to a guarded data member, and returns an unguarded
// reference.
template<typename T>
inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS
{
return v;
}

template<typename T>
inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS
{
return v;
}


} // namespace thread_safety_analysis
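A minimal usage sketch of the unprefixed annotations declared in this header (not part of this commit; it assumes absl::Mutex from absl/synchronization/mutex.h and an illustrative account class):

#include "absl/base/internal/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class BalanceBook
{
public:
    void Deposit(int amount) LOCKS_EXCLUDED(mu_)
    {
        absl::MutexLock l(&mu_);
        balance_ += amount;
    }
    int balance() const LOCKS_EXCLUDED(mu_)
    {
        absl::MutexLock l(&mu_);
        return balance_;
    }

private:
    mutable absl::Mutex mu_;
    int balance_ GUARDED_BY(mu_) = 0;
};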




+167 -160 CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h

@@ -34,156 +34,162 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

struct SynchLocksHeld;
struct SynchWaitParams;

namespace base_internal {

class SpinLock;
struct ThreadIdentity;

// Used by the implementation of absl::Mutex and absl::CondVar.
struct PerThreadSynch {
// The internal representation of absl::Mutex and absl::CondVar rely
// on the alignment of PerThreadSynch. Both store the address of the
// PerThreadSynch in the high-order bits of their internal state,
// which means the low kLowZeroBits of the address of PerThreadSynch
// must be zero.
static constexpr int kLowZeroBits = 8;
static constexpr int kAlignment = 1 << kLowZeroBits;

// Returns the associated ThreadIdentity.
// This can be implemented as a cast because we guarantee
// PerThreadSynch is the first element of ThreadIdentity.
ThreadIdentity* thread_identity() {
return reinterpret_cast<ThreadIdentity*>(this);
}

PerThreadSynch *next; // Circular waiter queue; initialized to 0.
PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same
// condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
bool wake; // This thread is to be woken from a Mutex.
// If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
// waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
//
// The value of "x->cond_waiter" is meaningless if "x" is not on a
// Mutex waiter list.
bool cond_waiter;
bool maybe_unlocking; // Valid at head of Mutex waiter queue;
// true if UnlockSlow could be searching
// for a waiter to wake. Used for an optimization
// in Enqueue(). true is always a valid value.
// Can be reset to false when the unlocker or any
// writer releases the lock, or a reader fully
// releases the lock. It may not be set to false
// by a reader that decrements the count to
// non-zero. protected by mutex spinlock
bool suppress_fatal_errors; // If true, try to proceed even in the face
// of broken invariants. This is used within
// fatal signal handlers to improve the
// chances of debug logging information being
// output successfully.
int priority; // Priority of thread (updated every so often).

// State values:
// kAvailable: This PerThreadSynch is available.
// kQueued: This PerThreadSynch is unavailable, it's currently queued on a
// Mutex or CondVar waistlist.
//
// Transitions from kQueued to kAvailable require a release
// barrier. This is needed as a waiter may use "state" to
// independently observe that it's no longer queued.
//
// Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex.
enum State {
kAvailable,
kQueued
};
std::atomic<State> state;

// The wait parameters of the current wait. waitp is null if the
// thread is not waiting. Transitions from null to non-null must
// occur before the enqueue commit point (state = kQueued in
// Enqueue() and CondVarEnqueue()). Transitions from non-null to
// null must occur after the wait is finished (state = kAvailable in
// Mutex::Block() and CondVar::WaitCommon()). This field may be
// changed only by the thread that describes this PerThreadSynch. A
// special case is Fer(), which calls Enqueue() on another thread,
// but with an identical SynchWaitParams pointer, thus leaving the
// pointer unchanged.
SynchWaitParams* waitp;

intptr_t readers; // Number of readers in mutex.

// When priority will next be read (cycles).
int64_t next_priority_read_cycles;

// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld *all_locks;
};

// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity {
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
// PerThreadSynch::kAlignment aligned. We provide this alignment on
// ThreadIdentity itself.
PerThreadSynch per_thread_synch;

// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState {
alignas(void*) char data[128];
} waiter_state;

// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
std::atomic<int>* blocked_count_ptr;

// The following variables are mostly read/written just by the
// thread itself. The only exception is that these are read by
// a ticker thread as a hint.
std::atomic<int> ticker; // Tick counter, incremented once per second.
std::atomic<int> wait_start; // Ticker value when thread started waiting.
std::atomic<bool> is_idle; // Has thread become idle yet?

ThreadIdentity* next;
};

// Returns the ThreadIdentity object representing the calling thread; guaranteed
// to be unique for its lifetime. The returned object will remain valid for the
// program's lifetime; although it may be re-assigned to a subsequent thread.
// If one does not exist, return nullptr instead.
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
// is handled internally within tcmalloc's initialization already.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
ThreadIdentity* CurrentThreadIdentityIfPresent();

using ThreadIdentityReclaimerFunction = void (*)(void*);

// Sets the current thread identity to the given value. 'reclaimer' is a
// pointer to the global function for cleaning up instances on thread
// destruction.
void SetCurrentThreadIdentity(ThreadIdentity* identity,
ThreadIdentityReclaimerFunction reclaimer);

// Removes the currently associated ThreadIdentity from the running thread.
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
// from that function.
void ClearCurrentThreadIdentity();
namespace absl
{
ABSL_NAMESPACE_BEGIN

struct SynchLocksHeld;
struct SynchWaitParams;

namespace base_internal
{

class SpinLock;
struct ThreadIdentity;

// Used by the implementation of absl::Mutex and absl::CondVar.
struct PerThreadSynch
{
// The internal representation of absl::Mutex and absl::CondVar rely
// on the alignment of PerThreadSynch. Both store the address of the
// PerThreadSynch in the high-order bits of their internal state,
// which means the low kLowZeroBits of the address of PerThreadSynch
// must be zero.
static constexpr int kLowZeroBits = 8;
static constexpr int kAlignment = 1 << kLowZeroBits;

// Returns the associated ThreadIdentity.
// This can be implemented as a cast because we guarantee
// PerThreadSynch is the first element of ThreadIdentity.
ThreadIdentity* thread_identity()
{
return reinterpret_cast<ThreadIdentity*>(this);
}

PerThreadSynch* next; // Circular waiter queue; initialized to 0.
PerThreadSynch* skip; // If non-zero, all entries in Mutex queue
// up to and including "skip" have same
// condition as this, and will be woken later
bool may_skip; // if false while on mutex queue, a mutex unlocker
// is using this PerThreadSynch as a terminator. Its
// skip field must not be filled in because the loop
// might then skip over the terminator.
bool wake; // This thread is to be woken from a Mutex.
// If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
// waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
//
// The value of "x->cond_waiter" is meaningless if "x" is not on a
// Mutex waiter list.
bool cond_waiter;
bool maybe_unlocking; // Valid at head of Mutex waiter queue;
// true if UnlockSlow could be searching
// for a waiter to wake. Used for an optimization
// in Enqueue(). true is always a valid value.
// Can be reset to false when the unlocker or any
// writer releases the lock, or a reader fully
// releases the lock. It may not be set to false
// by a reader that decrements the count to
// non-zero. protected by mutex spinlock
bool suppress_fatal_errors; // If true, try to proceed even in the face
// of broken invariants. This is used within
// fatal signal handlers to improve the
// chances of debug logging information being
// output successfully.
int priority; // Priority of thread (updated every so often).

// State values:
// kAvailable: This PerThreadSynch is available.
// kQueued: This PerThreadSynch is unavailable, it's currently queued on a
// Mutex or CondVar waitlist.
//
// Transitions from kQueued to kAvailable require a release
// barrier. This is needed as a waiter may use "state" to
// independently observe that it's no longer queued.
//
// Transitions from kAvailable to kQueued require no barrier, they
// are externally ordered by the Mutex.
enum State
{
kAvailable,
kQueued
};
std::atomic<State> state;

// The wait parameters of the current wait. waitp is null if the
// thread is not waiting. Transitions from null to non-null must
// occur before the enqueue commit point (state = kQueued in
// Enqueue() and CondVarEnqueue()). Transitions from non-null to
// null must occur after the wait is finished (state = kAvailable in
// Mutex::Block() and CondVar::WaitCommon()). This field may be
// changed only by the thread that describes this PerThreadSynch. A
// special case is Fer(), which calls Enqueue() on another thread,
// but with an identical SynchWaitParams pointer, thus leaving the
// pointer unchanged.
SynchWaitParams* waitp;

intptr_t readers; // Number of readers in mutex.

// When priority will next be read (cycles).
int64_t next_priority_read_cycles;

// Locks held; used during deadlock detection.
// Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
SynchLocksHeld* all_locks;
};

// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
struct ThreadIdentity
{
// Must be the first member. The Mutex implementation requires that
// the PerThreadSynch object associated with each thread is
// PerThreadSynch::kAlignment aligned. We provide this alignment on
// ThreadIdentity itself.
PerThreadSynch per_thread_synch;

// Private: Reserved for absl::synchronization_internal::Waiter.
struct WaiterState
{
alignas(void*) char data[128];
} waiter_state;

// Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
std::atomic<int>* blocked_count_ptr;

// The following variables are mostly read/written just by the
// thread itself. The only exception is that these are read by
// a ticker thread as a hint.
std::atomic<int> ticker; // Tick counter, incremented once per second.
std::atomic<int> wait_start; // Ticker value when thread started waiting.
std::atomic<bool> is_idle; // Has thread become idle yet?

ThreadIdentity* next;
};

// Returns the ThreadIdentity object representing the calling thread, which is
// guaranteed to be unique for the thread's lifetime. The returned object
// remains valid for the program's lifetime, although it may be re-assigned to
// a subsequent thread. If no identity exists for the calling thread, returns
// nullptr instead.
//
// Does not malloc(*), and is async-signal safe.
// [*] Technically pthread_setspecific() does malloc on first use; however this
// is handled internally within tcmalloc's initialization already.
//
// New ThreadIdentity objects can be constructed and associated with a thread
// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
ThreadIdentity* CurrentThreadIdentityIfPresent();

using ThreadIdentityReclaimerFunction = void (*)(void*);

// Sets the current thread identity to the given value. 'reclaimer' is a
// pointer to the global function for cleaning up instances on thread
// destruction.
void SetCurrentThreadIdentity(ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer);

// Removes the currently associated ThreadIdentity from the running thread.
// This must be called from inside the ThreadIdentityReclaimerFunction, and only
// from that function.
void ClearCurrentThreadIdentity();


// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
// index> // index>
@@ -213,7 +219,7 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) #elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
(__GOOGLE_GRTE_VERSION__ >= 20140228L) (__GOOGLE_GRTE_VERSION__ >= 20140228L)
// Support for async-safe TLS was specifically added in GRTEv4. It's not // Support for async-safe TLS was specifically added in GRTEv4. It's not
// present in the upstream eglibc. // present in the upstream eglibc.
@@ -221,17 +227,17 @@ void ClearCurrentThreadIdentity();
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
#else #else
#define ABSL_THREAD_IDENTITY_MODE \ #define ABSL_THREAD_IDENTITY_MODE \
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
#endif #endif


#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11


#if ABSL_PER_THREAD_TLS #if ABSL_PER_THREAD_TLS
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
thread_identity_ptr;
ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
thread_identity_ptr;
#elif defined(ABSL_HAVE_THREAD_LOCAL) #elif defined(ABSL_HAVE_THREAD_LOCAL)
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#else #else
#error Thread-local storage not detected on this platform #error Thread-local storage not detected on this platform
#endif #endif
@@ -248,9 +254,10 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
#endif #endif


#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT #ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
return thread_identity_ptr;
}
inline ThreadIdentity* CurrentThreadIdentityIfPresent()
{
return thread_identity_ptr;
}
#endif #endif


#elif ABSL_THREAD_IDENTITY_MODE != \ #elif ABSL_THREAD_IDENTITY_MODE != \
@@ -258,8 +265,8 @@ inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
#error Unknown ABSL_THREAD_IDENTITY_MODE #error Unknown ABSL_THREAD_IDENTITY_MODE
#endif #endif


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ #endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
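The comments above explain why PerThreadSynch must be 256-byte aligned: Mutex and CondVar pack its address into the high-order bits of their state word, so the low kLowZeroBits of that address have to be zero. A minimal standalone sketch of that pointer-packing idea, using hypothetical names rather than the actual Mutex representation:

#include <cassert>
#include <cstdint>

// Hypothetical illustration of the alignment trick: an object aligned to
// 1 << kLowZeroBits has zero low bits, so those bits can carry flags while
// the pointer itself lives in the remaining high-order bits.
constexpr int kLowZeroBits = 8;
constexpr int kAlignment = 1 << kLowZeroBits;

struct alignas(kAlignment) FakePerThreadSynch
{
    int dummy;
};

int main()
{
    static FakePerThreadSynch s;
    uintptr_t bits = reinterpret_cast<uintptr_t>(&s);
    assert((bits & (kAlignment - 1)) == 0);  // the low 8 bits are guaranteed zero
    uintptr_t packed = bits | 0x3;           // stash two flag bits in them
    auto* recovered = reinterpret_cast<FakePerThreadSynch*>(packed & ~uintptr_t{kAlignment - 1});
    assert(recovered == &s);
}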

+ 46
- 44
CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h View File

@@ -21,55 +21,57 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// Helper functions that allow throwing exceptions consistently from anywhere.
// The main use case is for header-based libraries (e.g., templates), as they will
// be built by many different targets with their own compiler options.
// In particular, this will allow a safe way to throw exceptions even if the
// caller is compiled with -fno-exceptions. This is intended for implementing
// things like map<>::at(), which the standard documents as throwing an
// exception on error.
//
// Using other techniques like #if tricks could lead to ODR violations.
//
// You shouldn't use it unless you're writing code that you know will be built
// both with and without exceptions and you need to conform to an interface
// that uses exceptions.
// Helper functions that allow throwing exceptions consistently from anywhere.
// The main use case is for header-based libraries (e.g., templates), as they will
// be built by many different targets with their own compiler options.
// In particular, this will allow a safe way to throw exceptions even if the
// caller is compiled with -fno-exceptions. This is intended for implementing
// things like map<>::at(), which the standard documents as throwing an
// exception on error.
//
// Using other techniques like #if tricks could lead to ODR violations.
//
// You shouldn't use it unless you're writing code that you know will be built
// both with and without exceptions and you need to conform to an interface
// that uses exceptions.


[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
[[noreturn]] void ThrowStdLogicError(const char* what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
[[noreturn]] void ThrowStdDomainError(const char* what_arg);
[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
[[noreturn]] void ThrowStdLengthError(const char* what_arg);
[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRangeError(const char* what_arg);
[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
[[noreturn]] void ThrowStdLogicError(const char* what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
[[noreturn]] void ThrowStdDomainError(const char* what_arg);
[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
[[noreturn]] void ThrowStdLengthError(const char* what_arg);
[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
[[noreturn]] void ThrowStdRangeError(const char* what_arg);
[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);


[[noreturn]] void ThrowStdBadFunctionCall();
[[noreturn]] void ThrowStdBadAlloc();
[[noreturn]] void ThrowStdBadFunctionCall();
[[noreturn]] void ThrowStdBadAlloc();


// ThrowStdBadArrayNewLength() cannot be consistently supported because
// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
// libcxx (as of 3.2) and msvc (as of 2015) both have it.
// [[noreturn]] void ThrowStdBadArrayNewLength();
// ThrowStdBadArrayNewLength() cannot be consistently supported because
// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
// libcxx (as of 3.2) and msvc (as of 2015) both have it.
// [[noreturn]] void ThrowStdBadArrayNewLength();


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ #endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
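As a rough illustration of the use case the comment describes, here is a hedged sketch of an at()-style accessor built on one of the declared delegates. TinyArray is a made-up container; only ThrowStdOutOfRange comes from the header above:

#include <cstddef>

#include "absl/base/internal/throw_delegate.h"

// Made-up fixed-size container showing the intended call site: a header-only
// at()-style accessor that reports range errors through the delegate, so the
// same header also works for callers built with -fno-exceptions.
template<typename T, std::size_t N>
class TinyArray
{
public:
    T& at(std::size_t i)
    {
        if (i >= N)
        {
            absl::base_internal::ThrowStdOutOfRange("TinyArray::at: index out of range");
        }
        return data_[i];
    }

private:
    T data_[N] = {};
};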

+ 50
- 36
CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h View File

@@ -31,51 +31,65 @@
// The unaligned API is C++ only. The declarations use C++ features // The unaligned API is C++ only. The declarations use C++ features
// (namespaces, inline) which are absent or incompatible in C. // (namespaces, inline) which are absent or incompatible in C.
#if defined(__cplusplus) #if defined(__cplusplus)
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

inline uint16_t UnalignedLoad16(const void *p) {
uint16_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline uint32_t UnalignedLoad32(const void *p) {
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline uint64_t UnalignedLoad64(const void *p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); }

inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); }

inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); }

} // namespace base_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{

inline uint16_t UnalignedLoad16(const void* p)
{
uint16_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline uint32_t UnalignedLoad32(const void* p)
{
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline uint64_t UnalignedLoad64(const void* p)
{
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}

inline void UnalignedStore16(void* p, uint16_t v)
{
memcpy(p, &v, sizeof v);
}

inline void UnalignedStore32(void* p, uint32_t v)
{
memcpy(p, &v, sizeof v);
}

inline void UnalignedStore64(void* p, uint64_t v)
{
memcpy(p, &v, sizeof v);
}

} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ #define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
(absl::base_internal::UnalignedLoad16(_p))
(absl::base_internal::UnalignedLoad16(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ #define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
(absl::base_internal::UnalignedLoad32(_p))
(absl::base_internal::UnalignedLoad32(_p))
#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ #define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
(absl::base_internal::UnalignedLoad64(_p))
(absl::base_internal::UnalignedLoad64(_p))


#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
(absl::base_internal::UnalignedStore16(_p, _val))
(absl::base_internal::UnalignedStore16(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
(absl::base_internal::UnalignedStore32(_p, _val))
(absl::base_internal::UnalignedStore32(_p, _val))
#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
(absl::base_internal::UnalignedStore64(_p, _val))
(absl::base_internal::UnalignedStore64(_p, _val))


#endif // defined(__cplusplus), end of unaligned API #endif // defined(__cplusplus), end of unaligned API
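A short usage sketch of the pattern above: copying through memcpy into a local is what makes a misaligned read well-defined, which is all UnalignedLoad32 does. The helper below restates it standalone, purely for illustration; the result is in host byte order:

#include <cstdint>
#include <cstdio>
#include <cstring>

// LoadU32 restates UnalignedLoad32 outside the absl namespace; memcpy into a
// local avoids undefined behavior from a misaligned pointer dereference.
inline uint32_t LoadU32(const void* p)
{
    uint32_t t;
    memcpy(&t, p, sizeof t);
    return t;
}

int main()
{
    unsigned char buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    // Offset 1 is misaligned for uint32_t; reading through memcpy is still valid.
    uint32_t v = LoadU32(buf + 1);
    std::printf("%u\n", v);  // exact value depends on host endianness
}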




+ 46
- 41
CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h View File

@@ -70,62 +70,67 @@
// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. // Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 // Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) #if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
#define ABSL_USE_UNSCALED_CYCLECLOCK \
(ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
#define ABSL_USE_UNSCALED_CYCLECLOCK \
(ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
#endif #endif


#if ABSL_USE_UNSCALED_CYCLECLOCK #if ABSL_USE_UNSCALED_CYCLECLOCK


// This macro can be used to test if UnscaledCycleClock::Frequency() // This macro can be used to test if UnscaledCycleClock::Frequency()
// is NominalCPUFrequency() on a particular platform. // is NominalCPUFrequency() on a particular platform.
#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
defined(_M_IX86) || defined(_M_X64))
#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || defined(_M_IX86) || defined(_M_X64))
#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
#endif #endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
class UnscaledCycleClockWrapperForGetCurrentTime;
} // namespace time_internal

namespace base_internal {
class CycleClock;
class UnscaledCycleClockWrapperForInitializeFrequency;

class UnscaledCycleClock {
private:
UnscaledCycleClock() = delete;

// Return the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();

// Returns how much UnscaledCycleClock::Now() increases per second.
// This is not necessarily the core CPU clock frequency.
// It may be the nominal value reported by the kernel, rather than a measured
// value.
static double Frequency();

// Allowed users
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace time_internal
{
class UnscaledCycleClockWrapperForGetCurrentTime;
} // namespace time_internal

namespace base_internal
{
class CycleClock;
class UnscaledCycleClockWrapperForInitializeFrequency;

class UnscaledCycleClock
{
private:
UnscaledCycleClock() = delete;

// Return the value of a cycle counter that counts at a rate that is
// approximately constant.
static int64_t Now();

// Returns how much UnscaledCycleClock::Now() increases per second.
// This is not necessarily the core CPU clock frequency.
// It may be the nominal value reported by the kernel, rather than a measured
// value.
static double Frequency();

// Allowed users
friend class base_internal::CycleClock;
friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};


#if defined(__x86_64__) #if defined(__x86_64__)


inline int64_t UnscaledCycleClock::Now() {
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
}
inline int64_t UnscaledCycleClock::Now()
{
uint64_t low, high;
__asm__ volatile("rdtsc"
: "=a"(low), "=d"(high));
return (high << 32) | low;
}


#endif #endif


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_USE_UNSCALED_CYCLECLOCK #endif // ABSL_USE_UNSCALED_CYCLECLOCK
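For illustration, the rdtsc read shown above can be lifted into a free function. Note the result is an unscaled cycle count; it only becomes wall time after dividing by something like Frequency(). This is a hedged sketch with made-up names, since UnscaledCycleClock itself is internal and its members are private:

#include <cstdint>
#include <cstdio>

#if defined(__x86_64__)
// Standalone restatement of the rdtsc read above; the real class keeps this
// private and exposes it only to the listed friend classes.
inline int64_t ReadTsc()
{
    uint64_t low, high;
    __asm__ volatile("rdtsc"
                     : "=a"(low), "=d"(high));
    return static_cast<int64_t>((high << 32) | low);
}

int main()
{
    int64_t start = ReadTsc();
    volatile int sink = 0;
    for (int i = 0; i < 1000; ++i)
        sink += i;  // some work to measure
    std::printf("unscaled cycles: %lld\n", static_cast<long long>(ReadTsc() - start));
}
#else
int main()
{
}
#endif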


+ 147
- 142
CAPI/cpp/grpc/include/absl/base/log_severity.h View File

@@ -21,152 +21,157 @@
#include "absl/base/attributes.h" #include "absl/base/attributes.h"
#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

// absl::LogSeverity
//
// Four severity levels are defined. Logging APIs should terminate the program
// when a message is logged at severity `kFatal`; the other levels have no
// special semantics.
//
// Values other than the four defined levels (e.g. produced by `static_cast`)
// are valid, but their semantics when passed to a function, macro, or flag
// depend on the function, macro, or flag. The usual behavior is to normalize
// such values to a defined severity level, however in some cases values other
// than the defined levels are useful for comparison.
//
// Example:
//
// // Effectively disables all logging:
// SetMinLogLevel(static_cast<absl::LogSeverity>(100));
//
// Abseil flags may be defined with type `LogSeverity`. Dependency layering
// constraints require that the `AbslParseFlag()` overload be declared and
// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
// overload is defined there as well for consistency.
//
// absl::LogSeverity Flag String Representation
//
// An `absl::LogSeverity` has a string representation used for parsing
// command-line flags based on the enumerator name (e.g. `kFatal`) or
// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
//
// Additionally, the parser accepts arbitrary integers (as if the type were
// `int`).
//
// Examples:
//
// --my_log_level=kInfo
// --my_log_level=INFO
// --my_log_level=info
// --my_log_level=0
//
// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
// the standard levels and a base-ten integer otherwise.
enum class LogSeverity : int {
kInfo = 0,
kWarning = 1,
kError = 2,
kFatal = 3,
};

// LogSeverities()
//
// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
// least to most severe.
constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
}

// LogSeverityName()
//
// Returns the all-caps string representation (e.g. "INFO") of the specified
// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
constexpr const char* LogSeverityName(absl::LogSeverity s) {
return s == absl::LogSeverity::kInfo
? "INFO"
: s == absl::LogSeverity::kWarning
? "WARNING"
: s == absl::LogSeverity::kError
? "ERROR"
: s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN";
}

// NormalizeLogSeverity()
//
// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
// normalize to `kError` (**NOT** `kFatal`).
constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
return s < absl::LogSeverity::kInfo
? absl::LogSeverity::kInfo
: s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s;
}
constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
}

// operator<<
//
// The exact representation of a streamed `absl::LogSeverity` is deliberately
// unspecified; do not rely on it.
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);

// Enums representing a lower bound for LogSeverity. APIs that only operate on
// messages of at least a certain level (for example, `SetMinLogLevel()`) use
// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
// a level above all threshold levels and therefore no log message will
// ever meet this threshold.
enum class LogSeverityAtLeast : int {
kInfo = static_cast<int>(absl::LogSeverity::kInfo),
kWarning = static_cast<int>(absl::LogSeverity::kWarning),
kError = static_cast<int>(absl::LogSeverity::kError),
kFatal = static_cast<int>(absl::LogSeverity::kFatal),
kInfinity = 1000,
};

std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);

// Enums representing an upper bound for LogSeverity. APIs that only operate on
// messages of at most a certain level (for example, buffer all messages at or
// below a certain level) use this type to specify that level.
// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
// levels and therefore will exclude all log messages.
enum class LogSeverityAtMost : int {
kNegativeInfinity = -1000,
kInfo = static_cast<int>(absl::LogSeverity::kInfo),
kWarning = static_cast<int>(absl::LogSeverity::kWarning),
kError = static_cast<int>(absl::LogSeverity::kError),
kFatal = static_cast<int>(absl::LogSeverity::kFatal),
};

std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
namespace absl
{
ABSL_NAMESPACE_BEGIN

// absl::LogSeverity
//
// Four severity levels are defined. Logging APIs should terminate the program
// when a message is logged at severity `kFatal`; the other levels have no
// special semantics.
//
// Values other than the four defined levels (e.g. produced by `static_cast`)
// are valid, but their semantics when passed to a function, macro, or flag
// depend on the function, macro, or flag. The usual behavior is to normalize
// such values to a defined severity level, however in some cases values other
// than the defined levels are useful for comparison.
//
// Example:
//
// // Effectively disables all logging:
// SetMinLogLevel(static_cast<absl::LogSeverity>(100));
//
// Abseil flags may be defined with type `LogSeverity`. Dependency layering
// constraints require that the `AbslParseFlag()` overload be declared and
// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
// overload is defined there as well for consistency.
//
// absl::LogSeverity Flag String Representation
//
// An `absl::LogSeverity` has a string representation used for parsing
// command-line flags based on the enumerator name (e.g. `kFatal`) or
// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
//
// Additionally, the parser accepts arbitrary integers (as if the type were
// `int`).
//
// Examples:
//
// --my_log_level=kInfo
// --my_log_level=INFO
// --my_log_level=info
// --my_log_level=0
//
// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
// the standard levels and a base-ten integer otherwise.
enum class LogSeverity : int
{
kInfo = 0,
kWarning = 1,
kError = 2,
kFatal = 3,
};

// LogSeverities()
//
// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
// least to most severe.
constexpr std::array<absl::LogSeverity, 4> LogSeverities()
{
return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
}

// LogSeverityName()
//
// Returns the all-caps string representation (e.g. "INFO") of the specified
// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
constexpr const char* LogSeverityName(absl::LogSeverity s)
{
return s == absl::LogSeverity::kInfo ? "INFO" : s == absl::LogSeverity::kWarning ? "WARNING" :
s == absl::LogSeverity::kError ? "ERROR" :
s == absl::LogSeverity::kFatal ? "FATAL" :
"UNKNOWN";
}

// NormalizeLogSeverity()
//
// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
// normalize to `kError` (**NOT** `kFatal`).
constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s)
{
return s < absl::LogSeverity::kInfo ? absl::LogSeverity::kInfo : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError :
s;
}
constexpr absl::LogSeverity NormalizeLogSeverity(int s)
{
return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
}

// operator<<
//
// The exact representation of a streamed `absl::LogSeverity` is deliberately
// unspecified; do not rely on it.
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);

// Enums representing a lower bound for LogSeverity. APIs that only operate on
// messages of at least a certain level (for example, `SetMinLogLevel()`) use
// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
// a level above all threshold levels and therefore no log message will
// ever meet this threshold.
enum class LogSeverityAtLeast : int
{
kInfo = static_cast<int>(absl::LogSeverity::kInfo),
kWarning = static_cast<int>(absl::LogSeverity::kWarning),
kError = static_cast<int>(absl::LogSeverity::kError),
kFatal = static_cast<int>(absl::LogSeverity::kFatal),
kInfinity = 1000,
};

std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);

// Enums representing an upper bound for LogSeverity. APIs that only operate on
// messages of at most a certain level (for example, buffer all messages at or
// below a certain level) use this type to specify that level.
// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
// levels and therefore will exclude all log messages.
enum class LogSeverityAtMost : int
{
kNegativeInfinity = -1000,
kInfo = static_cast<int>(absl::LogSeverity::kInfo),
kWarning = static_cast<int>(absl::LogSeverity::kWarning),
kError = static_cast<int>(absl::LogSeverity::kError),
kFatal = static_cast<int>(absl::LogSeverity::kFatal),
};

std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);


#define COMPOP(op1, op2, T) \ #define COMPOP(op1, op2, T) \
constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
return static_cast<absl::LogSeverity>(lhs) op1 rhs; \
} \
constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
return lhs op2 static_cast<absl::LogSeverity>(rhs); \
}

// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
// `LogSeverityAtMost` are only supported in one direction.
// Valid checks are:
// LogSeverity >= LogSeverityAtLeast
// LogSeverity < LogSeverityAtLeast
// LogSeverity <= LogSeverityAtMost
// LogSeverity > LogSeverityAtMost
COMPOP(>, <, LogSeverityAtLeast)
COMPOP(<=, >=, LogSeverityAtLeast)
COMPOP(<, >, LogSeverityAtMost)
COMPOP(>=, <=, LogSeverityAtMost)
constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) \
{ \
return static_cast<absl::LogSeverity>(lhs) op1 rhs; \
} \
constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) \
{ \
return lhs op2 static_cast<absl::LogSeverity>(rhs); \
}

// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
// `LogSeverityAtMost` are only supported in one direction.
// Valid checks are:
// LogSeverity >= LogSeverityAtLeast
// LogSeverity < LogSeverityAtLeast
// LogSeverity <= LogSeverityAtMost
// LogSeverity > LogSeverityAtMost
COMPOP(>, <, LogSeverityAtLeast)
COMPOP(<=, >=, LogSeverityAtLeast)
COMPOP(<, >, LogSeverityAtMost)
COMPOP(>=, <=, LogSeverityAtMost)
#undef COMPOP #undef COMPOP


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_BASE_LOG_SEVERITY_H_ #endif // ABSL_BASE_LOG_SEVERITY_H_
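A small usage sketch of the declarations above; the expected output follows from the documented normalization and comparison rules:

#include <iostream>

#include "absl/base/log_severity.h"

int main()
{
    // Values above kFatal normalize to kError (not kFatal), per the comment above.
    absl::LogSeverity s = absl::NormalizeLogSeverity(100);
    std::cout << absl::LogSeverityName(s) << "\n";  // prints "ERROR"

    // Threshold comparison in the supported direction: LogSeverity >= LogSeverityAtLeast.
    if (absl::LogSeverity::kError >= absl::LogSeverityAtLeast::kWarning)
    {
        std::cout << "would log\n";
    }
}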

+ 33
- 26
CAPI/cpp/grpc/include/absl/base/macros.h View File

@@ -42,17 +42,19 @@
// can be used in defining new arrays. If you use this macro on a pointer by // can be used in defining new arrays. If you use this macro on a pointer by
// mistake, you will get a compile-time error. // mistake, you will get a compile-time error.
#define ABSL_ARRAYSIZE(array) \ #define ABSL_ARRAYSIZE(array) \
(sizeof(::absl::macros_internal::ArraySizeHelper(array)))
(sizeof(::absl::macros_internal::ArraySizeHelper(array)))


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace macros_internal {
// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
// The function doesn't need a definition, as we only use its type.
template <typename T, size_t N>
auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
} // namespace macros_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace macros_internal
{
// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
// The function doesn't need a definition, as we only use its type.
template<typename T, size_t N>
auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
} // namespace macros_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


// ABSL_BAD_CALL_IF() // ABSL_BAD_CALL_IF()
@@ -75,7 +77,7 @@ ABSL_NAMESPACE_END
// #endif // ABSL_BAD_CALL_IF // #endif // ABSL_BAD_CALL_IF
#if ABSL_HAVE_ATTRIBUTE(enable_if) #if ABSL_HAVE_ATTRIBUTE(enable_if)
#define ABSL_BAD_CALL_IF(expr, msg) \ #define ABSL_BAD_CALL_IF(expr, msg) \
__attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
__attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
#endif #endif


// ABSL_ASSERT() // ABSL_ASSERT()
@@ -92,25 +94,24 @@ ABSL_NAMESPACE_END
// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ // https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
#if defined(NDEBUG) #if defined(NDEBUG)
#define ABSL_ASSERT(expr) \ #define ABSL_ASSERT(expr) \
(false ? static_cast<void>(expr) : static_cast<void>(0))
(false ? static_cast<void>(expr) : static_cast<void>(0))
#else #else
#define ABSL_ASSERT(expr) \
(ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
: [] { assert(false && #expr); }()) // NOLINT
#define ABSL_ASSERT(expr) \
(ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) : [] { assert(false && #expr); }()) // NOLINT
#endif #endif


// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()` // `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()`
// aborts the program in release mode (when NDEBUG is defined). The // aborts the program in release mode (when NDEBUG is defined). The
// implementation should abort the program as quickly as possible and ideally it // implementation should abort the program as quickly as possible and ideally it
// should not be possible to ignore the abort request. // should not be possible to ignore the abort request.
#if (ABSL_HAVE_BUILTIN(__builtin_trap) && \
ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \
#if (ABSL_HAVE_BUILTIN(__builtin_trap) && ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \
(defined(__GNUC__) && !defined(__clang__)) (defined(__GNUC__) && !defined(__clang__))
#define ABSL_INTERNAL_HARDENING_ABORT() \ #define ABSL_INTERNAL_HARDENING_ABORT() \
do { \
__builtin_trap(); \
__builtin_unreachable(); \
} while (false)
do \
{ \
__builtin_trap(); \
__builtin_unreachable(); \
} while (false)
#else #else
#define ABSL_INTERNAL_HARDENING_ABORT() abort() #define ABSL_INTERNAL_HARDENING_ABORT() abort()
#endif #endif
@@ -127,9 +128,8 @@ ABSL_NAMESPACE_END
// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on // See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on
// hardened mode. // hardened mode.
#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) #if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
#define ABSL_HARDENING_ASSERT(expr) \
(ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
: [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
#define ABSL_HARDENING_ASSERT(expr) \
(ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) : [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
#else #else
#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr) #define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
#endif #endif
@@ -137,11 +137,18 @@ ABSL_NAMESPACE_END
#ifdef ABSL_HAVE_EXCEPTIONS #ifdef ABSL_HAVE_EXCEPTIONS
#define ABSL_INTERNAL_TRY try #define ABSL_INTERNAL_TRY try
#define ABSL_INTERNAL_CATCH_ANY catch (...) #define ABSL_INTERNAL_CATCH_ANY catch (...)
#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
#define ABSL_INTERNAL_RETHROW \
do \
{ \
throw; \
} while (false)
#else // ABSL_HAVE_EXCEPTIONS #else // ABSL_HAVE_EXCEPTIONS
#define ABSL_INTERNAL_TRY if (true) #define ABSL_INTERNAL_TRY if (true)
#define ABSL_INTERNAL_CATCH_ANY else if (false) #define ABSL_INTERNAL_CATCH_ANY else if (false)
#define ABSL_INTERNAL_RETHROW do {} while (false)
#define ABSL_INTERNAL_RETHROW \
do \
{ \
} while (false)
#endif // ABSL_HAVE_EXCEPTIONS #endif // ABSL_HAVE_EXCEPTIONS


// `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which // `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which
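For reference, a brief sketch of how two of the macros touched in this hunk are typically used; illustrative only, not taken from the diff (kPrimes and SumPrimes are made up):

#include <cstddef>

#include "absl/base/macros.h"

constexpr int kPrimes[] = {2, 3, 5, 7, 11};

int SumPrimes()
{
    // ABSL_ARRAYSIZE is a compile-time constant, so it can size other arrays
    // and feed static_assert; applying it to a pointer fails to compile.
    static_assert(ABSL_ARRAYSIZE(kPrimes) == 5, "unexpected array size");

    int sum = 0;
    for (std::size_t i = 0; i < ABSL_ARRAYSIZE(kPrimes); ++i)
    {
        ABSL_ASSERT(kPrimes[i] > 1);  // checked only when NDEBUG is not defined
        sum += kPrimes[i];
    }
    return sum;
}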


+ 22
- 11
CAPI/cpp/grpc/include/absl/base/optimization.h View File

@@ -40,7 +40,11 @@
// return result; // return result;
// } // }
#if defined(__pnacl__) #if defined(__pnacl__)
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \
if (volatile int x = 0) \
{ \
(void)x; \
}
#elif defined(__clang__) #elif defined(__clang__)
// Clang will not tail call given inline volatile assembly. // Clang will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
@@ -52,7 +56,11 @@
// The __nop() intrinsic blocks the optimisation. // The __nop() intrinsic blocks the optimisation.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else #else
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \
if (volatile int x = 0) \
{ \
(void)x; \
}
#endif #endif


// ABSL_CACHELINE_SIZE // ABSL_CACHELINE_SIZE
@@ -210,17 +218,20 @@
#elif ABSL_HAVE_BUILTIN(__builtin_assume) #elif ABSL_HAVE_BUILTIN(__builtin_assume)
#define ABSL_ASSUME(cond) __builtin_assume(cond) #define ABSL_ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
#define ABSL_ASSUME(cond) \
do { \
if (!(cond)) __builtin_unreachable(); \
} while (0)
#define ABSL_ASSUME(cond) \
do \
{ \
if (!(cond)) \
__builtin_unreachable(); \
} while (0)
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
#define ABSL_ASSUME(cond) __assume(cond) #define ABSL_ASSUME(cond) __assume(cond)
#else #else
#define ABSL_ASSUME(cond) \
do { \
static_cast<void>(false && (cond)); \
} while (0)
#define ABSL_ASSUME(cond) \
do \
{ \
static_cast<void>(false && (cond)); \
} while (0)
#endif #endif


// ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) // ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond)
@@ -244,7 +255,7 @@
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x #define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x)
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ #define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \
asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__))
asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__))
#else #else
#define ABSL_INTERNAL_UNIQUE_SMALL_NAME() #define ABSL_INTERNAL_UNIQUE_SMALL_NAME()
#endif #endif
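A hedged example of the ABSL_ASSUME expansion shown above: the condition is an optimizer hint rather than a runtime check, and violating it is undefined behavior. DivideBy8 is a made-up function; the assumption lets the compiler drop the sign handling in the division:

#include "absl/base/optimization.h"

int DivideBy8(int x)
{
    ABSL_ASSUME(x >= 0);  // optimizer hint, not a runtime check; UB if violated
    return x / 8;         // can compile down to a plain right shift
}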


+ 0
- 3
CAPI/cpp/grpc/include/absl/base/options.h View File

@@ -102,7 +102,6 @@


#define ABSL_OPTION_USE_STD_ANY 0 #define ABSL_OPTION_USE_STD_ANY 0



// ABSL_OPTION_USE_STD_OPTIONAL // ABSL_OPTION_USE_STD_OPTIONAL
// //
// This option controls whether absl::optional is implemented as an alias to // This option controls whether absl::optional is implemented as an alias to
@@ -129,7 +128,6 @@


#define ABSL_OPTION_USE_STD_OPTIONAL 0 #define ABSL_OPTION_USE_STD_OPTIONAL 0



// ABSL_OPTION_USE_STD_STRING_VIEW // ABSL_OPTION_USE_STD_STRING_VIEW
// //
// This option controls whether absl::string_view is implemented as an alias to // This option controls whether absl::string_view is implemented as an alias to
@@ -182,7 +180,6 @@


#define ABSL_OPTION_USE_STD_VARIANT 0 #define ABSL_OPTION_USE_STD_VARIANT 0



// ABSL_OPTION_USE_INLINE_NAMESPACE // ABSL_OPTION_USE_INLINE_NAMESPACE
// ABSL_OPTION_INLINE_NAMESPACE_NAME // ABSL_OPTION_INLINE_NAMESPACE_NAME
// //


+ 29
- 25
CAPI/cpp/grpc/include/absl/base/thread_annotations.h View File

@@ -140,14 +140,14 @@
// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... }
#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) #if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required)
#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \
__attribute__((exclusive_locks_required(__VA_ARGS__)))
__attribute__((exclusive_locks_required(__VA_ARGS__)))
#else #else
#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...)
#endif #endif


#if ABSL_HAVE_ATTRIBUTE(shared_locks_required) #if ABSL_HAVE_ATTRIBUTE(shared_locks_required)
#define ABSL_SHARED_LOCKS_REQUIRED(...) \ #define ABSL_SHARED_LOCKS_REQUIRED(...) \
__attribute__((shared_locks_required(__VA_ARGS__)))
__attribute__((shared_locks_required(__VA_ARGS__)))
#else #else
#define ABSL_SHARED_LOCKS_REQUIRED(...) #define ABSL_SHARED_LOCKS_REQUIRED(...)
#endif #endif
@@ -202,7 +202,7 @@
// not release it. // not release it.
#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) #if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function)
#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \
__attribute__((exclusive_lock_function(__VA_ARGS__)))
__attribute__((exclusive_lock_function(__VA_ARGS__)))
#else #else
#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...)
#endif #endif
@@ -213,7 +213,7 @@
// function, and do not release it. // function, and do not release it.
#if ABSL_HAVE_ATTRIBUTE(shared_lock_function) #if ABSL_HAVE_ATTRIBUTE(shared_lock_function)
#define ABSL_SHARED_LOCK_FUNCTION(...) \ #define ABSL_SHARED_LOCK_FUNCTION(...) \
__attribute__((shared_lock_function(__VA_ARGS__)))
__attribute__((shared_lock_function(__VA_ARGS__)))
#else #else
#define ABSL_SHARED_LOCK_FUNCTION(...) #define ABSL_SHARED_LOCK_FUNCTION(...)
#endif #endif
@@ -238,14 +238,14 @@
// mutex is assumed to be `this`. // mutex is assumed to be `this`.
#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) #if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function)
#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \
__attribute__((exclusive_trylock_function(__VA_ARGS__)))
__attribute__((exclusive_trylock_function(__VA_ARGS__)))
#else #else
#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...)
#endif #endif


#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) #if ABSL_HAVE_ATTRIBUTE(shared_trylock_function)
#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ #define ABSL_SHARED_TRYLOCK_FUNCTION(...) \
__attribute__((shared_trylock_function(__VA_ARGS__)))
__attribute__((shared_trylock_function(__VA_ARGS__)))
#else #else
#define ABSL_SHARED_TRYLOCK_FUNCTION(...) #define ABSL_SHARED_TRYLOCK_FUNCTION(...)
#endif #endif
@@ -256,14 +256,14 @@
// if it is not held. // if it is not held.
#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) #if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock)
#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \
__attribute__((assert_exclusive_lock(__VA_ARGS__)))
__attribute__((assert_exclusive_lock(__VA_ARGS__)))
#else #else
#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) #define ABSL_ASSERT_EXCLUSIVE_LOCK(...)
#endif #endif


#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) #if ABSL_HAVE_ATTRIBUTE(assert_shared_lock)
#define ABSL_ASSERT_SHARED_LOCK(...) \ #define ABSL_ASSERT_SHARED_LOCK(...) \
__attribute__((assert_shared_lock(__VA_ARGS__)))
__attribute__((assert_shared_lock(__VA_ARGS__)))
#else #else
#define ABSL_ASSERT_SHARED_LOCK(...) #define ABSL_ASSERT_SHARED_LOCK(...)
#endif #endif
@@ -275,7 +275,7 @@
// the locking behavior is more complicated than the analyzer can handle. // the locking behavior is more complicated than the analyzer can handle.
#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) #if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis)
#define ABSL_NO_THREAD_SAFETY_ANALYSIS \ #define ABSL_NO_THREAD_SAFETY_ANALYSIS \
__attribute__((no_thread_safety_analysis))
__attribute__((no_thread_safety_analysis))
#else #else
#define ABSL_NO_THREAD_SAFETY_ANALYSIS #define ABSL_NO_THREAD_SAFETY_ANALYSIS
#endif #endif
@@ -311,25 +311,29 @@
// but the compiler cannot confirm that. // but the compiler cannot confirm that.
#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) #define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x)


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace base_internal
{


// Takes a reference to a guarded data member, and returns an unguarded
// reference.
// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
template <typename T>
inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v;
}
// Takes a reference to a guarded data member, and returns an unguarded
// reference.
// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead.
template<typename T>
inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS
{
return v;
}


template <typename T>
inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS {
return v;
}
template<typename T>
inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS
{
return v;
}


} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_ #endif // ABSL_BASE_THREAD_ANNOTATIONS_H_
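A usage sketch of the annotations from this hunk. Counter is a made-up class; ABSL_GUARDED_BY and absl::Mutex come from other Abseil headers and are assumptions here, while the remaining annotations appear above:

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"

class Counter
{
public:
    void Increment()
    {
        absl::MutexLock lock(&mu_);
        IncrementLocked();
    }

    // Callers must already hold mu_; the analyzer enforces this at compile time.
    void IncrementLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_)
    {
        ++value_;
    }

    // Deliberately unsynchronized read, e.g. for logging; the macro only
    // silences the analysis and does not change the generated code.
    int RacyValue() const
    {
        return ABSL_TS_UNCHECKED_READ(value_);
    }

private:
    absl::Mutex mu_;
    int value_ ABSL_GUARDED_BY(mu_) = 0;
};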

+ 57
- 51
CAPI/cpp/grpc/include/absl/cleanup/cleanup.h View File

@@ -74,67 +74,73 @@
#include "absl/base/macros.h" #include "absl/base/macros.h"
#include "absl/cleanup/internal/cleanup.h" #include "absl/cleanup/internal/cleanup.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

template <typename Arg, typename Callback = void()>
class ABSL_MUST_USE_RESULT Cleanup final {
static_assert(cleanup_internal::WasDeduced<Arg>(),
"Explicit template parameters are not supported.");

static_assert(cleanup_internal::ReturnsVoid<Callback>(),
"Callbacks that return values are not supported.");

public:
Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT

Cleanup(Cleanup&& other) = default;

void Cancel() && {
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.DestroyCallback();
}

void Invoke() && {
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.InvokeCallback();
storage_.DestroyCallback();
}

~Cleanup() {
if (storage_.IsCallbackEngaged()) {
storage_.InvokeCallback();
storage_.DestroyCallback();
}
}

private:
cleanup_internal::Storage<Callback> storage_;
};
namespace absl
{
ABSL_NAMESPACE_BEGIN

template<typename Arg, typename Callback = void()>
class ABSL_MUST_USE_RESULT Cleanup final
{
static_assert(cleanup_internal::WasDeduced<Arg>(), "Explicit template parameters are not supported.");

static_assert(cleanup_internal::ReturnsVoid<Callback>(), "Callbacks that return values are not supported.");

public:
Cleanup(Callback callback) :
storage_(std::move(callback))
{
} // NOLINT

Cleanup(Cleanup&& other) = default;

void Cancel() &&
{
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.DestroyCallback();
}

void Invoke() &&
{
ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged());
storage_.InvokeCallback();
storage_.DestroyCallback();
}

~Cleanup()
{
if (storage_.IsCallbackEngaged())
{
storage_.InvokeCallback();
storage_.DestroyCallback();
}
}

private:
cleanup_internal::Storage<Callback> storage_;
};


// `absl::Cleanup c = /* callback */;` // `absl::Cleanup c = /* callback */;`
// //
// C++17 type deduction API for creating an instance of `absl::Cleanup` // C++17 type deduction API for creating an instance of `absl::Cleanup`
#if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) #if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)
template <typename Callback>
Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
template<typename Callback>
Cleanup(Callback callback) -> Cleanup<cleanup_internal::Tag, Callback>;
#endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) #endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION)


// `auto c = absl::MakeCleanup(/* callback */);`
//
// C++11 type deduction API for creating an instance of `absl::Cleanup`
template <typename... Args, typename Callback>
absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback) {
static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(),
"Explicit template parameters are not supported.");
// `auto c = absl::MakeCleanup(/* callback */);`
//
// C++11 type deduction API for creating an instance of `absl::Cleanup`
template<typename... Args, typename Callback>
absl::Cleanup<cleanup_internal::Tag, Callback> MakeCleanup(Callback callback)
{
static_assert(cleanup_internal::WasDeduced<cleanup_internal::Tag, Args...>(), "Explicit template parameters are not supported.");


static_assert(cleanup_internal::ReturnsVoid<Callback>(),
"Callbacks that return values are not supported.");
static_assert(cleanup_internal::ReturnsVoid<Callback>(), "Callbacks that return values are not supported.");


return {std::move(callback)};
}
return {std::move(callback)};
}


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_CLEANUP_CLEANUP_H_ #endif // ABSL_CLEANUP_CLEANUP_H_
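A usage sketch of the API above, assuming C++17 class template argument deduction; absl::MakeCleanup(...) is the equivalent C++11 spelling. File copying is only a stand-in task:

#include <cstddef>
#include <cstdio>

#include "absl/cleanup/cleanup.h"

// Each callback runs when its Cleanup leaves scope, unless
// std::move(cleanup).Cancel() is called first.
bool CopyFile(const char* src_path, const char* dst_path)
{
    std::FILE* in = std::fopen(src_path, "rb");
    if (in == nullptr)
        return false;
    absl::Cleanup close_in = [in] { std::fclose(in); };

    std::FILE* out = std::fopen(dst_path, "wb");
    if (out == nullptr)
        return false;  // close_in still closes `in` on this early return
    absl::Cleanup close_out = [out] { std::fclose(out); };

    char buf[4096];
    std::size_t n;
    while ((n = std::fread(buf, 1, sizeof buf, in)) > 0)
    {
        if (std::fwrite(buf, 1, n, out) != n)
            return false;
    }
    return true;
}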

+ 89
- 71
CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h View File

@@ -24,77 +24,95 @@
#include "absl/base/thread_annotations.h" #include "absl/base/thread_annotations.h"
#include "absl/utility/utility.h" #include "absl/utility/utility.h"


namespace absl {
ABSL_NAMESPACE_BEGIN

namespace cleanup_internal {

struct Tag {};

template <typename Arg, typename... Args>
constexpr bool WasDeduced() {
return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
(sizeof...(Args) == 0);
}

template <typename Callback>
constexpr bool ReturnsVoid() {
return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
}

template <typename Callback>
class Storage {
public:
Storage() = delete;

explicit Storage(Callback callback) {
// Placement-new into a character buffer is used for eager destruction when
// the cleanup is invoked or cancelled. To ensure this optimizes well, the
// behavior is implemented locally instead of using an absl::optional.
::new (GetCallbackBuffer()) Callback(std::move(callback));
is_callback_engaged_ = true;
}

Storage(Storage&& other) {
ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());

::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
is_callback_engaged_ = true;

other.DestroyCallback();
}

Storage(const Storage& other) = delete;

Storage& operator=(Storage&& other) = delete;

Storage& operator=(const Storage& other) = delete;

void* GetCallbackBuffer() { return static_cast<void*>(+callback_buffer_); }

Callback& GetCallback() {
return *reinterpret_cast<Callback*>(GetCallbackBuffer());
}

bool IsCallbackEngaged() const { return is_callback_engaged_; }

void DestroyCallback() {
is_callback_engaged_ = false;
GetCallback().~Callback();
}

void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS {
std::move(GetCallback())();
}

private:
bool is_callback_engaged_;
alignas(Callback) char callback_buffer_[sizeof(Callback)];
};

} // namespace cleanup_internal

ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN

namespace cleanup_internal
{

struct Tag
{
};

template<typename Arg, typename... Args>
constexpr bool WasDeduced()
{
return (std::is_same<cleanup_internal::Tag, Arg>::value) &&
(sizeof...(Args) == 0);
}

template<typename Callback>
constexpr bool ReturnsVoid()
{
return (std::is_same<base_internal::invoke_result_t<Callback>, void>::value);
}

template<typename Callback>
class Storage
{
public:
Storage() = delete;

explicit Storage(Callback callback)
{
// Placement-new into a character buffer is used for eager destruction when
// the cleanup is invoked or cancelled. To ensure this optimizes well, the
// behavior is implemented locally instead of using an absl::optional.
::new (GetCallbackBuffer()) Callback(std::move(callback));
is_callback_engaged_ = true;
}

Storage(Storage&& other)
{
ABSL_HARDENING_ASSERT(other.IsCallbackEngaged());

::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback()));
is_callback_engaged_ = true;

other.DestroyCallback();
}

Storage(const Storage& other) = delete;

Storage& operator=(Storage&& other) = delete;

Storage& operator=(const Storage& other) = delete;

void* GetCallbackBuffer()
{
return static_cast<void*>(+callback_buffer_);
}

Callback& GetCallback()
{
return *reinterpret_cast<Callback*>(GetCallbackBuffer());
}

bool IsCallbackEngaged() const
{
return is_callback_engaged_;
}

void DestroyCallback()
{
is_callback_engaged_ = false;
GetCallback().~Callback();
}

void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS
{
std::move(GetCallback())();
}

private:
bool is_callback_engaged_;
alignas(Callback) char callback_buffer_[sizeof(Callback)];
};

} // namespace cleanup_internal

ABSL_NAMESPACE_END
} // namespace absl } // namespace absl


#endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ #endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_
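The Storage comment above explains the design choice: placement-new into an aligned character buffer lets the callback be destroyed eagerly when the cleanup is invoked or cancelled, without pulling in absl::optional. A standalone miniature of that pattern, with made-up names and none of Storage's move or hardening logic:

#include <new>
#include <utility>

template<typename Callback>
class TinyStorage
{
public:
    explicit TinyStorage(Callback cb)
    {
        ::new (buffer_) Callback(std::move(cb));  // placement-new into the buffer
        engaged_ = true;
    }

    ~TinyStorage()
    {
        if (engaged_)
            Destroy();
    }

    void InvokeAndDestroy()
    {
        std::move(*Get())();  // invoke the stored callable exactly once
        Destroy();
    }

private:
    Callback* Get()
    {
        return reinterpret_cast<Callback*>(buffer_);
    }
    void Destroy()
    {
        engaged_ = false;
        Get()->~Callback();  // manual destructor call matches the placement-new
    }
    bool engaged_ = false;
    alignas(Callback) char buffer_[sizeof(Callback)];
};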

+ 812
- 794
CAPI/cpp/grpc/include/absl/container/btree_map.h
File diff suppressed because it is too large
View File


+ 760
- 737
CAPI/cpp/grpc/include/absl/container/btree_set.h
File diff suppressed because it is too large
View File


+ 182
- 133
CAPI/cpp/grpc/include/absl/container/btree_test.h View File

@@ -28,139 +28,188 @@
#include "absl/strings/cord.h" #include "absl/strings/cord.h"
#include "absl/time/time.h" #include "absl/time/time.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// Like remove_const but propagates the removal through std::pair.
template <typename T>
struct remove_pair_const {
using type = typename std::remove_const<T>::type;
};
template <typename T, typename U>
struct remove_pair_const<std::pair<T, U> > {
using type = std::pair<typename remove_pair_const<T>::type,
typename remove_pair_const<U>::type>;
};

// Utility class to provide an accessor for a key given a value. The default
// behavior is to treat the value as a pair and return the first element.
template <typename K, typename V>
struct KeyOfValue {
struct type {
const K& operator()(const V& p) const { return p.first; }
};
};

// Partial specialization of KeyOfValue class for when the key and value are
// the same type such as in set<> and btree_set<>.
template <typename K>
struct KeyOfValue<K, K> {
struct type {
const K& operator()(const K& k) const { return k; }
};
};

inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
assert(val <= maxval);
constexpr unsigned kBase = 64; // avoid integer division.
unsigned p = 15;
buf[p--] = 0;
while (maxval > 0) {
buf[p--] = ' ' + (val % kBase);
val /= kBase;
maxval /= kBase;
}
return buf + p + 1;
}

template <typename K>
struct Generator {
int maxval;
explicit Generator(int m) : maxval(m) {}
K operator()(int i) const {
assert(i <= maxval);
return K(i);
}
};

template <>
struct Generator<absl::Time> {
int maxval;
explicit Generator(int m) : maxval(m) {}
absl::Time operator()(int i) const { return absl::FromUnixMillis(i); }
};

template <>
struct Generator<std::string> {
int maxval;
explicit Generator(int m) : maxval(m) {}
std::string operator()(int i) const {
char buf[16];
return GenerateDigits(buf, i, maxval);
}
};

template <>
struct Generator<Cord> {
int maxval;
explicit Generator(int m) : maxval(m) {}
Cord operator()(int i) const {
char buf[16];
return Cord(GenerateDigits(buf, i, maxval));
}
};

template <typename T, typename U>
struct Generator<std::pair<T, U> > {
Generator<typename remove_pair_const<T>::type> tgen;
Generator<typename remove_pair_const<U>::type> ugen;

explicit Generator(int m) : tgen(m), ugen(m) {}
std::pair<T, U> operator()(int i) const {
return std::make_pair(tgen(i), ugen(i));
}
};

// Generate n values for our tests and benchmarks. Value range is [0, maxval].
inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed) {
// NOTE: Some tests rely on generated numbers not changing between test runs.
// We use std::minstd_rand0 because it is well-defined, but don't use
// std::uniform_int_distribution because platforms use different algorithms.
std::minstd_rand0 rng(seed);

std::vector<int> values;
absl::flat_hash_set<int> unique_values;
if (values.size() < n) {
for (int i = values.size(); i < n; i++) {
int value;
do {
value = static_cast<int>(rng()) % (maxval + 1);
} while (!unique_values.insert(value).second);

values.push_back(value);
}
}
return values;
}

// Generates n values in the range [0, maxval].
template <typename V>
std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed) {
const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed);
Generator<V> gen(maxval);
std::vector<V> vec;

vec.reserve(n);
for (int i = 0; i < n; i++) {
vec.push_back(gen(nums[i]));
}

return vec;
}

} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

// Like remove_const but propagates the removal through std::pair.
template<typename T>
struct remove_pair_const
{
using type = typename std::remove_const<T>::type;
};
template<typename T, typename U>
struct remove_pair_const<std::pair<T, U>>
{
using type = std::pair<typename remove_pair_const<T>::type, typename remove_pair_const<U>::type>;
};

// Utility class to provide an accessor for a key given a value. The default
// behavior is to treat the value as a pair and return the first element.
template<typename K, typename V>
struct KeyOfValue
{
struct type
{
const K& operator()(const V& p) const
{
return p.first;
}
};
};

// Partial specialization of KeyOfValue class for when the key and value are
// the same type such as in set<> and btree_set<>.
template<typename K>
struct KeyOfValue<K, K>
{
struct type
{
const K& operator()(const K& k) const
{
return k;
}
};
};

inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval)
{
assert(val <= maxval);
constexpr unsigned kBase = 64; // avoid integer division.
unsigned p = 15;
buf[p--] = 0;
while (maxval > 0)
{
buf[p--] = ' ' + (val % kBase);
val /= kBase;
maxval /= kBase;
}
return buf + p + 1;
}

template<typename K>
struct Generator
{
int maxval;
explicit Generator(int m) :
maxval(m)
{
}
K operator()(int i) const
{
assert(i <= maxval);
return K(i);
}
};

template<>
struct Generator<absl::Time>
{
int maxval;
explicit Generator(int m) :
maxval(m)
{
}
absl::Time operator()(int i) const
{
return absl::FromUnixMillis(i);
}
};

template<>
struct Generator<std::string>
{
int maxval;
explicit Generator(int m) :
maxval(m)
{
}
std::string operator()(int i) const
{
char buf[16];
return GenerateDigits(buf, i, maxval);
}
};

template<>
struct Generator<Cord>
{
int maxval;
explicit Generator(int m) :
maxval(m)
{
}
Cord operator()(int i) const
{
char buf[16];
return Cord(GenerateDigits(buf, i, maxval));
}
};

template<typename T, typename U>
struct Generator<std::pair<T, U>>
{
Generator<typename remove_pair_const<T>::type> tgen;
Generator<typename remove_pair_const<U>::type> ugen;

explicit Generator(int m) :
tgen(m),
ugen(m)
{
}
std::pair<T, U> operator()(int i) const
{
return std::make_pair(tgen(i), ugen(i));
}
};

// Generate n values for our tests and benchmarks. Value range is [0, maxval].
inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed)
{
// NOTE: Some tests rely on generated numbers not changing between test runs.
// We use std::minstd_rand0 because it is well-defined, but don't use
// std::uniform_int_distribution because platforms use different algorithms.
std::minstd_rand0 rng(seed);

std::vector<int> values;
absl::flat_hash_set<int> unique_values;
if (values.size() < n)
{
for (int i = values.size(); i < n; i++)
{
int value;
do
{
value = static_cast<int>(rng()) % (maxval + 1);
} while (!unique_values.insert(value).second);

values.push_back(value);
}
}
return values;
}

// Generates n values in the range [0, maxval].
template<typename V>
std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed)
{
const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed);
Generator<V> gen(maxval);
std::vector<V> vec;

vec.reserve(n);
for (int i = 0; i < n; i++)
{
vec.push_back(gen(nums[i]));
}

return vec;
}

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_BTREE_TEST_H_
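The Generator specializations above all map an int onto the key type under test, so one pseudo-random integer sequence can drive btree_set, btree_map, string, and Cord instantiations alike. A hedged usage sketch, assuming the btree_test.h helpers shown above are in scope:

#include <string>
#include <vector>

// Sketch only: five unique string keys in [0, 100], reproducible because
// GenerateNumbersWithSeed drives std::minstd_rand0 with a fixed seed.
std::vector<std::string> MakeTestKeys() {
  return absl::container_internal::GenerateValuesWithSeed<std::string>(
      /*n=*/5, /*maxval=*/100, /*seed=*/42);
}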

+ 586
- 462
CAPI/cpp/grpc/include/absl/container/fixed_array.h
File diff suppressed because it is too large


+ 588
- 567
CAPI/cpp/grpc/include/absl/container/flat_hash_map.h
File diff suppressed because it is too large


+ 484
- 467
CAPI/cpp/grpc/include/absl/container/flat_hash_set.h

@@ -36,475 +36,492 @@
#include "absl/base/macros.h" #include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h" #include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h" #include "absl/memory/memory.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <typename T>
struct FlatHashSetPolicy;
} // namespace container_internal

// -----------------------------------------------------------------------------
// absl::flat_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container which has
// been optimized for both speed and memory footprint in most common use cases.
// Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Requires keys that are CopyConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is provided a compatible heterogeneous hashing function and
// equality operator.
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
// require pointer stability, consider using
// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
// you require pointer stability, consider `absl::node_hash_set` instead.
//
// Example:
//
// // Create a flat hash set of three strings
// absl::flat_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the flat hash set
// ducks.insert("donald");
//
// // Force a rehash of the flat hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Allocator = std::allocator<T>>
class flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;

public:
// Constructors and Assignment Operators
//
// A flat_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::flat_hash_set<std::string> set1;
//
// * Initializer List constructor
//
// absl::flat_hash_set<std::string> set2 =
// {{"huey"}, {"dewey"}, {"louie"},};
//
// * Copy constructor
//
// absl::flat_hash_set<std::string> set3(set2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::flat_hash_set<std::string> set4;
// set4 = set3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::flat_hash_set<std::string> set5(std::move(set4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::flat_hash_set<std::string> set6;
// set6 = std::move(set5);
//
// * Range constructor
//
// std::vector<std::string> v = {"a", "b"};
// absl::flat_hash_set<std::string> set7(v.begin(), v.end());
flat_hash_set() {}
using Base::Base;

// flat_hash_set::begin()
//
// Returns an iterator to the beginning of the `flat_hash_set`.
using Base::begin;

// flat_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_set`.
using Base::cbegin;

// flat_hash_set::cend()
//
// Returns a const iterator to the end of the `flat_hash_set`.
using Base::cend;

// flat_hash_set::end()
//
// Returns an iterator to the end of the `flat_hash_set`.
using Base::end;

// flat_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;

// flat_hash_set::empty()
//
// Returns whether or not the `flat_hash_set` is empty.
using Base::empty;

// flat_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `flat_hash_set` under current memory constraints. This value can be thought
// of the largest value of `std::distance(begin(), end())` for a
// `flat_hash_set<T>`.
using Base::max_size;

// flat_hash_set::size()
//
// Returns the number of elements currently within the `flat_hash_set`.
using Base::size;

// flat_hash_set::clear()
//
// Removes all elements from the `flat_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;

// flat_hash_set::erase()
//
// Erases elements within the `flat_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_set`, returning
// `void`.
//
// NOTE: returning `void` in this case is different than that of STL
// containers in general and `std::unordered_set` in particular (which
// return an iterator to the element following the erased element). If that
// iterator is needed, simply post increment the iterator:
//
// set.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase;

// flat_hash_set::insert()
//
// Inserts an element of the specified value into the `flat_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `flat_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_set` we guarantee the first match is inserted.
using Base::insert;

// flat_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;

// flat_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;

// flat_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `flat_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;

// flat_hash_set::merge()
//
// Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;

// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
// flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;

// flat_hash_set::rehash(count)
//
// Rehashes the `flat_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;

// flat_hash_set::reserve(count)
//
// Sets the number of slots in the `flat_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;

// flat_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `flat_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;

// flat_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `flat_hash_set`. note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `flat_hash_set`.
using Base::count;

// flat_hash_set::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_set`.
using Base::equal_range;

// flat_hash_set::find()
//
// Finds an element with the passed `key` within the `flat_hash_set`.
using Base::find;

// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
// because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;

// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;

// flat_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_set`. Overloads are
// listed below.
//
// float flat_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_set`.
//
// void flat_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;

// flat_hash_set::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_set`.
using Base::get_allocator;

// flat_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_set`.
using Base::hash_function;

// flat_hash_set::key_eq()
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
};

// erase_if(flat_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
typename flat_hash_set<T, H, E, A>::size_type erase_if(
flat_hash_set<T, H, E, A>& c, Predicate pred) {
return container_internal::EraseIf(pred, &c);
}

namespace container_internal {

template <class T>
struct FlatHashSetPolicy {
using slot_type = T;
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;

template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
absl::allocator_traits<Allocator>::construct(*alloc, slot,
std::forward<Args>(args)...);
}

template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
}

template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}

static T& element(slot_type* slot) { return *slot; }

template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}

static size_t space_used(const T*) { return 0; }
};
} // namespace container_internal

namespace container_algorithm_internal {

// Specialization of trait in absl/algorithm/container.h
template <class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};

} // namespace container_algorithm_internal

ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{
template<typename T>
struct FlatHashSetPolicy;
} // namespace container_internal

// -----------------------------------------------------------------------------
// absl::flat_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::flat_hash_set<T>` is an unordered associative container which has
// been optimized for both speed and memory footprint in most common use cases.
// Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Requires keys that are CopyConstructible
// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
// that the set is provided a compatible heterogeneous hashing function and
// equality operator.
// * Invalidates any references and pointers to elements within the table after
// `rehash()`.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
// require pointer stability, consider using
// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
// you require pointer stability, consider `absl::node_hash_set` instead.
//
// Example:
//
// // Create a flat hash set of three strings
// absl::flat_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the flat hash set
// ducks.insert("donald");
//
// // Force a rehash of the flat hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template<class T, class Hash = absl::container_internal::hash_default_hash<T>, class Eq = absl::container_internal::hash_default_eq<T>, class Allocator = std::allocator<T>>
class flat_hash_set : public absl::container_internal::raw_hash_set<absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator>
{
using Base = typename flat_hash_set::raw_hash_set;

public:
// Constructors and Assignment Operators
//
// A flat_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::flat_hash_set<std::string> set1;
//
// * Initializer List constructor
//
// absl::flat_hash_set<std::string> set2 =
// {{"huey"}, {"dewey"}, {"louie"},};
//
// * Copy constructor
//
// absl::flat_hash_set<std::string> set3(set2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::flat_hash_set<std::string> set4;
// set4 = set3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::flat_hash_set<std::string> set5(std::move(set4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::flat_hash_set<std::string> set6;
// set6 = std::move(set5);
//
// * Range constructor
//
// std::vector<std::string> v = {"a", "b"};
// absl::flat_hash_set<std::string> set7(v.begin(), v.end());
flat_hash_set()
{
}
using Base::Base;

// flat_hash_set::begin()
//
// Returns an iterator to the beginning of the `flat_hash_set`.
using Base::begin;

// flat_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `flat_hash_set`.
using Base::cbegin;

// flat_hash_set::cend()
//
// Returns a const iterator to the end of the `flat_hash_set`.
using Base::cend;

// flat_hash_set::end()
//
// Returns an iterator to the end of the `flat_hash_set`.
using Base::end;

// flat_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;

// flat_hash_set::empty()
//
// Returns whether or not the `flat_hash_set` is empty.
using Base::empty;

// flat_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `flat_hash_set` under current memory constraints. This value can be thought
// of the largest value of `std::distance(begin(), end())` for a
// `flat_hash_set<T>`.
using Base::max_size;

// flat_hash_set::size()
//
// Returns the number of elements currently within the `flat_hash_set`.
using Base::size;

// flat_hash_set::clear()
//
// Removes all elements from the `flat_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;

// flat_hash_set::erase()
//
// Erases elements within the `flat_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `flat_hash_set`, returning
// `void`.
//
// NOTE: returning `void` in this case is different than that of STL
// containers in general and `std::unordered_set` in particular (which
// return an iterator to the element following the erased element). If that
// iterator is needed, simply post increment the iterator:
//
// set.erase(it++);
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase;

// flat_hash_set::insert()
//
// Inserts an element of the specified value into the `flat_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `flat_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `flat_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `flat_hash_set` we guarantee the first match is inserted.
using Base::insert;

// flat_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;

// flat_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `flat_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;

// flat_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `flat_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;

// flat_hash_set::merge()
//
// Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;

// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
// flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the flat hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;

// flat_hash_set::rehash(count)
//
// Rehashes the `flat_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;

// flat_hash_set::reserve(count)
//
// Sets the number of slots in the `flat_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;

// flat_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `flat_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;

// flat_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `flat_hash_set`. note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `flat_hash_set`.
using Base::count;

// flat_hash_set::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `flat_hash_set`.
using Base::equal_range;

// flat_hash_set::find()
//
// Finds an element with the passed `key` within the `flat_hash_set`.
using Base::find;

// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
// because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;

// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;

// flat_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `flat_hash_set`. Overloads are
// listed below.
//
// float flat_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `flat_hash_set`.
//
// void flat_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `flat_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `flat_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;

// flat_hash_set::get_allocator()
//
// Returns the allocator function associated with this `flat_hash_set`.
using Base::get_allocator;

// flat_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `flat_hash_set`.
using Base::hash_function;

// flat_hash_set::key_eq()
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
};

// erase_if(flat_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
// Returns the number of erased elements.
template<typename T, typename H, typename E, typename A, typename Predicate>
typename flat_hash_set<T, H, E, A>::size_type erase_if(
flat_hash_set<T, H, E, A>& c, Predicate pred
)
{
return container_internal::EraseIf(pred, &c);
}

namespace container_internal
{

template<class T>
struct FlatHashSetPolicy
{
using slot_type = T;
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;

template<class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args)
{
absl::allocator_traits<Allocator>::construct(*alloc, slot, std::forward<Args>(args)...);
}

template<class Allocator>
static void destroy(Allocator* alloc, slot_type* slot)
{
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
}

template<class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot)
{
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
}

static T& element(slot_type* slot)
{
return *slot;
}

template<class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...
))
apply(F&& f, Args&&... args)
{
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...
);
}

static size_t space_used(const T*)
{
return 0;
}
};
} // namespace container_internal

namespace container_algorithm_internal
{

// Specialization of trait in absl/algorithm/container.h
template<class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>> : std::true_type
{
};

} // namespace container_algorithm_internal

ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_
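Besides the ducks example in the header comment, the header also declares absl::erase_if for bulk removal by predicate. A small hedged sketch of both, assuming the flat_hash_set.h header shown above:

#include <string>

#include "absl/container/flat_hash_set.h"

// Sketch only: drop every name shorter than five characters, then probe.
void PruneShortNames() {
  absl::flat_hash_set<std::string> names = {"huey", "dewey", "louie", "donald"};
  absl::erase_if(names, [](const std::string& s) { return s.size() < 5; });
  if (names.contains("donald")) {
    // "donald" (6 characters) survives the predicate.
  }
}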

+ 909
- 791
CAPI/cpp/grpc/include/absl/container/inlined_vector.h
File diff suppressed because it is too large


+ 3292
- 2727
CAPI/cpp/grpc/include/absl/container/internal/btree.h
File diff suppressed because it is too large


+ 831
- 667
CAPI/cpp/grpc/include/absl/container/internal/btree_container.h
File diff suppressed because it is too large


+ 232
- 181
CAPI/cpp/grpc/include/absl/container/internal/common.h

@@ -21,187 +21,238 @@
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"
#include "absl/types/optional.h" #include "absl/types/optional.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class, class = void>
struct IsTransparent : std::false_type {};
template <class T>
struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
: std::true_type {};

template <bool is_transparent>
struct KeyArg {
// Transparent. Forward `K`.
template <typename K, typename key_type>
using type = K;
};

template <>
struct KeyArg<false> {
// Not transparent. Always use `key_type`.
template <typename K, typename key_type>
using type = key_type;
};

// The node_handle concept from C++17.
// We specialize node_handle for sets and maps. node_handle_base holds the
// common API of both.
template <typename PolicyTraits, typename Alloc>
class node_handle_base {
protected:
using slot_type = typename PolicyTraits::slot_type;

public:
using allocator_type = Alloc;

constexpr node_handle_base() = default;
node_handle_base(node_handle_base&& other) noexcept {
*this = std::move(other);
}
~node_handle_base() { destroy(); }
node_handle_base& operator=(node_handle_base&& other) noexcept {
destroy();
if (!other.empty()) {
alloc_ = other.alloc_;
PolicyTraits::transfer(alloc(), slot(), other.slot());
other.reset();
}
return *this;
}

bool empty() const noexcept { return !alloc_; }
explicit operator bool() const noexcept { return !empty(); }
allocator_type get_allocator() const { return *alloc_; }

protected:
friend struct CommonAccess;

struct transfer_tag_t {};
node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
: alloc_(a) {
PolicyTraits::transfer(alloc(), slot(), s);
}

struct construct_tag_t {};
template <typename... Args>
node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args)
: alloc_(a) {
PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
}

void destroy() {
if (!empty()) {
PolicyTraits::destroy(alloc(), slot());
reset();
}
}

void reset() {
assert(alloc_.has_value());
alloc_ = absl::nullopt;
}

slot_type* slot() const {
assert(!empty());
return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
}
allocator_type* alloc() { return std::addressof(*alloc_); }

private:
absl::optional<allocator_type> alloc_ = {};
alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
};

// For sets.
template <typename Policy, typename PolicyTraits, typename Alloc,
typename = void>
class node_handle : public node_handle_base<PolicyTraits, Alloc> {
using Base = node_handle_base<PolicyTraits, Alloc>;

public:
using value_type = typename PolicyTraits::value_type;

constexpr node_handle() {}

value_type& value() const { return PolicyTraits::element(this->slot()); }

private:
friend struct CommonAccess;

using Base::Base;
};

// For maps.
template <typename Policy, typename PolicyTraits, typename Alloc>
class node_handle<Policy, PolicyTraits, Alloc,
absl::void_t<typename Policy::mapped_type>>
: public node_handle_base<PolicyTraits, Alloc> {
using Base = node_handle_base<PolicyTraits, Alloc>;
using slot_type = typename PolicyTraits::slot_type;

public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;

constexpr node_handle() {}

// When C++17 is available, we can use std::launder to provide mutable
// access to the key. Otherwise, we provide const access.
auto key() const
-> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
return PolicyTraits::mutable_key(this->slot());
}

mapped_type& mapped() const {
return PolicyTraits::value(&PolicyTraits::element(this->slot()));
}

private:
friend struct CommonAccess;

using Base::Base;
};

// Provide access to non-public node-handle functions.
struct CommonAccess {
template <typename Node>
static auto GetSlot(const Node& node) -> decltype(node.slot()) {
return node.slot();
}

template <typename Node>
static void Destroy(Node* node) {
node->destroy();
}

template <typename Node>
static void Reset(Node* node) {
node->reset();
}

template <typename T, typename... Args>
static T Transfer(Args&&... args) {
return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
}

template <typename T, typename... Args>
static T Construct(Args&&... args) {
return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}
};

// Implement the insert_return_type<> concept of C++17.
template <class Iterator, class NodeType>
struct InsertReturnType {
Iterator position;
bool inserted;
NodeType node;
};

} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class, class = void>
struct IsTransparent : std::false_type
{
};
template<class T>
struct IsTransparent<T, absl::void_t<typename T::is_transparent>> : std::true_type
{
};

template<bool is_transparent>
struct KeyArg
{
// Transparent. Forward `K`.
template<typename K, typename key_type>
using type = K;
};

template<>
struct KeyArg<false>
{
// Not transparent. Always use `key_type`.
template<typename K, typename key_type>
using type = key_type;
};

// The node_handle concept from C++17.
// We specialize node_handle for sets and maps. node_handle_base holds the
// common API of both.
template<typename PolicyTraits, typename Alloc>
class node_handle_base
{
protected:
using slot_type = typename PolicyTraits::slot_type;

public:
using allocator_type = Alloc;

constexpr node_handle_base() = default;
node_handle_base(node_handle_base&& other) noexcept
{
*this = std::move(other);
}
~node_handle_base()
{
destroy();
}
node_handle_base& operator=(node_handle_base&& other) noexcept
{
destroy();
if (!other.empty())
{
alloc_ = other.alloc_;
PolicyTraits::transfer(alloc(), slot(), other.slot());
other.reset();
}
return *this;
}

bool empty() const noexcept
{
return !alloc_;
}
explicit operator bool() const noexcept
{
return !empty();
}
allocator_type get_allocator() const
{
return *alloc_;
}

protected:
friend struct CommonAccess;

struct transfer_tag_t
{
};
node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) :
alloc_(a)
{
PolicyTraits::transfer(alloc(), slot(), s);
}

struct construct_tag_t
{
};
template<typename... Args>
node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) :
alloc_(a)
{
PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
}

void destroy()
{
if (!empty())
{
PolicyTraits::destroy(alloc(), slot());
reset();
}
}

void reset()
{
assert(alloc_.has_value());
alloc_ = absl::nullopt;
}

slot_type* slot() const
{
assert(!empty());
return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
}
allocator_type* alloc()
{
return std::addressof(*alloc_);
}

private:
absl::optional<allocator_type> alloc_ = {};
alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
};

// For sets.
template<typename Policy, typename PolicyTraits, typename Alloc, typename = void>
class node_handle : public node_handle_base<PolicyTraits, Alloc>
{
using Base = node_handle_base<PolicyTraits, Alloc>;

public:
using value_type = typename PolicyTraits::value_type;

constexpr node_handle()
{
}

value_type& value() const
{
return PolicyTraits::element(this->slot());
}

private:
friend struct CommonAccess;

using Base::Base;
};

// For maps.
template<typename Policy, typename PolicyTraits, typename Alloc>
class node_handle<Policy, PolicyTraits, Alloc, absl::void_t<typename Policy::mapped_type>> : public node_handle_base<PolicyTraits, Alloc>
{
using Base = node_handle_base<PolicyTraits, Alloc>;
using slot_type = typename PolicyTraits::slot_type;

public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;

constexpr node_handle()
{
}

// When C++17 is available, we can use std::launder to provide mutable
// access to the key. Otherwise, we provide const access.
auto key() const
-> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>()))
{
return PolicyTraits::mutable_key(this->slot());
}

mapped_type& mapped() const
{
return PolicyTraits::value(&PolicyTraits::element(this->slot()));
}

private:
friend struct CommonAccess;

using Base::Base;
};

// Provide access to non-public node-handle functions.
struct CommonAccess
{
template<typename Node>
static auto GetSlot(const Node& node) -> decltype(node.slot())
{
return node.slot();
}

template<typename Node>
static void Destroy(Node* node)
{
node->destroy();
}

template<typename Node>
static void Reset(Node* node)
{
node->reset();
}

template<typename T, typename... Args>
static T Transfer(Args&&... args)
{
return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
}

template<typename T, typename... Args>
static T Construct(Args&&... args)
{
return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}
};

// Implement the insert_return_type<> concept of C++17.
template<class Iterator, class NodeType>
struct InsertReturnType
{
Iterator position;
bool inserted;
NodeType node;
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
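node_handle_base above parks the extracted slot in an aligned byte buffer next to an optional copy of the allocator, which is what lets extract() and insert(node) move an element between containers without copying it. A hedged sketch against the public API, assuming the node-handle insert overload supplied by the underlying raw_hash_set:

#include <string>
#include <utility>

#include "absl/container/flat_hash_set.h"

// Sketch only: move one element from `from` to `to` via a node handle.
void MoveOne(absl::flat_hash_set<std::string>& from,
             absl::flat_hash_set<std::string>& to) {
  auto node = from.extract("key");  // empty handle if "key" is absent
  if (!node.empty()) {
    to.insert(std::move(node));
  }
}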

+ 276
- 227
CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h

@@ -47,242 +47,291 @@
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <typename... Ts>
class CompressedTuple;

namespace internal_compressed_tuple {

template <typename D, size_t I>
struct Elem;
template <typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I>
: std::tuple_element<I, std::tuple<B...>> {};
template <typename D, size_t I>
using ElemT = typename Elem<D, I>::type;

// Use the __is_final intrinsic if available. Where it's not available, classes
// declared with the 'final' specifier cannot be used as CompressedTuple
// elements.
// TODO(sbenza): Replace this with std::is_final in C++14.
template <typename T>
constexpr bool IsFinal() {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<typename... Ts>
class CompressedTuple;

namespace internal_compressed_tuple
{

template<typename D, size_t I>
struct Elem;
template<typename... B, size_t I>
struct Elem<CompressedTuple<B...>, I> : std::tuple_element<I, std::tuple<B...>>
{
};
template<typename D, size_t I>
using ElemT = typename Elem<D, I>::type;

// Use the __is_final intrinsic if available. Where it's not available, classes
// declared with the 'final' specifier cannot be used as CompressedTuple
// elements.
// TODO(sbenza): Replace this with std::is_final in C++14.
template<typename T>
constexpr bool IsFinal()
{
#if defined(__clang__) || defined(__GNUC__)
return __is_final(T);
return __is_final(T);
#else
return false;
return false;
#endif
}

// We can't use EBCO on other CompressedTuples because that would mean that we
// derive from multiple Storage<> instantiations with the same I parameter,
// and potentially from multiple identical Storage<> instantiations. So anytime
// we use type inheritance rather than encapsulation, we mark
// CompressedTupleImpl, to make this easy to detect.
struct uses_inheritance {};

template <typename T>
constexpr bool ShouldUseBase() {
return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
!std::is_base_of<uses_inheritance, T>::value;
}

// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
template <typename T, size_t I,
}

// We can't use EBCO on other CompressedTuples because that would mean that we
// derive from multiple Storage<> instantiations with the same I parameter,
// and potentially from multiple identical Storage<> instantiations. So anytime
// we use type inheritance rather than encapsulation, we mark
// CompressedTupleImpl, to make this easy to detect.
struct uses_inheritance
{
};

template<typename T>
constexpr bool ShouldUseBase()
{
return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
!std::is_base_of<uses_inheritance, T>::value;
}

// The storage class provides two specializations:
// - For empty classes, it stores T as a base class.
// - For everything else, it stores T as a member.
template<typename T, size_t I,
#if defined(_MSC_VER)
bool UseBase =
ShouldUseBase<typename std::enable_if<true, T>::type>()>
bool UseBase = ShouldUseBase<typename std::enable_if<true, T>::type>()>
#else
bool UseBase = ShouldUseBase<T>()>
bool UseBase = ShouldUseBase<T>()>
#endif
struct Storage {
T value;
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(absl::in_place_t, V&& v)
: value(absl::forward<V>(v)) {}
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return absl::move(*this).value; }
T&& get() && { return std::move(*this).value; }
};

template <typename T, size_t I>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;

template <typename V>
explicit constexpr Storage(absl::in_place_t, V&& v)
: T(absl::forward<V>(v)) {}

constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return absl::move(*this); }
T&& get() && { return std::move(*this); }
};

template <typename D, typename I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;

template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
: uses_inheritance,
Storage<Ts, std::integral_constant<size_t, I>::value>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
: Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};

template <typename... Ts, size_t... I>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
// We use the dummy identity function as above...
: Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
: Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};

std::false_type Or(std::initializer_list<std::false_type>);
std::true_type Or(std::initializer_list<bool>);

// MSVC requires this to be done separately rather than within the declaration
// of CompressedTuple below.
template <typename... Ts>
constexpr bool ShouldAnyUseBase() {
return decltype(
Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
}

template <typename T, typename V>
using TupleElementMoveConstructible =
typename std::conditional<std::is_reference<T>::value,
std::is_convertible<V, T>,
std::is_constructible<T, V&&>>::type;

template <bool SizeMatches, class T, class... Vs>
struct TupleMoveConstructible : std::false_type {};

template <class... Ts, class... Vs>
struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
: std::integral_constant<
bool, absl::conjunction<
TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};

template <typename T>
struct compressed_tuple_size;

template <typename... Es>
struct compressed_tuple_size<CompressedTuple<Es...>>
: public std::integral_constant<std::size_t, sizeof...(Es)> {};

template <class T, class... Vs>
struct TupleItemsMoveConstructible
: std::integral_constant<
bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
sizeof...(Vs),
T, Vs...>::value> {};

} // namespace internal_compressed_tuple

// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
// does not apply when one or more of those empty classes is itself an empty
// CompressedTuple.)
//
// To access the members, use member .get<N>() function.
//
// Eg:
// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
// t3);
// assert(value.get<0>() == 7);
// T1& t1 = value.get<1>();
// const T2& t2 = value.get<2>();
// ...
//
// https://en.cppreference.com/w/cpp/language/ebo
template <typename... Ts>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
: private internal_compressed_tuple::CompressedTupleImpl<
CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
private:
template <int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;

template <int I>
using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;

public:
// There seems to be a bug in MSVC dealing in which using '=default' here will
// cause the compiler to ignore the body of other constructors. The work-
// around is to explicitly implement the default constructor.
struct Storage
{
T value;
constexpr Storage() = default;
template<typename V>
explicit constexpr Storage(absl::in_place_t, V&& v) :
value(absl::forward<V>(v))
{
}
constexpr const T& get() const&
{
return value;
}
T& get() &
{
return value;
}
constexpr const T&& get() const&&
{
return absl::move(*this).value;
}
T&& get() &&
{
return std::move(*this).value;
}
};

template<typename T, size_t I>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T
{
constexpr Storage() = default;

template<typename V>
explicit constexpr Storage(absl::in_place_t, V&& v) :
T(absl::forward<V>(v))
{
}

constexpr const T& get() const&
{
return *this;
}
T& get() &
{
return *this;
}
constexpr const T&& get() const&&
{
return absl::move(*this);
}
T&& get() &&
{
return std::move(*this);
}
};

template<typename D, typename I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;

template<typename... Ts, size_t... I, bool ShouldAnyUseBase>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>,
absl::index_sequence<I...>,
ShouldAnyUseBase>
// We use the dummy identity function through std::integral_constant to
// convince MSVC of accepting and expanding I in that context. Without it
// you would get:
// error C3548: 'I': parameter pack cannot be used in this context
: uses_inheritance, Storage<Ts, std::integral_constant<size_t, I>::value>...
{
constexpr CompressedTupleImpl() = default;
template<typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) :
Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))...
{
}
friend CompressedTuple<Ts...>;
};

template<typename... Ts, size_t... I>
struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
CompressedTuple<Ts...>,
absl::index_sequence<I...>,
false>
// We use the dummy identity function as above...
: Storage<Ts, std::integral_constant<size_t, I>::value, false>...
{
constexpr CompressedTupleImpl() = default;
template<typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) :
Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))...
{
}
friend CompressedTuple<Ts...>;
};

std::false_type Or(std::initializer_list<std::false_type>);
std::true_type Or(std::initializer_list<bool>);

// MSVC requires this to be done separately rather than within the declaration
// of CompressedTuple below.
template<typename... Ts>
constexpr bool ShouldAnyUseBase()
{
return decltype(Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
}

template<typename T, typename V>
using TupleElementMoveConstructible =
typename std::conditional<std::is_reference<T>::value, std::is_convertible<V, T>, std::is_constructible<T, V&&>>::type;

template<bool SizeMatches, class T, class... Vs>
struct TupleMoveConstructible : std::false_type
{
};

template<class... Ts, class... Vs>
struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...> : std::integral_constant<bool, absl::conjunction<TupleElementMoveConstructible<Ts, Vs&&>...>::value>
{
};

template<typename T>
struct compressed_tuple_size;

template<typename... Es>
struct compressed_tuple_size<CompressedTuple<Es...>> : public std::integral_constant<std::size_t, sizeof...(Es)>
{
};

template<class T, class... Vs>
struct TupleItemsMoveConstructible : std::integral_constant<bool, TupleMoveConstructible<compressed_tuple_size<T>::value == sizeof...(Vs), T, Vs...>::value>
{
};

} // namespace internal_compressed_tuple

// Helper class to perform the Empty Base Class Optimization.
// Ts can contain classes and non-classes, empty or not. For the ones that
// are empty classes, we perform the CompressedTuple. If all types in Ts are
// empty classes, then CompressedTuple<Ts...> is itself an empty class. (This
// does not apply when one or more of those empty classes is itself an empty
// CompressedTuple.)
//
// To access the members, use member .get<N>() function.
//
// Eg:
// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
// t3);
// assert(value.get<0>() == 7);
// T1& t1 = value.get<1>();
// const T2& t2 = value.get<2>();
// ...
//
// https://en.cppreference.com/w/cpp/language/ebo
template<typename... Ts>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple : private internal_compressed_tuple::CompressedTupleImpl<CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>, internal_compressed_tuple::ShouldAnyUseBase<Ts...>()>
{
private:
template<int I>
using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;

template<int I>
using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;

public:
// There seems to be a bug in MSVC dealing in which using '=default' here will
// cause the compiler to ignore the body of other constructors. The work-
// around is to explicitly implement the default constructor.
#if defined(_MSC_VER)
constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
constexpr CompressedTuple() :
CompressedTuple::CompressedTupleImpl()
{
}
#else
constexpr CompressedTuple() = default;
#endif
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}

template <typename First, typename... Vs,
absl::enable_if_t<
absl::conjunction<
// Ensure we are not hiding default copy/move constructors.
absl::negation<std::is_same<void(CompressedTuple),
void(absl::decay_t<First>)>>,
internal_compressed_tuple::TupleItemsMoveConstructible<
CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
absl::forward<First>(first),
absl::forward<Vs>(base)...) {}

template <int I>
ElemT<I>& get() & {
return StorageT<I>::get();
}

template <int I>
constexpr const ElemT<I>& get() const& {
return StorageT<I>::get();
}

template <int I>
ElemT<I>&& get() && {
return std::move(*this).StorageT<I>::get();
}

template <int I>
constexpr const ElemT<I>&& get() const&& {
return absl::move(*this).StorageT<I>::get();
}
};

// Explicit specialization for a zero-element tuple
// (needed to avoid ambiguous overloads for the default constructor).
template <>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};

} // namespace container_internal
ABSL_NAMESPACE_END
explicit constexpr CompressedTuple(const Ts&... base) :
CompressedTuple::CompressedTupleImpl(absl::in_place, base...)
{
}

template<typename First, typename... Vs, absl::enable_if_t<absl::conjunction<
// Ensure we are not hiding default copy/move constructors.
absl::negation<std::is_same<void(CompressedTuple), void(absl::decay_t<First>)>>,
internal_compressed_tuple::TupleItemsMoveConstructible<CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base) :
CompressedTuple::CompressedTupleImpl(absl::in_place, absl::forward<First>(first), absl::forward<Vs>(base)...)
{
}

template<int I>
ElemT<I>& get() &
{
return StorageT<I>::get();
}

template<int I>
constexpr const ElemT<I>& get() const&
{
return StorageT<I>::get();
}

template<int I>
ElemT<I>&& get() &&
{
return std::move(*this).StorageT<I>::get();
}

template<int I>
constexpr const ElemT<I>&& get() const&&
{
return absl::move(*this).StorageT<I>::get();
}
};

// Explicit specialization for a zero-element tuple
// (needed to avoid ambiguous overloads for the default constructor).
template<>
class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<>
{
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
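
A minimal usage sketch (not part of the diff) for the CompressedTuple shown above. It assumes the internal header is included directly, which Abseil normally reserves for its own containers; the EBCO claim from the comment block is checked with std::is_empty.

#include <cassert>
#include <string>
#include <type_traits>

#include "absl/container/internal/compressed_tuple.h"

struct Empty {};  // empty class: stored via inheritance (EBCO), not as a member

int main() {
    absl::container_internal::CompressedTuple<int, Empty, std::string> value(
        7, Empty{}, std::string("hi"));
    assert(value.get<0>() == 7);
    assert(value.get<2>() == "hi");
    // A tuple made only of empty classes is itself an empty class.
    static_assert(
        std::is_empty<absl::container_internal::CompressedTuple<Empty>>::value,
        "EBCO should make this tuple empty");
    return 0;
}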


+ 445
- 388
CAPI/cpp/grpc/include/absl/container/internal/container_memory.h

@@ -36,407 +36,464 @@
#include <sanitizer/msan_interface.h>
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <size_t Alignment>
struct alignas(Alignment) AlignedType {};

// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
// Note that many allocators don't honor alignment requirements above certain
// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
// Allocate() doesn't apply alignment corrections. If the underlying allocator
// returns an insufficiently aligned pointer, that's what you are going to get.
template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
}

// The pointer must have been previously obtained by calling
// Allocate<Alignment>(alloc, n).
template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}

namespace memory_internal {

// Constructs T into uninitialized storage pointed by `ptr` using the args
// specified in the tuple.
template <class Alloc, class T, class Tuple, size_t... I>
void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
absl::index_sequence<I...>) {
absl::allocator_traits<Alloc>::construct(
*alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
}

template <class T, class F>
struct WithConstructedImplF {
template <class... Args>
decltype(std::declval<F>()(std::declval<T>())) operator()(
Args&&... args) const {
return std::forward<F>(f)(T(std::forward<Args>(args)...));
}
F&& f;
};

template <class T, class Tuple, size_t... Is, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
Tuple&& t, absl::index_sequence<Is...>, F&& f) {
return WithConstructedImplF<T, F>{std::forward<F>(f)}(
std::get<Is>(std::forward<Tuple>(t))...);
}

template <class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
-> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}

// Returns a tuple of references to the elements of the input tuple. T must be a
// tuple.
template <class T>
auto TupleRef(T&& t) -> decltype(
TupleRefImpl(std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>())) {
return TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>());
}

template <class F, class K, class V>
decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
std::declval<std::tuple<K>>(), std::declval<V>()))
DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
const auto& key = std::get<0>(p.first);
return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
std::move(p.second));
}

} // namespace memory_internal

// Constructs T into uninitialized storage pointed by `ptr` using the args
// specified in the tuple.
template <class Alloc, class T, class Tuple>
void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
memory_internal::ConstructFromTupleImpl(
alloc, ptr, std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>());
}

// Constructs T using the args specified in the tuple and calls F with the
// constructed value.
template <class T, class Tuple, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
Tuple&& t, F&& f) {
return memory_internal::WithConstructedImpl<T>(
std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>(),
std::forward<F>(f));
}

// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
// The following two snippets are equivalent.
//
// 1. std::pair<F, S> p(args...);
//
// 2. auto a = PairArgs(args...);
// std::pair<F, S> p(std::piecewise_construct,
// std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
std::forward_as_tuple(std::forward<S>(s))};
}
template <class F, class S>
std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
const std::pair<F, S>& p) {
return PairArgs(p.first, p.second);
}
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
}
template <class F, class S>
auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
-> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)))) {
return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)));
}

// A helper function for implementing apply() in map policies.
template <class F, class... Args>
auto DecomposePair(F&& f, Args&&... args)
-> decltype(memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
return memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
}

// A helper function for implementing apply() in set policies.
template <class F, class Arg>
decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
DecomposeValue(F&& f, Arg&& arg) {
const auto& key = arg;
return std::forward<F>(f)(key, std::forward<Arg>(arg));
}

// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<size_t Alignment>
struct alignas(Alignment) AlignedType
{
};

// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
// Note that many allocators don't honor alignment requirements above certain
// threshold (usually either alignof(std::max_align_t) or alignof(void*)).
// Allocate() doesn't apply alignment corrections. If the underlying allocator
// returns an insufficiently aligned pointer, that's what you are going to get.
template<size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n)
{
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 && "allocator does not respect alignment");
return p;
}

// The pointer must have been previously obtained by calling
// Allocate<Alignment>(alloc, n).
template<size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n)
{
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
// On macOS, "mem_alloc" is a #define with one argument defined in
// rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
// with the "foo(bar)" syntax.
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p), (n + sizeof(M) - 1) / sizeof(M));
}

namespace memory_internal
{

// Constructs T into uninitialized storage pointed by `ptr` using the args
// specified in the tuple.
template<class Alloc, class T, class Tuple, size_t... I>
void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, absl::index_sequence<I...>)
{
absl::allocator_traits<Alloc>::construct(
*alloc, ptr, std::get<I>(std::forward<Tuple>(t))...
);
}

template<class T, class F>
struct WithConstructedImplF
{
template<class... Args>
decltype(std::declval<F>()(std::declval<T>())) operator()(
Args&&... args
) const
{
return std::forward<F>(f)(T(std::forward<Args>(args)...));
}
F&& f;
};

template<class T, class Tuple, size_t... Is, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
Tuple&& t, absl::index_sequence<Is...>, F&& f
)
{
return WithConstructedImplF<T, F>{std::forward<F>(f)}(
std::get<Is>(std::forward<Tuple>(t))...
);
}

template<class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
-> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...))
{
return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}

// Returns a tuple of references to the elements of the input tuple. T must be a
// tuple.
template<class T>
auto TupleRef(T&& t) -> decltype(TupleRefImpl(std::forward<T>(t), absl::make_index_sequence<std::tuple_size<typename std::decay<T>::type>::value>()))
{
return TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>()
);
}

template<class F, class K, class V>
decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct, std::declval<std::tuple<K>>(), std::declval<V>()))
DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p)
{
const auto& key = std::get<0>(p.first);
return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second));
}

} // namespace memory_internal

// Constructs T into uninitialized storage pointed by `ptr` using the args
// specified in the tuple.
template<class Alloc, class T, class Tuple>
void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t)
{
memory_internal::ConstructFromTupleImpl(
alloc, ptr, std::forward<Tuple>(t), absl::make_index_sequence<std::tuple_size<typename std::decay<Tuple>::type>::value>()
);
}

// Constructs T using the args specified in the tuple and calls F with the
// constructed value.
template<class T, class Tuple, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
Tuple&& t, F&& f
)
{
return memory_internal::WithConstructedImpl<T>(
std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>(),
std::forward<F>(f)
);
}

// Given arguments of an std::pair's constructor, PairArgs() returns a pair of
// tuples with references to the passed arguments. The tuples contain
// constructor arguments for the first and the second elements of the pair.
//
// The following two snippets are equivalent.
//
// 1. std::pair<F, S> p(args...);
//
// 2. auto a = PairArgs(args...);
// std::pair<F, S> p(std::piecewise_construct,
// std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs()
{
return {};
}
template<class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s)
{
return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)), std::forward_as_tuple(std::forward<S>(s))};
}
template<class F, class S>
std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
const std::pair<F, S>& p
)
{
return PairArgs(p.first, p.second);
}
template<class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p)
{
return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
}
template<class F, class S>
auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
-> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)), memory_internal::TupleRef(std::forward<S>(s))))
{
return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)), memory_internal::TupleRef(std::forward<S>(s)));
}

// A helper function for implementing apply() in map policies.
template<class F, class... Args>
auto DecomposePair(F&& f, Args&&... args)
-> decltype(memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...)
))
{
return memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...)
);
}

// A helper function for implementing apply() in set policies.
template<class F, class Arg>
decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
DecomposeValue(F&& f, Arg&& arg)
{
const auto& key = arg;
return std::forward<F>(f)(key, std::forward<Arg>(arg));
}

// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s)
{
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
(void)s;
}


inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s)
{
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
(void)s;
}

template <typename T>
inline void SanitizerPoisonObject(const T* object) {
SanitizerPoisonMemoryRegion(object, sizeof(T));
}

template <typename T>
inline void SanitizerUnpoisonObject(const T* object) {
SanitizerUnpoisonMemoryRegion(object, sizeof(T));
}

namespace memory_internal {

// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
// offsetof(Pair, second) respectively. Otherwise they are -1.
//
// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout
// type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
static constexpr size_t kFirst = static_cast<size_t>(-1);
static constexpr size_t kSecond = static_cast<size_t>(-1);
};

template <class Pair>
struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
static constexpr size_t kFirst = offsetof(Pair, first);
static constexpr size_t kSecond = offsetof(Pair, second);
};

template <class K, class V>
struct IsLayoutCompatible {
private:
struct Pair {
K first;
V second;
};

// Is P layout-compatible with Pair?
template <class P>
static constexpr bool LayoutCompatible() {
return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
alignof(P) == alignof(Pair) &&
memory_internal::OffsetOf<P>::kFirst ==
memory_internal::OffsetOf<Pair>::kFirst &&
memory_internal::OffsetOf<P>::kSecond ==
memory_internal::OffsetOf<Pair>::kSecond;
}

public:
// Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
// then it is safe to store them in a union and read from either.
static constexpr bool value = std::is_standard_layout<K>() &&
std::is_standard_layout<Pair>() &&
memory_internal::OffsetOf<Pair>::kFirst == 0 &&
LayoutCompatible<std::pair<K, V>>() &&
LayoutCompatible<std::pair<const K, V>>();
};

} // namespace memory_internal

// The internal storage type for key-value containers like flat_hash_map.
//
// It is convenient for the value_type of a flat_hash_map<K, V> to be
// pair<const K, V>; the "const K" prevents accidental modification of the key
// when dealing with the reference returned from find() and similar methods.
// However, this creates other problems; we want to be able to emplace(K, V)
// efficiently with move operations, and similarly be able to move a
// pair<K, V> in insert().
//
// The solution is this union, which aliases the const and non-const versions
// of the pair. This also allows flat_hash_map<const K, V> to work, even though
// that has the same efficiency issues with move in emplace() and insert() -
// but people do it anyway.
//
// If kMutableKeys is false, only the value member can be accessed.
//
// If kMutableKeys is true, key can be accessed through all slots while value
// and mutable_value must be accessed only via INITIALIZED slots. Slots are
// created and destroyed via mutable_value so that the key can be moved later.
//
// Accessing one of the union fields while the other is active is safe as
// long as they are layout-compatible, which is guaranteed by the definition of
// kMutableKeys. For C++11, the relevant section of the standard is
// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
template <class K, class V>
union map_slot_type {
map_slot_type() {}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
using mutable_value_type =
std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;

value_type value;
mutable_value_type mutable_value;
absl::remove_const_t<K> key;
};

template <class K, class V>
struct map_slot_policy {
using slot_type = map_slot_type<K, V>;
using value_type = std::pair<const K, V>;
using mutable_value_type = std::pair<K, V>;

private:
static void emplace(slot_type* slot) {
// The construction of union doesn't do anything at runtime but it allows us
// to access its members without violating aliasing rules.
new (slot) slot_type;
}
// If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
// or the other via slot_type. We are also free to access the key via
// slot_type::key in this case.
using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;

public:
static value_type& element(slot_type* slot) { return slot->value; }
static const value_type& element(const slot_type* slot) {
return slot->value;
}

// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.

template<typename T>
inline void SanitizerPoisonObject(const T* object)
{
SanitizerPoisonMemoryRegion(object, sizeof(T));
}

template<typename T>
inline void SanitizerUnpoisonObject(const T* object)
{
SanitizerUnpoisonMemoryRegion(object, sizeof(T));
}

namespace memory_internal
{

// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
// offsetof(Pair, second) respectively. Otherwise they are -1.
//
// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout
// type, which is non-portable.
template<class Pair, class = std::true_type>
struct OffsetOf
{
static constexpr size_t kFirst = static_cast<size_t>(-1);
static constexpr size_t kSecond = static_cast<size_t>(-1);
};

template<class Pair>
struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type>
{
static constexpr size_t kFirst = offsetof(Pair, first);
static constexpr size_t kSecond = offsetof(Pair, second);
};

template<class K, class V>
struct IsLayoutCompatible
{
private:
struct Pair
{
K first;
V second;
};

// Is P layout-compatible with Pair?
template<class P>
static constexpr bool LayoutCompatible()
{
return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
alignof(P) == alignof(Pair) &&
memory_internal::OffsetOf<P>::kFirst ==
memory_internal::OffsetOf<Pair>::kFirst &&
memory_internal::OffsetOf<P>::kSecond ==
memory_internal::OffsetOf<Pair>::kSecond;
}

public:
// Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
// then it is safe to store them in a union and read from either.
static constexpr bool value = std::is_standard_layout<K>() &&
std::is_standard_layout<Pair>() &&
memory_internal::OffsetOf<Pair>::kFirst == 0 &&
LayoutCompatible<std::pair<K, V>>() &&
LayoutCompatible<std::pair<const K, V>>();
};

} // namespace memory_internal

// The internal storage type for key-value containers like flat_hash_map.
//
// It is convenient for the value_type of a flat_hash_map<K, V> to be
// pair<const K, V>; the "const K" prevents accidental modification of the key
// when dealing with the reference returned from find() and similar methods.
// However, this creates other problems; we want to be able to emplace(K, V)
// efficiently with move operations, and similarly be able to move a
// pair<K, V> in insert().
//
// The solution is this union, which aliases the const and non-const versions
// of the pair. This also allows flat_hash_map<const K, V> to work, even though
// that has the same efficiency issues with move in emplace() and insert() -
// but people do it anyway.
//
// If kMutableKeys is false, only the value member can be accessed.
//
// If kMutableKeys is true, key can be accessed through all slots while value
// and mutable_value must be accessed only via INITIALIZED slots. Slots are
// created and destroyed via mutable_value so that the key can be moved later.
//
// Accessing one of the union fields while the other is active is safe as
// long as they are layout-compatible, which is guaranteed by the definition of
// kMutableKeys. For C++11, the relevant section of the standard is
// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
template<class K, class V>
union map_slot_type
{
map_slot_type()
{
}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
using mutable_value_type =
std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;

value_type value;
mutable_value_type mutable_value;
absl::remove_const_t<K> key;
};

template<class K, class V>
struct map_slot_policy
{
using slot_type = map_slot_type<K, V>;
using value_type = std::pair<const K, V>;
using mutable_value_type = std::pair<K, V>;

private:
static void emplace(slot_type* slot)
{
// The construction of union doesn't do anything at runtime but it allows us
// to access its members without violating aliasing rules.
new (slot) slot_type;
}
// If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
// or the other via slot_type. We are also free to access the key via
// slot_type::key in this case.
using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;

public:
static value_type& element(slot_type* slot)
{
return slot->value;
}
static const value_type& element(const slot_type* slot)
{
return slot->value;
}

// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
static K& mutable_key(slot_type* slot) {
// Still check for kMutableKeys so that we can avoid calling std::launder
// unless necessary because it can interfere with optimizations.
return kMutableKeys::value ? slot->key
: *std::launder(const_cast<K*>(
std::addressof(slot->value.first)));
}
static K& mutable_key(slot_type* slot)
{
// Still check for kMutableKeys so that we can avoid calling std::launder
// unless necessary because it can interfere with optimizations.
return kMutableKeys::value ? slot->key : *std::launder(const_cast<K*>(std::addressof(slot->value.first)));
}
#else  // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
static const K& mutable_key(slot_type* slot) { return key(slot); }
static const K& mutable_key(slot_type* slot)
{
return key(slot);
}
#endif


static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}

template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
std::forward<Args>(args)...);
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::forward<Args>(args)...);
}
}

// Construct this slot by moving from another slot.
template <class Allocator>
static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &slot->mutable_value, std::move(other->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::move(other->value));
}
}

// Construct this slot by copying from another slot.
template <class Allocator>
static void construct(Allocator* alloc, slot_type* slot,
const slot_type* other) {
emplace(slot);
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
other->value);
}

template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
} else {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
}
}

template <class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
emplace(new_slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
std::move(old_slot->value));
}
destroy(alloc, old_slot);
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
static const K& key(const slot_type* slot)
{
return kMutableKeys::value ? slot->key : slot->value.first;
}

template<class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args)
{
emplace(slot);
if (kMutableKeys::value)
{
absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value, std::forward<Args>(args)...);
}
else
{
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value, std::forward<Args>(args)...);
}
}

// Construct this slot by moving from another slot.
template<class Allocator>
static void construct(Allocator* alloc, slot_type* slot, slot_type* other)
{
emplace(slot);
if (kMutableKeys::value)
{
absl::allocator_traits<Allocator>::construct(
*alloc, &slot->mutable_value, std::move(other->mutable_value)
);
}
else
{
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value, std::move(other->value));
}
}

// Construct this slot by copying from another slot.
template<class Allocator>
static void construct(Allocator* alloc, slot_type* slot, const slot_type* other)
{
emplace(slot);
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value, other->value);
}

template<class Allocator>
static void destroy(Allocator* alloc, slot_type* slot)
{
if (kMutableKeys::value)
{
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
}
else
{
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
}
}

template<class Allocator>
static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot)
{
emplace(new_slot);
if (kMutableKeys::value)
{
absl::allocator_traits<Allocator>::construct(
*alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)
);
}
else
{
absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value, std::move(old_slot->value));
}
destroy(alloc, old_slot);
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
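
A minimal sketch (not part of the diff) of the PairArgs() equivalence described in the comment above. The key/value types are illustrative, and the internal header is included directly only for demonstration.

#include <cassert>
#include <string>
#include <utility>

#include "absl/container/internal/container_memory.h"

int main() {
    // 1. Direct construction of the pair.
    std::pair<std::string, int> p1("key", 42);

    // 2. The equivalent piecewise construction via PairArgs().
    std::string k = "key";
    int v = 42;
    auto a = absl::container_internal::PairArgs(std::move(k), v);
    std::pair<std::string, int> p2(std::piecewise_construct,
                                   std::move(a.first), std::move(a.second));

    assert(p1 == p2);
    return 0;
}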

+ 112
- 90
CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h

@@ -20,103 +20,125 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// This is a stateful allocator, but the state lives outside of the
// allocator (in whatever test is using the allocator). This is odd
// but helps in tests where the allocator is propagated into nested
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template <typename T>
class CountingAllocator {
public:
using Allocator = std::allocator<T>;
using AllocatorTraits = std::allocator_traits<Allocator>;
using value_type = typename AllocatorTraits::value_type;
using pointer = typename AllocatorTraits::pointer;
using const_pointer = typename AllocatorTraits::const_pointer;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;

CountingAllocator() = default;
explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
: bytes_used_(bytes_used), instance_count_(instance_count) {}

template <typename U>
CountingAllocator(const CountingAllocator<U>& x)
: bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}

pointer allocate(
size_type n,
typename AllocatorTraits::const_void_pointer hint = nullptr) {
Allocator allocator;
pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
if (bytes_used_ != nullptr) {
*bytes_used_ += n * sizeof(T);
}
return ptr;
}

void deallocate(pointer p, size_type n) {
Allocator allocator;
AllocatorTraits::deallocate(allocator, p, n);
if (bytes_used_ != nullptr) {
*bytes_used_ -= n * sizeof(T);
}
}

template <typename U, typename... Args>
void construct(U* p, Args&&... args) {
Allocator allocator;
AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
if (instance_count_ != nullptr) {
*instance_count_ += 1;
}
}

template <typename U>
void destroy(U* p) {
Allocator allocator;
// Ignore GCC warning bug.
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

// This is a stateful allocator, but the state lives outside of the
// allocator (in whatever test is using the allocator). This is odd
// but helps in tests where the allocator is propagated into nested
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template<typename T>
class CountingAllocator
{
public:
using Allocator = std::allocator<T>;
using AllocatorTraits = std::allocator_traits<Allocator>;
using value_type = typename AllocatorTraits::value_type;
using pointer = typename AllocatorTraits::pointer;
using const_pointer = typename AllocatorTraits::const_pointer;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;

CountingAllocator() = default;
explicit CountingAllocator(int64_t* bytes_used) :
bytes_used_(bytes_used)
{
}
CountingAllocator(int64_t* bytes_used, int64_t* instance_count) :
bytes_used_(bytes_used),
instance_count_(instance_count)
{
}

template<typename U>
CountingAllocator(const CountingAllocator<U>& x) :
bytes_used_(x.bytes_used_),
instance_count_(x.instance_count_)
{
}

pointer allocate(
size_type n,
typename AllocatorTraits::const_void_pointer hint = nullptr
)
{
Allocator allocator;
pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
if (bytes_used_ != nullptr)
{
*bytes_used_ += n * sizeof(T);
}
return ptr;
}

void deallocate(pointer p, size_type n)
{
Allocator allocator;
AllocatorTraits::deallocate(allocator, p, n);
if (bytes_used_ != nullptr)
{
*bytes_used_ -= n * sizeof(T);
}
}

template<typename U, typename... Args>
void construct(U* p, Args&&... args)
{
Allocator allocator;
AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
if (instance_count_ != nullptr)
{
*instance_count_ += 1;
}
}

template<typename U>
void destroy(U* p)
{
Allocator allocator;
// Ignore GCC warning bug.
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuse-after-free"
#endif
AllocatorTraits::destroy(allocator, p);
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
if (instance_count_ != nullptr) {
*instance_count_ -= 1;
}
}

template <typename U>
class rebind {
public:
using other = CountingAllocator<U>;
};

friend bool operator==(const CountingAllocator& a,
const CountingAllocator& b) {
return a.bytes_used_ == b.bytes_used_ &&
a.instance_count_ == b.instance_count_;
}

friend bool operator!=(const CountingAllocator& a,
const CountingAllocator& b) {
return !(a == b);
}

int64_t* bytes_used_ = nullptr;
int64_t* instance_count_ = nullptr;
};

} // namespace container_internal
ABSL_NAMESPACE_END
if (instance_count_ != nullptr)
{
*instance_count_ -= 1;
}
}

template<typename U>
class rebind
{
public:
using other = CountingAllocator<U>;
};

friend bool operator==(const CountingAllocator& a, const CountingAllocator& b)
{
return a.bytes_used_ == b.bytes_used_ &&
a.instance_count_ == b.instance_count_;
}

friend bool operator!=(const CountingAllocator& a, const CountingAllocator& b)
{
return !(a == b);
}

int64_t* bytes_used_ = nullptr;
int64_t* instance_count_ = nullptr;
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
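
A minimal sketch (not part of the diff) of how this test-only allocator can be used to observe a standard container's allocations. The container and values are illustrative; the final asserts rely only on the vector releasing its storage at destruction.

#include <cassert>
#include <cstdint>
#include <vector>

#include "absl/container/internal/counting_allocator.h"

int main() {
    int64_t bytes_used = 0;
    int64_t instances = 0;
    using Alloc = absl::container_internal::CountingAllocator<int>;

    {
        std::vector<int, Alloc> v(Alloc(&bytes_used, &instances));
        v.reserve(8);  // the allocation is recorded in bytes_used
        assert(bytes_used >= 8 * static_cast<int64_t>(sizeof(int)));
        v.push_back(1);  // element construction bumps the instance count
        assert(instances == 1);
    }  // vector destroyed: memory and instances are released

    assert(bytes_used == 0);
    assert(instances == 0);
    return 0;
}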

+ 134
- 102
CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h

@@ -56,108 +56,140 @@
#include "absl/strings/cord.h" #include "absl/strings/cord.h"
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// The hash of an object of type T is computed by using absl::Hash.
template <class T, class E = void>
struct HashEq {
using Hash = absl::Hash<T>;
using Eq = std::equal_to<T>;
};

struct StringHash {
using is_transparent = void;

size_t operator()(absl::string_view v) const {
return absl::Hash<absl::string_view>{}(v);
}
size_t operator()(const absl::Cord& v) const {
return absl::Hash<absl::Cord>{}(v);
}
};

struct StringEq {
using is_transparent = void;
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs == rhs;
}
bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
return lhs == rhs;
}
bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
return lhs == rhs;
}
bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
return lhs == rhs;
}
};

// Supports heterogeneous lookup for string-like elements.
struct StringHashEq {
using Hash = StringHash;
using Eq = StringEq;
};

template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
template <>
struct HashEq<absl::Cord> : StringHashEq {};

// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
struct HashEq<T*> {
struct Hash {
using is_transparent = void;
template <class U>
size_t operator()(const U& ptr) const {
return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
}
};
struct Eq {
using is_transparent = void;
template <class A, class B>
bool operator()(const A& a, const B& b) const {
return HashEq::ToPtr(a) == HashEq::ToPtr(b);
}
};

private:
static const T* ToPtr(const T* ptr) { return ptr; }
template <class U, class D>
static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
return ptr.get();
}
template <class U>
static const T* ToPtr(const std::shared_ptr<U>& ptr) {
return ptr.get();
}
};

template <class T, class D>
struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
template <class T>
struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};

// This header's visibility is restricted. If you need to access the default
// hasher please use the container's ::hasher alias instead.
//
// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
template <class T>
using hash_default_hash = typename container_internal::HashEq<T>::Hash;

// This header's visibility is restricted. If you need to access the default
// key equal please use the container's ::key_equal alias instead.
//
// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
template <class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;

} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

// The hash of an object of type T is computed by using absl::Hash.
template<class T, class E = void>
struct HashEq
{
using Hash = absl::Hash<T>;
using Eq = std::equal_to<T>;
};

struct StringHash
{
using is_transparent = void;

size_t operator()(absl::string_view v) const
{
return absl::Hash<absl::string_view>{}(v);
}
size_t operator()(const absl::Cord& v) const
{
return absl::Hash<absl::Cord>{}(v);
}
};

struct StringEq
{
using is_transparent = void;
bool operator()(absl::string_view lhs, absl::string_view rhs) const
{
return lhs == rhs;
}
bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const
{
return lhs == rhs;
}
bool operator()(const absl::Cord& lhs, absl::string_view rhs) const
{
return lhs == rhs;
}
bool operator()(absl::string_view lhs, const absl::Cord& rhs) const
{
return lhs == rhs;
}
};

// Supports heterogeneous lookup for string-like elements.
struct StringHashEq
{
using Hash = StringHash;
using Eq = StringEq;
};

template<>
struct HashEq<std::string> : StringHashEq
{
};
template<>
struct HashEq<absl::string_view> : StringHashEq
{
};
template<>
struct HashEq<absl::Cord> : StringHashEq
{
};

// Supports heterogeneous lookup for pointers and smart pointers.
template<class T>
struct HashEq<T*>
{
struct Hash
{
using is_transparent = void;
template<class U>
size_t operator()(const U& ptr) const
{
return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
}
};
struct Eq
{
using is_transparent = void;
template<class A, class B>
bool operator()(const A& a, const B& b) const
{
return HashEq::ToPtr(a) == HashEq::ToPtr(b);
}
};

private:
static const T* ToPtr(const T* ptr)
{
return ptr;
}
template<class U, class D>
static const T* ToPtr(const std::unique_ptr<U, D>& ptr)
{
return ptr.get();
}
template<class U>
static const T* ToPtr(const std::shared_ptr<U>& ptr)
{
return ptr.get();
}
};

template<class T, class D>
struct HashEq<std::unique_ptr<T, D>> : HashEq<T*>
{
};
template<class T>
struct HashEq<std::shared_ptr<T>> : HashEq<T*>
{
};

// This header's visibility is restricted. If you need to access the default
// hasher please use the container's ::hasher alias instead.
//
// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
template<class T>
using hash_default_hash = typename container_internal::HashEq<T>::Hash;

// This header's visibility is restricted. If you need to access the default
// key equal please use the container's ::key_equal alias instead.
//
// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
template<class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
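
A minimal sketch (not part of the diff) of the heterogeneous string lookup these HashEq specializations enable. It uses the public absl::flat_hash_set, whose default hasher and key_equal are the hash_default_hash/hash_default_eq aliases defined above; the values are illustrative.

#include <cassert>
#include <string>

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

int main() {
    absl::flat_hash_set<std::string> s = {"alpha", "beta"};
    absl::string_view needle = "alpha";
    // StringHash/StringEq are transparent, so the string_view is hashed and
    // compared directly, without materializing a temporary std::string key.
    assert(s.find(needle) != s.end());
    assert(!s.contains("gamma"));
    return 0;
}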

+ 170
- 143
CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h

@@ -34,149 +34,176 @@
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace hash_internal {
namespace generator_internal {

template <class Container, class = void>
struct IsMap : std::false_type {};

template <class Map>
struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};

} // namespace generator_internal

std::mt19937_64* GetSharedRng();

enum Enum {
kEnumEmpty,
kEnumDeleted,
};

enum class EnumClass : uint64_t {
kEmpty,
kDeleted,
};

inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
return o << static_cast<uint64_t>(ec);
}

template <class T, class E = void>
struct Generator;

template <class T>
struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
T operator()() const {
std::uniform_int_distribution<T> dist;
return dist(*GetSharedRng());
}
};

template <>
struct Generator<Enum> {
Enum operator()() const {
std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
dist;
while (true) {
auto variate = dist(*GetSharedRng());
if (variate != kEnumEmpty && variate != kEnumDeleted)
return static_cast<Enum>(variate);
}
}
};

template <>
struct Generator<EnumClass> {
EnumClass operator()() const {
std::uniform_int_distribution<
typename std::underlying_type<EnumClass>::type>
dist;
while (true) {
EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
return static_cast<EnumClass>(variate);
}
}
};

template <>
struct Generator<std::string> {
std::string operator()() const;
};

template <>
struct Generator<absl::string_view> {
absl::string_view operator()() const;
};

template <>
struct Generator<NonStandardLayout> {
NonStandardLayout operator()() const {
return NonStandardLayout(Generator<std::string>()());
}
};

template <class K, class V>
struct Generator<std::pair<K, V>> {
std::pair<K, V> operator()() const {
return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
Generator<typename std::decay<V>::type>()());
}
};

template <class... Ts>
struct Generator<std::tuple<Ts...>> {
std::tuple<Ts...> operator()() const {
return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
}
};

template <class T>
struct Generator<std::unique_ptr<T>> {
std::unique_ptr<T> operator()() const {
return absl::make_unique<T>(Generator<T>()());
}
};

template <class U>
struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
decltype(std::declval<U&>().value())>>
: Generator<std::pair<
typename std::decay<decltype(std::declval<U&>().key())>::type,
typename std::decay<decltype(std::declval<U&>().value())>::type>> {};

template <class Container>
using GeneratedType = decltype(
std::declval<const Generator<
typename std::conditional<generator_internal::IsMap<Container>::value,
typename Container::value_type,
typename Container::key_type>::type>&>()());

// Naive wrapper that performs a linear search of previous values.
// Beware this is O(SQR), which is reasonable for smaller kMaxValues.
template <class T, size_t kMaxValues = 64, class E = void>
struct UniqueGenerator {
Generator<T, E> gen;
std::vector<T> values;

T operator()() {
assert(values.size() < kMaxValues);
for (;;) {
T value = gen();
if (std::find(values.begin(), values.end(), value) == values.end()) {
values.push_back(value);
return value;
}
}
}
};

} // namespace hash_internal
} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{
namespace hash_internal
{
namespace generator_internal
{

template<class Container, class = void>
struct IsMap : std::false_type
{
};

template<class Map>
struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type
{
};

} // namespace generator_internal

std::mt19937_64* GetSharedRng();

enum Enum
{
kEnumEmpty,
kEnumDeleted,
};

enum class EnumClass : uint64_t
{
kEmpty,
kDeleted,
};

inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec)
{
return o << static_cast<uint64_t>(ec);
}

template<class T, class E = void>
struct Generator;

template<class T>
struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type>
{
T operator()() const
{
std::uniform_int_distribution<T> dist;
return dist(*GetSharedRng());
}
};

template<>
struct Generator<Enum>
{
Enum operator()() const
{
std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
dist;
while (true)
{
auto variate = dist(*GetSharedRng());
if (variate != kEnumEmpty && variate != kEnumDeleted)
return static_cast<Enum>(variate);
}
}
};

template<>
struct Generator<EnumClass>
{
EnumClass operator()() const
{
std::uniform_int_distribution<
typename std::underlying_type<EnumClass>::type>
dist;
while (true)
{
EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
return static_cast<EnumClass>(variate);
}
}
};

template<>
struct Generator<std::string>
{
std::string operator()() const;
};

template<>
struct Generator<absl::string_view>
{
absl::string_view operator()() const;
};

template<>
struct Generator<NonStandardLayout>
{
NonStandardLayout operator()() const
{
return NonStandardLayout(Generator<std::string>()());
}
};

template<class K, class V>
struct Generator<std::pair<K, V>>
{
std::pair<K, V> operator()() const
{
return std::pair<K, V>(Generator<typename std::decay<K>::type>()(), Generator<typename std::decay<V>::type>()());
}
};

template<class... Ts>
struct Generator<std::tuple<Ts...>>
{
std::tuple<Ts...> operator()() const
{
return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
}
};

template<class T>
struct Generator<std::unique_ptr<T>>
{
std::unique_ptr<T> operator()() const
{
return absl::make_unique<T>(Generator<T>()());
}
};

template<class U>
struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()), decltype(std::declval<U&>().value())>> : Generator<std::pair<typename std::decay<decltype(std::declval<U&>().key())>::type, typename std::decay<decltype(std::declval<U&>().value())>::type>>
{
};

template<class Container>
using GeneratedType = decltype(std::declval<const Generator<
typename std::conditional<generator_internal::IsMap<Container>::value, typename Container::value_type, typename Container::key_type>::type>&>()());

// Naive wrapper that performs a linear search of previous values.
// Beware this is O(SQR), which is reasonable for smaller kMaxValues.
template<class T, size_t kMaxValues = 64, class E = void>
struct UniqueGenerator
{
Generator<T, E> gen;
std::vector<T> values;

T operator()()
{
assert(values.size() < kMaxValues);
for (;;)
{
T value = gen();
if (std::find(values.begin(), values.end(), value) == values.end())
{
values.push_back(value);
return value;
}
}
}
};

} // namespace hash_internal
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
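
A minimal sketch (not part of the diff) of the test-only value generators above. It assumes the translation unit is linked against the hash_generator_testing helper library that provides GetSharedRng() and the std::string generator; the types used are illustrative.

#include <cassert>
#include <string>
#include <utility>

#include "absl/container/internal/hash_generator_testing.h"

int main() {
    using absl::container_internal::hash_internal::Generator;
    using absl::container_internal::hash_internal::UniqueGenerator;

    // A random pair<int, std::string> drawn from the shared RNG.
    std::pair<int, std::string> p = Generator<std::pair<int, std::string>>()();
    (void)p;

    // UniqueGenerator remembers previous values and retries on collisions,
    // so repeated calls yield distinct ints (up to kMaxValues of them).
    UniqueGenerator<int> gen;
    int a = gen();
    int b = gen();
    assert(a != b);
    return 0;
}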

+ 193
- 137
CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h

@@ -29,141 +29,197 @@
#include "absl/hash/hash.h" #include "absl/hash/hash.h"
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace hash_testing_internal {

template <class Derived>
struct WithId {
WithId() : id_(next_id<Derived>()) {}
WithId(const WithId& that) : id_(that.id_) {}
WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
WithId& operator=(const WithId& that) {
id_ = that.id_;
return *this;
}
WithId& operator=(WithId&& that) {
id_ = that.id_;
that.id_ = 0;
return *this;
}

size_t id() const { return id_; }

friend bool operator==(const WithId& a, const WithId& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }

protected:
explicit WithId(size_t id) : id_(id) {}

private:
size_t id_;

template <class T>
static size_t next_id() {
// 0 is reserved for moved from state.
static size_t gId = 1;
return gId++;
}
};

} // namespace hash_testing_internal

struct NonStandardLayout {
NonStandardLayout() {}
explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
virtual ~NonStandardLayout() {}

friend bool operator==(const NonStandardLayout& a,
const NonStandardLayout& b) {
return a.value == b.value;
}
friend bool operator!=(const NonStandardLayout& a,
const NonStandardLayout& b) {
return a.value != b.value;
}

template <typename H>
friend H AbslHashValue(H h, const NonStandardLayout& v) {
return H::combine(std::move(h), v.value);
}

std::string value;
};

struct StatefulTestingHash
: absl::container_internal::hash_testing_internal::WithId<
StatefulTestingHash> {
template <class T>
size_t operator()(const T& t) const {
return absl::Hash<T>{}(t);
}
};

struct StatefulTestingEqual
: absl::container_internal::hash_testing_internal::WithId<
StatefulTestingEqual> {
template <class T, class U>
bool operator()(const T& t, const U& u) const {
return t == u;
}
};

// It is expected that Alloc() == Alloc() for all allocators so we cannot use
// WithId base. We need to explicitly assign ids.
template <class T = int>
struct Alloc : std::allocator<T> {
using propagate_on_container_swap = std::true_type;

// Using old paradigm for this to ensure compatibility.
explicit Alloc(size_t id = 0) : id_(id) {}

Alloc(const Alloc&) = default;
Alloc& operator=(const Alloc&) = default;

template <class U>
Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}

template <class U>
struct rebind {
using other = Alloc<U>;
};

size_t id() const { return id_; }

friend bool operator==(const Alloc& a, const Alloc& b) {
return a.id_ == b.id_;
}
friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }

private:
size_t id_ = (std::numeric_limits<size_t>::max)();
};

template <class Map>
auto items(const Map& m) -> std::vector<
std::pair<typename Map::key_type, typename Map::mapped_type>> {
using std::get;
std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
res.reserve(m.size());
for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
return res;
}

template <class Set>
auto keys(const Set& s)
-> std::vector<typename std::decay<typename Set::key_type>::type> {
std::vector<typename std::decay<typename Set::key_type>::type> res;
res.reserve(s.size());
for (const auto& v : s) res.emplace_back(v);
return res;
}

} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{
namespace hash_testing_internal
{

template<class Derived>
struct WithId
{
WithId() :
id_(next_id<Derived>())
{
}
WithId(const WithId& that) :
id_(that.id_)
{
}
WithId(WithId&& that) :
id_(that.id_)
{
that.id_ = 0;
}
WithId& operator=(const WithId& that)
{
id_ = that.id_;
return *this;
}
WithId& operator=(WithId&& that)
{
id_ = that.id_;
that.id_ = 0;
return *this;
}

size_t id() const
{
return id_;
}

friend bool operator==(const WithId& a, const WithId& b)
{
return a.id_ == b.id_;
}
friend bool operator!=(const WithId& a, const WithId& b)
{
return !(a == b);
}

protected:
explicit WithId(size_t id) :
id_(id)
{
}

private:
size_t id_;

template<class T>
static size_t next_id()
{
// 0 is reserved for moved from state.
static size_t gId = 1;
return gId++;
}
};

} // namespace hash_testing_internal

struct NonStandardLayout
{
NonStandardLayout()
{
}
explicit NonStandardLayout(std::string s) :
value(std::move(s))
{
}
virtual ~NonStandardLayout()
{
}

friend bool operator==(const NonStandardLayout& a, const NonStandardLayout& b)
{
return a.value == b.value;
}
friend bool operator!=(const NonStandardLayout& a, const NonStandardLayout& b)
{
return a.value != b.value;
}

template<typename H>
friend H AbslHashValue(H h, const NonStandardLayout& v)
{
return H::combine(std::move(h), v.value);
}

std::string value;
};

struct StatefulTestingHash : absl::container_internal::hash_testing_internal::WithId<StatefulTestingHash>
{
template<class T>
size_t operator()(const T& t) const
{
return absl::Hash<T>{}(t);
}
};

struct StatefulTestingEqual : absl::container_internal::hash_testing_internal::WithId<StatefulTestingEqual>
{
template<class T, class U>
bool operator()(const T& t, const U& u) const
{
return t == u;
}
};

// It is expected that Alloc() == Alloc() for all allocators so we cannot use
// WithId base. We need to explicitly assign ids.
template<class T = int>
struct Alloc : std::allocator<T>
{
using propagate_on_container_swap = std::true_type;

// Using old paradigm for this to ensure compatibility.
explicit Alloc(size_t id = 0) :
id_(id)
{
}

Alloc(const Alloc&) = default;
Alloc& operator=(const Alloc&) = default;

template<class U>
Alloc(const Alloc<U>& that) :
std::allocator<T>(that),
id_(that.id())
{
}

template<class U>
struct rebind
{
using other = Alloc<U>;
};

size_t id() const
{
return id_;
}

friend bool operator==(const Alloc& a, const Alloc& b)
{
return a.id_ == b.id_;
}
friend bool operator!=(const Alloc& a, const Alloc& b)
{
return !(a == b);
}

private:
size_t id_ = (std::numeric_limits<size_t>::max)();
};

template<class Map>
auto items(const Map& m) -> std::vector<
std::pair<typename Map::key_type, typename Map::mapped_type>>
{
using std::get;
std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
res.reserve(m.size());
for (const auto& v : m)
res.emplace_back(get<0>(v), get<1>(v));
return res;
}

template<class Set>
auto keys(const Set& s)
-> std::vector<typename std::decay<typename Set::key_type>::type>
{
std::vector<typename std::decay<typename Set::key_type>::type> res;
res.reserve(s.size());
for (const auto& v : s)
res.emplace_back(v);
return res;
}

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
@@ -174,8 +230,8 @@ ABSL_NAMESPACE_END
// From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html)
// "the unordered associative containers in <unordered_map> and <unordered_set>
// meet the allocator-aware container requirements;"
#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \
( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 ))
#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \
(__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9))
#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
#else
#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
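
As a rough illustration of how these test helpers are typically wired into a standard container (the bucket count, allocator id, and function name below are placeholders chosen only to show that the stateful functors and allocator carry ids):

#include <unordered_set>
#include "absl/container/internal/hash_policy_testing.h"

namespace ci = absl::container_internal;

// Sketch only: exercises the hash/equality/allocator test types.
void PolicyTestingSketch() {
  using Set = std::unordered_set<ci::NonStandardLayout, ci::StatefulTestingHash,
                                 ci::StatefulTestingEqual,
                                 ci::Alloc<ci::NonStandardLayout>>;
  // Each WithId-derived functor gets a fresh id; the Alloc id is assigned
  // explicitly because allocators are otherwise expected to compare equal.
  Set s(/*bucket_count=*/8, ci::StatefulTestingHash(), ci::StatefulTestingEqual(),
        ci::Alloc<ci::NonStandardLayout>(/*id=*/17));
  s.insert(ci::NonStandardLayout("sample"));
  auto ks = ci::keys(s);  // flattens the keys into a std::vector for checks
  (void)ks;
}
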


+ 195
- 177
CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h View File

@@ -23,186 +23,204 @@


#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;

private:
struct ReturnKey {
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

// Defines how slots are initialized/destroyed/moved.
template<class Policy, class = void>
struct hash_policy_traits
{
// The type of the keys stored in the hashtable.
using key_type = typename Policy::key_type;

private:
struct ReturnKey
{
// When C++17 is available, we can use std::launder to provide mutable
// access to the key for use in node handle.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Key,
absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int) {
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
}
template<class Key, absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
static key_type& Impl(Key&& k, int)
{
return *std::launder(
const_cast<key_type*>(std::addressof(std::forward<Key>(k)))
);
}
#endif


template <class Key>
static Key Impl(Key&& k, char) {
return std::forward<Key>(k);
}

// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template <class Key, class... Args>
auto operator()(Key&& k, const Args&...) const
-> decltype(Impl(std::forward<Key>(k), 0)) {
return Impl(std::forward<Key>(k), 0);
}
};

template <class P = Policy, class = void>
struct ConstantIteratorsImpl : std::false_type {};

template <class P>
struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
: P::constant_iterators {};

public:
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;

// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
using init_type = typename Policy::init_type;

using reference = decltype(Policy::element(std::declval<slot_type*>()));
using pointer = typename std::remove_reference<reference>::type*;
using value_type = typename std::remove_reference<reference>::type;

// Policies can set this variable to tell raw_hash_set that all iterators
// should be constant, even `iterator`. This is useful for set-like
// containers.
// Defaults to false if not provided by the policy.
using constant_iterators = ConstantIteratorsImpl<>;

// PRECONDITION: `slot` is UNINITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template <class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
Policy::construct(alloc, slot, std::forward<Args>(args)...);
}

// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is UNINITIALIZED
template <class Alloc>
static void destroy(Alloc* alloc, slot_type* slot) {
Policy::destroy(alloc, slot);
}

// Transfers the `old_slot` to `new_slot`. Any memory allocated by the
// allocator inside `old_slot` to `new_slot` can be transferred.
//
// OPTIONAL: defaults to:
//
// clone(new_slot, std::move(*old_slot));
// destroy(old_slot);
//
// PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
// POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
// UNINITIALIZED
template <class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
transfer_impl(alloc, new_slot, old_slot, 0);
}

// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template <class P = Policy>
static auto element(slot_type* slot) -> decltype(P::element(slot)) {
return P::element(slot);
}

// Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
//
// If `slot` is nullptr, returns the constant amount of memory owned by any
// full slot or -1 if slots own variable amounts of memory.
//
// PRECONDITION: `slot` is INITIALIZED or nullptr
template <class P = Policy>
static size_t space_used(const slot_type* slot) {
return P::space_used(slot);
}

// Provides generalized access to the key for elements, both for elements in
// the table and for elements that have not yet been inserted (or even
// constructed). We would like an API that allows us to say: `key(args...)`
// but we cannot do that for all cases, so we use this more general API that
// can be used for many things, including the following:
//
// - Given an element in a table, get its key.
// - Given an element initializer, get its key.
// - Given `emplace()` arguments, get the element key.
//
// Implementations of this must adhere to a very strict technical
// specification around aliasing and consuming arguments:
//
// Let `value_type` be the result type of `element()` without ref- and
// cv-qualifiers. The first argument is a functor, the rest are constructor
// arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
// `k` is the element key, and `xs...` are the new constructor arguments for
// `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
// `ts...`. The key won't be touched once `xs...` are used to construct an
// element; `ts...` won't be touched at all, which allows `apply()` to consume
// any rvalues among them.
//
// If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
// trigger a hard compile error unless it originates from `f`. In other words,
// `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
// constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
//
// If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
// `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
template <class F, class... Ts, class P = Policy>
static auto apply(F&& f, Ts&&... ts)
-> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
}

// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}

// Returns the "value" (as opposed to the "key") portion of the element. Used
// by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
template <class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}

private:
// Use auto -> decltype as an enabler.
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, int)
-> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
P::transfer(alloc, new_slot, old_slot);
}
template <class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, char) {
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
template<class Key>
static Key Impl(Key&& k, char)
{
return std::forward<Key>(k);
}

// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template<class Key, class... Args>
auto operator()(Key&& k, const Args&...) const
-> decltype(Impl(std::forward<Key>(k), 0))
{
return Impl(std::forward<Key>(k), 0);
}
};

template<class P = Policy, class = void>
struct ConstantIteratorsImpl : std::false_type
{
};

template<class P>
struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>> : P::constant_iterators
{
};

public:
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;

// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
using init_type = typename Policy::init_type;

using reference = decltype(Policy::element(std::declval<slot_type*>()));
using pointer = typename std::remove_reference<reference>::type*;
using value_type = typename std::remove_reference<reference>::type;

// Policies can set this variable to tell raw_hash_set that all iterators
// should be constant, even `iterator`. This is useful for set-like
// containers.
// Defaults to false if not provided by the policy.
using constant_iterators = ConstantIteratorsImpl<>;

// PRECONDITION: `slot` is UNINITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template<class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args)
{
Policy::construct(alloc, slot, std::forward<Args>(args)...);
}

// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is UNINITIALIZED
template<class Alloc>
static void destroy(Alloc* alloc, slot_type* slot)
{
Policy::destroy(alloc, slot);
}

// Transfers the `old_slot` to `new_slot`. Any memory allocated by the
// allocator inside `old_slot` to `new_slot` can be transferred.
//
// OPTIONAL: defaults to:
//
// clone(new_slot, std::move(*old_slot));
// destroy(old_slot);
//
// PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
// POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
// UNINITIALIZED
template<class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot)
{
transfer_impl(alloc, new_slot, old_slot, 0);
}

// PRECONDITION: `slot` is INITIALIZED
// POSTCONDITION: `slot` is INITIALIZED
template<class P = Policy>
static auto element(slot_type* slot) -> decltype(P::element(slot))
{
return P::element(slot);
}

// Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
//
// If `slot` is nullptr, returns the constant amount of memory owned by any
// full slot or -1 if slots own variable amounts of memory.
//
// PRECONDITION: `slot` is INITIALIZED or nullptr
template<class P = Policy>
static size_t space_used(const slot_type* slot)
{
return P::space_used(slot);
}

// Provides generalized access to the key for elements, both for elements in
// the table and for elements that have not yet been inserted (or even
// constructed). We would like an API that allows us to say: `key(args...)`
// but we cannot do that for all cases, so we use this more general API that
// can be used for many things, including the following:
//
// - Given an element in a table, get its key.
// - Given an element initializer, get its key.
// - Given `emplace()` arguments, get the element key.
//
// Implementations of this must adhere to a very strict technical
// specification around aliasing and consuming arguments:
//
// Let `value_type` be the result type of `element()` without ref- and
// cv-qualifiers. The first argument is a functor, the rest are constructor
// arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
// `k` is the element key, and `xs...` are the new constructor arguments for
// `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
// `ts...`. The key won't be touched once `xs...` are used to construct an
// element; `ts...` won't be touched at all, which allows `apply()` to consume
// any rvalues among them.
//
// If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
// trigger a hard compile error unless it originates from `f`. In other words,
// `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
// constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
//
// If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
// `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
template<class F, class... Ts, class P = Policy>
static auto apply(F&& f, Ts&&... ts)
-> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...))
{
return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
}

// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template<class P = Policy>
static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot)))
{
return P::apply(ReturnKey(), element(slot));
}

// Returns the "value" (as opposed to the "key") portion of the element. Used
// by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
template<class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem))
{
return P::value(elem);
}

private:
// Use auto -> decltype as an enabler.
template<class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, int)
-> decltype((void)P::transfer(alloc, new_slot, old_slot))
{
P::transfer(alloc, new_slot, old_slot);
}
template<class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, char)
{
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
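
To make the Policy requirements above concrete, here is a sketch of roughly the smallest policy hash_policy_traits can wrap: an int set whose slot is the element itself. It mirrors the shape used in Abseil's own tests, but the names IntSetPolicy and IntTraits are illustrative, not part of the library:

#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/container/internal/hash_policy_traits.h"

// Sketch only: minimal policy surface for a set of ints.
struct IntSetPolicy {
  using slot_type = int;
  using key_type = int;
  using init_type = int;

  template <class Alloc, class... Args>
  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
    std::allocator_traits<Alloc>::construct(*alloc, slot, std::forward<Args>(args)...);
  }
  template <class Alloc>
  static void destroy(Alloc* alloc, slot_type* slot) {
    std::allocator_traits<Alloc>::destroy(*alloc, slot);
  }
  static int& element(slot_type* slot) { return *slot; }
  static size_t space_used(const slot_type*) { return 0; }

  // apply(f, x) must call f(key, ctor-args...); for a set the key doubles as
  // the single constructor argument.
  template <class F>
  static auto apply(F&& f, int x) -> decltype(std::forward<F>(f)(x, x)) {
    return std::forward<F>(f)(x, x);
  }
};

using IntTraits = absl::container_internal::hash_policy_traits<IntSetPolicy>;
static_assert(std::is_same<IntTraits::value_type, int>::value,
              "value_type is derived from Policy::element()");
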

+ 73
- 61
CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h View File

@@ -37,74 +37,86 @@


#include "absl/container/internal/hashtable_debug_hooks.h" #include "absl/container/internal/hashtable_debug_hooks.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


// Returns the number of probes required to lookup `key`. Returns 0 for a
// search with no collisions. Higher values mean more hash collisions occurred;
// however, the exact meaning of this number varies according to the container
// type.
template <typename C>
size_t GetHashtableDebugNumProbes(
const C& c, const typename C::key_type& key) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::GetNumProbes(c, key);
}
// Returns the number of probes required to lookup `key`. Returns 0 for a
// search with no collisions. Higher values mean more hash collisions occurred;
// however, the exact meaning of this number varies according to the container
// type.
template<typename C>
size_t GetHashtableDebugNumProbes(
const C& c, const typename C::key_type& key
)
{
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::GetNumProbes(c, key);
}


// Gets a histogram of the number of probes for each element in the container.
// The sum of all the values in the vector is equal to container.size().
template <typename C>
std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
std::vector<size_t> v;
for (auto it = container.begin(); it != container.end(); ++it) {
size_t num_probes = GetHashtableDebugNumProbes(
container,
absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
v.resize((std::max)(v.size(), num_probes + 1));
v[num_probes]++;
}
return v;
}
// Gets a histogram of the number of probes for each element in the container.
// The sum of all the values in the vector is equal to container.size().
template<typename C>
std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container)
{
std::vector<size_t> v;
for (auto it = container.begin(); it != container.end(); ++it)
{
size_t num_probes = GetHashtableDebugNumProbes(
container,
absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0)
);
v.resize((std::max)(v.size(), num_probes + 1));
v[num_probes]++;
}
return v;
}


struct HashtableDebugProbeSummary {
size_t total_elements;
size_t total_num_probes;
double mean;
};
struct HashtableDebugProbeSummary
{
size_t total_elements;
size_t total_num_probes;
double mean;
};


// Gets a summary of the probe count distribution for the elements in the
// container.
template <typename C>
HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
auto probes = GetHashtableDebugNumProbesHistogram(container);
HashtableDebugProbeSummary summary = {};
for (size_t i = 0; i < probes.size(); ++i) {
summary.total_elements += probes[i];
summary.total_num_probes += probes[i] * i;
}
summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
return summary;
}
// Gets a summary of the probe count distribution for the elements in the
// container.
template<typename C>
HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container)
{
auto probes = GetHashtableDebugNumProbesHistogram(container);
HashtableDebugProbeSummary summary = {};
for (size_t i = 0; i < probes.size(); ++i)
{
summary.total_elements += probes[i];
summary.total_num_probes += probes[i] * i;
}
summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
return summary;
}


// Returns the number of bytes requested from the allocator by the container
// and not freed.
template <typename C>
size_t AllocatedByteSize(const C& c) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::AllocatedByteSize(c);
}
// Returns the number of bytes requested from the allocator by the container
// and not freed.
template<typename C>
size_t AllocatedByteSize(const C& c)
{
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::AllocatedByteSize(c);
}


// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
// and `c.size()` is equal to `num_elements`.
template <typename C>
size_t LowerBoundAllocatedByteSize(size_t num_elements) {
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
}
// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
// and `c.size()` is equal to `num_elements`.
template<typename C>
size_t LowerBoundAllocatedByteSize(size_t num_elements)
{
return absl::container_internal::hashtable_debug_internal::
HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
}


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
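
A quick usage sketch: because the default HashtableDebugAccess (declared in the hooks header below) relies only on the standard bucket interface, these free functions can be pointed at an ordinary std::unordered_set. The function name here is illustrative:

#include <cstddef>
#include <unordered_set>
#include <vector>
#include "absl/container/internal/hashtable_debug.h"

// Sketch only: inspects probe counts of a standard container.
void ProbeStatsSketch() {
  std::unordered_set<int> s = {1, 2, 3, 4, 5};
  // Entry i of the histogram counts elements found after i extra probes.
  std::vector<size_t> hist =
      absl::container_internal::GetHashtableDebugNumProbesHistogram(s);
  // The summary aggregates the same data into totals and a mean probe count.
  auto summary = absl::container_internal::GetHashtableDebugProbeSummary(s);
  (void)hist;
  (void)summary;
}
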

+ 59
- 49
CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h View File

@@ -25,61 +25,71 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace hashtable_debug_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{
namespace hashtable_debug_internal
{


// If it is a map, call get<0>().
using std::get;
template <typename T, typename = typename T::mapped_type>
auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
return get<0>(pair);
}
// If it is a map, call get<0>().
using std::get;
template<typename T, typename = typename T::mapped_type>
auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair))
{
return get<0>(pair);
}


// If it is not a map, return the value directly.
template <typename T>
const typename T::key_type& GetKey(const typename T::key_type& key, char) {
return key;
}
// If it is not a map, return the value directly.
template<typename T>
const typename T::key_type& GetKey(const typename T::key_type& key, char)
{
return key;
}


// Containers should specialize this to provide debug information for that
// container.
template <class Container, typename Enabler = void>
struct HashtableDebugAccess {
// Returns the number of probes required to find `key` in `c`. The "number of
// probes" is a concept that can vary by container. Implementations should
// return 0 when `key` was found in the minimum number of operations and
// should increment the result for each non-trivial operation required to find
// `key`.
//
// The default implementation uses the bucket api from the standard and thus
// works for `std::unordered_*` containers.
static size_t GetNumProbes(const Container& c,
const typename Container::key_type& key) {
if (!c.bucket_count()) return {};
size_t num_probes = 0;
size_t bucket = c.bucket(key);
for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
if (it == e) return num_probes;
if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
}
}
// Containers should specialize this to provide debug information for that
// container.
template<class Container, typename Enabler = void>
struct HashtableDebugAccess
{
// Returns the number of probes required to find `key` in `c`. The "number of
// probes" is a concept that can vary by container. Implementations should
// return 0 when `key` was found in the minimum number of operations and
// should increment the result for each non-trivial operation required to find
// `key`.
//
// The default implementation uses the bucket api from the standard and thus
// works for `std::unordered_*` containers.
static size_t GetNumProbes(const Container& c, const typename Container::key_type& key)
{
if (!c.bucket_count())
return {};
size_t num_probes = 0;
size_t bucket = c.bucket(key);
for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes)
{
if (it == e)
return num_probes;
if (c.key_eq()(key, GetKey<Container>(*it, 0)))
return num_probes;
}
}


// Returns the number of bytes requested from the allocator by the container
// and not freed.
//
// static size_t AllocatedByteSize(const Container& c);
// Returns the number of bytes requested from the allocator by the container
// and not freed.
//
// static size_t AllocatedByteSize(const Container& c);


// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
// `Container` and `c.size()` is equal to `num_elements`.
//
// static size_t LowerBoundAllocatedByteSize(size_t num_elements);
};
// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
// `Container` and `c.size()` is equal to `num_elements`.
//
// static size_t LowerBoundAllocatedByteSize(size_t num_elements);
};


} // namespace hashtable_debug_internal
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace hashtable_debug_internal
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
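
The extension point this header advertises is the HashtableDebugAccess specialization. A hedged sketch of specializing it for a made-up container (ToyTable and its members are purely illustrative, not an Abseil type):

#include <cstddef>
#include "absl/container/internal/hashtable_debug_hooks.h"

// Purely illustrative container; only the pieces the hooks need are defined.
struct ToyTable {
  using key_type = int;
  using value_type = int;
  size_t probes_for(int) const { return 0; }
  size_t allocated_bytes() const { return 64; }
};

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace hashtable_debug_internal {

template <>
struct HashtableDebugAccess<ToyTable> {
  static size_t GetNumProbes(const ToyTable& t, const ToyTable::key_type& k) {
    return t.probes_for(k);
  }
  static size_t AllocatedByteSize(const ToyTable& t) {
    return t.allocated_bytes();
  }
  static size_t LowerBoundAllocatedByteSize(size_t num_elements) {
    return num_elements * sizeof(ToyTable::value_type);
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl
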

+ 271
- 217
CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h View File

@@ -51,249 +51,303 @@
#include "absl/synchronization/mutex.h" #include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h" #include "absl/utility/utility.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
HashtablezInfo(const HashtablezInfo&) = delete;
HashtablezInfo& operator=(const HashtablezInfo&) = delete;

// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);

// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;

// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
// These are guarded by init_mu, but that is not externalized to clients,
// which can read them only during `SampleRecorder::Iterate` which will hold
// the lock.
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
size_t inline_element_size; // How big is the slot?
};

inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo>
{
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
HashtablezInfo(const HashtablezInfo&) = delete;
HashtablezInfo& operator=(const HashtablezInfo&) = delete;

// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);

// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
std::atomic<size_t> hashes_bitwise_xor;
std::atomic<size_t> max_reserve;

// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
// These are guarded by init_mu, but that is not externalized to clients,
// which can read them only during `SampleRecorder::Iterate` which will hold
// the lock.
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
size_t inline_element_size; // How big is the slot?
};

inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length)
{
#ifdef ABSL_INTERNAL_HAVE_SSE2
total_probe_length /= 16;
total_probe_length /= 16;
#else
total_probe_length /= 8;
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}

inline void RecordReservationSlow(HashtablezInfo* info,
size_t target_capacity) {
info->max_reserve.store(
(std::max)(info->max_reserve.load(std::memory_order_relaxed),
target_capacity),
std::memory_order_relaxed);
}

inline void RecordClearedReservationSlow(HashtablezInfo* info) {
info->max_reserve.store(0, std::memory_order_relaxed);
}

inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
size_t capacity) {
info->size.store(size, std::memory_order_relaxed);
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
}
}

void RecordInsertSlow(HashtablezInfo* info, size_t hash,
size_t distance_from_desired);

inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_erases.store(
1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed);
}

struct SamplingState {
int64_t next_sample;
// When we make a sampling decision, we record that distance so we can weight
// each sample.
int64_t sample_stride;
};

HashtablezInfo* SampleSlow(SamplingState& next_sample,
size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_rehashes.store(
1 + info->num_rehashes.load(std::memory_order_relaxed),
std::memory_order_relaxed
);
}

inline void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity)
{
info->max_reserve.store(
(std::max)(info->max_reserve.load(std::memory_order_relaxed), target_capacity),
std::memory_order_relaxed
);
}

inline void RecordClearedReservationSlow(HashtablezInfo* info)
{
info->max_reserve.store(0, std::memory_order_relaxed);
}

inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity)
{
info->size.store(size, std::memory_order_relaxed);
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0)
{
// This is a clear, reset the total/num_erases too.
info->total_probe_length.store(0, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
}
}

void RecordInsertSlow(HashtablezInfo* info, size_t hash, size_t distance_from_desired);

inline void RecordEraseSlow(HashtablezInfo* info)
{
info->size.fetch_sub(1, std::memory_order_relaxed);
// There is only one concurrent writer, so `load` then `store` is sufficient
// instead of using `fetch_add`.
info->num_erases.store(
1 + info->num_erases.load(std::memory_order_relaxed),
std::memory_order_relaxed
);
}

struct SamplingState
{
int64_t next_sample;
// When we make a sampling decision, we record that distance so we can weight
// each sample.
int64_t sample_stride;
};

HashtablezInfo* SampleSlow(SamplingState& next_sample, size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);


#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)


#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
~HashtablezInfoHandle() {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
UnsampleSlow(info_);
}

HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;

HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
: info_(absl::exchange(o.info_, nullptr)) {}
HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
UnsampleSlow(info_);
}
info_ = absl::exchange(o.info_, nullptr);
return *this;
}

inline void RecordStorageChanged(size_t size, size_t capacity) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordStorageChangedSlow(info_, size, capacity);
}

inline void RecordRehash(size_t total_probe_length) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordRehashSlow(info_, total_probe_length);
}

inline void RecordReservation(size_t target_capacity) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordReservationSlow(info_, target_capacity);
}

inline void RecordClearedReservation() {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordClearedReservationSlow(info_);
}

inline void RecordInsert(size_t hash, size_t distance_from_desired) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordInsertSlow(info_, hash, distance_from_desired);
}

inline void RecordErase() {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordEraseSlow(info_);
}

friend inline void swap(HashtablezInfoHandle& lhs,
HashtablezInfoHandle& rhs) {
std::swap(lhs.info_, rhs.info_);
}

private:
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
class HashtablezInfoHandle
{
public:
explicit HashtablezInfoHandle() :
info_(nullptr)
{
}
explicit HashtablezInfoHandle(HashtablezInfo* info) :
info_(info)
{
}
~HashtablezInfoHandle()
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
UnsampleSlow(info_);
}

HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;

HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
:
info_(absl::exchange(o.info_, nullptr))
{
}
HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept
{
if (ABSL_PREDICT_FALSE(info_ != nullptr))
{
UnsampleSlow(info_);
}
info_ = absl::exchange(o.info_, nullptr);
return *this;
}

inline void RecordStorageChanged(size_t size, size_t capacity)
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordStorageChangedSlow(info_, size, capacity);
}

inline void RecordRehash(size_t total_probe_length)
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordRehashSlow(info_, total_probe_length);
}

inline void RecordReservation(size_t target_capacity)
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordReservationSlow(info_, target_capacity);
}

inline void RecordClearedReservation()
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordClearedReservationSlow(info_);
}

inline void RecordInsert(size_t hash, size_t distance_from_desired)
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordInsertSlow(info_, hash, distance_from_desired);
}

inline void RecordErase()
{
if (ABSL_PREDICT_TRUE(info_ == nullptr))
return;
RecordEraseSlow(info_);
}

friend inline void swap(HashtablezInfoHandle& lhs, HashtablezInfoHandle& rhs)
{
std::swap(lhs.info_, rhs.info_);
}

private:
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
#else
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
// be removed by the linker, in order to reduce the binary size.
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t) {}

inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordReservation(size_t /*target_capacity*/) {}
inline void RecordClearedReservation() {}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
inline void RecordErase() {}

friend inline void swap(HashtablezInfoHandle& /*lhs*/,
HashtablezInfoHandle& /*rhs*/) {}
};
// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
// be removed by the linker, in order to reduce the binary size.
class HashtablezInfoHandle
{
public:
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t)
{
}

inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/)
{
}
inline void RecordRehash(size_t /*total_probe_length*/)
{
}
inline void RecordReservation(size_t /*target_capacity*/)
{
}
inline void RecordClearedReservation()
{
}
inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/)
{
}
inline void RecordErase()
{
}

friend inline void swap(HashtablezInfoHandle& /*lhs*/, HashtablezInfoHandle& /*rhs*/)
{
}
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)


#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)


// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED
)
{
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
SampleSlow(global_next_sample, inline_element_size));
if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0))
{
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
SampleSlow(global_next_sample, inline_element_size)
);
#else
return HashtablezInfoHandle(nullptr);
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
}
}


using HashtablezSampler =
::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
using HashtablezSampler =
::absl::profiling_internal::SampleRecorder<HashtablezInfo>;


// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();
// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();


using HashtablezConfigListener = void (*)();
void SetHashtablezConfigListener(HashtablezConfigListener l);
using HashtablezConfigListener = void (*)();
void SetHashtablezConfigListener(HashtablezConfigListener l);


// Enables or disables sampling for Swiss tables.
bool IsHashtablezEnabled();
void SetHashtablezEnabled(bool enabled);
void SetHashtablezEnabledInternal(bool enabled);
// Enables or disables sampling for Swiss tables.
bool IsHashtablezEnabled();
void SetHashtablezEnabled(bool enabled);
void SetHashtablezEnabledInternal(bool enabled);


// Sets the rate at which Swiss tables will be sampled.
int32_t GetHashtablezSampleParameter();
void SetHashtablezSampleParameter(int32_t rate);
void SetHashtablezSampleParameterInternal(int32_t rate);
// Sets the rate at which Swiss tables will be sampled.
int32_t GetHashtablezSampleParameter();
void SetHashtablezSampleParameter(int32_t rate);
void SetHashtablezSampleParameterInternal(int32_t rate);


// Sets a soft max for the number of samples that will be kept.
int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
void SetHashtablezMaxSamplesInternal(int32_t max);
// Sets a soft max for the number of samples that will be kept.
int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
void SetHashtablezMaxSamplesInternal(int32_t max);


// Configuration override.
// This allows process-wide sampling without depending on order of
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
// Configuration override.
// This allows process-wide sampling without depending on order of
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
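
For context, a sketch of the call pattern a hashtable implementation would drive against this API. The real call sites live inside raw_hash_set; the numeric arguments and the function name here are placeholders:

#include "absl/container/internal/hashtablez_sampler.h"

// Sketch only: the RAII handle usage pattern.
void SamplerSketch() {
  namespace ci = absl::container_internal;
  // Sample() consults the per-thread stride and either returns an inert handle
  // or registers a new HashtablezInfo with the global sampler.
  ci::HashtablezInfoHandle handle = ci::Sample(/*inline_element_size=*/8);
  handle.RecordStorageChanged(/*size=*/0, /*capacity=*/16);
  handle.RecordInsert(/*hash=*/42, /*distance_from_desired=*/0);
  handle.RecordErase();
  // Destruction of the handle unregisters the sample, if one was taken.
}
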

+ 1061
- 903
CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h
File diff suppressed because it is too large
View File


+ 609
- 543
CAPI/cpp/grpc/include/absl/container/internal/layout.h
File diff suppressed because it is too large
View File


+ 49
- 36
CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h View File

@@ -41,52 +41,65 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


template <class Reference, class Policy>
struct node_slot_policy {
static_assert(std::is_lvalue_reference<Reference>::value, "");
template<class Reference, class Policy>
struct node_slot_policy
{
static_assert(std::is_lvalue_reference<Reference>::value, "");


using slot_type = typename std::remove_cv<
typename std::remove_reference<Reference>::type>::type*;
using slot_type = typename std::remove_cv<
typename std::remove_reference<Reference>::type>::type*;


template <class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
*slot = Policy::new_element(alloc, std::forward<Args>(args)...);
}
template<class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args)
{
*slot = Policy::new_element(alloc, std::forward<Args>(args)...);
}


template <class Alloc>
static void destroy(Alloc* alloc, slot_type* slot) {
Policy::delete_element(alloc, *slot);
}
template<class Alloc>
static void destroy(Alloc* alloc, slot_type* slot)
{
Policy::delete_element(alloc, *slot);
}


template <class Alloc>
static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
*new_slot = *old_slot;
}
template<class Alloc>
static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot)
{
*new_slot = *old_slot;
}


static size_t space_used(const slot_type* slot) {
if (slot == nullptr) return Policy::element_space_used(nullptr);
return Policy::element_space_used(*slot);
}
static size_t space_used(const slot_type* slot)
{
if (slot == nullptr)
return Policy::element_space_used(nullptr);
return Policy::element_space_used(*slot);
}


static Reference element(slot_type* slot) { return **slot; }
static Reference element(slot_type* slot)
{
return **slot;
}


template <class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem)) {
return P::value(elem);
}
template<class T, class P = Policy>
static auto value(T* elem) -> decltype(P::value(elem))
{
return P::value(elem);
}


template <class... Ts, class P = Policy>
static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
return P::apply(std::forward<Ts>(ts)...);
}
};
template<class... Ts, class P = Policy>
static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...))
{
return P::apply(std::forward<Ts>(ts)...);
}
};


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
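
To show what a Policy must supply here, a hedged sketch of a node-based policy for heap-allocated ints, modeled on the shape of Abseil's own test policy; NodeIntPolicy and the demo function are illustrative names:

#include <memory>
#include <utility>
#include "absl/container/internal/node_slot_policy.h"

// Sketch only: node_slot_policy forwards construct/destroy to these hooks;
// the slot itself is just an int* owning the heap element.
struct NodeIntPolicy
    : absl::container_internal::node_slot_policy<int&, NodeIntPolicy> {
  using key_type = int;
  using init_type = int;

  template <class Alloc>
  static int* new_element(Alloc* /*alloc*/, int value) {
    return new int(value);
  }
  template <class Alloc>
  static void delete_element(Alloc* /*alloc*/, int* elem) {
    delete elem;
  }
};

void NodePolicySketch() {
  std::allocator<int> alloc;
  int* slot = nullptr;
  NodeIntPolicy::construct(&alloc, &slot, 5);  // slot now points at a heap int
  NodeIntPolicy::destroy(&alloc, &slot);       // frees it again
}
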

+ 190
- 170
CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h View File

@@ -23,176 +23,196 @@
#include "absl/container/internal/container_memory.h" #include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
// P is Policy. It's passed as a template argument to support maps that have
// incomplete types as values, as in unordered_map<K, IncompleteType>.
// MappedReference<> may be a non-reference type.
template <class P>
using MappedReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::reference>())));

// MappedConstReference<> may be a non-reference type.
template <class P>
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())));

using KeyArgImpl =
KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;

public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;
template <class K>
using key_arg = typename KeyArgImpl::template type<K, key_type>;

static_assert(!std::is_reference<key_type>::value, "");

// TODO(b/187807849): Evaluate whether to support reference mapped_type and
// remove this assertion if/when it is supported.
static_assert(!std::is_reference<mapped_type>::value, "");

using iterator = typename raw_hash_map::raw_hash_set::iterator;
using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;

raw_hash_map() {}
using raw_hash_map::raw_hash_set::raw_hash_set;

// The last two template parameters ensure that both arguments are rvalues
// (lvalue arguments are handled by the overloads below). This is necessary
// for supporting bitfield arguments.
//
// union { int n : 1; };
// flat_hash_map<int, int> m;
// m.insert_or_assign(n, n);
template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
}

template <class K = key_type, class V = mapped_type, K* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
return insert_or_assign_impl(std::forward<K>(k), v);
}

template <class K = key_type, class V = mapped_type, V* = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
return insert_or_assign_impl(k, std::forward<V>(v));
}

template <class K = key_type, class V = mapped_type>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
return insert_or_assign_impl(k, v);
}

template <class K = key_type, class V = mapped_type, K* = nullptr,
V* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
}

template <class K = key_type, class V = mapped_type, K* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
return insert_or_assign(std::forward<K>(k), v).first;
}

template <class K = key_type, class V = mapped_type, V* = nullptr>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
return insert_or_assign(k, std::forward<V>(v)).first;
}

template <class K = key_type, class V = mapped_type>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
return insert_or_assign(k, v).first;
}

// All `try_emplace()` overloads make the same guarantees regarding rvalue
// arguments as `std::unordered_map::try_emplace()`, namely that these
// functions will not move from rvalue arguments if insertions do not happen.
template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0,
K* = nullptr>
std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}

template <class K = key_type, class... Args,
typename std::enable_if<
!std::is_convertible<K, const_iterator>::value, int>::type = 0>
std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
return try_emplace_impl(k, std::forward<Args>(args)...);
}

template <class K = key_type, class... Args, K* = nullptr>
iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}

template <class K = key_type, class... Args>
iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
return try_emplace(k, std::forward<Args>(args)...).first;
}

template <class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key) {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
"absl::container_internal::raw_hash_map<>::at");
}
return Policy::value(&*it);
}

template <class K = key_type, class P = Policy>
MappedConstReference<P> at(const key_arg<K>& key) const {
auto it = this->find(key);
if (it == this->end()) {
base_internal::ThrowStdOutOfRange(
"absl::container_internal::raw_hash_map<>::at");
}
return Policy::value(&*it);
}

template <class K = key_type, class P = Policy, K* = nullptr>
MappedReference<P> operator[](key_arg<K>&& key) {
return Policy::value(&*try_emplace(std::forward<K>(key)).first);
}

template <class K = key_type, class P = Policy>
MappedReference<P> operator[](const key_arg<K>& key) {
return Policy::value(&*try_emplace(key).first);
}

private:
template <class K, class V>
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
else
Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
return {this->iterator_at(res.first), res.second};
}

template <class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(k)),
std::forward_as_tuple(std::forward<Args>(args)...));
return {this->iterator_at(res.first), res.second};
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class Policy, class Hash, class Eq, class Alloc>
class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc>
{
// P is Policy. It's passed as a template argument to support maps that have
// incomplete types as values, as in unordered_map<K, IncompleteType>.
// MappedReference<> may be a non-reference type.
template<class P>
using MappedReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::reference>())
));

// MappedConstReference<> may be a non-reference type.
template<class P>
using MappedConstReference = decltype(P::value(
std::addressof(std::declval<typename raw_hash_map::const_reference>())
));

using KeyArgImpl =
KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;

public:
using key_type = typename Policy::key_type;
using mapped_type = typename Policy::mapped_type;
template<class K>
using key_arg = typename KeyArgImpl::template type<K, key_type>;

static_assert(!std::is_reference<key_type>::value, "");

// TODO(b/187807849): Evaluate whether to support reference mapped_type and
// remove this assertion if/when it is supported.
static_assert(!std::is_reference<mapped_type>::value, "");

using iterator = typename raw_hash_map::raw_hash_set::iterator;
using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;

raw_hash_map()
{
}
using raw_hash_map::raw_hash_set::raw_hash_set;

// The last two template parameters ensure that both arguments are rvalues
// (lvalue arguments are handled by the overloads below). This is necessary
// for supporting bitfield arguments.
//
// union { int n : 1; };
// flat_hash_map<int, int> m;
// m.insert_or_assign(n, n);
template<class K = key_type, class V = mapped_type, K* = nullptr, V* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v)
{
return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
}

template<class K = key_type, class V = mapped_type, K* = nullptr>
std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v)
{
return insert_or_assign_impl(std::forward<K>(k), v);
}

template<class K = key_type, class V = mapped_type, V* = nullptr>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v)
{
return insert_or_assign_impl(k, std::forward<V>(v));
}

template<class K = key_type, class V = mapped_type>
std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v)
{
return insert_or_assign_impl(k, v);
}

template<class K = key_type, class V = mapped_type, K* = nullptr, V* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v)
{
return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
}

template<class K = key_type, class V = mapped_type, K* = nullptr>
iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v)
{
return insert_or_assign(std::forward<K>(k), v).first;
}

template<class K = key_type, class V = mapped_type, V* = nullptr>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v)
{
return insert_or_assign(k, std::forward<V>(v)).first;
}

template<class K = key_type, class V = mapped_type>
iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v)
{
return insert_or_assign(k, v).first;
}

// All `try_emplace()` overloads make the same guarantees regarding rvalue
// arguments as `std::unordered_map::try_emplace()`, namely that these
// functions will not move from rvalue arguments if insertions do not happen.
template<class K = key_type, class... Args, typename std::enable_if<!std::is_convertible<K, const_iterator>::value, int>::type = 0, K* = nullptr>
std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args)
{
return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}

template<class K = key_type, class... Args, typename std::enable_if<!std::is_convertible<K, const_iterator>::value, int>::type = 0>
std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args)
{
return try_emplace_impl(k, std::forward<Args>(args)...);
}

template<class K = key_type, class... Args, K* = nullptr>
iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args)
{
return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
}

template<class K = key_type, class... Args>
iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args)
{
return try_emplace(k, std::forward<Args>(args)...).first;
}

template<class K = key_type, class P = Policy>
MappedReference<P> at(const key_arg<K>& key)
{
auto it = this->find(key);
if (it == this->end())
{
base_internal::ThrowStdOutOfRange(
"absl::container_internal::raw_hash_map<>::at"
);
}
return Policy::value(&*it);
}

template<class K = key_type, class P = Policy>
MappedConstReference<P> at(const key_arg<K>& key) const
{
auto it = this->find(key);
if (it == this->end())
{
base_internal::ThrowStdOutOfRange(
"absl::container_internal::raw_hash_map<>::at"
);
}
return Policy::value(&*it);
}

template<class K = key_type, class P = Policy, K* = nullptr>
MappedReference<P> operator[](key_arg<K>&& key)
{
return Policy::value(&*try_emplace(std::forward<K>(key)).first);
}

template<class K = key_type, class P = Policy>
MappedReference<P> operator[](const key_arg<K>& key)
{
return Policy::value(&*try_emplace(key).first);
}

private:
template<class K, class V>
std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v)
{
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
else
Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
return {this->iterator_at(res.first), res.second};
}

template<class K = key_type, class... Args>
std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args)
{
auto res = this->find_or_prepare_insert(k);
if (res.second)
this->emplace_at(res.first, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)), std::forward_as_tuple(std::forward<Args>(args)...));
return {this->iterator_at(res.first), res.second};
}
};

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
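
A minimal usage sketch of the insert_or_assign()/try_emplace() overloads formatted above, assuming absl::flat_hash_map as the public map built on raw_hash_map (the demo function name is arbitrary). As the header's comments state, try_emplace() leaves rvalue arguments untouched when the key already exists, while insert_or_assign() overwrites the mapped value.

#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"

void RawHashMapUsageSketch() {  // arbitrary demo function, not part of the library
  absl::flat_hash_map<std::string, std::string> m;

  std::string v = "first";
  m.try_emplace("key", std::move(v));  // key absent: inserts, may move from v

  std::string w = "second";
  m.try_emplace("key", std::move(w));  // key present: no insertion, w is left intact

  m.insert_or_assign("key", "third");  // key present: assigns over the mapped value
}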

+ 2492
- 2109
CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h
File diff suppressed because it is too large


+ 315
- 249
CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h

@@ -20,255 +20,321 @@


#include "absl/types/compare.h" #include "absl/types/compare.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace test_internal {

// A type that counts number of occurrences of the type, the live occurrences of
// the type, as well as the number of copies, moves, swaps, and comparisons that
// have occurred on the type. This is used as a base class for the copyable,
// copyable+movable, and movable types below that are used in actual tests. Use
// InstanceTracker in tests to track the number of instances.
class BaseCountedInstance {
public:
explicit BaseCountedInstance(int x) : value_(x) {
++num_instances_;
++num_live_instances_;
}
BaseCountedInstance(const BaseCountedInstance& x)
: value_(x.value_), is_live_(x.is_live_) {
++num_instances_;
if (is_live_) ++num_live_instances_;
++num_copies_;
}
BaseCountedInstance(BaseCountedInstance&& x)
: value_(x.value_), is_live_(x.is_live_) {
x.is_live_ = false;
++num_instances_;
++num_moves_;
}
~BaseCountedInstance() {
--num_instances_;
if (is_live_) --num_live_instances_;
}

BaseCountedInstance& operator=(const BaseCountedInstance& x) {
value_ = x.value_;
if (is_live_) --num_live_instances_;
is_live_ = x.is_live_;
if (is_live_) ++num_live_instances_;
++num_copies_;
return *this;
}
BaseCountedInstance& operator=(BaseCountedInstance&& x) {
value_ = x.value_;
if (is_live_) --num_live_instances_;
is_live_ = x.is_live_;
x.is_live_ = false;
++num_moves_;
return *this;
}

bool operator==(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ == x.value_;
}

bool operator!=(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ != x.value_;
}

bool operator<(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ < x.value_;
}

bool operator>(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ > x.value_;
}

bool operator<=(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ <= x.value_;
}

bool operator>=(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ >= x.value_;
}

absl::weak_ordering compare(const BaseCountedInstance& x) const {
++num_comparisons_;
return value_ < x.value_
? absl::weak_ordering::less
: value_ == x.value_ ? absl::weak_ordering::equivalent
: absl::weak_ordering::greater;
}

int value() const {
if (!is_live_) std::abort();
return value_;
}

friend std::ostream& operator<<(std::ostream& o,
const BaseCountedInstance& v) {
return o << "[value:" << v.value() << "]";
}

// Implementation of efficient swap() that counts swaps.
static void SwapImpl(
BaseCountedInstance& lhs, // NOLINT(runtime/references)
BaseCountedInstance& rhs) { // NOLINT(runtime/references)
using std::swap;
swap(lhs.value_, rhs.value_);
swap(lhs.is_live_, rhs.is_live_);
++BaseCountedInstance::num_swaps_;
}

private:
friend class InstanceTracker;

int value_;

// Indicates if the value is live, ie it hasn't been moved away from.
bool is_live_ = true;

// Number of instances.
static int num_instances_;

// Number of live instances (those that have not been moved away from.)
static int num_live_instances_;

// Number of times that BaseCountedInstance objects were moved.
static int num_moves_;

// Number of times that BaseCountedInstance objects were copied.
static int num_copies_;

// Number of times that BaseCountedInstance objects were swapped.
static int num_swaps_;

// Number of times that BaseCountedInstance objects were compared.
static int num_comparisons_;
};

// Helper to track the BaseCountedInstance instance counters. Expects that the
// number of instances and live_instances are the same when it is constructed
// and when it is destructed.
class InstanceTracker {
public:
InstanceTracker()
: start_instances_(BaseCountedInstance::num_instances_),
start_live_instances_(BaseCountedInstance::num_live_instances_) {
ResetCopiesMovesSwaps();
}
~InstanceTracker() {
if (instances() != 0) std::abort();
if (live_instances() != 0) std::abort();
}

// Returns the number of BaseCountedInstance instances both containing valid
// values and those moved away from compared to when the InstanceTracker was
// constructed
int instances() const {
return BaseCountedInstance::num_instances_ - start_instances_;
}

// Returns the number of live BaseCountedInstance instances compared to when
// the InstanceTracker was constructed
int live_instances() const {
return BaseCountedInstance::num_live_instances_ - start_live_instances_;
}

// Returns the number of moves on BaseCountedInstance objects since
// construction or since the last call to ResetCopiesMovesSwaps().
int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }

// Returns the number of copies on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int copies() const {
return BaseCountedInstance::num_copies_ - start_copies_;
}

// Returns the number of swaps on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }

// Returns the number of comparisons on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int comparisons() const {
return BaseCountedInstance::num_comparisons_ - start_comparisons_;
}

// Resets the base values for moves, copies, comparisons, and swaps to the
// current values, so that subsequent Get*() calls for moves, copies,
// comparisons, and swaps will compare to the situation at the point of this
// call.
void ResetCopiesMovesSwaps() {
start_moves_ = BaseCountedInstance::num_moves_;
start_copies_ = BaseCountedInstance::num_copies_;
start_swaps_ = BaseCountedInstance::num_swaps_;
start_comparisons_ = BaseCountedInstance::num_comparisons_;
}

private:
int start_instances_;
int start_live_instances_;
int start_moves_;
int start_copies_;
int start_swaps_;
int start_comparisons_;
};

// Copyable, not movable.
class CopyableOnlyInstance : public BaseCountedInstance {
public:
explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;

friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move() { return false; }
};

// Copyable and movable.
class CopyableMovableInstance : public BaseCountedInstance {
public:
explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
default;
CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;

friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move() { return true; }
};

// Only movable, not default-constructible.
class MovableOnlyInstance : public BaseCountedInstance {
public:
explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
MovableOnlyInstance(MovableOnlyInstance&& other) = default;
MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;

friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move() { return true; }
};

} // namespace test_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace test_internal
{

// A type that counts number of occurrences of the type, the live occurrences of
// the type, as well as the number of copies, moves, swaps, and comparisons that
// have occurred on the type. This is used as a base class for the copyable,
// copyable+movable, and movable types below that are used in actual tests. Use
// InstanceTracker in tests to track the number of instances.
class BaseCountedInstance
{
public:
explicit BaseCountedInstance(int x) :
value_(x)
{
++num_instances_;
++num_live_instances_;
}
BaseCountedInstance(const BaseCountedInstance& x) :
value_(x.value_),
is_live_(x.is_live_)
{
++num_instances_;
if (is_live_)
++num_live_instances_;
++num_copies_;
}
BaseCountedInstance(BaseCountedInstance&& x) :
value_(x.value_),
is_live_(x.is_live_)
{
x.is_live_ = false;
++num_instances_;
++num_moves_;
}
~BaseCountedInstance()
{
--num_instances_;
if (is_live_)
--num_live_instances_;
}

BaseCountedInstance& operator=(const BaseCountedInstance& x)
{
value_ = x.value_;
if (is_live_)
--num_live_instances_;
is_live_ = x.is_live_;
if (is_live_)
++num_live_instances_;
++num_copies_;
return *this;
}
BaseCountedInstance& operator=(BaseCountedInstance&& x)
{
value_ = x.value_;
if (is_live_)
--num_live_instances_;
is_live_ = x.is_live_;
x.is_live_ = false;
++num_moves_;
return *this;
}

bool operator==(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ == x.value_;
}

bool operator!=(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ != x.value_;
}

bool operator<(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ < x.value_;
}

bool operator>(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ > x.value_;
}

bool operator<=(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ <= x.value_;
}

bool operator>=(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ >= x.value_;
}

absl::weak_ordering compare(const BaseCountedInstance& x) const
{
++num_comparisons_;
return value_ < x.value_ ? absl::weak_ordering::less : value_ == x.value_ ? absl::weak_ordering::equivalent :
absl::weak_ordering::greater;
}

int value() const
{
if (!is_live_)
std::abort();
return value_;
}

friend std::ostream& operator<<(std::ostream& o, const BaseCountedInstance& v)
{
return o << "[value:" << v.value() << "]";
}

// Implementation of efficient swap() that counts swaps.
static void SwapImpl(
BaseCountedInstance& lhs, // NOLINT(runtime/references)
BaseCountedInstance& rhs
)
{ // NOLINT(runtime/references)
using std::swap;
swap(lhs.value_, rhs.value_);
swap(lhs.is_live_, rhs.is_live_);
++BaseCountedInstance::num_swaps_;
}

private:
friend class InstanceTracker;

int value_;

// Indicates if the value is live, ie it hasn't been moved away from.
bool is_live_ = true;

// Number of instances.
static int num_instances_;

// Number of live instances (those that have not been moved away from.)
static int num_live_instances_;

// Number of times that BaseCountedInstance objects were moved.
static int num_moves_;

// Number of times that BaseCountedInstance objects were copied.
static int num_copies_;

// Number of times that BaseCountedInstance objects were swapped.
static int num_swaps_;

// Number of times that BaseCountedInstance objects were compared.
static int num_comparisons_;
};

// Helper to track the BaseCountedInstance instance counters. Expects that the
// number of instances and live_instances are the same when it is constructed
// and when it is destructed.
class InstanceTracker
{
public:
InstanceTracker() :
start_instances_(BaseCountedInstance::num_instances_),
start_live_instances_(BaseCountedInstance::num_live_instances_)
{
ResetCopiesMovesSwaps();
}
~InstanceTracker()
{
if (instances() != 0)
std::abort();
if (live_instances() != 0)
std::abort();
}

// Returns the number of BaseCountedInstance instances both containing valid
// values and those moved away from compared to when the InstanceTracker was
// constructed
int instances() const
{
return BaseCountedInstance::num_instances_ - start_instances_;
}

// Returns the number of live BaseCountedInstance instances compared to when
// the InstanceTracker was constructed
int live_instances() const
{
return BaseCountedInstance::num_live_instances_ - start_live_instances_;
}

// Returns the number of moves on BaseCountedInstance objects since
// construction or since the last call to ResetCopiesMovesSwaps().
int moves() const
{
return BaseCountedInstance::num_moves_ - start_moves_;
}

// Returns the number of copies on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int copies() const
{
return BaseCountedInstance::num_copies_ - start_copies_;
}

// Returns the number of swaps on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int swaps() const
{
return BaseCountedInstance::num_swaps_ - start_swaps_;
}

// Returns the number of comparisons on BaseCountedInstance objects since
// construction or the last call to ResetCopiesMovesSwaps().
int comparisons() const
{
return BaseCountedInstance::num_comparisons_ - start_comparisons_;
}

// Resets the base values for moves, copies, comparisons, and swaps to the
// current values, so that subsequent Get*() calls for moves, copies,
// comparisons, and swaps will compare to the situation at the point of this
// call.
void ResetCopiesMovesSwaps()
{
start_moves_ = BaseCountedInstance::num_moves_;
start_copies_ = BaseCountedInstance::num_copies_;
start_swaps_ = BaseCountedInstance::num_swaps_;
start_comparisons_ = BaseCountedInstance::num_comparisons_;
}

private:
int start_instances_;
int start_live_instances_;
int start_moves_;
int start_copies_;
int start_swaps_;
int start_comparisons_;
};

// Copyable, not movable.
class CopyableOnlyInstance : public BaseCountedInstance
{
public:
explicit CopyableOnlyInstance(int x) :
BaseCountedInstance(x)
{
}
CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;

friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs)
{
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move()
{
return false;
}
};

// Copyable and movable.
class CopyableMovableInstance : public BaseCountedInstance
{
public:
explicit CopyableMovableInstance(int x) :
BaseCountedInstance(x)
{
}
CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
default;
CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;

friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs)
{
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move()
{
return true;
}
};

// Only movable, not default-constructible.
class MovableOnlyInstance : public BaseCountedInstance
{
public:
explicit MovableOnlyInstance(int x) :
BaseCountedInstance(x)
{
}
MovableOnlyInstance(MovableOnlyInstance&& other) = default;
MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;

friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs)
{
BaseCountedInstance::SwapImpl(lhs, rhs);
}

static bool supports_move()
{
return true;
}
};

} // namespace test_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
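
A short sketch of how these counters are typically consumed in tests, assuming the matching test_instance_tracker.cc (which defines the static counters) is linked into the test binary; the demo function name is arbitrary.

#include <vector>
#include "absl/container/internal/test_instance_tracker.h"

void InstanceTrackerSketch() {  // arbitrary demo function
  using absl::test_internal::CopyableMovableInstance;
  using absl::test_internal::InstanceTracker;

  InstanceTracker tracker;                  // snapshots the global counters
  std::vector<CopyableMovableInstance> v;
  v.push_back(CopyableMovableInstance(1));  // one temporary, then one move into the vector

  int created = tracker.instances();  // live + moved-from objects since the snapshot
  int moves = tracker.moves();        // move operations since the snapshot
  (void)created;
  (void)moves;
}  // tracker's destructor aborts if tracked instances outlive this scope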

+ 73
- 50
CAPI/cpp/grpc/include/absl/container/internal/tracked.h

@@ -22,62 +22,85 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


// A class that tracks its copies and moves so that it can be queried in tests.
template <class T>
class Tracked {
public:
Tracked() {}
// NOLINTNEXTLINE(runtime/explicit)
Tracked(const T& val) : val_(val) {}
Tracked(const Tracked& that)
: val_(that.val_),
num_moves_(that.num_moves_),
num_copies_(that.num_copies_) {
++(*num_copies_);
}
Tracked(Tracked&& that)
: val_(std::move(that.val_)),
num_moves_(std::move(that.num_moves_)),
num_copies_(std::move(that.num_copies_)) {
++(*num_moves_);
}
Tracked& operator=(const Tracked& that) {
val_ = that.val_;
num_moves_ = that.num_moves_;
num_copies_ = that.num_copies_;
++(*num_copies_);
}
Tracked& operator=(Tracked&& that) {
val_ = std::move(that.val_);
num_moves_ = std::move(that.num_moves_);
num_copies_ = std::move(that.num_copies_);
++(*num_moves_);
}
// A class that tracks its copies and moves so that it can be queried in tests.
template<class T>
class Tracked
{
public:
Tracked()
{
}
// NOLINTNEXTLINE(runtime/explicit)
Tracked(const T& val) :
val_(val)
{
}
Tracked(const Tracked& that) :
val_(that.val_),
num_moves_(that.num_moves_),
num_copies_(that.num_copies_)
{
++(*num_copies_);
}
Tracked(Tracked&& that) :
val_(std::move(that.val_)),
num_moves_(std::move(that.num_moves_)),
num_copies_(std::move(that.num_copies_))
{
++(*num_moves_);
}
Tracked& operator=(const Tracked& that)
{
val_ = that.val_;
num_moves_ = that.num_moves_;
num_copies_ = that.num_copies_;
++(*num_copies_);
}
Tracked& operator=(Tracked&& that)
{
val_ = std::move(that.val_);
num_moves_ = std::move(that.num_moves_);
num_copies_ = std::move(that.num_copies_);
++(*num_moves_);
}


const T& val() const { return val_; }
const T& val() const
{
return val_;
}


friend bool operator==(const Tracked& a, const Tracked& b) {
return a.val_ == b.val_;
}
friend bool operator!=(const Tracked& a, const Tracked& b) {
return !(a == b);
}
friend bool operator==(const Tracked& a, const Tracked& b)
{
return a.val_ == b.val_;
}
friend bool operator!=(const Tracked& a, const Tracked& b)
{
return !(a == b);
}


size_t num_copies() { return *num_copies_; }
size_t num_moves() { return *num_moves_; }
size_t num_copies()
{
return *num_copies_;
}
size_t num_moves()
{
return *num_moves_;
}


private:
T val_;
std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
};
private:
T val_;
std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
};


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
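
A minimal sketch of Tracked<T>: all copies and moves of one object share the same counters through shared_ptr, so any object in the chain can report the totals (the demo function name is arbitrary).

#include <cstddef>
#include <utility>
#include "absl/container/internal/tracked.h"

void TrackedSketch() {  // arbitrary demo function
  using absl::container_internal::Tracked;

  Tracked<int> a(5);
  Tracked<int> b = a;              // copy-construct: shared copy counter -> 1
  Tracked<int> c = std::move(a);   // move-construct: shared move counter -> 1

  size_t copies = b.num_copies();  // 1; counters are shared across a, b and c
  size_t moves = c.num_moves();    // 1
  (void)copies;
  (void)moves;
}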

+ 507
- 455
CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h

@@ -24,471 +24,523 @@
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class UnordMap>
class ConstructorTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(ConstructorTest);

TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, BucketCount) {
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHash) {
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

template <typename T>
struct is_std_unordered_map : std::false_type {};

template <typename... T>
struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class UnordMap>
class ConstructorTest : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(ConstructorTest);

TYPED_TEST_P(ConstructorTest, NoArgs)
{
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, BucketCount)
{
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHash)
{
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqual)
{
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc)
{
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

template<typename T>
struct is_std_unordered_map : std::false_type
{
};

template<typename... T>
struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type
{
};


#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
using has_cxx14_std_apis = std::true_type;
#else
using has_cxx14_std_apis = std::false_type;
#endif


template <typename T>
using expect_cxx14_apis =
absl::disjunction<absl::negation<is_std_unordered_map<T>>,
has_cxx14_std_apis>;

template <typename TypeParam>
void BucketCountAllocTest(std::false_type) {}

template <typename TypeParam>
void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void BucketCountHashAllocTest(std::false_type) {}

template <typename TypeParam>
void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
template<typename T>
using expect_cxx14_apis =
absl::disjunction<absl::negation<is_std_unordered_map<T>>, has_cxx14_std_apis>;

template<typename TypeParam>
void BucketCountAllocTest(std::false_type)
{
}

template<typename TypeParam>
void BucketCountAllocTest(std::true_type)
{
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountAlloc)
{
BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void BucketCountHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void BucketCountHashAllocTest(std::true_type)
{
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc)
{
BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}


#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using has_alloc_std_constructors = std::true_type;
#else
using has_alloc_std_constructors = std::false_type;
#endif


template <typename T>
using expect_alloc_constructors =
absl::disjunction<absl::negation<is_std_unordered_map<T>>,
has_alloc_std_constructors>;

template <typename TypeParam>
void AllocTest(std::false_type) {}

template <typename TypeParam>
void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, Alloc) {
AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template <typename TypeParam>
void InputIteratorBucketAllocTest(std::false_type) {}

template <typename TypeParam>
void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void InputIteratorBucketHashAllocTest(std::false_type) {}

template <typename TypeParam>
void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template <typename TypeParam>
void CopyConstructorAllocTest(std::false_type) {}

template <typename TypeParam>
void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on copy constructors.

TYPED_TEST_P(ConstructorTest, MoveConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template <typename TypeParam>
void MoveConstructorAllocTest(std::false_type) {}

template <typename TypeParam>
void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on move constructors.

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template <typename TypeParam>
void InitializerListBucketAllocTest(std::false_type) {}

template <typename TypeParam>
void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void InitializerListBucketHashAllocTest(std::false_type) {}

template <typename TypeParam>
void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, Assignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).

TYPED_TEST_P(ConstructorTest, MoveAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

// We cannot test self move as the standard states that it leaves standard
// containers in an unspecified state (and in practice it causes a memory leak
// according to the heap-checker!).

REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);

} // namespace container_internal
ABSL_NAMESPACE_END
template<typename T>
using expect_alloc_constructors =
absl::disjunction<absl::negation<is_std_unordered_map<T>>, has_alloc_std_constructors>;

template<typename TypeParam>
void AllocTest(std::false_type)
{
}

template<typename TypeParam>
void AllocTest(std::true_type)
{
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(m, ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, Alloc)
{
AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template<typename TypeParam>
void InputIteratorBucketAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InputIteratorBucketAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc)
{
InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void InputIteratorBucketHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InputIteratorBucketHashAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc)
{
InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyConstructor)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(gen());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template<typename TypeParam>
void CopyConstructorAllocTest(std::false_type)
{
}

template<typename TypeParam>
void CopyConstructorAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(gen());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc)
{
CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on copy constructors.

TYPED_TEST_P(ConstructorTest, MoveConstructor)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template<typename TypeParam>
void MoveConstructorAllocTest(std::false_type)
{
}

template<typename TypeParam>
void MoveConstructorAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc)
{
MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on move constructors.

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template<typename TypeParam>
void InitializerListBucketAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InitializerListBucketAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc)
{
InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void InitializerListBucketHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InitializerListBucketHashAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc)
{
InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, Assignment)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).

TYPED_TEST_P(ConstructorTest, MoveAssignment)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOnSelf)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

// We cannot test self move as the standard states that it leaves standard
// containers in an unspecified state (and in practice it causes a memory leak
// according to the heap-checker!).

REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf
);

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
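
A sketch of how a test binary instantiates the typed suite above for a concrete map. The stateful hash, equality and allocator helpers are assumed to come from hash_policy_testing.h (already included by this header); Abseil's own test files such as flat_hash_map_test.cc follow this pattern with several key/value combinations.

#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/internal/unordered_map_constructor_test.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

// Assumed single-entry type list for illustration only.
using MapTypes = ::testing::Types<absl::flat_hash_map<
    std::string, std::string, hash_internal::StatefulTestingHash,
    hash_internal::StatefulTestingEqual,
    hash_internal::Alloc<std::pair<const std::string, std::string>>>>;

INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl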

+ 91
- 83
CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h

@@ -20,98 +20,106 @@
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


template <class UnordMap>
class LookupTest : public ::testing::Test {};
template<class UnordMap>
class LookupTest : public ::testing::Test
{
};


TYPED_TEST_SUITE_P(LookupTest);


TYPED_TEST_P(LookupTest, At) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
for (const auto& p : values) {
const auto& val = m.at(p.first);
EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
}
}
TYPED_TEST_P(LookupTest, At)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
for (const auto& p : values)
{
const auto& val = m.at(p.first);
EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
}
}


TYPED_TEST_P(LookupTest, OperatorBracket) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values) {
auto& val = m[p.first];
EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
val = p.second;
}
for (const auto& p : values)
EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
}
TYPED_TEST_P(LookupTest, OperatorBracket)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
{
auto& val = m[p.first];
EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
val = p.second;
}
for (const auto& p : values)
EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
}


TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values)
EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
}
TYPED_TEST_P(LookupTest, Count)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values)
EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
}


TYPED_TEST_P(LookupTest, Find) {
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_TRUE(m.end() == m.find(p.first))
<< ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values) {
auto it = m.find(p.first);
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
}
}
TYPED_TEST_P(LookupTest, Find)
{
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
EXPECT_TRUE(m.end() == m.find(p.first))
<< ::testing::PrintToString(p.first);
m.insert(values.begin(), values.end());
for (const auto& p : values)
{
auto it = m.find(p.first);
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
}
}


TYPED_TEST_P(LookupTest, EqualRange) {
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values) {
auto r = m.equal_range(p.first);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& p : values) {
auto r = m.equal_range(p.first);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
}
}
TYPED_TEST_P(LookupTest, EqualRange)
{
using std::get;
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& p : values)
{
auto r = m.equal_range(p.first);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& p : values)
{
auto r = m.equal_range(p.first);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
}
}


REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find,
EqualRange);
REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, EqualRange);


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_

+ 57
- 54
CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h

@@ -20,68 +20,71 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


template <class UnordMap>
class MembersTest : public ::testing::Test {};
template<class UnordMap>
class MembersTest : public ::testing::Test
{
};


TYPED_TEST_SUITE_P(MembersTest);


template <typename T>
void UseType() {}
template<typename T>
void UseType()
{
}


TYPED_TEST_P(MembersTest, Typedefs) {
EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
typename TypeParam::mapped_type>,
typename TypeParam::value_type>()));
EXPECT_TRUE((absl::conjunction<
absl::negation<std::is_signed<typename TypeParam::size_type>>,
std::is_integral<typename TypeParam::size_type>>()));
EXPECT_TRUE((absl::conjunction<
std::is_signed<typename TypeParam::difference_type>,
std::is_integral<typename TypeParam::difference_type>>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::hasher&>()(
std::declval<const typename TypeParam::key_type&>())),
size_t>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::key_equal&>()(
std::declval<const typename TypeParam::key_type&>(),
std::declval<const typename TypeParam::key_type&>())),
bool>()));
EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
typename TypeParam::value_type>()));
EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
typename TypeParam::reference>()));
EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
typename TypeParam::const_reference>()));
EXPECT_TRUE((std::is_same<typename std::allocator_traits<
typename TypeParam::allocator_type>::pointer,
typename TypeParam::pointer>()));
EXPECT_TRUE(
(std::is_same<typename std::allocator_traits<
typename TypeParam::allocator_type>::const_pointer,
typename TypeParam::const_pointer>()));
}
TYPED_TEST_P(MembersTest, Typedefs)
{
EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type, typename TypeParam::mapped_type>, typename TypeParam::value_type>()));
EXPECT_TRUE((absl::conjunction<
absl::negation<std::is_signed<typename TypeParam::size_type>>,
std::is_integral<typename TypeParam::size_type>>()));
EXPECT_TRUE((absl::conjunction<
std::is_signed<typename TypeParam::difference_type>,
std::is_integral<typename TypeParam::difference_type>>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::hasher&>()(
std::declval<const typename TypeParam::key_type&>()
)),
size_t>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::key_equal&>()(
std::declval<const typename TypeParam::key_type&>(),
std::declval<const typename TypeParam::key_type&>()
)),
bool>()));
EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type, typename TypeParam::value_type>()));
EXPECT_TRUE((std::is_same<typename TypeParam::value_type&, typename TypeParam::reference>()));
EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&, typename TypeParam::const_reference>()));
EXPECT_TRUE((std::is_same<typename std::allocator_traits<typename TypeParam::allocator_type>::pointer, typename TypeParam::pointer>()));
EXPECT_TRUE(
(std::is_same<typename std::allocator_traits<typename TypeParam::allocator_type>::const_pointer, typename TypeParam::const_pointer>())
);
}


TYPED_TEST_P(MembersTest, SimpleFunctions) {
EXPECT_GT(TypeParam().max_size(), 0);
}
TYPED_TEST_P(MembersTest, SimpleFunctions)
{
EXPECT_GT(TypeParam().max_size(), 0);
}


TYPED_TEST_P(MembersTest, BeginEnd) {
TypeParam t = {typename TypeParam::value_type{}};
EXPECT_EQ(t.begin(), t.cbegin());
EXPECT_EQ(t.end(), t.cend());
EXPECT_NE(t.begin(), t.end());
EXPECT_NE(t.cbegin(), t.cend());
}
TYPED_TEST_P(MembersTest, BeginEnd)
{
TypeParam t = {typename TypeParam::value_type{}};
EXPECT_EQ(t.begin(), t.cbegin());
EXPECT_EQ(t.end(), t.cend());
EXPECT_NE(t.begin(), t.end());
EXPECT_NE(t.cbegin(), t.cend());
}


REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_

+ 326
- 308
CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h

@@ -22,331 +22,349 @@
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class UnordMap>
class ModifiersTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(ModifiersTest);

TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}

TYPED_TEST_P(ModifiersTest, Insert) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.insert(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, InsertHint) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.insert(it, val2);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, InsertRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
T val2 = {val.first, hash_internal::Generator<V>()()};
m.insert(val2);
EXPECT_EQ(m.bucket_count(), original_capacity);
}

TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class UnordMap>
class ModifiersTest : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(ModifiersTest);

TYPED_TEST_P(ModifiersTest, Clear)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}

TYPED_TEST_P(ModifiersTest, Insert)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.insert(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, InsertHint)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.insert(it, val2);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, InsertRange)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ModifiersTest, InsertWithinCapacity)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
T val2 = {val.first, hash_internal::Generator<V>()()};
m.insert(val2);
EXPECT_EQ(m.bucket_count(), original_capacity);
}

TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity)
{
#if !defined(__GLIBCXX__)
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> base_values;
std::generate_n(std::back_inserter(base_values), 10,
hash_internal::Generator<T>());
std::vector<T> values;
while (values.size() != 100) {
std::copy_n(base_values.begin(), 10, std::back_inserter(values));
}
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(values.begin(), values.end());
EXPECT_EQ(m.bucket_count(), original_capacity);
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> base_values;
std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator<T>());
std::vector<T> values;
while (values.size() != 100)
{
std::copy_n(base_values.begin(), 10, std::back_inserter(values));
}
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(values.begin(), values.end());
EXPECT_EQ(m.bucket_count(), original_capacity);
#endif
}
}


TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
TYPED_TEST_P(ModifiersTest, InsertOrAssign)
{
#ifdef UNORDERED_MAP_CXX17
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto p = m.insert_or_assign(k, val);
EXPECT_TRUE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val, get<1>(*p.first));
V val2 = hash_internal::Generator<V>()();
p = m.insert_or_assign(k, val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val2, get<1>(*p.first));
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto p = m.insert_or_assign(k, val);
EXPECT_TRUE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val, get<1>(*p.first));
V val2 = hash_internal::Generator<V>()();
p = m.insert_or_assign(k, val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(k, get<0>(*p.first));
EXPECT_EQ(val2, get<1>(*p.first));
#endif
}
}


TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
TYPED_TEST_P(ModifiersTest, InsertOrAssignHint)
{
#ifdef UNORDERED_MAP_CXX17
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto it = m.insert_or_assign(m.end(), k, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val, get<1>(*it));
V val2 = hash_internal::Generator<V>()();
it = m.insert_or_assign(it, k, val2);
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val2, get<1>(*it));
using std::get;
using K = typename TypeParam::key_type;
using V = typename TypeParam::mapped_type;
K k = hash_internal::Generator<K>()();
V val = hash_internal::Generator<V>()();
TypeParam m;
auto it = m.insert_or_assign(m.end(), k, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val, get<1>(*it));
V val2 = hash_internal::Generator<V>()();
it = m.insert_or_assign(it, k, val2);
EXPECT_EQ(k, get<0>(*it));
EXPECT_EQ(val2, get<1>(*it));
#endif
}

TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.emplace(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, EmplaceHint) {
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.emplace_hint(it, val2);
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, TryEmplace) {
}

TYPED_TEST_P(ModifiersTest, Emplace)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.emplace(val2);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, EmplaceHint)
{
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.emplace_hint(it, val2);
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, TryEmplace)
{
#ifdef UNORDERED_MAP_CXX17
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.try_emplace(val.first, val.second);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.try_emplace(val2.first, val2.second);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.try_emplace(val.first, val.second);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.try_emplace(val2.first, val2.second);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
#endif
}
}


TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
TYPED_TEST_P(ModifiersTest, TryEmplaceHint)
{
#ifdef UNORDERED_MAP_CXX17
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.try_emplace(m.end(), val.first, val.second);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.try_emplace(it, val2.first, val2.second);
EXPECT_EQ(val, *it);
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.try_emplace(m.end(), val.first, val.second);
EXPECT_EQ(val, *it);
T val2 = {val.first, hash_internal::Generator<V>()()};
it = m.try_emplace(it, val2.first, val2.second);
EXPECT_EQ(val, *it);
#endif
}

template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
struct EraseFirst {
template <class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))> {
return m->erase(m->begin());
}
template <class Map>
typename Map::iterator operator()(Map* m, ...) const {
auto it = m->begin();
m->erase(it++);
return it;
}
};

TYPED_TEST_P(ModifiersTest, Erase) {
using T = hash_internal::GeneratedType<TypeParam>;
using std::get;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto& first = *m.begin();
std::vector<T> values2;
for (const auto& val : values)
if (get<0>(val) != get<0>(first)) values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
values2.end()));
}

TYPED_TEST_P(ModifiersTest, EraseRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}

TYPED_TEST_P(ModifiersTest, EraseKey) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0].first));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
values.end()));
}

TYPED_TEST_P(ModifiersTest, Swap) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
}

// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.

REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, InsertWithinCapacity,
InsertRangeWithinCapacity, InsertOrAssign,
InsertOrAssignHint, Emplace, EmplaceHint,
TryEmplace, TryEmplaceHint, Erase, EraseRange,
EraseKey, Swap);

template <typename Type>
struct is_unique_ptr : std::false_type {};

template <typename Type>
struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};

template <class UnordMap>
class UniquePtrModifiersTest : public ::testing::Test {
protected:
UniquePtrModifiersTest() {
static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
"UniquePtrModifiersTyest may only be called with a "
"std::unique_ptr value type.");
}
};

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);

TYPED_TEST_SUITE_P(UniquePtrModifiersTest);

// Test that we do not move from rvalue arguments if an insertion does not
// happen.
TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
}

template<class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
struct EraseFirst
{
template<class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))>
{
return m->erase(m->begin());
}
template<class Map>
typename Map::iterator operator()(Map* m, ...) const
{
auto it = m->begin();
m->erase(it++);
return it;
}
};

TYPED_TEST_P(ModifiersTest, Erase)
{
using T = hash_internal::GeneratedType<TypeParam>;
using std::get;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto& first = *m.begin();
std::vector<T> values2;
for (const auto& val : values)
if (get<0>(val) != get<0>(first))
values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end()));
}

TYPED_TEST_P(ModifiersTest, EraseRange)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}

TYPED_TEST_P(ModifiersTest, EraseKey)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0].first));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end()));
}

TYPED_TEST_P(ModifiersTest, Swap)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
}

// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.

REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, InsertOrAssign, InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace, TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);

template<typename Type>
struct is_unique_ptr : std::false_type
{
};

template<typename Type>
struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type
{
};

template<class UnordMap>
class UniquePtrModifiersTest : public ::testing::Test
{
protected:
UniquePtrModifiersTest()
{
static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value, "UniquePtrModifiersTest may only be called with a "
"std::unique_ptr value type.");
}
};

GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);

TYPED_TEST_SUITE_P(UniquePtrModifiersTest);

// Test that we do not move from rvalue arguments if an insertion does not
// happen.
TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace)
{
#ifdef UNORDERED_MAP_CXX17
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.try_emplace(val.first, std::move(val.second));
EXPECT_TRUE(p.second);
// A moved from std::unique_ptr is guaranteed to be nullptr.
EXPECT_EQ(val.second, nullptr);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.try_emplace(val2.first, std::move(val2.second));
EXPECT_FALSE(p.second);
EXPECT_NE(val2.second, nullptr);
using T = hash_internal::GeneratedType<TypeParam>;
using V = typename TypeParam::mapped_type;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.try_emplace(val.first, std::move(val.second));
EXPECT_TRUE(p.second);
// A moved from std::unique_ptr is guaranteed to be nullptr.
EXPECT_EQ(val.second, nullptr);
T val2 = {val.first, hash_internal::Generator<V>()()};
p = m.try_emplace(val2.first, std::move(val2.second));
EXPECT_FALSE(p.second);
EXPECT_NE(val2.second, nullptr);
#endif
}
}


REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
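
The suites registered above are header-only definitions; a consuming test .cc instantiates them for concrete map types. A minimal sketch of such an instantiation, assuming a plain std::unordered_map is sufficient for the modifiers suite — the prefix and type list below are illustrative and not part of this commit (Abseil's own test files additionally use stateful test hashers, equality functors, and allocators):

#include <unordered_map>
#include "gtest/gtest.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {

// One concrete map type per entry; every TYPED_TEST_P above (Clear, Insert,
// Erase, Swap, ...) runs once for each type in the list.
using MapTypes = ::testing::Types<std::unordered_map<int, int>>;

// The prefix "StdUnorderedMap" only disambiguates this instantiation from
// instantiations of the same suite elsewhere.
INSTANTIATE_TYPED_TEST_SUITE_P(StdUnorderedMap, ModifiersTest, MapTypes);

// UniquePtrModifiersTest additionally requires a std::unique_ptr mapped type
// (see the static_assert above), so it would need its own type list.

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl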

+ 511
- 456
CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h

@@ -25,472 +25,527 @@
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class UnordMap>
class ConstructorTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(ConstructorTest);

TYPED_TEST_P(ConstructorTest, NoArgs) {
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, BucketCount) {
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHash) {
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);

const auto& cm = m;
EXPECT_EQ(cm.hash_function(), hasher);
EXPECT_EQ(cm.key_eq(), equal);
EXPECT_EQ(cm.get_allocator(), alloc);
EXPECT_TRUE(cm.empty());
EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
EXPECT_GE(cm.bucket_count(), 123);
}

template <typename T>
struct is_std_unordered_set : std::false_type {};

template <typename... T>
struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class UnordMap>
class ConstructorTest : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(ConstructorTest);

TYPED_TEST_P(ConstructorTest, NoArgs)
{
TypeParam m;
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, BucketCount)
{
TypeParam m(123);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHash)
{
using H = typename TypeParam::hasher;
H hasher;
TypeParam m(123, hasher);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqual)
{
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
H hasher;
E equal;
TypeParam m(123, hasher, equal);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc)
{
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);

const auto& cm = m;
EXPECT_EQ(cm.hash_function(), hasher);
EXPECT_EQ(cm.key_eq(), equal);
EXPECT_EQ(cm.get_allocator(), alloc);
EXPECT_TRUE(cm.empty());
EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
EXPECT_GE(cm.bucket_count(), 123);
}

template<typename T>
struct is_std_unordered_set : std::false_type
{
};

template<typename... T>
struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type
{
};


#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
using has_cxx14_std_apis = std::true_type;
#else
using has_cxx14_std_apis = std::false_type;
#endif


template <typename T>
using expect_cxx14_apis =
absl::disjunction<absl::negation<is_std_unordered_set<T>>,
has_cxx14_std_apis>;

template <typename TypeParam>
void BucketCountAllocTest(std::false_type) {}

template <typename TypeParam>
void BucketCountAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void BucketCountHashAllocTest(std::false_type) {}

template <typename TypeParam>
void BucketCountHashAllocTest(std::true_type) {
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}
template<typename T>
using expect_cxx14_apis =
absl::disjunction<absl::negation<is_std_unordered_set<T>>, has_cxx14_std_apis>;

template<typename TypeParam>
void BucketCountAllocTest(std::false_type)
{
}

template<typename TypeParam>
void BucketCountAllocTest(std::true_type)
{
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountAlloc)
{
BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void BucketCountHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void BucketCountHashAllocTest(std::true_type)
{
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
TypeParam m(123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc)
{
BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}


#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
using has_alloc_std_constructors = std::true_type;
#else
using has_alloc_std_constructors = std::false_type;
#endif


template <typename T>
using expect_alloc_constructors =
absl::disjunction<absl::negation<is_std_unordered_set<T>>,
has_alloc_std_constructors>;

template <typename TypeParam>
void AllocTest(std::false_type) {}

template <typename TypeParam>
void AllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, Alloc) {
AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template <typename TypeParam>
void InputIteratorBucketAllocTest(std::false_type) {}

template <typename TypeParam>
void InputIteratorBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void InputIteratorBucketHashAllocTest(std::false_type) {}

template <typename TypeParam>
void InputIteratorBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
}

template <typename TypeParam>
void CopyConstructorAllocTest(std::false_type) {}

template <typename TypeParam>
void CopyConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on copy constructors.

TYPED_TEST_P(ConstructorTest, MoveConstructor) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template <typename TypeParam>
void MoveConstructorAllocTest(std::false_type) {}

template <typename TypeParam>
void MoveConstructorAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on move constructors.

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template <typename TypeParam>
void InitializerListBucketAllocTest(std::false_type) {}

template <typename TypeParam>
void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template <typename TypeParam>
void InitializerListBucketHashAllocTest(std::false_type) {}

template <typename TypeParam>
void InitializerListBucketHashAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).

TYPED_TEST_P(ConstructorTest, MoveAssignment) {
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign.
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
MoveAssignmentOverwritesExisting,
AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);

} // namespace container_internal
ABSL_NAMESPACE_END
template<typename T>
using expect_alloc_constructors =
absl::disjunction<absl::negation<is_std_unordered_set<T>>, has_alloc_std_constructors>;

template<typename TypeParam>
void AllocTest(std::false_type)
{
}

template<typename TypeParam>
void AllocTest(std::true_type)
{
using A = typename TypeParam::allocator_type;
A alloc(0);
TypeParam m(alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_TRUE(m.empty());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
}

TYPED_TEST_P(ConstructorTest, Alloc)
{
AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template<typename TypeParam>
void InputIteratorBucketAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InputIteratorBucketAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc)
{
InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void InputIteratorBucketHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InputIteratorBucketHashAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
std::vector<T> values;
for (size_t i = 0; i != 10; ++i)
values.push_back(hash_internal::Generator<T>()());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc)
{
InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyConstructor)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(hash_internal::Generator<T>()());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
}

template<typename TypeParam>
void CopyConstructorAllocTest(std::false_type)
{
}

template<typename TypeParam>
void CopyConstructorAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(hash_internal::Generator<T>()());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc)
{
CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on copy constructors.

TYPED_TEST_P(ConstructorTest, MoveConstructor)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

template<typename TypeParam>
void MoveConstructorAllocTest(std::false_type)
{
}

template<typename TypeParam>
void MoveConstructorAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(123, hasher, equal, alloc);
for (size_t i = 0; i != 10; ++i)
m.insert(hash_internal::Generator<T>()());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_NE(m.get_allocator(), n.get_allocator());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc)
{
MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
}

// TODO(alkis): Test non-propagating allocators on move constructors.

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
TypeParam m(values, 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

template<typename TypeParam>
void InitializerListBucketAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InitializerListBucketAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc)
{
InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

template<typename TypeParam>
void InitializerListBucketHashAllocTest(std::false_type)
{
}

template<typename TypeParam>
void InitializerListBucketHashAllocTest(std::true_type)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_GE(m.bucket_count(), 123);
}

TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc)
{
InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
}

TYPED_TEST_P(ConstructorTest, CopyAssignment)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
// (it depends on traits).

TYPED_TEST_P(ConstructorTest, MoveAssignment)
{
using T = hash_internal::GeneratedType<TypeParam>;
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
using A = typename TypeParam::allocator_type;
H hasher;
E equal;
A alloc(0);
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
n = std::move(t);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
n = std::move(t);
EXPECT_EQ(m, n);
}

TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ConstructorTest, AssignmentOnSelf)
{
using T = hash_internal::GeneratedType<TypeParam>;
hash_internal::Generator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign.
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf
);

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
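The constructor suite above is only registered, not instantiated; a consumer still has to bind it to concrete container types through GoogleTest's typed-test machinery. Below is a minimal, self-contained sketch of that mechanism; the suite, test, and type names are illustrative and not taken from this commit.

#include <string>
#include <unordered_set>
#include "gtest/gtest.h"

// Hypothetical suite demonstrating the REGISTER/INSTANTIATE pattern that the
// Abseil test headers in this diff rely on.
template <class Set>
class SketchConstructorTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(SketchConstructorTest);

TYPED_TEST_P(SketchConstructorTest, DefaultConstructedIsEmpty) {
  TypeParam s;  // TypeParam is the concrete set type under test
  EXPECT_TRUE(s.empty());
}

REGISTER_TYPED_TEST_SUITE_P(SketchConstructorTest, DefaultConstructedIsEmpty);

using SketchSetTypes =
    ::testing::Types<std::unordered_set<int>, std::unordered_set<std::string>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Sketch, SketchConstructorTest, SketchSetTypes);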

+ 62 - 59  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h

@@ -20,72 +20,75 @@
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


template <class UnordSet>
class LookupTest : public ::testing::Test {};
template<class UnordSet>
class LookupTest : public ::testing::Test
{
};


TYPED_TEST_SUITE_P(LookupTest);


TYPED_TEST_P(LookupTest, Count) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values)
EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
}
TYPED_TEST_P(LookupTest, Count)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values)
EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
}


TYPED_TEST_P(LookupTest, Find) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values) {
typename TypeParam::iterator it = m.find(v);
static_assert(std::is_same<const typename TypeParam::value_type&,
decltype(*it)>::value,
"");
static_assert(std::is_same<const typename TypeParam::value_type*,
decltype(it.operator->())>::value,
"");
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
}
}
TYPED_TEST_P(LookupTest, Find)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
m.insert(values.begin(), values.end());
for (const auto& v : values)
{
typename TypeParam::iterator it = m.find(v);
static_assert(std::is_same<const typename TypeParam::value_type&, decltype(*it)>::value, "");
static_assert(std::is_same<const typename TypeParam::value_type*, decltype(it.operator->())>::value, "");
EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
}
}


TYPED_TEST_P(LookupTest, EqualRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values) {
auto r = m.equal_range(v);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& v : values) {
auto r = m.equal_range(v);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(v, *r.first);
}
}
TYPED_TEST_P(LookupTest, EqualRange)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
for (const auto& v : values)
{
auto r = m.equal_range(v);
ASSERT_EQ(0, std::distance(r.first, r.second));
}
m.insert(values.begin(), values.end());
for (const auto& v : values)
{
auto r = m.equal_range(v);
ASSERT_EQ(1, std::distance(r.first, r.second));
EXPECT_EQ(v, *r.first);
}
}


REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange);


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_

+ 57 - 53  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h

@@ -20,67 +20,71 @@
#include "gtest/gtest.h" #include "gtest/gtest.h"
#include "absl/meta/type_traits.h" #include "absl/meta/type_traits.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{


template <class UnordSet>
class MembersTest : public ::testing::Test {};
template<class UnordSet>
class MembersTest : public ::testing::Test
{
};


TYPED_TEST_SUITE_P(MembersTest);


template <typename T>
void UseType() {}
template<typename T>
void UseType()
{
}


TYPED_TEST_P(MembersTest, Typedefs) {
EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
typename TypeParam::value_type>()));
EXPECT_TRUE((absl::conjunction<
absl::negation<std::is_signed<typename TypeParam::size_type>>,
std::is_integral<typename TypeParam::size_type>>()));
EXPECT_TRUE((absl::conjunction<
std::is_signed<typename TypeParam::difference_type>,
std::is_integral<typename TypeParam::difference_type>>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::hasher&>()(
std::declval<const typename TypeParam::key_type&>())),
size_t>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::key_equal&>()(
std::declval<const typename TypeParam::key_type&>(),
std::declval<const typename TypeParam::key_type&>())),
bool>()));
EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
typename TypeParam::value_type>()));
EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
typename TypeParam::reference>()));
EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
typename TypeParam::const_reference>()));
EXPECT_TRUE((std::is_same<typename std::allocator_traits<
typename TypeParam::allocator_type>::pointer,
typename TypeParam::pointer>()));
EXPECT_TRUE(
(std::is_same<typename std::allocator_traits<
typename TypeParam::allocator_type>::const_pointer,
typename TypeParam::const_pointer>()));
}
TYPED_TEST_P(MembersTest, Typedefs)
{
EXPECT_TRUE((std::is_same<typename TypeParam::key_type, typename TypeParam::value_type>()));
EXPECT_TRUE((absl::conjunction<
absl::negation<std::is_signed<typename TypeParam::size_type>>,
std::is_integral<typename TypeParam::size_type>>()));
EXPECT_TRUE((absl::conjunction<
std::is_signed<typename TypeParam::difference_type>,
std::is_integral<typename TypeParam::difference_type>>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::hasher&>()(
std::declval<const typename TypeParam::key_type&>()
)),
size_t>()));
EXPECT_TRUE((std::is_convertible<
decltype(std::declval<const typename TypeParam::key_equal&>()(
std::declval<const typename TypeParam::key_type&>(),
std::declval<const typename TypeParam::key_type&>()
)),
bool>()));
EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type, typename TypeParam::value_type>()));
EXPECT_TRUE((std::is_same<typename TypeParam::value_type&, typename TypeParam::reference>()));
EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&, typename TypeParam::const_reference>()));
EXPECT_TRUE((std::is_same<typename std::allocator_traits<typename TypeParam::allocator_type>::pointer, typename TypeParam::pointer>()));
EXPECT_TRUE(
(std::is_same<typename std::allocator_traits<typename TypeParam::allocator_type>::const_pointer, typename TypeParam::const_pointer>())
);
}


TYPED_TEST_P(MembersTest, SimpleFunctions) {
EXPECT_GT(TypeParam().max_size(), 0);
}
TYPED_TEST_P(MembersTest, SimpleFunctions)
{
EXPECT_GT(TypeParam().max_size(), 0);
}


TYPED_TEST_P(MembersTest, BeginEnd) {
TypeParam t = {typename TypeParam::value_type{}};
EXPECT_EQ(t.begin(), t.cbegin());
EXPECT_EQ(t.end(), t.cend());
EXPECT_NE(t.begin(), t.end());
EXPECT_NE(t.cbegin(), t.cend());
}
TYPED_TEST_P(MembersTest, BeginEnd)
{
TypeParam t = {typename TypeParam::value_type{}};
EXPECT_EQ(t.begin(), t.cbegin());
EXPECT_EQ(t.end(), t.cend());
EXPECT_NE(t.begin(), t.end());
EXPECT_NE(t.cbegin(), t.cend());
}


REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);


} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
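MembersTest::Typedefs above verifies the container's member typedefs; the same relationships can be checked in isolation with static_asserts. Here is a standalone sketch against std::unordered_set<int>, a stand-in type that is not part of this commit.

#include <memory>
#include <type_traits>
#include <unordered_set>

using Set = std::unordered_set<int>;
// For a set, the key type and the value type are the same type.
static_assert(std::is_same<Set::key_type, Set::value_type>::value, "");
// size_type is an unsigned integral type.
static_assert(std::is_integral<Set::size_type>::value &&
                  !std::is_signed<Set::size_type>::value,
              "");
// pointer comes from allocator_traits of the allocator_type.
static_assert(std::is_same<std::allocator_traits<Set::allocator_type>::pointer,
                           Set::pointer>::value,
              "");

int main() { return 0; }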

+ 204 - 194  CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h

@@ -20,202 +20,212 @@
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hash_policy_testing.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

template <class UnordSet>
class ModifiersTest : public ::testing::Test {};

TYPED_TEST_SUITE_P(ModifiersTest);

TYPED_TEST_P(ModifiersTest, Clear) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}

TYPED_TEST_P(ModifiersTest, Insert) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.insert(val);
EXPECT_FALSE(p.second);
}

TYPED_TEST_P(ModifiersTest, InsertHint) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
it = m.insert(it, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, InsertRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
}

TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{

template<class UnordSet>
class ModifiersTest : public ::testing::Test
{
};

TYPED_TEST_SUITE_P(ModifiersTest);

TYPED_TEST_P(ModifiersTest, Clear)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
m.clear();
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(m.empty());
}

TYPED_TEST_P(ModifiersTest, Insert)
{
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto p = m.insert(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.insert(val);
EXPECT_FALSE(p.second);
}

TYPED_TEST_P(ModifiersTest, InsertHint)
{
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
auto it = m.insert(m.end(), val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
it = m.insert(it, val);
EXPECT_TRUE(it != m.end());
EXPECT_EQ(val, *it);
}

TYPED_TEST_P(ModifiersTest, InsertRange)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m;
m.insert(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}

TYPED_TEST_P(ModifiersTest, InsertWithinCapacity)
{
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
m.insert(val);
EXPECT_EQ(m.bucket_count(), original_capacity);
}

TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity)
{
#if !defined(__GLIBCXX__)
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> base_values;
std::generate_n(std::back_inserter(base_values), 10,
hash_internal::Generator<T>());
std::vector<T> values;
while (values.size() != 100) {
values.insert(values.end(), base_values.begin(), base_values.end());
}
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(values.begin(), values.end());
EXPECT_EQ(m.bucket_count(), original_capacity);
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> base_values;
std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator<T>());
std::vector<T> values;
while (values.size() != 100)
{
values.insert(values.end(), base_values.begin(), base_values.end());
}
TypeParam m;
m.reserve(10);
const size_t original_capacity = m.bucket_count();
m.insert(values.begin(), values.end());
EXPECT_EQ(m.bucket_count(), original_capacity);
#endif
}

TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.emplace(val);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, EmplaceHint) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
it = m.emplace_hint(it, val);
EXPECT_EQ(val, *it);
}

template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
struct EraseFirst {
template <class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))> {
return m->erase(m->begin());
}
template <class Map>
typename Map::iterator operator()(Map* m, ...) const {
auto it = m->begin();
m->erase(it++);
return it;
}
};

TYPED_TEST_P(ModifiersTest, Erase) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
std::vector<T> values2;
for (const auto& val : values)
if (val != *m.begin()) values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
values2.end()));
}

TYPED_TEST_P(ModifiersTest, EraseRange) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}

TYPED_TEST_P(ModifiersTest, EraseKey) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0]));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
values.end()));
}

TYPED_TEST_P(ModifiersTest, Swap) {
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
}

// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.

REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
InsertRange, InsertWithinCapacity,
InsertRangeWithinCapacity, Emplace, EmplaceHint,
Erase, EraseRange, EraseKey, Swap);

} // namespace container_internal
ABSL_NAMESPACE_END
}

TYPED_TEST_P(ModifiersTest, Emplace)
{
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto p = m.emplace(val);
EXPECT_TRUE(p.second);
EXPECT_EQ(val, *p.first);
p = m.emplace(val);
EXPECT_FALSE(p.second);
EXPECT_EQ(val, *p.first);
}

TYPED_TEST_P(ModifiersTest, EmplaceHint)
{
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
TypeParam m;
// TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
// with test traits/policy.
auto it = m.emplace_hint(m.end(), val);
EXPECT_EQ(val, *it);
it = m.emplace_hint(it, val);
EXPECT_EQ(val, *it);
}

template<class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

// In openmap we chose not to return the iterator from erase because that's
// more expensive. As such we adapt erase to return an iterator here.
struct EraseFirst
{
template<class Map>
auto operator()(Map* m, int) const
-> IfNotVoid<decltype(m->erase(m->begin()))>
{
return m->erase(m->begin());
}
template<class Map>
typename Map::iterator operator()(Map* m, ...) const
{
auto it = m->begin();
m->erase(it++);
return it;
}
};

TYPED_TEST_P(ModifiersTest, Erase)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
std::vector<T> values2;
for (const auto& val : values)
if (val != *m.begin())
values2.push_back(val);
auto it = EraseFirst()(&m, 0);
ASSERT_TRUE(it != m.end());
EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end()));
}

TYPED_TEST_P(ModifiersTest, EraseRange)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
auto it = m.erase(m.begin(), m.end());
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
EXPECT_TRUE(it == m.end());
}

TYPED_TEST_P(ModifiersTest, EraseKey)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10, hash_internal::Generator<T>());
TypeParam m(values.begin(), values.end());
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
EXPECT_EQ(1, m.erase(values[0]));
EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end()));
}

TYPED_TEST_P(ModifiersTest, Swap)
{
using T = hash_internal::GeneratedType<TypeParam>;
std::vector<T> v1;
std::vector<T> v2;
std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
TypeParam m1(v1.begin(), v1.end());
TypeParam m2(v2.begin(), v2.end());
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
m1.swap(m2);
EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
}

// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.

REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, Emplace, EmplaceHint, Erase, EraseRange, EraseKey, Swap);

} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
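The EraseFirst adapter in the diff above selects between two operator() overloads: the int overload is enabled (via IfNotVoid) only when erase(iterator) returns an iterator, and the ellipsis overload is the lower-ranked fallback for containers whose erase returns void. The following standalone sketch shows that dispatch exercised with std::set; the struct name and main() are illustrative only.

#include <iostream>
#include <set>
#include <type_traits>

template <class V>
using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;

struct EraseFirstSketch {
  // Preferred overload: viable only if erase(iterator) returns an iterator.
  template <class Container>
  auto operator()(Container* c, int) const
      -> IfNotVoid<decltype(c->erase(c->begin()))> {
    return c->erase(c->begin());
  }
  // Fallback for containers whose erase(iterator) returns void.
  template <class Container>
  typename Container::iterator operator()(Container* c, ...) const {
    auto it = c->begin();
    c->erase(it++);  // advance first, then erase the old position
    return it;
  }
};

int main() {
  std::set<int> s = {1, 2, 3};
  auto it = EraseFirstSketch()(&s, 0);  // std::set::erase returns an iterator,
  std::cout << *it << "\n";             // so the int overload wins; prints 2
}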

+ 563 - 551  CAPI/cpp/grpc/include/absl/container/node_hash_map.h (file diff suppressed because it is too large)


+ 464 - 451  CAPI/cpp/grpc/include/absl/container/node_hash_set.h

@@ -44,457 +44,470 @@
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h" #include "absl/memory/memory.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <typename T>
struct NodeHashSetPolicy;
} // namespace container_internal

// -----------------------------------------------------------------------------
// absl::node_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_set<T>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
// `insert()`, provided that the set is provided a compatible heterogeneous
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `node_hash_set` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `node_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Using `absl::node_hash_set` at interface boundaries in dynamically loaded
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
// Example:
//
// // Create a node hash set of three strings
// absl::node_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the node hash set
// ducks.insert("donald");
//
// // Force a rehash of the node hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Alloc = std::allocator<T>>
class node_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
using Base = typename node_hash_set::raw_hash_set;

public:
// Constructors and Assignment Operators
//
// A node_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::node_hash_set<std::string> set1;
//
// * Initializer List constructor
//
// absl::node_hash_set<std::string> set2 =
// {{"huey"}, {"dewey"}, {"louie"}};
//
// * Copy constructor
//
// absl::node_hash_set<std::string> set3(set2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::node_hash_set<std::string> set4;
// set4 = set3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::node_hash_set<std::string> set5(std::move(set4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::node_hash_set<std::string> set6;
// set6 = std::move(set5);
//
// * Range constructor
//
// std::vector<std::string> v = {"a", "b"};
// absl::node_hash_set<std::string> set7(v.begin(), v.end());
node_hash_set() {}
using Base::Base;

// node_hash_set::begin()
//
// Returns an iterator to the beginning of the `node_hash_set`.
using Base::begin;

// node_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `node_hash_set`.
using Base::cbegin;

// node_hash_set::cend()
//
// Returns a const iterator to the end of the `node_hash_set`.
using Base::cend;

// node_hash_set::end()
//
// Returns an iterator to the end of the `node_hash_set`.
using Base::end;

// node_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `node_hash_set`.
//
// NOTE: this member function is particular to `absl::node_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;

// node_hash_set::empty()
//
// Returns whether or not the `node_hash_set` is empty.
using Base::empty;

// node_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `node_hash_set` under current memory constraints. This value can be thought
// of the largest value of `std::distance(begin(), end())` for a
// `node_hash_set<T>`.
using Base::max_size;

// node_hash_set::size()
//
// Returns the number of elements currently within the `node_hash_set`.
using Base::size;

// node_hash_set::clear()
//
// Removes all elements from the `node_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;

// node_hash_set::erase()
//
// Erases elements within the `node_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `node_hash_set`, returning
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
// general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase;

// node_hash_set::insert()
//
// Inserts an element of the specified value into the `node_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `node_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `node_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `node_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `node_hash_set` we guarantee the first match is inserted.
using Base::insert;

// node_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;

// node_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;

// node_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `node_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;

// node_hash_set::merge()
//
// Extracts elements from a given `source` node hash set into this
// `node_hash_set`. If the destination `node_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;

// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
// node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable, and are exchaged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;

// node_hash_set::rehash(count)
//
// Rehashes the `node_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;

// node_hash_set::reserve(count)
//
// Sets the number of slots in the `node_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;

// node_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `node_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;

// node_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `node_hash_set`. note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `node_hash_set`.
using Base::count;

// node_hash_set::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `node_hash_set`.
using Base::equal_range;

// node_hash_set::find()
//
// Finds an element with the passed `key` within the `node_hash_set`.
using Base::find;

// node_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_set`. Note that
// because a node hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `node_hash_set`.
using Base::bucket_count;

// node_hash_set::load_factor()
//
// Returns the current load factor of the `node_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;

// node_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `node_hash_set`. Overloads are
// listed below.
//
// float node_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `node_hash_set`.
//
// void node_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `node_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `node_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;

// node_hash_set::get_allocator()
//
// Returns the allocator function associated with this `node_hash_set`.
using Base::get_allocator;

// node_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `node_hash_set`.
using Base::hash_function;

// node_hash_set::key_eq()
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
};

// erase_if(node_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
typename node_hash_set<T, H, E, A>::size_type erase_if(
node_hash_set<T, H, E, A>& c, Predicate pred) {
return container_internal::EraseIf(pred, &c);
}

namespace container_internal {

template <class T>
struct NodeHashSetPolicy
: absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>> {
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;

template <class Allocator, class... Args>
static T* new_element(Allocator* alloc, Args&&... args) {
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
T* res = absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
absl::allocator_traits<ValueAlloc>::construct(value_alloc, res,
std::forward<Args>(args)...);
return res;
}

template <class Allocator>
static void delete_element(Allocator* alloc, T* elem) {
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
}

template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}

static size_t element_space_used(const T*) { return sizeof(T); }
};
} // namespace container_internal

namespace container_algorithm_internal {

// Specialization of trait in absl/algorithm/container.h
template <class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::node_hash_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};

} // namespace container_algorithm_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace container_internal
{
template<typename T>
struct NodeHashSetPolicy;
} // namespace container_internal

// -----------------------------------------------------------------------------
// absl::node_hash_set
// -----------------------------------------------------------------------------
//
// An `absl::node_hash_set<T>` is an unordered associative container which
// has been optimized for both speed and memory footprint in most common use
// cases. Its interface is similar to that of `std::unordered_set<T>` with the
// following notable differences:
//
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
// `insert()`, provided that the set is provided a compatible heterogeneous
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
//
// By default, `node_hash_set` uses the `absl::Hash` hashing framework.
// All fundamental and Abseil types that support the `absl::Hash` framework have
// a compatible equality operator for comparing insertions into `node_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
// Using `absl::node_hash_set` at interface boundaries in dynamically loaded
// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may
// be randomized across dynamically loaded libraries.
//
// Example:
//
// // Create a node hash set of three strings
// absl::node_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
// // Insert a new element into the node hash set
// ducks.insert("donald");
//
// // Force a rehash of the node hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template<class T, class Hash = absl::container_internal::hash_default_hash<T>, class Eq = absl::container_internal::hash_default_eq<T>, class Alloc = std::allocator<T>>
class node_hash_set : public absl::container_internal::raw_hash_set<absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc>
{
using Base = typename node_hash_set::raw_hash_set;

public:
// Constructors and Assignment Operators
//
// A node_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
//
// // No allocation for the table's elements is made.
// absl::node_hash_set<std::string> set1;
//
// * Initializer List constructor
//
// absl::node_hash_set<std::string> set2 =
// {{"huey"}, {"dewey"}, {"louie"}};
//
// * Copy constructor
//
// absl::node_hash_set<std::string> set3(set2);
//
// * Copy assignment operator
//
// // Hash functor and Comparator are copied as well
// absl::node_hash_set<std::string> set4;
// set4 = set3;
//
// * Move constructor
//
// // Move is guaranteed efficient
// absl::node_hash_set<std::string> set5(std::move(set4));
//
// * Move assignment operator
//
// // May be efficient if allocators are compatible
// absl::node_hash_set<std::string> set6;
// set6 = std::move(set5);
//
// * Range constructor
//
// std::vector<std::string> v = {"a", "b"};
// absl::node_hash_set<std::string> set7(v.begin(), v.end());
node_hash_set()
{
}
using Base::Base;

// node_hash_set::begin()
//
// Returns an iterator to the beginning of the `node_hash_set`.
using Base::begin;

// node_hash_set::cbegin()
//
// Returns a const iterator to the beginning of the `node_hash_set`.
using Base::cbegin;

// node_hash_set::cend()
//
// Returns a const iterator to the end of the `node_hash_set`.
using Base::cend;

// node_hash_set::end()
//
// Returns an iterator to the end of the `node_hash_set`.
using Base::end;

// node_hash_set::capacity()
//
// Returns the number of element slots (assigned, deleted, and empty)
// available within the `node_hash_set`.
//
// NOTE: this member function is particular to `absl::node_hash_set` and is
// not provided in the `std::unordered_set` API.
using Base::capacity;

// node_hash_set::empty()
//
// Returns whether or not the `node_hash_set` is empty.
using Base::empty;

// node_hash_set::max_size()
//
// Returns the largest theoretical possible number of elements within a
// `node_hash_set` under current memory constraints. This value can be thought
// of the largest value of `std::distance(begin(), end())` for a
// `node_hash_set<T>`.
using Base::max_size;

// node_hash_set::size()
//
// Returns the number of elements currently within the `node_hash_set`.
using Base::size;

// node_hash_set::clear()
//
// Removes all elements from the `node_hash_set`. Invalidates any references,
// pointers, or iterators referring to contained elements.
//
// NOTE: this operation may shrink the underlying buffer. To avoid shrinking
// the underlying buffer call `erase(begin(), end())`.
using Base::clear;

// node_hash_set::erase()
//
// Erases elements within the `node_hash_set`. Erasing does not trigger a
// rehash. Overloads are listed below.
//
// void erase(const_iterator pos):
//
// Erases the element at `position` of the `node_hash_set`, returning
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
// general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
// Erases the elements in the open interval [`first`, `last`), returning an
// iterator pointing to `last`.
//
// size_type erase(const key_type& key):
//
// Erases the element with the matching key, if it exists, returning the
// number of elements erased (0 or 1).
using Base::erase;

// node_hash_set::insert()
//
// Inserts an element of the specified value into the `node_hash_set`,
// returning an iterator pointing to the newly inserted element, provided that
// an element with the given key does not already exist. If rehashing occurs
// due to the insertion, all iterators are invalidated. Overloads are listed
// below.
//
// std::pair<iterator,bool> insert(const T& value):
//
// Inserts a value into the `node_hash_set`. Returns a pair consisting of an
// iterator to the inserted element (or to the element that prevented the
// insertion) and a bool denoting whether the insertion took place.
//
// std::pair<iterator,bool> insert(T&& value):
//
// Inserts a moveable value into the `node_hash_set`. Returns a pair
// consisting of an iterator to the inserted element (or to the element that
// prevented the insertion) and a bool denoting whether the insertion took
// place.
//
// iterator insert(const_iterator hint, const T& value):
// iterator insert(const_iterator hint, T&& value):
//
// Inserts a value, using the position of `hint` as a non-binding suggestion
// for where to begin the insertion search. Returns an iterator to the
// inserted element, or to the existing element that prevented the
// insertion.
//
// void insert(InputIterator first, InputIterator last):
//
// Inserts a range of values [`first`, `last`).
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently, for `node_hash_set` we guarantee the
// first match is inserted.
//
// void insert(std::initializer_list<T> ilist):
//
// Inserts the elements within the initializer list `ilist`.
//
// NOTE: Although the STL does not specify which element may be inserted if
// multiple keys compare equivalently within the initializer list, for
// `node_hash_set` we guarantee the first match is inserted.
using Base::insert;

// node_hash_set::emplace()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, provided that no element with the given key
// already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace;

// node_hash_set::emplace_hint()
//
// Inserts an element of the specified value by constructing it in-place
// within the `node_hash_set`, using the position of `hint` as a non-binding
// suggestion for where to begin the insertion search, and only inserts
// provided that no element with the given key already exists.
//
// The element may be constructed even if there already is an element with the
// key in the container, in which case the newly constructed element will be
// destroyed immediately.
//
// If rehashing occurs due to the insertion, all iterators are invalidated.
using Base::emplace_hint;

// node_hash_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
// as a C++17-compatible node handle. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
// node_type extract(const key_type& x):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `node_hash_set`
// does not contain an element with a matching key, this function returns an
// empty node handle.
using Base::extract;

// node_hash_set::merge()
//
// Extracts elements from a given `source` node hash set into this
// `node_hash_set`. If the destination `node_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;

// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
// node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
// `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable, and are exchaged using unqualified calls to
// non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
using Base::swap;

// node_hash_set::rehash(count)
//
// Rehashes the `node_hash_set`, setting the number of slots to be at least
// the passed value. If the new number of slots increases the load factor more
// than the current maximum load factor
// (`count` < `size()` / `max_load_factor()`), then the new number of slots
// will be at least `size()` / `max_load_factor()`.
//
// To force a rehash, pass rehash(0).
//
// NOTE: unlike behavior in `std::unordered_set`, references are also
// invalidated upon a `rehash()`.
using Base::rehash;

// node_hash_set::reserve(count)
//
// Sets the number of slots in the `node_hash_set` to the number needed to
// accommodate at least `count` total elements without exceeding the current
// maximum load factor, and may rehash the container if needed.
using Base::reserve;

// node_hash_set::contains()
//
// Determines whether an element comparing equal to the given `key` exists
// within the `node_hash_set`, returning `true` if so or `false` otherwise.
using Base::contains;

// node_hash_set::count(const Key& key) const
//
// Returns the number of elements comparing equal to the given `key` within
// the `node_hash_set`. note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `node_hash_set`.
using Base::count;

// node_hash_set::equal_range()
//
// Returns a closed range [first, last], defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `node_hash_set`.
using Base::equal_range;

// node_hash_set::find()
//
// Finds an element with the passed `key` within the `node_hash_set`.
using Base::find;

// node_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_set`. Note that
// because a node hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `node_hash_set`.
using Base::bucket_count;

// node_hash_set::load_factor()
//
// Returns the current load factor of the `node_hash_set` (the average number
// of slots occupied with a value within the hash set).
using Base::load_factor;

// node_hash_set::max_load_factor()
//
// Manages the maximum load factor of the `node_hash_set`. Overloads are
// listed below.
//
// float node_hash_set::max_load_factor()
//
// Returns the current maximum load factor of the `node_hash_set`.
//
// void node_hash_set::max_load_factor(float ml)
//
// Sets the maximum load factor of the `node_hash_set` to the passed value.
//
// NOTE: This overload is provided only for API compatibility with the STL;
// `node_hash_set` will ignore any set load factor and manage its rehashing
// internally as an implementation detail.
using Base::max_load_factor;

// node_hash_set::get_allocator()
//
// Returns the allocator function associated with this `node_hash_set`.
using Base::get_allocator;

// node_hash_set::hash_function()
//
// Returns the hashing function used to hash the keys within this
// `node_hash_set`.
using Base::hash_function;

// node_hash_set::key_eq()
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
};

// erase_if(node_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
// Returns the number of erased elements.
template<typename T, typename H, typename E, typename A, typename Predicate>
typename node_hash_set<T, H, E, A>::size_type erase_if(
node_hash_set<T, H, E, A>& c, Predicate pred
)
{
return container_internal::EraseIf(pred, &c);
}

namespace container_internal
{

template<class T>
struct NodeHashSetPolicy : absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>>
{
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;

template<class Allocator, class... Args>
static T* new_element(Allocator* alloc, Args&&... args)
{
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
T* res = absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
absl::allocator_traits<ValueAlloc>::construct(value_alloc, res, std::forward<Args>(args)...);
return res;
}

template<class Allocator>
static void delete_element(Allocator* alloc, T* elem)
{
using ValueAlloc =
typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
ValueAlloc value_alloc(*alloc);
absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
}

template<class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...
))
apply(F&& f, Args&&... args)
{
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...
);
}

static size_t element_space_used(const T*)
{
return sizeof(T);
}
};
} // namespace container_internal

namespace container_algorithm_internal
{

// Specialization of trait in absl/algorithm/container.h
template<class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::node_hash_set<Key, Hash, KeyEqual, Allocator>> : std::true_type
{
};

} // namespace container_algorithm_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_CONTAINER_NODE_HASH_SET_H_
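The comment blocks above document the node_hash_set interface; the snippet below strings the documented calls together, mirroring the "ducks" example from the header and the erase_if free function declared in this file. The main() wrapper and the size-based predicate are illustrative additions.

#include <iostream>
#include <string>
#include "absl/container/node_hash_set.h"

int main() {
  // Create a node hash set of three strings.
  absl::node_hash_set<std::string> ducks = {"huey", "dewey", "louie"};
  ducks.insert("donald");  // insert a new element
  ducks.rehash(0);         // force a rehash
  if (ducks.contains("dewey")) {
    std::cout << "We found dewey!" << std::endl;
  }
  // erase_if removes every element matching the predicate and returns the
  // number of erased elements ("donald" is the only name longer than five).
  auto erased = absl::erase_if(
      ducks, [](const std::string& name) { return name.size() > 5; });
  std::cout << "erased " << erased << " element(s)" << std::endl;
}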

+ 65 - 62  CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h

@@ -46,76 +46,79 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


// FailureSignalHandlerOptions
//
// Struct for holding `absl::InstallFailureSignalHandler()` configuration
// options.
struct FailureSignalHandlerOptions {
// If true, try to symbolize the stacktrace emitted on failure, provided that
// you have initialized a symbolizer for that purpose. (See symbolize.h for
// more information.)
bool symbolize_stacktrace = true;
// FailureSignalHandlerOptions
//
// Struct for holding `absl::InstallFailureSignalHandler()` configuration
// options.
struct FailureSignalHandlerOptions
{
// If true, try to symbolize the stacktrace emitted on failure, provided that
// you have initialized a symbolizer for that purpose. (See symbolize.h for
// more information.)
bool symbolize_stacktrace = true;


// If true, try to run signal handlers on an alternate stack (if supported on
// the given platform). An alternate stack is useful for program crashes due
// to a stack overflow; by running on a alternate stack, the signal handler
// may run even when normal stack space has been exausted. The downside of
// using an alternate stack is that extra memory for the alternate stack needs
// to be pre-allocated.
bool use_alternate_stack = true;
// If true, try to run signal handlers on an alternate stack (if supported on
// the given platform). An alternate stack is useful for program crashes due
// to a stack overflow; by running on a alternate stack, the signal handler
// may run even when normal stack space has been exausted. The downside of
// using an alternate stack is that extra memory for the alternate stack needs
// to be pre-allocated.
bool use_alternate_stack = true;


// If positive, indicates the number of seconds after which the failure signal
// handler is invoked to abort the program. Setting such an alarm is useful in
// cases where the failure signal handler itself may become hung or
// deadlocked.
int alarm_on_failure_secs = 3;
// If positive, indicates the number of seconds after which the failure signal
// handler is invoked to abort the program. Setting such an alarm is useful in
// cases where the failure signal handler itself may become hung or
// deadlocked.
int alarm_on_failure_secs = 3;


// If true, call the previously registered signal handler for the signal that
// was received (if one was registered) after the existing signal handler
// runs. This mechanism can be used to chain signal handlers together.
//
// If false, the signal is raised to the default handler for that signal
// (which normally terminates the program).
//
// IMPORTANT: If true, the chained fatal signal handlers must not try to
// recover from the fatal signal. Instead, they should terminate the program
// via some mechanism, like raising the default handler for the signal, or by
// calling `_exit()`. Note that the failure signal handler may put parts of
// the Abseil library into a state from which they cannot recover.
bool call_previous_handler = false;
// If true, call the previously registered signal handler for the signal that
// was received (if one was registered) after the existing signal handler
// runs. This mechanism can be used to chain signal handlers together.
//
// If false, the signal is raised to the default handler for that signal
// (which normally terminates the program).
//
// IMPORTANT: If true, the chained fatal signal handlers must not try to
// recover from the fatal signal. Instead, they should terminate the program
// via some mechanism, like raising the default handler for the signal, or by
// calling `_exit()`. Note that the failure signal handler may put parts of
// the Abseil library into a state from which they cannot recover.
bool call_previous_handler = false;


// If non-null, indicates a pointer to a callback function that will be called
// upon failure, with a string argument containing failure data. This function
// may be used as a hook to write failure data to a secondary location, such
// as a log file. This function will also be called with null data, as a hint
// to flush any buffered data before the program may be terminated. Consider
// flushing any buffered data in all calls to this function.
//
// Since this function runs within a signal handler, it should be
// async-signal-safe if possible.
// See http://man7.org/linux/man-pages/man7/signal-safety.7.html
void (*writerfn)(const char*) = nullptr;
};
// If non-null, indicates a pointer to a callback function that will be called
// upon failure, with a string argument containing failure data. This function
// may be used as a hook to write failure data to a secondary location, such
// as a log file. This function will also be called with null data, as a hint
// to flush any buffered data before the program may be terminated. Consider
// flushing any buffered data in all calls to this function.
//
// Since this function runs within a signal handler, it should be
// async-signal-safe if possible.
// See http://man7.org/linux/man-pages/man7/signal-safety.7.html
void (*writerfn)(const char*) = nullptr;
};


// InstallFailureSignalHandler()
//
// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`,
// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP` (provided they exist
// on the given platform). The failure signal handler dumps program failure data
// useful for debugging in an unspecified format to stderr. This data may
// include the program counter, a stacktrace, and register information on some
// systems; do not rely on an exact format for the output, as it is subject to
// change.
void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options);
// InstallFailureSignalHandler()
//
// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`,
// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUS`, and `SIGTRAP` (provided they exist
// on the given platform). The failure signal handler dumps program failure data
// useful for debugging in an unspecified format to stderr. This data may
// include the program counter, a stacktrace, and register information on some
// systems; do not rely on an exact format for the output, as it is subject to
// change.
void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options);


namespace debugging_internal {
const char* FailureSignalToString(int signo);
} // namespace debugging_internal
namespace debugging_internal
{
const char* FailureSignalToString(int signo);
} // namespace debugging_internal


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_
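
As a usage sketch (the `main` below is hypothetical and not part of this diff), the options struct is typically filled in once at startup, after the symbolizer has been initialized:

#include "absl/debugging/failure_signal_handler.h"
#include "absl/debugging/symbolize.h"

int main(int argc, char* argv[])
{
    (void)argc;  // Unused in this sketch.
    // Initialize the symbolizer first so the emitted stacktrace can be symbolized.
    absl::InitializeSymbolizer(argv[0]);

    absl::FailureSignalHandlerOptions options;
    options.symbolize_stacktrace = true;
    options.alarm_on_failure_secs = 3;
    absl::InstallFailureSignalHandler(options);

    // ... rest of the program ...
    return 0;
}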

+ 10
- 8
CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h

@@ -17,16 +17,18 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{


// Return whether the byte at *addr is readable, without faulting.
// Saves and restores errno.
bool AddressIsReadable(const void *addr);
// Return whether the byte at *addr is readable, without faulting.
// Saves and restores errno.
bool AddressIsReadable(const void* addr);


} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_
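
A small sketch of the probe above (the wrapper name is hypothetical); the call inspects a single byte without faulting and leaves errno untouched:

#include "absl/debugging/internal/address_is_readable.h"

bool CanReadByte(const void* p)
{
    // True only if *p can be dereferenced without faulting.
    return absl::debugging_internal::AddressIsReadable(p);
}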

+ 11
- 9
CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h

@@ -55,17 +55,19 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{


// Demangle `mangled`. On success, return true and write the
// demangled symbol name to `out`. Otherwise, return false.
// `out` is modified even if demangling is unsuccessful.
bool Demangle(const char *mangled, char *out, int out_size);
// Demangle `mangled`. On success, return true and write the
// demangled symbol name to `out`. Otherwise, return false.
// `out` is modified even if demangling is unsuccessful.
bool Demangle(const char* mangled, char* out, int out_size);


} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_
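
For illustration, a sketch of calling `Demangle()` with a fixed-size output buffer (the helper name is hypothetical):

#include <cstdio>
#include "absl/debugging/internal/demangle.h"

void PrintDemangled(const char* mangled)
{
    char out[1024];
    if (absl::debugging_internal::Demangle(mangled, out, static_cast<int>(sizeof(out))))
    {
        std::printf("%s => %s\n", mangled, out);
    }
    else
    {
        // `out` may have been modified even though demangling failed.
        std::printf("%s (not demangled)\n", mangled);
    }
}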

+ 94
- 87
CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h

@@ -45,93 +45,100 @@
#define ElfW(x) __ElfN(x)
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
private:
// Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
static const int kInvalidBaseSentinel;

public:
// Sentinel: there could never be an elf image at this address.
static constexpr const void *const kInvalidBase =
static_cast<const void*>(&kInvalidBaseSentinel);

// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo {
const char *name; // E.g. "__vdso_getcpu"
const char *version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void *address; // Relocated symbol address.
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
};

// Supports iteration over all dynamic symbols.
class SymbolIterator {
public:
friend class ElfMemImage;
const SymbolInfo *operator->() const;
const SymbolInfo &operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
SymbolIterator(const void *const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void *const image_;
};


explicit ElfMemImage(const void *base);
void Init(const void *base);
bool IsPresent() const { return ehdr_ != nullptr; }
const ElfW(Phdr)* GetPhdr(int index) const;
const ElfW(Sym)* GetDynsym(int index) const;
const ElfW(Versym)* GetVersym(int index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;

SymbolIterator begin() const;
SymbolIterator end() const;

// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out is non-null, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;

// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out is non-null, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;

private:
const ElfW(Ehdr) *ehdr_;
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
const ElfW(Word) *hash_;
const char *dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};

} // namespace debugging_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

// An in-memory ELF image (may not exist on disk).
class ElfMemImage
{
private:
// Sentinel: there could never be an elf image at &kInvalidBaseSentinel.
static const int kInvalidBaseSentinel;

public:
// Sentinel: there could never be an elf image at this address.
static constexpr const void* const kInvalidBase =
static_cast<const void*>(&kInvalidBaseSentinel);

// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo
{
const char* name; // E.g. "__vdso_getcpu"
const char* version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void* address; // Relocated symbol address.
const ElfW(Sym) * symbol; // Symbol in the dynamic symbol table.
};

// Supports iteration over all dynamic symbols.
class SymbolIterator
{
public:
friend class ElfMemImage;
const SymbolInfo* operator->() const;
const SymbolInfo& operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator& rhs) const;
bool operator==(const SymbolIterator& rhs) const;

private:
SymbolIterator(const void* const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void* const image_;
};

explicit ElfMemImage(const void* base);
void Init(const void* base);
bool IsPresent() const
{
return ehdr_ != nullptr;
}
const ElfW(Phdr) * GetPhdr(int index) const;
const ElfW(Sym) * GetDynsym(int index) const;
const ElfW(Versym) * GetVersym(int index) const;
const ElfW(Verdef) * GetVerdef(int index) const;
const ElfW(Verdaux) * GetVerdefAux(const ElfW(Verdef) * verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) * sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;

SymbolIterator begin() const;
SymbolIterator end() const;

// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out is non-null, additional details are filled in.
bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const;

// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out is non-null, additional details are filled in.
bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const;

private:
const ElfW(Ehdr) * ehdr_;
const ElfW(Sym) * dynsym_;
const ElfW(Versym) * versym_;
const ElfW(Verdef) * verdef_;
const ElfW(Word) * hash_;
const char* dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_HAVE_ELF_MEM_IMAGE


+ 35
- 40
CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h

@@ -19,46 +19,41 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

// Type of function used for printing in stack trace dumping, etc.
// We avoid closures to keep things simple.
typedef void OutputWriter(const char*, void*);

// RegisterDebugStackTraceHook() allows registering a single routine
// `hook` that is called each time DumpStackTrace() is called.
// `hook` may be called from a signal handler.
typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
OutputWriter* writer, void* writer_arg);

// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
// This is inherently unsafe and must be signal safe code.
void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
SymbolizeUrlEmitter GetDebugStackTraceHook();

// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
void* GetProgramCounter(void* const vuc);

// Uses `writer` to dump the program counter, stack trace, and stack
// frame sizes.
void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
int frame_sizes[], int depth,
int min_dropped_frames,
bool symbolize_stacktrace,
OutputWriter* writer, void* writer_arg);

// Dump current stack trace omitting the topmost `min_dropped_frames` stack
// frames.
void DumpStackTrace(int min_dropped_frames, int max_num_frames,
bool symbolize_stacktrace, OutputWriter* writer,
void* writer_arg);

} // namespace debugging_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

// Type of function used for printing in stack trace dumping, etc.
// We avoid closures to keep things simple.
typedef void OutputWriter(const char*, void*);

// RegisterDebugStackTraceHook() allows registering a single routine
// `hook` that is called each time DumpStackTrace() is called.
// `hook` may be called from a signal handler.
typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, OutputWriter* writer, void* writer_arg);

// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
// This is inherently unsafe and must be signal safe code.
void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
SymbolizeUrlEmitter GetDebugStackTraceHook();

// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
void* GetProgramCounter(void* const vuc);

// Uses `writer` to dump the program counter, stack trace, and stack
// frame sizes.
void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], int frame_sizes[], int depth, int min_dropped_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg);

// Dump current stack trace omitting the topmost `min_dropped_frames` stack
// frames.
void DumpStackTrace(int min_dropped_frames, int max_num_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg);

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_
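
A sketch tying the declarations above together (the writer and wrapper names are hypothetical): an `OutputWriter` callback plus a call to `DumpStackTrace()`:

#include <cstdio>
#include "absl/debugging/internal/examine_stack.h"

// Matches the OutputWriter signature above.
static void WriteToStderr(const char* text, void* /*writer_arg*/)
{
    std::fputs(text, stderr);
}

void LogCurrentStack()
{
    // Drop 0 frames, emit at most 32, and symbolize if a symbolizer is set up.
    absl::debugging_internal::DumpStackTrace(0, 32, true, WriteToStderr, nullptr);
}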

+ 16
- 14
CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h

@@ -29,20 +29,22 @@
defined(__aarch64__) || defined(__riscv))
#define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

// Returns the stack consumption in bytes for the code exercised by
// signal_handler. To measure stack consumption, signal_handler is registered
// as a signal handler, so the code that it exercises must be async-signal
// safe. The argument of signal_handler is an implementation detail of signal
// handlers and should be ignored by the code for signal_handler. Use global
// variables to pass information between your test code and signal_handler.
int GetSignalHandlerStackConsumption(void (*signal_handler)(int));

} // namespace debugging_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

// Returns the stack consumption in bytes for the code exercised by
// signal_handler. To measure stack consumption, signal_handler is registered
// as a signal handler, so the code that it exercises must be async-signal
// safe. The argument of signal_handler is an implementation detail of signal
// handlers and should be ignored by the code for signal_handler. Use global
// variables to pass information between your test code and signal_handler.
int GetSignalHandlerStackConsumption(void (*signal_handler)(int));

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
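
A test-only sketch of the helper above (function names hypothetical); the handler must be async-signal-safe and, per the contract above, ignores its int argument:

#include <cstdio>
#include "absl/debugging/internal/stack_consumption.h"

#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
static void TouchSomeStack(int /*signo*/)
{
    volatile char buffer[512];
    buffer[0] = 1;                            // Touch the buffer so it is not optimized away.
    buffer[sizeof(buffer) - 1] = buffer[0];
}

void ReportHandlerStackUsage()
{
    int bytes = absl::debugging_internal::GetSignalHandlerStackConsumption(TouchSomeStack);
    std::printf("signal handler used roughly %d bytes of stack\n", bytes);
}
#endif  // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION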


+ 10
- 10
CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h

@@ -34,13 +34,13 @@
#ifdef ABSL_HAVE_THREAD_LOCAL
// Thread local support required for UnwindImpl.
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_generic-inl.inc"
"absl/debugging/internal/stacktrace_generic-inl.inc"
#endif // defined(ABSL_HAVE_THREAD_LOCAL)


// Emscripten stacktraces rely on JS. Do not use them in standalone mode.
#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_emscripten-inl.inc"
"absl/debugging/internal/stacktrace_emscripten-inl.inc"


#elif defined(__linux__) && !defined(__ANDROID__)


@@ -49,31 +49,31 @@
// Note: The libunwind-based implementation is not available to open-source
// users.
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_libunwind-inl.inc"
"absl/debugging/internal/stacktrace_libunwind-inl.inc"
#define STACKTRACE_USES_LIBUNWIND 1
#elif defined(NO_FRAME_POINTER) && defined(__has_include)
#if __has_include(<execinfo.h>)
// Note: When using glibc this may require -funwind-tables to function properly.
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_generic-inl.inc"
"absl/debugging/internal/stacktrace_generic-inl.inc"
#endif // __has_include(<execinfo.h>)
#elif defined(__i386__) || defined(__x86_64__)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_x86-inl.inc"
"absl/debugging/internal/stacktrace_x86-inl.inc"
#elif defined(__ppc__) || defined(__PPC__)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_powerpc-inl.inc"
"absl/debugging/internal/stacktrace_powerpc-inl.inc"
#elif defined(__aarch64__)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_aarch64-inl.inc"
"absl/debugging/internal/stacktrace_aarch64-inl.inc"
#elif defined(__riscv)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_riscv-inl.inc"
"absl/debugging/internal/stacktrace_riscv-inl.inc"
#elif defined(__has_include)
#if __has_include(<execinfo.h>)
// Note: When using glibc this may require -funwind-tables to function properly.
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_generic-inl.inc"
"absl/debugging/internal/stacktrace_generic-inl.inc"
#endif // __has_include(<execinfo.h>)
#endif // defined(__has_include)


@@ -82,7 +82,7 @@
// Fallback to the empty implementation.
#if !defined(ABSL_STACKTRACE_INL_HEADER)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
"absl/debugging/internal/stacktrace_unimplemented-inl.inc"
#endif


#endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_

+ 85
- 87
CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h

@@ -28,8 +28,7 @@


#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \
&& !defined(__asmjs__) && !defined(__wasm__)
#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && !defined(__asmjs__) && !defined(__wasm__)
#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1


#include <elf.h>
@@ -37,27 +36,26 @@
#include <functional>
#include <string>


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

// Iterates over all sections, invoking callback on each with the section name
// and the section header.
//
// Returns true on success; otherwise returns false in case of errors.
//
// This is not async-signal-safe.
bool ForEachSection(int fd,
const std::function<bool(absl::string_view name,
const ElfW(Shdr) &)>& callback);

// Gets the section header for the given name, if it exists. Returns true on
// success. Otherwise, returns false.
bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
ElfW(Shdr) *out);

} // namespace debugging_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

// Iterates over all sections, invoking callback on each with the section name
// and the section header.
//
// Returns true on success; otherwise returns false in case of errors.
//
// This is not async-signal-safe.
bool ForEachSection(int fd, const std::function<bool(absl::string_view name, const ElfW(Shdr) &)>& callback);

// Gets the section header for the given name, if it exists. Returns true on
// success. Otherwise, returns false.
bool GetSectionHeaderByName(int fd, const char* name, size_t name_len, ElfW(Shdr) * out);

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
@@ -74,68 +72,69 @@ ABSL_NAMESPACE_END
#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

struct SymbolDecoratorArgs {
// The program counter we are getting symbolic name for.
const void *pc;
// 0 for main executable, load address for shared libraries.
ptrdiff_t relocation;
// Read-only file descriptor for ELF image covering "pc",
// or -1 if no such ELF image exists in /proc/self/maps.
int fd;
// Output buffer, size.
// Note: the buffer may not be empty -- default symbolizer may have already
// produced some output, and earlier decorators may have adorned it in
// some way. You are free to replace or augment the contents (within the
// symbol_buf_size limit).
char *const symbol_buf;
size_t symbol_buf_size;
// Temporary scratch space, size.
// Use that space in preference to allocating your own stack buffer to
// conserve stack.
char *const tmp_buf;
size_t tmp_buf_size;
// User-provided argument
void* arg;
};
using SymbolDecorator = void (*)(const SymbolDecoratorArgs *);

// Installs a function-pointer as a decorator. Returns a value less than zero
// if the system cannot install the decorator. Otherwise, returns a unique
// identifier corresponding to the decorator. This identifier can be used to
// uninstall the decorator - See RemoveSymbolDecorator() below.
int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);

// Removes a previously installed function-pointer decorator. Parameter "ticket"
// is the return-value from calling InstallSymbolDecorator().
bool RemoveSymbolDecorator(int ticket);

// Remove all installed decorators. Returns true if successful, false if
// symbolization is currently in progress.
bool RemoveAllSymbolDecorators(void);

// Registers an address range to a file mapping.
//
// Preconditions:
// start <= end
// filename != nullptr
//
// Returns true if the file was successfully registered.
bool RegisterFileMappingHint(const void* start, const void* end,
uint64_t offset, const char* filename);

// Looks up the file mapping registered by RegisterFileMappingHint for an
// address range. If there is one, the file name is stored in *filename and
// *start and *end are modified to reflect the registered mapping. Returns
// whether any hint was found.
bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset,
const char** filename);

} // namespace debugging_internal
ABSL_NAMESPACE_END
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

struct SymbolDecoratorArgs
{
// The program counter we are getting symbolic name for.
const void* pc;
// 0 for main executable, load address for shared libraries.
ptrdiff_t relocation;
// Read-only file descriptor for ELF image covering "pc",
// or -1 if no such ELF image exists in /proc/self/maps.
int fd;
// Output buffer, size.
// Note: the buffer may not be empty -- default symbolizer may have already
// produced some output, and earlier decorators may have adorned it in
// some way. You are free to replace or augment the contents (within the
// symbol_buf_size limit).
char* const symbol_buf;
size_t symbol_buf_size;
// Temporary scratch space, size.
// Use that space in preference to allocating your own stack buffer to
// conserve stack.
char* const tmp_buf;
size_t tmp_buf_size;
// User-provided argument
void* arg;
};
using SymbolDecorator = void (*)(const SymbolDecoratorArgs*);

// Installs a function-pointer as a decorator. Returns a value less than zero
// if the system cannot install the decorator. Otherwise, returns a unique
// identifier corresponding to the decorator. This identifier can be used to
// uninstall the decorator - See RemoveSymbolDecorator() below.
int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);

// Removes a previously installed function-pointer decorator. Parameter "ticket"
// is the return-value from calling InstallSymbolDecorator().
bool RemoveSymbolDecorator(int ticket);

// Remove all installed decorators. Returns true if successful, false if
// symbolization is currently in progress.
bool RemoveAllSymbolDecorators(void);

// Registers an address range to a file mapping.
//
// Preconditions:
// start <= end
// filename != nullptr
//
// Returns true if the file was successfully registered.
bool RegisterFileMappingHint(const void* start, const void* end, uint64_t offset, const char* filename);

// Looks up the file mapping registered by RegisterFileMappingHint for an
// address range. If there is one, the file name is stored in *filename and
// *start and *end are modified to reflect the registered mapping. Returns
// whether any hint was found.
bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename);

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // __cplusplus
@@ -147,7 +146,6 @@ extern "C"
#endif // __cplusplus


bool
AbslInternalGetFileMappingHint(const void** start, const void** end,
uint64_t* offset, const char** filename);
AbslInternalGetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename);


#endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
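
A sketch of registering a decorator (decorator and installer names are hypothetical); it appends a tag to whatever the default symbolizer already wrote, staying within `symbol_buf_size` as the comments above require:

#include <cstring>
#include "absl/debugging/internal/symbolize.h"

static void TagDecorator(const absl::debugging_internal::SymbolDecoratorArgs* args)
{
    static constexpr char kTag[] = " [traced]";
    const size_t used = std::strlen(args->symbol_buf);
    if (used + sizeof(kTag) <= args->symbol_buf_size)
    {
        std::memcpy(args->symbol_buf + used, kTag, sizeof(kTag));  // Copies the trailing '\0' too.
    }
}

void InstallTagDecorator()
{
    // A negative return value means the decorator could not be installed.
    int ticket = absl::debugging_internal::InstallSymbolDecorator(TagDecorator, nullptr);
    if (ticket >= 0)
    {
        // Keep `ticket` if the decorator should later be removed via RemoveSymbolDecorator().
    }
}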

+ 112
- 95
CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h

@@ -52,105 +52,122 @@
#define ABSL_HAVE_VDSO_SUPPORT 1
#endif


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

// NOTE: this class may be used from within tcmalloc, and can not
// use any memory allocation routines.
class VDSOSupport {
public:
VDSOSupport();

typedef ElfMemImage::SymbolInfo SymbolInfo;
typedef ElfMemImage::SymbolIterator SymbolIterator;

// On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
// depending on how the kernel is built. The kernel is normally built with
// STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
// compile-time constant.
namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace debugging_internal
{

// NOTE: this class may be used from within tcmalloc, and can not
// use any memory allocation routines.
class VDSOSupport
{
public:
VDSOSupport();

typedef ElfMemImage::SymbolInfo SymbolInfo;
typedef ElfMemImage::SymbolIterator SymbolIterator;

// On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE
// depending on how the kernel is built. The kernel is normally built with
// STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a
// compile-time constant.
#ifdef __powerpc64__
enum { kVDSOSymbolType = STT_NOTYPE };
enum
{
kVDSOSymbolType = STT_NOTYPE
};
#else
enum { kVDSOSymbolType = STT_FUNC };
enum
{
kVDSOSymbolType = STT_FUNC
};
#endif


// Answers whether we have a vdso at all.
bool IsPresent() const { return image_.IsPresent(); }

// Allows iteration over all VDSO symbols.
SymbolIterator begin() const { return image_.begin(); }
SymbolIterator end() const { return image_.end(); }

// Look up versioned dynamic symbol in the kernel VDSO.
// Returns false if VDSO is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != nullptr, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;

// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if VDSO isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != nullptr, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;

// Used only for testing. Replace real VDSO base with a mock.
// Returns previous value of vdso_base_. After you are done testing,
// you are expected to call SetBase() with previous value, in order to
// reset state to the way it was.
const void *SetBase(const void *s);

// Computes vdso_base_ and returns it. Should be called as early as
// possible; before any thread creation, chroot or setuid.
static const void *Init();

private:
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == nullptr implies there is no VDSO.
ElfMemImage image_;

// Cached value of auxv AT_SYSINFO_EHDR, computed once.
// This is a tri-state:
// kInvalidBase => value hasn't been determined yet.
// 0 => there is no VDSO.
// else => vma of VDSO Elf{32,64}_Ehdr.
//
// When testing with mock VDSO, low bit is set.
// The low bit is always available because vdso_base_ is
// page-aligned.
static std::atomic<const void *> vdso_base_;

// NOLINT on 'long' because these routines mimic kernel api.
// The 'cache' parameter may be used by some versions of the kernel,
// and should be nullptr or point to a static buffer containing at
// least two 'long's.
static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'.
void *unused);
static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'.
void *unused);
typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'.
void *unused);

// This function pointer may point to InitAndGetCPU,
// GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
ABSL_CONST_INIT static std::atomic<GetCpuFn> getcpu_fn_;

friend int GetCPU(void); // Needs access to getcpu_fn_.

VDSOSupport(const VDSOSupport&) = delete;
VDSOSupport& operator=(const VDSOSupport&) = delete;
};

// Same as sched_getcpu() on later glibc versions.
// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
// otherwise use syscall(SYS_getcpu,...).
// May return -1 with errno == ENOSYS if the kernel doesn't
// support SYS_getcpu.
int GetCPU();

} // namespace debugging_internal
ABSL_NAMESPACE_END
// Answers whether we have a vdso at all.
bool IsPresent() const
{
return image_.IsPresent();
}

// Allows iteration over all VDSO symbols.
SymbolIterator begin() const
{
return image_.begin();
}
SymbolIterator end() const
{
return image_.end();
}

// Look up versioned dynamic symbol in the kernel VDSO.
// Returns false if VDSO is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != nullptr, additional details are filled in.
bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const;

// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if VDSO isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != nullptr, additional details are filled in.
bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const;

// Used only for testing. Replace real VDSO base with a mock.
// Returns previous value of vdso_base_. After you are done testing,
// you are expected to call SetBase() with previous value, in order to
// reset state to the way it was.
const void* SetBase(const void* s);

// Computes vdso_base_ and returns it. Should be called as early as
// possible; before any thread creation, chroot or setuid.
static const void* Init();

private:
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == nullptr implies there is no VDSO.
ElfMemImage image_;

// Cached value of auxv AT_SYSINFO_EHDR, computed once.
// This is a tri-state:
// kInvalidBase => value hasn't been determined yet.
// 0 => there is no VDSO.
// else => vma of VDSO Elf{32,64}_Ehdr.
//
// When testing with mock VDSO, low bit is set.
// The low bit is always available because vdso_base_ is
// page-aligned.
static std::atomic<const void*> vdso_base_;

// NOLINT on 'long' because these routines mimic kernel api.
// The 'cache' parameter may be used by some versions of the kernel,
// and should be nullptr or point to a static buffer containing at
// least two 'long's.
static long InitAndGetCPU(unsigned* cpu, void* cache, // NOLINT 'long'.
void* unused);
static long GetCPUViaSyscall(unsigned* cpu, void* cache, // NOLINT 'long'.
void* unused);
typedef long (*GetCpuFn)(unsigned* cpu, void* cache, // NOLINT 'long'.
void* unused);

// This function pointer may point to InitAndGetCPU,
// GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization.
ABSL_CONST_INIT static std::atomic<GetCpuFn> getcpu_fn_;

friend int GetCPU(void); // Needs access to getcpu_fn_.

VDSOSupport(const VDSOSupport&) = delete;
VDSOSupport& operator=(const VDSOSupport&) = delete;
};

// Same as sched_getcpu() on later glibc versions.
// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present,
// otherwise use syscall(SYS_getcpu,...).
// May return -1 with errno == ENOSYS if the kernel doesn't
// support SYS_getcpu.
int GetCPU();

} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_HAVE_ELF_MEM_IMAGE
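
A sketch of the lookup path described above (the probe function is hypothetical); it asks the vDSO for `__vdso_getcpu` and then reports the current CPU via `GetCPU()`:

#include <cstdio>
#include "absl/debugging/internal/vdso_support.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT
void ProbeVdso()
{
    using absl::debugging_internal::VDSOSupport;
    VDSOSupport vdso;
    if (vdso.IsPresent())
    {
        VDSOSupport::SymbolInfo info;
        if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", VDSOSupport::kVDSOSymbolType, &info))
        {
            std::printf("__vdso_getcpu is exported by the vDSO\n");
        }
    }
    // Falls back to syscall(SYS_getcpu, ...) when the vDSO symbol is absent.
    std::printf("running on cpu %d\n", absl::debugging_internal::GetCPU());
}
#endif  // ABSL_HAVE_VDSO_SUPPORT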


+ 88
- 85
CAPI/cpp/grpc/include/absl/debugging/leak_check.h

@@ -51,100 +51,103 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


// HaveLeakSanitizer()
//
// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
// currently built into this target.
bool HaveLeakSanitizer();
// HaveLeakSanitizer()
//
// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
// currently built into this target.
bool HaveLeakSanitizer();


// LeakCheckerIsActive()
//
// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
// currently built into this target and is turned on.
bool LeakCheckerIsActive();
// LeakCheckerIsActive()
//
// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is
// currently built into this target and is turned on.
bool LeakCheckerIsActive();


// DoIgnoreLeak()
//
// Implements `IgnoreLeak()` below. This function should usually
// not be called directly; calling `IgnoreLeak()` is preferred.
void DoIgnoreLeak(const void* ptr);
// DoIgnoreLeak()
//
// Implements `IgnoreLeak()` below. This function should usually
// not be called directly; calling `IgnoreLeak()` is preferred.
void DoIgnoreLeak(const void* ptr);


// IgnoreLeak()
//
// Instruct the leak sanitizer to ignore leak warnings on the object referenced
// by the passed pointer, as well as all heap objects transitively referenced
// by it. The passed object pointer can point to either the beginning of the
// object or anywhere within it.
//
// Example:
//
// static T* obj = IgnoreLeak(new T(...));
//
// If the passed `ptr` does not point to an actively allocated object at the
// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
// allocated, leak sanitizer will assume this object is referenced even if
// there is no actual reference in user memory.
//
template <typename T>
T* IgnoreLeak(T* ptr) {
DoIgnoreLeak(ptr);
return ptr;
}
// IgnoreLeak()
//
// Instruct the leak sanitizer to ignore leak warnings on the object referenced
// by the passed pointer, as well as all heap objects transitively referenced
// by it. The passed object pointer can point to either the beginning of the
// object or anywhere within it.
//
// Example:
//
// static T* obj = IgnoreLeak(new T(...));
//
// If the passed `ptr` does not point to an actively allocated object at the
// time `IgnoreLeak()` is called, the call is a no-op; if it is actively
// allocated, leak sanitizer will assume this object is referenced even if
// there is no actual reference in user memory.
//
template<typename T>
T* IgnoreLeak(T* ptr)
{
DoIgnoreLeak(ptr);
return ptr;
}


// FindAndReportLeaks()
//
// If any leaks are detected, prints a leak report and returns true. This
// function may be called repeatedly, and does not affect end-of-process leak
// checking.
//
// Example:
// if (FindAndReportLeaks()) {
// ... diagnostic already printed. Exit with failure code.
// exit(1)
// }
bool FindAndReportLeaks();
// FindAndReportLeaks()
//
// If any leaks are detected, prints a leak report and returns true. This
// function may be called repeatedly, and does not affect end-of-process leak
// checking.
//
// Example:
// if (FindAndReportLeaks()) {
// ... diagnostic already printed. Exit with failure code.
// exit(1)
// }
bool FindAndReportLeaks();


// LeakCheckDisabler
//
// This helper class indicates that any heap allocations done in the code block
// covered by the scoped object, which should be allocated on the stack, will
// not be reported as leaks. Leak check disabling will occur within the code
// block and any nested function calls within the code block.
//
// Example:
//
// void Foo() {
// LeakCheckDisabler disabler;
// ... code that allocates objects whose leaks should be ignored ...
// }
//
// REQUIRES: Destructor runs in same thread as constructor
class LeakCheckDisabler {
public:
LeakCheckDisabler();
LeakCheckDisabler(const LeakCheckDisabler&) = delete;
LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
~LeakCheckDisabler();
};
// LeakCheckDisabler
//
// This helper class indicates that any heap allocations done in the code block
// covered by the scoped object, which should be allocated on the stack, will
// not be reported as leaks. Leak check disabling will occur within the code
// block and any nested function calls within the code block.
//
// Example:
//
// void Foo() {
// LeakCheckDisabler disabler;
// ... code that allocates objects whose leaks should be ignored ...
// }
//
// REQUIRES: Destructor runs in same thread as constructor
class LeakCheckDisabler
{
public:
LeakCheckDisabler();
LeakCheckDisabler(const LeakCheckDisabler&) = delete;
LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
~LeakCheckDisabler();
};


// RegisterLivePointers()
//
// Registers `ptr[0,size-1]` as pointers to memory that is still actively being
// referenced and for which leak checking should be ignored. This function is
// useful if you store pointers in mapped memory, for memory ranges that we know
// are correct but for which normal analysis would flag as leaked code.
void RegisterLivePointers(const void* ptr, size_t size);
// RegisterLivePointers()
//
// Registers `ptr[0,size-1]` as pointers to memory that is still actively being
// referenced and for which leak checking should be ignored. This function is
// useful if you store pointers in mapped memory, for memory ranges that we know
// are correct but for which normal analysis would flag as leaked code.
void RegisterLivePointers(const void* ptr, size_t size);


// UnRegisterLivePointers()
//
// Deregisters the pointers previously marked as active in
// `RegisterLivePointers()`, enabling leak checking of those pointers.
void UnRegisterLivePointers(const void* ptr, size_t size);
// UnRegisterLivePointers()
//
// Deregisters the pointers previously marked as active in
// `RegisterLivePointers()`, enabling leak checking of those pointers.
void UnRegisterLivePointers(const void* ptr, size_t size);


ABSL_NAMESPACE_END
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_LEAK_CHECK_H_
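
A combined sketch of `IgnoreLeak()` and `LeakCheckDisabler` (the function name is hypothetical); both are no-ops when no leak-checking sanitizer is built in:

#include "absl/debugging/leak_check.h"

void LeakCheckSketch()
{
    // An intentionally long-lived singleton: keep it out of leak reports.
    static int* config = absl::IgnoreLeak(new int(42));
    (void)config;

    if (absl::LeakCheckerIsActive())
    {
        // Everything allocated while `disabler` is in scope is ignored.
        absl::LeakCheckDisabler disabler;
        int* scratch = new int[16];
        (void)scratch;
    }
}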

+ 178
- 186
CAPI/cpp/grpc/include/absl/debugging/stacktrace.h

@@ -33,199 +33,191 @@


#include "absl/base/config.h"


namespace absl {
ABSL_NAMESPACE_BEGIN
namespace absl
{
ABSL_NAMESPACE_BEGIN


// GetStackFrames()
//
// Records program counter values for up to `max_depth` frames, skipping the
// most recent `skip_count` stack frames, stores their corresponding values
// and sizes in `results` and `sizes` buffers, and returns the number of frames
// stored. (Note that the frame generated for the `absl::GetStackFrames()`
// routine itself is also skipped.)
//
// Example:
//
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int sizes[10];
// int depth = absl::GetStackFrames(result, sizes, 10, 1);
// }
//
// The current stack frame would consist of three function calls: `bar()`,
// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets
// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently
// invoked function call. It will therefore return 2 and fill `result` with
// program counters within the following functions:
//
// result[0] foo()
// result[1] main()
//
// (Note: in practice, a few more entries after `main()` may be added to account
// for startup processes.)
//
// Corresponding stack frame sizes will also be recorded:
//
// sizes[0] 16
// sizes[1] 16
//
// (Stack frame sizes of `16` above are just for illustration purposes.)
//
// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
// be identified.
//
// This routine may return fewer stack frame entries than are
// available. Also note that `result` and `sizes` must both be non-null.
extern int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count);
// GetStackFrames()
//
// Records program counter values for up to `max_depth` frames, skipping the
// most recent `skip_count` stack frames, stores their corresponding values
// and sizes in `results` and `sizes` buffers, and returns the number of frames
// stored. (Note that the frame generated for the `absl::GetStackFrames()`
// routine itself is also skipped.)
//
// Example:
//
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int sizes[10];
// int depth = absl::GetStackFrames(result, sizes, 10, 1);
// }
//
// The current stack frame would consist of three function calls: `bar()`,
// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets
// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently
// invoked function call. It will therefore return 2 and fill `result` with
// program counters within the following functions:
//
// result[0] foo()
// result[1] main()
//
// (Note: in practice, a few more entries after `main()` may be added to account
// for startup processes.)
//
// Corresponding stack frame sizes will also be recorded:
//
// sizes[0] 16
// sizes[1] 16
//
// (Stack frame sizes of `16` above are just for illustration purposes.)
//
// Stack frame sizes of 0 or less indicate that those frame sizes couldn't
// be identified.
//
// This routine may return fewer stack frame entries than are
// available. Also note that `result` and `sizes` must both be non-null.
extern int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count);


// GetStackFramesWithContext()
//
// Records program counter values obtained from a signal handler. Records
// program counter values for up to `max_depth` frames, skipping the most recent
// `skip_count` stack frames, stores their corresponding values and sizes in
// `results` and `sizes` buffers, and returns the number of frames stored. (Note
// that the frame generated for the `absl::GetStackFramesWithContext()` routine
// itself is also skipped.)
//
// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
// passed to a signal handler registered via the `sa_sigaction` field of a
// `sigaction` struct. (See
// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
// help a stack unwinder to provide a better stack trace under certain
// conditions. `uc` may safely be null.
//
// The `min_dropped_frames` output parameter, if non-null, points to the
// location to note any dropped stack frames, if any, due to buffer limitations
// or other reasons. (This value will be set to `0` if no frames were dropped.)
// The number of total stack frames is guaranteed to be >= skip_count +
// max_depth + *min_dropped_frames.
extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
// GetStackFramesWithContext()
//
// Records program counter values obtained from a signal handler. Records
// program counter values for up to `max_depth` frames, skipping the most recent
// `skip_count` stack frames, stores their corresponding values and sizes in
// `results` and `sizes` buffers, and returns the number of frames stored. (Note
// that the frame generated for the `absl::GetStackFramesWithContext()` routine
// itself is also skipped.)
//
// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
// passed to a signal handler registered via the `sa_sigaction` field of a
// `sigaction` struct. (See
// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
// help a stack unwinder to provide a better stack trace under certain
// conditions. `uc` may safely be null.
//
// The `min_dropped_frames` output parameter, if non-null, points to the
// location to note any dropped stack frames, if any, due to buffer limitations
// or other reasons. (This value will be set to `0` if no frames were dropped.)
// The number of total stack frames is guaranteed to be >= skip_count +
// max_depth + *min_dropped_frames.
extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames);


// GetStackTrace()
//
// Records program counter values for up to `max_depth` frames, skipping the
// most recent `skip_count` stack frames, stores their corresponding values
// in `results`, and returns the number of frames
// stored. Note that this function is similar to `absl::GetStackFrames()`
// except that it returns the stack trace only, and not stack frame sizes.
//
// Example:
//
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int depth = absl::GetStackTrace(result, 10, 1);
// }
//
// This produces:
//
// result[0] foo
// result[1] main
// .... ...
//
// `result` must not be null.
extern int GetStackTrace(void** result, int max_depth, int skip_count);
// GetStackTrace()
//
// Records program counter values for up to `max_depth` frames, skipping the
// most recent `skip_count` stack frames, stores their corresponding values
// in `results`, and returns the number of frames
// stored. Note that this function is similar to `absl::GetStackFrames()`
// except that it returns the stack trace only, and not stack frame sizes.
//
// Example:
//
// main() { foo(); }
// foo() { bar(); }
// bar() {
// void* result[10];
// int depth = absl::GetStackTrace(result, 10, 1);
// }
//
// This produces:
//
// result[0] foo
// result[1] main
// .... ...
//
// `result` must not be null.
extern int GetStackTrace(void** result, int max_depth, int skip_count);


// GetStackTraceWithContext()
//
// Records program counter values obtained from a signal handler. Records
// program counter values for up to `max_depth` frames, skipping the most recent
// `skip_count` stack frames, stores their corresponding values in `results`,
// and returns the number of frames stored. (Note that the frame generated for
// the `absl::GetStackFramesWithContext()` routine itself is also skipped.)
//
// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
// passed to a signal handler registered via the `sa_sigaction` field of a
// `sigaction` struct. (See
// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
// help a stack unwinder to provide a better stack trace under certain
// conditions. `uc` may safely be null.
//
// The `min_dropped_frames` output parameter, if non-null, points to the
// location to note any dropped stack frames, if any, due to buffer limitations
// or other reasons. (This value will be set to `0` if no frames were dropped.)
// The number of total stack frames is guaranteed to be >= skip_count +
// max_depth + *min_dropped_frames.
extern int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
// GetStackTraceWithContext()
//
// Records program counter values obtained from a signal handler. Records
// program counter values for up to `max_depth` frames, skipping the most recent
// `skip_count` stack frames, stores their corresponding values in `results`,
// and returns the number of frames stored. (Note that the frame generated for
// the `absl::GetStackFramesWithContext()` routine itself is also skipped.)
//
// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value
// passed to a signal handler registered via the `sa_sigaction` field of a
// `sigaction` struct. (See
// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may
// help a stack unwinder to provide a better stack trace under certain
// conditions. `uc` may safely be null.
//
// The `min_dropped_frames` output parameter, if non-null, points to the
// location to note any dropped stack frames, if any, due to buffer limitations
// or other reasons. (This value will be set to `0` if no frames were dropped.)
// The number of total stack frames is guaranteed to be >= skip_count +
// max_depth + *min_dropped_frames.
extern int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* min_dropped_frames);


// SetStackUnwinder()
//
// Provides a custom function for unwinding stack frames that will be used in
// place of the default stack unwinder when invoking the static
// GetStack{Frames,Trace}{,WithContext}() functions above.
//
// The arguments passed to the unwinder function will match the
// arguments passed to `absl::GetStackFramesWithContext()` except that sizes
// will be non-null iff the caller is interested in frame sizes.
//
// If unwinder is set to null, we revert to the default stack-tracing behavior.
//
// *****************************************************************************
// WARNING
// *****************************************************************************
//
// absl::SetStackUnwinder is not suitable for general purpose use. It is
// provided for custom runtimes.
// Some things to watch out for when calling `absl::SetStackUnwinder()`:
//
// (a) The unwinder may be called from within signal handlers and
// therefore must be async-signal-safe.
//
// (b) Even after a custom stack unwinder has been unregistered, other
// threads may still be in the process of using that unwinder.
// Therefore do not clean up any state that may be needed by an old
// unwinder.
// *****************************************************************************
extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
int max_depth, int skip_count,
const void* uc,
int* min_dropped_frames));
// SetStackUnwinder()
//
// Provides a custom function for unwinding stack frames that will be used in
// place of the default stack unwinder when invoking the static
// GetStack{Frames,Trace}{,WithContext}() functions above.
//
// The arguments passed to the unwinder function will match the
// arguments passed to `absl::GetStackFramesWithContext()` except that sizes
// will be non-null iff the caller is interested in frame sizes.
//
// If unwinder is set to null, we revert to the default stack-tracing behavior.
//
// *****************************************************************************
// WARNING
// *****************************************************************************
//
// absl::SetStackUnwinder is not suitable for general purpose use. It is
// provided for custom runtimes.
// Some things to watch out for when calling `absl::SetStackUnwinder()`:
//
// (a) The unwinder may be called from within signal handlers and
// therefore must be async-signal-safe.
//
// (b) Even after a custom stack unwinder has been unregistered, other
// threads may still be in the process of using that unwinder.
// Therefore do not clean up any state that may be needed by an old
// unwinder.
// *****************************************************************************
extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames));


// DefaultStackUnwinder()
//
// Records program counter values of up to `max_depth` frames, skipping the most
// recent `skip_count` stack frames, and stores their corresponding values in
// `pcs`. (Note that the frame generated for this call itself is also skipped.)
// This function acts as a generic stack-unwinder; prefer usage of the more
// specific `GetStack{Trace,Frames}{,WithContext}()` functions above.
//
// If you have set your own stack unwinder (with the `SetStackUnwinder()`
// function above), you can still get the default stack unwinder by calling
// `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder
// and use the default one instead.
//
// Because this function is generic, only `pcs` is guaranteed to be non-null
// upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all
// be null when called.
//
// The semantics are the same as the corresponding `GetStack*()` function in the
// case where `absl::SetStackUnwinder()` was never called. Equivalents are:
//
// null sizes | non-nullptr sizes
// |==========================================================|
// null uc | GetStackTrace() | GetStackFrames() |
// non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() |
// |==========================================================|
extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
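
A sketch of the equivalences from the table above; the buffer size, skip count, and the `CaptureExamples` name are illustrative only:

void CaptureExamples() {
  void* pcs[64];
  int sizes[64];
  int min_dropped = 0;

  // null sizes + null uc: same semantics as GetStackTrace().
  int depth = absl::DefaultStackUnwinder(pcs, nullptr, 64, /*skip_count=*/1,
                                         /*uc=*/nullptr, &min_dropped);

  // non-null sizes + null uc: same semantics as GetStackFrames().
  int with_sizes = absl::DefaultStackUnwinder(pcs, sizes, 64, /*skip_count=*/1,
                                              /*uc=*/nullptr, &min_dropped);
  (void)depth;
  (void)with_sizes;
}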


namespace debugging_internal {
// Returns true for platforms which are expected to have functioning stack trace
// implementations. Intended to be used for tests which want to exclude
// verification of logic known to be broken because stack traces are not
// working.
extern bool StackTraceWorksForTest();
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_STACKTRACE_H_

+ 39
- 38
CAPI/cpp/grpc/include/absl/debugging/symbolize.h

@@ -54,46 +54,47 @@


#include "absl/debugging/internal/symbolize.h" #include "absl/debugging/internal/symbolize.h"


namespace absl
{
ABSL_NAMESPACE_BEGIN


// InitializeSymbolizer()
//
// Initializes the program counter symbolizer, given the path of the program
// (typically obtained through `main()`'s `argv[0]`). The Abseil symbolizer
// allows you to read program counters (instruction pointer values) using their
// human-readable names within output such as stack traces.
//
// Example:
//
// int main(int argc, char *argv[]) {
// absl::InitializeSymbolizer(argv[0]);
// // Now you can use the symbolizer
// }
void InitializeSymbolizer(const char* argv0);
//
// Symbolize()
//
// Symbolizes a program counter (instruction pointer value) `pc` and, on
// success, writes the name to `out`. The symbol name is demangled, if possible.
// Note that the symbolized name may be truncated and will be NUL-terminated.
// Demangling is supported for symbols generated by GCC 3.x or newer. Returns
// `false` on failure.
//
// Example:
//
// // Print a program counter and its symbol name.
// static void DumpPCAndSymbol(void *pc) {
// char tmp[1024];
// const char *symbol = "(unknown)";
// if (absl::Symbolize(pc, tmp, sizeof(tmp))) {
// symbol = tmp;
// }
// absl::PrintF("%p %s\n", pc, symbol);
// }
bool Symbolize(const void* pc, char* out, int out_size);
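
A short sketch tying the two headers together: initialize the symbolizer once at startup, then symbolize program counters captured with `absl::GetStackTrace()`. Buffer sizes are arbitrary illustration values:

#include <cstdio>

#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

int main(int argc, char* argv[]) {
  (void)argc;
  absl::InitializeSymbolizer(argv[0]);

  void* pcs[16];
  int depth = absl::GetStackTrace(pcs, 16, /*skip_count=*/0);
  for (int i = 0; i < depth; ++i) {
    char name[1024];
    const char* symbol = "(unknown)";
    if (absl::Symbolize(pcs[i], name, sizeof(name))) symbol = name;
    std::printf("%p %s\n", pcs[i], symbol);
  }
  return 0;
}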


ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_DEBUGGING_SYMBOLIZE_H_

+ 166
- 156
CAPI/cpp/grpc/include/absl/flags/commandlineflag.h

@@ -35,166 +35,176 @@
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"
#include "absl/types/optional.h" #include "absl/types/optional.h"


namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace flags_internal
{
class PrivateHandleAccessor;
} // namespace flags_internal

// CommandLineFlag
//
// This type acts as a type-erased handle for an instance of an Abseil Flag and
// holds reflection information pertaining to that flag. Use CommandLineFlag to
// access a flag's name, location, help string etc.
//
// To obtain an absl::CommandLineFlag, invoke `absl::FindCommandLineFlag()`
// passing it the flag name string.
//
// Example:
//
//   // Obtain reflection handle for a flag named "flagname".
//   const absl::CommandLineFlag* my_flag_data =
//        absl::FindCommandLineFlag("flagname");
//
//   // Now you can get flag info from that reflection handle.
//   std::string flag_location = my_flag_data->Filename();
//   ...
class CommandLineFlag
{
public:
constexpr CommandLineFlag() = default;

// Not copyable/assignable.
CommandLineFlag(const CommandLineFlag&) = delete;
CommandLineFlag& operator=(const CommandLineFlag&) = delete;

// absl::CommandLineFlag::IsOfType()
//
// Return true iff flag has type T.
template<typename T>
inline bool IsOfType() const
{
return TypeId() == base_internal::FastTypeId<T>();
}

// absl::CommandLineFlag::TryGet()
//
// Attempts to retrieve the flag value. Returns value on success,
// absl::nullopt otherwise.
template<typename T>
absl::optional<T> TryGet() const
{
if (IsRetired() || !IsOfType<T>())
{
return absl::nullopt;
}

// Implementation notes:
//
// We are wrapping a union around the value of `T` to serve three purposes:
//
// 1. `U.value` has correct size and alignment for a value of type `T`
// 2. The `U.value` constructor is not invoked since U's constructor does
// not do it explicitly.
// 3. The `U.value` destructor is invoked since U's destructor does it
// explicitly. This makes `U` a kind of RAII wrapper around non default
// constructible value of T, which is destructed when we leave the
// scope. We do need to destroy U.value, which is constructed by
// CommandLineFlag::Read even though we left it in a moved-from state
// after std::move.
//
// All of this serves to avoid requiring `T` being default constructible.
union U
{
T value;
U()
{
}
~U()
{
value.~T();
}
};
U u;

Read(&u.value);
// allow retired flags to be "read", so we can report invalid access.
if (IsRetired())
{
return absl::nullopt;
}
return std::move(u.value);
}

// absl::CommandLineFlag::Name()
//
// Returns name of this flag.
virtual absl::string_view Name() const = 0;

// absl::CommandLineFlag::Filename()
//
// Returns name of the file where this flag is defined.
virtual std::string Filename() const = 0;

// absl::CommandLineFlag::Help()
//
// Returns help message associated with this flag.
virtual std::string Help() const = 0;

// absl::CommandLineFlag::IsRetired()
//
// Returns true iff this object corresponds to retired flag.
virtual bool IsRetired() const;

// absl::CommandLineFlag::DefaultValue()
//
// Returns the default value for this flag.
virtual std::string DefaultValue() const = 0;

// absl::CommandLineFlag::CurrentValue()
//
// Returns the current value for this flag.
virtual std::string CurrentValue() const = 0;

// absl::CommandLineFlag::ParseFrom()
//
// Sets the value of the flag based on specified string `value`. If the flag
// was successfully set to new value, it returns true. Otherwise, sets `error`
// to indicate the error, leaves the flag unchanged, and returns false.
bool ParseFrom(absl::string_view value, std::string* error);

protected:
~CommandLineFlag() = default;

private:
friend class flags_internal::PrivateHandleAccessor;

// Sets the value of the flag based on specified string `value`. If the flag
// was successfully set to new value, it returns true. Otherwise, sets `error`
// to indicate the error, leaves the flag unchanged, and returns false. There
// are three ways to set the flag's value:
// * Update the current flag value
// * Update the flag's default value
// * Update the current flag value if it was never set before
// The mode is selected based on `set_mode` parameter.
virtual bool ParseFrom(absl::string_view value, flags_internal::FlagSettingMode set_mode, flags_internal::ValueSource source, std::string& error) = 0;

// Returns id of the flag's value type.
virtual flags_internal::FlagFastTypeId TypeId() const = 0;

// Interface to save flag to some persistent state. Returns current flag state
// or nullptr if flag does not support saving and restoring a state.
virtual std::unique_ptr<flags_internal::FlagStateInterface> SaveState() = 0;

// Copy-constructs a new value of the flag's type in the memory referenced by
// `dst`, based on the current flag's value.
virtual void Read(void* dst) const = 0;

// To be deleted. Used to return true if flag's current value originated from
// command line.
virtual bool IsSpecifiedOnCommandLine() const = 0;

// Validates the supplied value using the validator or ParseFlag routine
virtual bool ValidateInputValue(absl::string_view value) const = 0;

// Checks that the flag's default value can be converted to string and back to the
// flag's value type.
virtual void CheckDefaultValueParsingRoundtrip() const = 0;
};
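
A sketch of typical use of this handle through reflection; `absl::FindCommandLineFlag()` lives in absl/flags/reflection.h, and the flag name "max_retries" is hypothetical:

#include "absl/flags/commandlineflag.h"
#include "absl/flags/reflection.h"

void InspectFlag() {
  const absl::CommandLineFlag* flag = absl::FindCommandLineFlag("max_retries");
  if (flag == nullptr) return;  // no flag registered under that name

  if (flag->IsOfType<int>()) {
    absl::optional<int> value = flag->TryGet<int>();
    if (value.has_value()) {
      // Use *value; TryGet() returned a copy of the current flag value.
    }
  }
}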

ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_FLAGS_COMMANDLINEFLAG_H_

+ 15
- 15
CAPI/cpp/grpc/include/absl/flags/config.h

@@ -47,22 +47,22 @@


// These macros represent the "source of truth" for the list of supported
// built-in types.
#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \
A(bool, bool) \
A(short, short) \
A(unsigned short, unsigned_short) \
A(int, int) \
A(unsigned int, unsigned_int) \
A(long, long) \
A(unsigned long, unsigned_long) \
A(long long, long_long) \
A(unsigned long long, unsigned_long_long) \
A(double, double) \
A(float, float)


#define ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(A) \
ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \
A(std::string, std_string) \
A(std::vector<std::string>, std_vector_of_string)
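
These are X-macro lists: each entry invokes a caller-supplied macro `A(type, identifier)`. A hypothetical sketch of how such a list is consumed (`DECLARE_PARSER` is purely illustrative, not part of Abseil):

// Generates one declaration per supported built-in type, e.g.
//   bool ParseAs_bool(const char* text, bool* out);
//   bool ParseAs_unsigned_long_long(const char* text, unsigned long long* out);
#define DECLARE_PARSER(Type, Name) bool ParseAs_##Name(const char* text, Type* out);
ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(DECLARE_PARSER)
#undef DECLARE_PARSER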


#endif // ABSL_FLAGS_CONFIG_H_

+ 21
- 17
CAPI/cpp/grpc/include/absl/flags/declare.h

@@ -27,28 +27,30 @@


#include "absl/base/config.h" #include "absl/base/config.h"


namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace flags_internal
{


// absl::Flag<T> represents a flag of type 'T' created by ABSL_FLAG.
template<typename T>
class Flag;


} // namespace flags_internal


// Flag
//
// Forward declaration of the `absl::Flag` type for use in defining the macro.
#if defined(_MSC_VER) && !defined(__clang__)
template<typename T>
class Flag;
#else
template<typename T>
using Flag = flags_internal::Flag<T>;
#endif


ABSL_NAMESPACE_END
} // namespace absl


// ABSL_DECLARE_FLAG()
@@ -64,10 +66,12 @@ ABSL_NAMESPACE_END


// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its
// arguments. Clients must use ABSL_DECLARE_FLAG instead.
#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \
extern absl::Flag<type> FLAGS_##name; \
namespace absl /* block flags in namespaces */ \
{ \
} \
/* second redeclaration is to allow applying attributes */ \
extern absl::Flag<type> FLAGS_##name
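
A usage sketch with a hypothetical flag named `max_retries`: the flag is defined once with ABSL_FLAG() in some .cc file, and other translation units declare it before reading it:

// retries.h -- make the flag visible to other translation units.
#include "absl/flags/declare.h"
ABSL_DECLARE_FLAG(int, max_retries);

// client.cc -- read the declared flag (its definition lives elsewhere).
#include "absl/flags/flag.h"
int RetryBudget() { return absl::GetFlag(FLAGS_max_retries); }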


#endif // ABSL_FLAGS_DECLARE_H_

+ 120
- 107
CAPI/cpp/grpc/include/absl/flags/flag.h

@@ -40,8 +40,9 @@
#include "absl/flags/internal/registry.h" #include "absl/flags/internal/registry.h"
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"


namespace absl
{
ABSL_NAMESPACE_BEGIN


// Flag
//
@@ -72,73 +73,76 @@ ABSL_NAMESPACE_BEGIN
// discusses supported standard types, optional flags, and additional Abseil
// type support.
#if !defined(_MSC_VER) || defined(__clang__)
template<typename T>
using Flag = flags_internal::Flag<T>;
#else
#include "absl/flags/internal/flag_msvc.inc"
#endif


// GetFlag()
//
// Returns the value (of type `T`) of an `absl::Flag<T>` instance, by value. Do
// not construct an `absl::Flag<T>` directly and call `absl::GetFlag()`;
// instead, refer to flag's constructed variable name (e.g. `FLAGS_name`).
// Because this function returns by value and not by reference, it is
// thread-safe, but note that the operation may be expensive; as a result, avoid
// `absl::GetFlag()` within any tight loops.
//
// Example:
//
// // FLAGS_count is a Flag of type `int`
// int my_count = absl::GetFlag(FLAGS_count);
//
// // FLAGS_firstname is a Flag of type `std::string`
// std::string first_name = absl::GetFlag(FLAGS_firstname);
template<typename T>
ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag<T>& flag)
{
return flags_internal::FlagImplPeer::InvokeGet<T>(flag);
}


// SetFlag()
//
// Sets the value of an `absl::Flag` to the value `v`. Do not construct an
// `absl::Flag<T>` directly and call `absl::SetFlag()`; instead, use the
// flag's variable name (e.g. `FLAGS_name`). This function is
// thread-safe, but is potentially expensive. Avoid setting flags in general,
// but especially within performance-critical code.
template<typename T>
void SetFlag(absl::Flag<T>* flag, const T& v)
{
flags_internal::FlagImplPeer::InvokeSet(*flag, v);
}


// Overload of `SetFlag()` to allow callers to pass in a value that is
// convertible to `T`. E.g., use this overload to pass a "const char*" when `T`
// is `std::string`.
template<typename T, typename V>
void SetFlag(absl::Flag<T>* flag, const V& v)
{
T value(v);
flags_internal::FlagImplPeer::InvokeSet(*flag, value);
}
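
A combined sketch of the two functions above, as it would appear in a user's source file; the flags `count` and `name` are hypothetical, and the second `SetFlag()` call relies on the convertible-value overload:

#include <string>

#include "absl/flags/flag.h"

ABSL_FLAG(int, count, 10, "Number of iterations");
ABSL_FLAG(std::string, name, "anonymous", "Name to greet");

void Demo() {
  int current = absl::GetFlag(FLAGS_count);   // returns a copy, thread-safe
  absl::SetFlag(&FLAGS_count, current + 1);   // exact-type overload
  absl::SetFlag(&FLAGS_name, "updated");      // const char* converted to std::string
}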


// GetFlagReflectionHandle()
//
// Returns the reflection handle corresponding to specified Abseil Flag
// instance. Use this handle to access flag's reflection information, like name,
// location, default value etc.
//
// Example:
//
// std::string default_value = absl::GetFlagReflectionHandle(FLAGS_count).DefaultValue();


template<typename T>
const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag<T>& f)
{
return flags_internal::FlagImplPeer::InvokeReflect(f);
}
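
A small sketch expanding the example above, assuming a flag `FLAGS_count` defined elsewhere with ABSL_FLAG(); `LogFlagInfo` is a hypothetical helper:

#include <cstdio>

void LogFlagInfo() {
  const absl::CommandLineFlag& handle = absl::GetFlagReflectionHandle(FLAGS_count);
  std::printf("--%s (default: %s, current: %s)\n",
              std::string(handle.Name()).c_str(),
              handle.DefaultValue().c_str(),
              handle.CurrentValue().c_str());
}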


ABSL_NAMESPACE_END
} // namespace absl



// ABSL_FLAG()
//
// This macro defines an `absl::Flag<T>` instance of a specified type `T`:
@@ -167,7 +171,7 @@ ABSL_NAMESPACE_END
// Note: do not construct objects of type `absl::Flag<T>` directly. Only use the
// `ABSL_FLAG()` macro for such construction.
#define ABSL_FLAG(Type, name, default_value, help) \
    ABSL_FLAG_IMPL(Type, name, default_value, help)


// ABSL_FLAG().OnUpdate()
//
@@ -198,11 +202,12 @@ ABSL_NAMESPACE_END
// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_NAMES
#if !defined(_MSC_VER) || defined(__clang__)
#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag
#define ABSL_FLAG_IMPL_HELP_ARG(name) \
absl::flags_internal::HelpArg<AbslFlagHelpGenFor##name>( \
FLAGS_help_storage_##name \
)
#define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) \
    absl::flags_internal::DefaultArg<Type, AbslFlagDefaultGenFor##name>(0)
#else
#define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag.GetImpl()
#define ABSL_FLAG_IMPL_HELP_ARG(name) &AbslFlagHelpGenFor##name::NonConst
@@ -212,15 +217,13 @@ ABSL_NAMESPACE_END
#if ABSL_FLAGS_STRIP_NAMES
#define ABSL_FLAG_IMPL_FLAGNAME(txt) ""
#define ABSL_FLAG_IMPL_FILENAME() ""
#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
absl::flags_internal::FlagRegistrar<T, false>(ABSL_FLAG_IMPL_FLAG_PTR(flag), nullptr)
#else
#define ABSL_FLAG_IMPL_FLAGNAME(txt) txt
#define ABSL_FLAG_IMPL_FILENAME() __FILE__
#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \
absl::flags_internal::FlagRegistrar<T, true>(ABSL_FLAG_IMPL_FLAG_PTR(flag), __FILE__)
#endif


// ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP
@@ -239,46 +242,56 @@ ABSL_NAMESPACE_END
// TODO(rogeeff): place these generated structs into local namespace and apply
// ABSL_INTERNAL_UNIQUE_SHORT_NAME.
// TODO(rogeeff): Apply __attribute__((nodebug)) to FLAGS_help_storage_##name
#define ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, txt) \
struct AbslFlagHelpGenFor##name \
{ \
/* The expression is run in the caller as part of the */ \
/* default value argument. That keeps temporaries alive */ \
/* long enough for NonConst to work correctly. */ \
static constexpr absl::string_view Value( \
absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt) \
) \
{ \
return absl_flag_help; \
} \
static std::string NonConst() \
{ \
return std::string(Value()); \
} \
}; \
constexpr auto FLAGS_help_storage_##name ABSL_INTERNAL_UNIQUE_SMALL_NAME() \
ABSL_ATTRIBUTE_SECTION_VARIABLE(flags_help_cold) = \
absl::flags_internal::HelpStringAsArray<AbslFlagHelpGenFor##name>( \
0 \
);


#define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
struct AbslFlagDefaultGenFor##name \
{ \
Type value = absl::flags_internal::InitDefaultValue<Type>(default_value); \
static void Gen(void* absl_flag_default_loc) \
{ \
new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \
} \
};


// ABSL_FLAG_IMPL
//
// Note: Name of registrar object is not arbitrary. It is used to "grab"
// global name for FLAGS_no<flag_name> symbol, thus preventing the possibility
// of defining two flags with names foo and nofoo.
#define ABSL_FLAG_IMPL(Type, name, default_value, help) \
extern ::absl::Flag<Type> FLAGS_##name; \
namespace absl /* block flags in namespaces */ \
{ \
} \
ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \
ABSL_CONST_INIT absl::Flag<Type> FLAGS_##name{ \
ABSL_FLAG_IMPL_FLAGNAME(#name), ABSL_FLAG_IMPL_FILENAME(), ABSL_FLAG_IMPL_HELP_ARG(name), ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name)}; \
extern absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name; \
absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name = \
ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name)
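
In practice this macro is reached through ABSL_FLAG(); a hypothetical definition and the identifiers its expansion claims:

ABSL_FLAG(bool, verbose, false, "Enable verbose output");
// The expansion above defines FLAGS_verbose and also a registrar object named
// FLAGS_noverbose, so a second flag literally named "noverbose" cannot be
// defined in the same program (see the foo/nofoo note above).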


// ABSL_RETIRED_FLAG
//
@@ -301,10 +314,10 @@ ABSL_NAMESPACE_END
// unused.
// TODO(rogeeff): replace RETIRED_FLAGS with FLAGS once forward declarations of
// retired flags are cleaned up.
#define ABSL_RETIRED_FLAG(type, name, default_value, explanation) \
static absl::flags_internal::RetiredFlag<type> RETIRED_FLAGS_##name; \
ABSL_ATTRIBUTE_UNUSED static const auto RETIRED_FLAGS_REG_##name = \
(RETIRED_FLAGS_##name.Retire(#name), \
::absl::flags_internal::FlagRegistrarEmpty{})
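
A hedged usage sketch with a hypothetical retired flag: command lines that still pass --use_legacy_path keep parsing, but the value is ignored:

ABSL_RETIRED_FLAG(bool, use_legacy_path, false,
                  "Retired: the legacy code path has been removed.");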


#endif // ABSL_FLAGS_FLAG_H_

+ 43
- 38
CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h

@@ -19,50 +19,55 @@
#include "absl/base/config.h" #include "absl/base/config.h"
#include "absl/base/internal/fast_type_id.h" #include "absl/base/internal/fast_type_id.h"


namespace absl
{
ABSL_NAMESPACE_BEGIN
namespace flags_internal
{


// An alias for flag fast type id. This value identifies the flag value type
// similarly to typeid(T), without relying on RTTI being available. In most
// cases this id is enough to uniquely identify the flag's value type. In a few
// cases we'll have to resort to using actual RTTI implementation if it is
// available.
using FlagFastTypeId = absl::base_internal::FastTypeIdType;


// Options that control SetCommandLineOptionWithMode.
enum FlagSettingMode
{
// update the flag's value unconditionally (can call this multiple times).
SET_FLAGS_VALUE,
// update the flag's value, but *only if* it has not yet been updated
// with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef".
SET_FLAG_IF_DEFAULT,
// set the flag's default value to this. If the flag has not been updated
// yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef")
// change the flag's current value to the new default value as well.
SET_FLAGS_DEFAULT
};


// Options that control ParseFrom: Source of a value.
enum ValueSource
{
// Flag is being set by value specified on a command line.
kCommandLine,
// Flag is being set by value specified in the code.
kProgrammaticChange,
};


// Handle to FlagState objects. A specific flag state object restores the
// state of the flag that produced it via CommandLineFlag::SaveState().
class FlagStateInterface
{
public:
virtual ~FlagStateInterface();


// Restores the flag that originated this object to the saved state.
virtual void Restore() const = 0;
};


} // namespace flags_internal
ABSL_NAMESPACE_END
} // namespace absl


#endif // ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_

+ 860
- 737
CAPI/cpp/grpc/include/absl/flags/internal/flag.h
File diff suppressed because it is too large


Some files were not shown because too many files changed in this diff
