diff --git a/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h index e9b4733..865b6d1 100644 --- a/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h +++ b/CAPI/cpp/grpc/include/absl/algorithm/algorithm.h @@ -28,132 +28,135 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace algorithm_internal { - -// Performs comparisons with operator==, similar to C++14's `std::equal_to<>`. -struct EqualTo { - template - bool operator()(const T& a, const U& b) const { - return a == b; - } -}; - -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred pred, std::input_iterator_tag, - std::input_iterator_tag) { - while (true) { - if (first1 == last1) return first2 == last2; - if (first2 == last2) return false; - if (!pred(*first1, *first2)) return false; - ++first1; - ++first2; - } -} - -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, - std::random_access_iterator_tag) { - return (last1 - first1 == last2 - first2) && - std::equal(first1, last1, first2, std::forward(pred)); -} - -// When we are using our own internal predicate that just applies operator==, we -// forward to the non-predicate form of std::equal. This enables an optimization -// in libstdc++ that can result in std::memcmp being used for integer types. -template -bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, algorithm_internal::EqualTo /* unused */, - std::random_access_iterator_tag, - std::random_access_iterator_tag) { - return (last1 - first1 == last2 - first2) && - std::equal(first1, last1, first2); -} - -template -It RotateImpl(It first, It middle, It last, std::true_type) { - return std::rotate(first, middle, last); -} - -template -It RotateImpl(It first, It middle, It last, std::false_type) { - std::rotate(first, middle, last); - return std::next(first, std::distance(middle, last)); -} - -} // namespace algorithm_internal - -// equal() -// -// Compares the equality of two ranges specified by pairs of iterators, using -// the given predicate, returning true iff for each corresponding iterator i1 -// and i2 in the first and second range respectively, pred(*i1, *i2) == true -// -// This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) -// invocations of the predicate. Additionally, if InputIter1 and InputIter2 are -// both random-access iterators, and `last1` - `first1` != `last2` - `first2`, -// then the predicate is never invoked and the function returns false. -// -// This is a C++11-compatible implementation of C++14 `std::equal`. See -// https://en.cppreference.com/w/cpp/algorithm/equal for more information. -template -bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2, Pred&& pred) { - return algorithm_internal::EqualImpl( - first1, last1, first2, last2, std::forward(pred), - typename std::iterator_traits::iterator_category{}, - typename std::iterator_traits::iterator_category{}); -} - -// Overload of equal() that performs comparison of two ranges specified by pairs -// of iterators using operator==. 
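// Example (illustrative sketch, not from the original header; assumes two
// std::vector<int> values):
//
//   std::vector<int> v1 = {1, 2, 3};
//   std::vector<int> v2 = {1, 2, 3};
//   bool same = absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end());
//   // same == true; for random-access ranges of different lengths the result
//   // is false without invoking the element comparison.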
-template -bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, - InputIter2 last2) { - return absl::equal(first1, last1, first2, last2, - algorithm_internal::EqualTo{}); -} - -// linear_search() -// -// Performs a linear search for `value` using the iterator `first` up to -// but not including `last`, returning true if [`first`, `last`) contains an -// element equal to `value`. -// -// A linear search is of O(n) complexity which is guaranteed to make at most -// n = (`last` - `first`) comparisons. A linear search over short containers -// may be faster than a binary search, even when the container is sorted. -template -bool linear_search(InputIterator first, InputIterator last, - const EqualityComparable& value) { - return std::find(first, last, value) != last; -} - -// rotate() -// -// Performs a left rotation on a range of elements (`first`, `last`) such that -// `middle` is now the first element. `rotate()` returns an iterator pointing to -// the first element before rotation. This function is exactly the same as -// `std::rotate`, but fixes a bug in gcc -// <= 4.9 where `std::rotate` returns `void` instead of an iterator. -// -// The complexity of this algorithm is the same as that of `std::rotate`, but if -// `ForwardIterator` is not a random-access iterator, then `absl::rotate` -// performs an additional pass over the range to construct the return value. -template -ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, - ForwardIterator last) { - return algorithm_internal::RotateImpl( - first, middle, last, - std::is_same()); -} - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace algorithm_internal + { + + // Performs comparisons with operator==, similar to C++14's `std::equal_to<>`. + struct EqualTo + { + template + bool operator()(const T& a, const U& b) const + { + return a == b; + } + }; + + template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, std::input_iterator_tag, std::input_iterator_tag) + { + while (true) + { + if (first1 == last1) + return first2 == last2; + if (first2 == last2) + return false; + if (!pred(*first1, *first2)) + return false; + ++first1; + ++first2; + } + } + + template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, std::random_access_iterator_tag) + { + return (last1 - first1 == last2 - first2) && + std::equal(first1, last1, first2, std::forward(pred)); + } + + // When we are using our own internal predicate that just applies operator==, we + // forward to the non-predicate form of std::equal. This enables an optimization + // in libstdc++ that can result in std::memcmp being used for integer types. 
+ template + bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, algorithm_internal::EqualTo /* unused */, std::random_access_iterator_tag, std::random_access_iterator_tag) + { + return (last1 - first1 == last2 - first2) && + std::equal(first1, last1, first2); + } + + template + It RotateImpl(It first, It middle, It last, std::true_type) + { + return std::rotate(first, middle, last); + } + + template + It RotateImpl(It first, It middle, It last, std::false_type) + { + std::rotate(first, middle, last); + return std::next(first, std::distance(middle, last)); + } + + } // namespace algorithm_internal + + // equal() + // + // Compares the equality of two ranges specified by pairs of iterators, using + // the given predicate, returning true iff for each corresponding iterator i1 + // and i2 in the first and second range respectively, pred(*i1, *i2) == true + // + // This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) + // invocations of the predicate. Additionally, if InputIter1 and InputIter2 are + // both random-access iterators, and `last1` - `first1` != `last2` - `first2`, + // then the predicate is never invoked and the function returns false. + // + // This is a C++11-compatible implementation of C++14 `std::equal`. See + // https://en.cppreference.com/w/cpp/algorithm/equal for more information. + template + bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred) + { + return algorithm_internal::EqualImpl( + first1, last1, first2, last2, std::forward(pred), typename std::iterator_traits::iterator_category{}, typename std::iterator_traits::iterator_category{} + ); + } + + // Overload of equal() that performs comparison of two ranges specified by pairs + // of iterators using operator==. + template + bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2) + { + return absl::equal(first1, last1, first2, last2, algorithm_internal::EqualTo{}); + } + + // linear_search() + // + // Performs a linear search for `value` using the iterator `first` up to + // but not including `last`, returning true if [`first`, `last`) contains an + // element equal to `value`. + // + // A linear search is of O(n) complexity which is guaranteed to make at most + // n = (`last` - `first`) comparisons. A linear search over short containers + // may be faster than a binary search, even when the container is sorted. + template + bool linear_search(InputIterator first, InputIterator last, const EqualityComparable& value) + { + return std::find(first, last, value) != last; + } + + // rotate() + // + // Performs a left rotation on a range of elements (`first`, `last`) such that + // `middle` is now the first element. `rotate()` returns an iterator pointing to + // the first element before rotation. This function is exactly the same as + // `std::rotate`, but fixes a bug in gcc + // <= 4.9 where `std::rotate` returns `void` instead of an iterator. + // + // The complexity of this algorithm is the same as that of `std::rotate`, but if + // `ForwardIterator` is not a random-access iterator, then `absl::rotate` + // performs an additional pass over the range to construct the return value. 
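// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {1, 2, 3, 4, 5};
//   auto it = absl::rotate(v.begin(), v.begin() + 2, v.end());
//   // v is now {3, 4, 5, 1, 2}; `it` points at the element 1, i.e. the
//   // element that was first before the rotation.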
+ template + ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last) + { + return algorithm_internal::RotateImpl( + first, middle, last, std::is_same() + ); + } + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_ALGORITHM_ALGORITHM_H_ diff --git a/CAPI/cpp/grpc/include/absl/algorithm/container.h b/CAPI/cpp/grpc/include/absl/algorithm/container.h index 26b1952..3ffb238 100644 --- a/CAPI/cpp/grpc/include/absl/algorithm/container.h +++ b/CAPI/cpp/grpc/include/absl/algorithm/container.h @@ -54,1721 +54,1570 @@ #include "absl/base/macros.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_algorithm_internal { - -// NOTE: it is important to defer to ADL lookup for building with C++ modules, -// especially for headers like which are not visible from this file -// but specialize std::begin and std::end. -using std::begin; -using std::end; - -// The type of the iterator given by begin(c) (possibly std::begin(c)). -// ContainerIter> gives vector::const_iterator, -// while ContainerIter> gives vector::iterator. -template -using ContainerIter = decltype(begin(std::declval())); - -// An MSVC bug involving template parameter substitution requires us to use -// decltype() here instead of just std::pair. -template -using ContainerIterPairType = - decltype(std::make_pair(ContainerIter(), ContainerIter())); - -template -using ContainerDifferenceType = - decltype(std::distance(std::declval>(), - std::declval>())); - -template -using ContainerPointerType = - typename std::iterator_traits>::pointer; - -// container_algorithm_internal::c_begin and -// container_algorithm_internal::c_end are abbreviations for proper ADL -// lookup of std::begin and std::end, i.e. -// using std::begin; -// using std::end; -// std::foo(begin(c), end(c)); -// becomes -// std::foo(container_algorithm_internal::begin(c), -// container_algorithm_internal::end(c)); -// These are meant for internal use only. - -template -ContainerIter c_begin(C& c) { return begin(c); } - -template -ContainerIter c_end(C& c) { return end(c); } - -template -struct IsUnorderedContainer : std::false_type {}; - -template -struct IsUnorderedContainer< - std::unordered_map> : std::true_type {}; - -template -struct IsUnorderedContainer> - : std::true_type {}; - -// container_algorithm_internal::c_size. It is meant for internal use only. - -template -auto c_size(C& c) -> decltype(c.size()) { - return c.size(); -} - -template -constexpr std::size_t c_size(T (&)[N]) { - return N; -} - -} // namespace container_algorithm_internal - -// PUBLIC API - -//------------------------------------------------------------------------------ -// Abseil algorithm.h functions -//------------------------------------------------------------------------------ - -// c_linear_search() -// -// Container-based version of absl::linear_search() for performing a linear -// search within a container. -template -bool c_linear_search(const C& c, EqualityComparable&& value) { - return linear_search(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(value)); -} - -//------------------------------------------------------------------------------ -// algorithms -//------------------------------------------------------------------------------ - -// c_distance() -// -// Container-based version of the `std::distance()` function to -// return the number of elements within a container. 
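// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {10, 20, 30};
//   auto n = absl::c_distance(v);  // n == 3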
-template -container_algorithm_internal::ContainerDifferenceType c_distance( - const C& c) { - return std::distance(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -//------------------------------------------------------------------------------ -// Non-modifying sequence operations -//------------------------------------------------------------------------------ - -// c_all_of() -// -// Container-based version of the `std::all_of()` function to -// test if all elements within a container satisfy a condition. -template -bool c_all_of(const C& c, Pred&& pred) { - return std::all_of(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_any_of() -// -// Container-based version of the `std::any_of()` function to -// test if any element in a container fulfills a condition. -template -bool c_any_of(const C& c, Pred&& pred) { - return std::any_of(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_none_of() -// -// Container-based version of the `std::none_of()` function to -// test if no elements in a container fulfill a condition. -template -bool c_none_of(const C& c, Pred&& pred) { - return std::none_of(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_for_each() -// -// Container-based version of the `std::for_each()` function to -// apply a function to a container's elements. -template -decay_t c_for_each(C&& c, Function&& f) { - return std::for_each(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(f)); -} - -// c_find() -// -// Container-based version of the `std::find()` function to find -// the first element containing the passed value within a container value. -template -container_algorithm_internal::ContainerIter c_find(C& c, T&& value) { - return std::find(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(value)); -} - -// c_find_if() -// -// Container-based version of the `std::find_if()` function to find -// the first element in a container matching the given condition. -template -container_algorithm_internal::ContainerIter c_find_if(C& c, Pred&& pred) { - return std::find_if(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_find_if_not() -// -// Container-based version of the `std::find_if_not()` function to -// find the first element in a container not matching the given condition. -template -container_algorithm_internal::ContainerIter c_find_if_not(C& c, - Pred&& pred) { - return std::find_if_not(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_find_end() -// -// Container-based version of the `std::find_end()` function to -// find the last subsequence within a container. -template -container_algorithm_internal::ContainerIter c_find_end( - Sequence1& sequence, Sequence2& subsequence) { - return std::find_end(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(subsequence), - container_algorithm_internal::c_end(subsequence)); -} - -// Overload of c_find_end() for using a predicate evaluation other than `==` as -// the function's test condition. 
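// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> haystack = {1, 2, 3, 1, 2, 3};
//   std::vector<int> needle = {1, 2};
//   auto it = absl::c_find_end(haystack, needle);
//   // `it` points at haystack[3], the start of the last {1, 2} occurrence.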
-template -container_algorithm_internal::ContainerIter c_find_end( - Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { - return std::find_end(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(subsequence), - container_algorithm_internal::c_end(subsequence), - std::forward(pred)); -} - -// c_find_first_of() -// -// Container-based version of the `std::find_first_of()` function to -// find the first element within the container that is also within the options -// container. -template -container_algorithm_internal::ContainerIter c_find_first_of(C1& container, - C2& options) { - return std::find_first_of(container_algorithm_internal::c_begin(container), - container_algorithm_internal::c_end(container), - container_algorithm_internal::c_begin(options), - container_algorithm_internal::c_end(options)); -} - -// Overload of c_find_first_of() for using a predicate evaluation other than -// `==` as the function's test condition. -template -container_algorithm_internal::ContainerIter c_find_first_of( - C1& container, C2& options, BinaryPredicate&& pred) { - return std::find_first_of(container_algorithm_internal::c_begin(container), - container_algorithm_internal::c_end(container), - container_algorithm_internal::c_begin(options), - container_algorithm_internal::c_end(options), - std::forward(pred)); -} - -// c_adjacent_find() -// -// Container-based version of the `std::adjacent_find()` function to -// find equal adjacent elements within a container. -template -container_algorithm_internal::ContainerIter c_adjacent_find( - Sequence& sequence) { - return std::adjacent_find(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_adjacent_find() for using a predicate evaluation other than -// `==` as the function's test condition. -template -container_algorithm_internal::ContainerIter c_adjacent_find( - Sequence& sequence, BinaryPredicate&& pred) { - return std::adjacent_find(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(pred)); -} - -// c_count() -// -// Container-based version of the `std::count()` function to count -// values that match within a container. -template -container_algorithm_internal::ContainerDifferenceType c_count( - const C& c, T&& value) { - return std::count(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(value)); -} - -// c_count_if() -// -// Container-based version of the `std::count_if()` function to -// count values matching a condition within a container. -template -container_algorithm_internal::ContainerDifferenceType c_count_if( - const C& c, Pred&& pred) { - return std::count_if(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_mismatch() -// -// Container-based version of the `std::mismatch()` function to -// return the first element where two ordered containers differ. Applies `==` to -// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
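// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> a = {1, 2, 3, 4};
//   std::vector<int> b = {1, 2, 9, 4};
//   auto diff = absl::c_mismatch(a, b);
//   // diff.first points at a[2] (== 3) and diff.second at b[2] (== 9),
//   // the first position at which the two containers differ.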
-template -container_algorithm_internal::ContainerIterPairType -c_mismatch(C1& c1, C2& c2) { - auto first1 = container_algorithm_internal::c_begin(c1); - auto last1 = container_algorithm_internal::c_end(c1); - auto first2 = container_algorithm_internal::c_begin(c2); - auto last2 = container_algorithm_internal::c_end(c2); - - for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { - // Negates equality because Cpp17EqualityComparable doesn't require clients - // to overload both `operator==` and `operator!=`. - if (!(*first1 == *first2)) { - break; - } - } - - return std::make_pair(first1, first2); -} - -// Overload of c_mismatch() for using a predicate evaluation other than `==` as -// the function's test condition. Applies `pred`to the first N elements of `c1` -// and `c2`, where N = min(size(c1), size(c2)). -template -container_algorithm_internal::ContainerIterPairType -c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) { - auto first1 = container_algorithm_internal::c_begin(c1); - auto last1 = container_algorithm_internal::c_end(c1); - auto first2 = container_algorithm_internal::c_begin(c2); - auto last2 = container_algorithm_internal::c_end(c2); - - for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { - if (!pred(*first1, *first2)) { - break; - } - } - - return std::make_pair(first1, first2); -} - -// c_equal() -// -// Container-based version of the `std::equal()` function to -// test whether two containers are equal. -// -// NOTE: the semantics of c_equal() are slightly different than those of -// equal(): while the latter iterates over the second container only up to the -// size of the first container, c_equal() also checks whether the container -// sizes are equal. This better matches expectations about c_equal() based on -// its signature. -// -// Example: -// vector v1 = <1, 2, 3>; -// vector v2 = <1, 2, 3, 4>; -// equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true -// c_equal(v1, v2) returns false - -template -bool c_equal(const C1& c1, const C2& c2) { - return ((container_algorithm_internal::c_size(c1) == - container_algorithm_internal::c_size(c2)) && - std::equal(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2))); -} - -// Overload of c_equal() for using a predicate evaluation other than `==` as -// the function's test condition. -template -bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) { - return ((container_algorithm_internal::c_size(c1) == - container_algorithm_internal::c_size(c2)) && - std::equal(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - std::forward(pred))); -} - -// c_is_permutation() -// -// Container-based version of the `std::is_permutation()` function -// to test whether a container is a permutation of another. -template -bool c_is_permutation(const C1& c1, const C2& c2) { - using std::begin; - using std::end; - return c1.size() == c2.size() && - std::is_permutation(begin(c1), end(c1), begin(c2)); -} - -// Overload of c_is_permutation() for using a predicate evaluation other than -// `==` as the function's test condition. 
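// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> a = {1, 2, 3};
//   std::vector<int> b = {3, 1, 2};
//   bool perm = absl::c_is_permutation(a, b);  // true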
-template -bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) { - using std::begin; - using std::end; - return c1.size() == c2.size() && - std::is_permutation(begin(c1), end(c1), begin(c2), - std::forward(pred)); -} - -// c_search() -// -// Container-based version of the `std::search()` function to search -// a container for a subsequence. -template -container_algorithm_internal::ContainerIter c_search( - Sequence1& sequence, Sequence2& subsequence) { - return std::search(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(subsequence), - container_algorithm_internal::c_end(subsequence)); -} - -// Overload of c_search() for using a predicate evaluation other than -// `==` as the function's test condition. -template -container_algorithm_internal::ContainerIter c_search( - Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { - return std::search(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(subsequence), - container_algorithm_internal::c_end(subsequence), - std::forward(pred)); -} - -// c_search_n() -// -// Container-based version of the `std::search_n()` function to -// search a container for the first sequence of N elements. -template -container_algorithm_internal::ContainerIter c_search_n( - Sequence& sequence, Size count, T&& value) { - return std::search_n(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), count, - std::forward(value)); -} - -// Overload of c_search_n() for using a predicate evaluation other than -// `==` as the function's test condition. -template -container_algorithm_internal::ContainerIter c_search_n( - Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) { - return std::search_n(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), count, - std::forward(value), - std::forward(pred)); -} - -//------------------------------------------------------------------------------ -// Modifying sequence operations -//------------------------------------------------------------------------------ - -// c_copy() -// -// Container-based version of the `std::copy()` function to copy a -// container's elements into an iterator. -template -OutputIterator c_copy(const InputSequence& input, OutputIterator output) { - return std::copy(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), output); -} - -// c_copy_n() -// -// Container-based version of the `std::copy_n()` function to copy a -// container's first N elements into an iterator. -template -OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { - return std::copy_n(container_algorithm_internal::c_begin(input), n, output); -} - -// c_copy_if() -// -// Container-based version of the `std::copy_if()` function to copy -// a container's elements satisfying some condition into an iterator. -template -OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, - Pred&& pred) { - return std::copy_if(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), output, - std::forward(pred)); -} - -// c_copy_backward() -// -// Container-based version of the `std::copy_backward()` function to -// copy a container's elements in reverse order into an iterator. 
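// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> src = {1, 2, 3};
//   std::vector<int> dst(5, 0);
//   auto it = absl::c_copy_backward(src, dst.end());
//   // dst is now {0, 0, 1, 2, 3}; `it` points at dst.begin() + 2, the
//   // first element of the copied range.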
-template -BidirectionalIterator c_copy_backward(const C& src, - BidirectionalIterator dest) { - return std::copy_backward(container_algorithm_internal::c_begin(src), - container_algorithm_internal::c_end(src), dest); -} - -// c_move() -// -// Container-based version of the `std::move()` function to move -// a container's elements into an iterator. -template -OutputIterator c_move(C&& src, OutputIterator dest) { - return std::move(container_algorithm_internal::c_begin(src), - container_algorithm_internal::c_end(src), dest); -} - -// c_move_backward() -// -// Container-based version of the `std::move_backward()` function to -// move a container's elements into an iterator in reverse order. -template -BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { - return std::move_backward(container_algorithm_internal::c_begin(src), - container_algorithm_internal::c_end(src), dest); -} - -// c_swap_ranges() -// -// Container-based version of the `std::swap_ranges()` function to -// swap a container's elements with another container's elements. Swaps the -// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). -template -container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { - auto first1 = container_algorithm_internal::c_begin(c1); - auto last1 = container_algorithm_internal::c_end(c1); - auto first2 = container_algorithm_internal::c_begin(c2); - auto last2 = container_algorithm_internal::c_end(c2); - - using std::swap; - for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { - swap(*first1, *first2); - } - return first2; -} - -// c_transform() -// -// Container-based version of the `std::transform()` function to -// transform a container's elements using the unary operation, storing the -// result in an iterator pointing to the last transformed element in the output -// range. -template -OutputIterator c_transform(const InputSequence& input, OutputIterator output, - UnaryOp&& unary_op) { - return std::transform(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), output, - std::forward(unary_op)); -} - -// Overload of c_transform() for performing a transformation using a binary -// predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, -// where N = min(size(c1), size(c2)). -template -OutputIterator c_transform(const InputSequence1& input1, - const InputSequence2& input2, OutputIterator output, - BinaryOp&& binary_op) { - auto first1 = container_algorithm_internal::c_begin(input1); - auto last1 = container_algorithm_internal::c_end(input1); - auto first2 = container_algorithm_internal::c_begin(input2); - auto last2 = container_algorithm_internal::c_end(input2); - for (; first1 != last1 && first2 != last2; - ++first1, (void)++first2, ++output) { - *output = binary_op(*first1, *first2); - } - - return output; -} - -// c_replace() -// -// Container-based version of the `std::replace()` function to -// replace a container's elements of some value with a new value. The container -// is modified in place. -template -void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { - std::replace(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), old_value, - new_value); -} - -// c_replace_if() -// -// Container-based version of the `std::replace_if()` function to -// replace a container's elements of some value with a new value based on some -// condition. The container is modified in place. 
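// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {1, -2, 3, -4};
//   absl::c_replace_if(v, [](int x) { return x < 0; }, 0);
//   // v is now {1, 0, 3, 0}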
-template -void c_replace_if(C& c, Pred&& pred, T&& new_value) { - std::replace_if(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred), std::forward(new_value)); -} - -// c_replace_copy() -// -// Container-based version of the `std::replace_copy()` function to -// replace a container's elements of some value with a new value and return the -// results within an iterator. -template -OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, - T&& new_value) { - return std::replace_copy(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result, - std::forward(old_value), - std::forward(new_value)); -} - -// c_replace_copy_if() -// -// Container-based version of the `std::replace_copy_if()` function -// to replace a container's elements of some value with a new value based on -// some condition, and return the results within an iterator. -template -OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, - T&& new_value) { - return std::replace_copy_if(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result, - std::forward(pred), - std::forward(new_value)); -} - -// c_fill() -// -// Container-based version of the `std::fill()` function to fill a -// container with some value. -template -void c_fill(C& c, T&& value) { - std::fill(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), std::forward(value)); -} - -// c_fill_n() -// -// Container-based version of the `std::fill_n()` function to fill -// the first N elements in a container with some value. -template -void c_fill_n(C& c, Size n, T&& value) { - std::fill_n(container_algorithm_internal::c_begin(c), n, - std::forward(value)); -} - -// c_generate() -// -// Container-based version of the `std::generate()` function to -// assign a container's elements to the values provided by the given generator. -template -void c_generate(C& c, Generator&& gen) { - std::generate(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(gen)); -} - -// c_generate_n() -// -// Container-based version of the `std::generate_n()` function to -// assign a container's first N elements to the values provided by the given -// generator. -template -container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, - Generator&& gen) { - return std::generate_n(container_algorithm_internal::c_begin(c), n, - std::forward(gen)); -} - -// Note: `c_xx()` container versions for `remove()`, `remove_if()`, -// and `unique()` are omitted, because it's not clear whether or not such -// functions should call erase on their supplied sequences afterwards. Either -// behavior would be surprising for a different set of users. - -// c_remove_copy() -// -// Container-based version of the `std::remove_copy()` function to -// copy a container's elements while removing any elements matching the given -// `value`. -template -OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) { - return std::remove_copy(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result, - std::forward(value)); -} - -// c_remove_copy_if() -// -// Container-based version of the `std::remove_copy_if()` function -// to copy a container's elements while removing any elements matching the given -// condition. 
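// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {1, -2, 3, -4};
//   std::vector<int> out;
//   absl::c_remove_copy_if(v, std::back_inserter(out),
//                          [](int x) { return x < 0; });
//   // out == {1, 3}; `v` itself is left unchanged.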
-template -OutputIterator c_remove_copy_if(const C& c, OutputIterator result, - Pred&& pred) { - return std::remove_copy_if(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result, - std::forward(pred)); -} - -// c_unique_copy() -// -// Container-based version of the `std::unique_copy()` function to -// copy a container's elements while removing any elements containing duplicate -// values. -template -OutputIterator c_unique_copy(const C& c, OutputIterator result) { - return std::unique_copy(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result); -} - -// Overload of c_unique_copy() for using a predicate evaluation other than -// `==` for comparing uniqueness of the element values. -template -OutputIterator c_unique_copy(const C& c, OutputIterator result, - BinaryPredicate&& pred) { - return std::unique_copy(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), result, - std::forward(pred)); -} - -// c_reverse() -// -// Container-based version of the `std::reverse()` function to -// reverse a container's elements. -template -void c_reverse(Sequence& sequence) { - std::reverse(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// c_reverse_copy() -// -// Container-based version of the `std::reverse()` function to -// reverse a container's elements and write them to an iterator range. -template -OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { - return std::reverse_copy(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - result); -} - -// c_rotate() -// -// Container-based version of the `std::rotate()` function to -// shift a container's elements leftward such that the `middle` element becomes -// the first element in the container. -template > -Iterator c_rotate(C& sequence, Iterator middle) { - return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, - container_algorithm_internal::c_end(sequence)); -} - -// c_rotate_copy() -// -// Container-based version of the `std::rotate_copy()` function to -// shift a container's elements leftward such that the `middle` element becomes -// the first element in a new iterator range. -template -OutputIterator c_rotate_copy( - const C& sequence, - container_algorithm_internal::ContainerIter middle, - OutputIterator result) { - return std::rotate_copy(container_algorithm_internal::c_begin(sequence), - middle, container_algorithm_internal::c_end(sequence), - result); -} - -// c_shuffle() -// -// Container-based version of the `std::shuffle()` function to -// randomly shuffle elements within the container using a `gen()` uniform random -// number generator. -template -void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) { - std::shuffle(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(gen)); -} - -//------------------------------------------------------------------------------ -// Partition functions -//------------------------------------------------------------------------------ - -// c_is_partitioned() -// -// Container-based version of the `std::is_partitioned()` function -// to test whether all elements in the container for which `pred` returns `true` -// precede those for which `pred` is `false`. 
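// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {2, 4, 6, 1, 3};
//   bool partitioned =
//       absl::c_is_partitioned(v, [](int x) { return x % 2 == 0; });
//   // partitioned == true: all even elements precede all odd elements.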
-template -bool c_is_partitioned(const C& c, Pred&& pred) { - return std::is_partitioned(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_partition() -// -// Container-based version of the `std::partition()` function -// to rearrange all elements in a container in such a way that all elements for -// which `pred` returns `true` precede all those for which it returns `false`, -// returning an iterator to the first element of the second group. -template -container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) { - return std::partition(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_stable_partition() -// -// Container-based version of the `std::stable_partition()` function -// to rearrange all elements in a container in such a way that all elements for -// which `pred` returns `true` precede all those for which it returns `false`, -// preserving the relative ordering between the two groups. The function returns -// an iterator to the first element of the second group. -template -container_algorithm_internal::ContainerIter c_stable_partition(C& c, - Pred&& pred) { - return std::stable_partition(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -// c_partition_copy() -// -// Container-based version of the `std::partition_copy()` function -// to partition a container's elements and return them into two iterators: one -// for which `pred` returns `true`, and one for which `pred` returns `false.` - -template -std::pair c_partition_copy( - const C& c, OutputIterator1 out_true, OutputIterator2 out_false, - Pred&& pred) { - return std::partition_copy(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), out_true, - out_false, std::forward(pred)); -} - -// c_partition_point() -// -// Container-based version of the `std::partition_point()` function -// to return the first element of an already partitioned container for which -// the given `pred` is not `true`. -template -container_algorithm_internal::ContainerIter c_partition_point(C& c, - Pred&& pred) { - return std::partition_point(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(pred)); -} - -//------------------------------------------------------------------------------ -// Sorting functions -//------------------------------------------------------------------------------ - -// c_sort() -// -// Container-based version of the `std::sort()` function -// to sort elements in ascending order of their values. -template -void c_sort(C& c) { - std::sort(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_sort() for performing a `comp` comparison other than the -// default `operator<`. -template -void c_sort(C& c, LessThan&& comp) { - std::sort(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_stable_sort() -// -// Container-based version of the `std::stable_sort()` function -// to sort elements in ascending order of their values, preserving the order -// of equivalents. -template -void c_stable_sort(C& c) { - std::stable_sort(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_stable_sort() for performing a `comp` comparison other than the -// default `operator<`. 
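// Example (illustrative sketch, not from the original header):
//
//   std::vector<std::pair<int, char>> v = {{2, 'a'}, {1, 'b'}, {2, 'c'}};
//   absl::c_stable_sort(v, [](const std::pair<int, char>& x,
//                             const std::pair<int, char>& y) {
//     return x.first < y.first;
//   });
//   // v == {{1, 'b'}, {2, 'a'}, {2, 'c'}}: elements with equal keys keep
//   // their original relative order.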
-template -void c_stable_sort(C& c, LessThan&& comp) { - std::stable_sort(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_is_sorted() -// -// Container-based version of the `std::is_sorted()` function -// to evaluate whether the given container is sorted in ascending order. -template -bool c_is_sorted(const C& c) { - return std::is_sorted(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// c_is_sorted() overload for performing a `comp` comparison other than the -// default `operator<`. -template -bool c_is_sorted(const C& c, LessThan&& comp) { - return std::is_sorted(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_partial_sort() -// -// Container-based version of the `std::partial_sort()` function -// to rearrange elements within a container such that elements before `middle` -// are sorted in ascending order. -template -void c_partial_sort( - RandomAccessContainer& sequence, - container_algorithm_internal::ContainerIter middle) { - std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_partial_sort() for performing a `comp` comparison other than -// the default `operator<`. -template -void c_partial_sort( - RandomAccessContainer& sequence, - container_algorithm_internal::ContainerIter middle, - LessThan&& comp) { - std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_partial_sort_copy() -// -// Container-based version of the `std::partial_sort_copy()` -// function to sort the elements in the given range `result` within the larger -// `sequence` in ascending order (and using `result` as the output parameter). -// At most min(result.last - result.first, sequence.last - sequence.first) -// elements from the sequence will be stored in the result. -template -container_algorithm_internal::ContainerIter -c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { - return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(result), - container_algorithm_internal::c_end(result)); -} - -// Overload of c_partial_sort_copy() for performing a `comp` comparison other -// than the default `operator<`. -template -container_algorithm_internal::ContainerIter -c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, - LessThan&& comp) { - return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - container_algorithm_internal::c_begin(result), - container_algorithm_internal::c_end(result), - std::forward(comp)); -} - -// c_is_sorted_until() -// -// Container-based version of the `std::is_sorted_until()` function -// to return the first element within a container that is not sorted in -// ascending order as an iterator. -template -container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { - return std::is_sorted_until(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_is_sorted_until() for performing a `comp` comparison other than -// the default `operator<`. 
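// Example (illustrative sketch, not from the original header):
//
//   std::vector<int> v = {1, 2, 4, 3, 5};
//   auto it = absl::c_is_sorted_until(v);
//   // `it` points at the 3, the first element that breaks ascending order.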
-template -container_algorithm_internal::ContainerIter c_is_sorted_until( - C& c, LessThan&& comp) { - return std::is_sorted_until(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_nth_element() -// -// Container-based version of the `std::nth_element()` function -// to rearrange the elements within a container such that the `nth` element -// would be in that position in an ordered sequence; other elements may be in -// any order, except that all preceding `nth` will be less than that element, -// and all following `nth` will be greater than that element. -template -void c_nth_element( - RandomAccessContainer& sequence, - container_algorithm_internal::ContainerIter nth) { - std::nth_element(container_algorithm_internal::c_begin(sequence), nth, - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_nth_element() for performing a `comp` comparison other than -// the default `operator<`. -template -void c_nth_element( - RandomAccessContainer& sequence, - container_algorithm_internal::ContainerIter nth, - LessThan&& comp) { - std::nth_element(container_algorithm_internal::c_begin(sequence), nth, - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// Binary Search -//------------------------------------------------------------------------------ - -// c_lower_bound() -// -// Container-based version of the `std::lower_bound()` function -// to return an iterator pointing to the first element in a sorted container -// which does not compare less than `value`. -template -container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, T&& value) { - return std::lower_bound(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value)); -} - -// Overload of c_lower_bound() for performing a `comp` comparison other than -// the default `operator<`. -template -container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, T&& value, LessThan&& comp) { - return std::lower_bound(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); -} - -// c_upper_bound() -// -// Container-based version of the `std::upper_bound()` function -// to return an iterator pointing to the first element in a sorted container -// which is greater than `value`. -template -container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, T&& value) { - return std::upper_bound(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value)); -} - -// Overload of c_upper_bound() for performing a `comp` comparison other than -// the default `operator<`. -template -container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, T&& value, LessThan&& comp) { - return std::upper_bound(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); -} - -// c_equal_range() -// -// Container-based version of the `std::equal_range()` function -// to return an iterator pair pointing to the first and last elements in a -// sorted container which compare equal to `value`. 
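// Example (illustrative sketch, not from the original header; the input must
// already be sorted):
//
//   std::vector<int> v = {1, 2, 2, 2, 3};
//   auto range = absl::c_equal_range(v, 2);
//   // range.first points at the first 2 and range.second at the 3, so
//   // std::distance(range.first, range.second) == 3.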
-template -container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, T&& value) { - return std::equal_range(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value)); -} - -// Overload of c_equal_range() for performing a `comp` comparison other than -// the default `operator<`. -template -container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) { - return std::equal_range(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value), std::forward(comp)); -} - -// c_binary_search() -// -// Container-based version of the `std::binary_search()` function -// to test if any element in the sorted container contains a value equivalent to -// 'value'. -template -bool c_binary_search(Sequence&& sequence, T&& value) { - return std::binary_search(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value)); -} - -// Overload of c_binary_search() for performing a `comp` comparison other than -// the default `operator<`. -template -bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) { - return std::binary_search(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value), - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// Merge functions -//------------------------------------------------------------------------------ - -// c_merge() -// -// Container-based version of the `std::merge()` function -// to merge two sorted containers into a single sorted iterator. -template -OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { - return std::merge(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), result); -} - -// Overload of c_merge() for performing a `comp` comparison other than -// the default `operator<`. -template -OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, - LessThan&& comp) { - return std::merge(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), result, - std::forward(comp)); -} - -// c_inplace_merge() -// -// Container-based version of the `std::inplace_merge()` function -// to merge a supplied iterator `middle` into a container. -template -void c_inplace_merge(C& c, - container_algorithm_internal::ContainerIter middle) { - std::inplace_merge(container_algorithm_internal::c_begin(c), middle, - container_algorithm_internal::c_end(c)); -} - -// Overload of c_inplace_merge() for performing a merge using a `comp` other -// than `operator<`. -template -void c_inplace_merge(C& c, - container_algorithm_internal::ContainerIter middle, - LessThan&& comp) { - std::inplace_merge(container_algorithm_internal::c_begin(c), middle, - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_includes() -// -// Container-based version of the `std::includes()` function -// to test whether a sorted container `c1` entirely contains another sorted -// container `c2`. 
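// Example (illustrative sketch, not from the original header; both inputs
// must already be sorted):
//
//   std::vector<int> a = {1, 2, 3, 4, 5};
//   std::vector<int> b = {2, 4};
//   bool contained = absl::c_includes(a, b);  // true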
-template -bool c_includes(const C1& c1, const C2& c2) { - return std::includes(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2)); -} - -// Overload of c_includes() for performing a merge using a `comp` other than -// `operator<`. -template -bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { - return std::includes(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), - std::forward(comp)); -} - -// c_set_union() -// -// Container-based version of the `std::set_union()` function -// to return an iterator containing the union of two containers; duplicate -// values are not copied into the output. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { - return std::set_union(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output); -} - -// Overload of c_set_union() for performing a merge using a `comp` other than -// `operator<`. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, - LessThan&& comp) { - return std::set_union(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output, - std::forward(comp)); -} - -// c_set_intersection() -// -// Container-based version of the `std::set_intersection()` function -// to return an iterator containing the intersection of two sorted containers. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output) { - // In debug builds, ensure that both containers are sorted with respect to the - // default comparator. std::set_intersection requires the containers be sorted - // using operator<. - assert(absl::c_is_sorted(c1)); - assert(absl::c_is_sorted(c2)); - return std::set_intersection(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output); -} - -// Overload of c_set_intersection() for performing a merge using a `comp` other -// than `operator<`. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { - // In debug builds, ensure that both containers are sorted with respect to the - // default comparator. std::set_intersection requires the containers be sorted - // using the same comparator. 
- assert(absl::c_is_sorted(c1, comp)); - assert(absl::c_is_sorted(c2, comp)); - return std::set_intersection(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output, - std::forward(comp)); -} - -// c_set_difference() -// -// Container-based version of the `std::set_difference()` function -// to return an iterator containing elements present in the first container but -// not in the second. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output) { - return std::set_difference(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output); -} - -// Overload of c_set_difference() for performing a merge using a `comp` other -// than `operator<`. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { - return std::set_difference(container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output, - std::forward(comp)); -} - -// c_set_symmetric_difference() -// -// Container-based version of the `std::set_symmetric_difference()` -// function to return an iterator containing elements present in either one -// container or the other, but not both. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, - OutputIterator output) { - return std::set_symmetric_difference( - container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output); -} - -// Overload of c_set_symmetric_difference() for performing a merge using a -// `comp` other than `operator<`. -template ::value, - void>::type, - typename = typename std::enable_if< - !container_algorithm_internal::IsUnorderedContainer::value, - void>::type> -OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, - OutputIterator output, - LessThan&& comp) { - return std::set_symmetric_difference( - container_algorithm_internal::c_begin(c1), - container_algorithm_internal::c_end(c1), - container_algorithm_internal::c_begin(c2), - container_algorithm_internal::c_end(c2), output, - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// Heap functions -//------------------------------------------------------------------------------ - -// c_push_heap() -// -// Container-based version of the `std::push_heap()` function -// to push a value onto a container heap. -template -void c_push_heap(RandomAccessContainer& sequence) { - std::push_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_push_heap() for performing a push operation on a heap using a -// `comp` other than `operator<`. 
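// Example (illustrative sketch, not from the original header; uses
// std::greater<int> to maintain a min-heap):
//
//   std::vector<int> v = {3, 1, 4, 1, 5};
//   absl::c_make_heap(v, std::greater<int>());
//   v.push_back(0);
//   absl::c_push_heap(v, std::greater<int>());
//   // v.front() == 0, the smallest element.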
-template -void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { - std::push_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_pop_heap() -// -// Container-based version of the `std::pop_heap()` function -// to pop a value from a heap container. -template -void c_pop_heap(RandomAccessContainer& sequence) { - std::pop_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_pop_heap() for performing a pop operation on a heap using a -// `comp` other than `operator<`. -template -void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { - std::pop_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_make_heap() -// -// Container-based version of the `std::make_heap()` function -// to make a container a heap. -template -void c_make_heap(RandomAccessContainer& sequence) { - std::make_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_make_heap() for performing heap comparisons using a -// `comp` other than `operator<` -template -void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { - std::make_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_sort_heap() -// -// Container-based version of the `std::sort_heap()` function -// to sort a heap into ascending order (after which it is no longer a heap). -template -void c_sort_heap(RandomAccessContainer& sequence) { - std::sort_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_sort_heap() for performing heap comparisons using a -// `comp` other than `operator<` -template -void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { - std::sort_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_is_heap() -// -// Container-based version of the `std::is_heap()` function -// to check whether the given container is a heap. -template -bool c_is_heap(const RandomAccessContainer& sequence) { - return std::is_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_is_heap() for performing heap comparisons using a -// `comp` other than `operator<` -template -bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { - return std::is_heap(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_is_heap_until() -// -// Container-based version of the `std::is_heap_until()` function -// to find the first element in a given container which is not in heap order. 
-template -container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence) { - return std::is_heap_until(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_is_heap_until() for performing heap comparisons using a -// `comp` other than `operator<` -template -container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { - return std::is_heap_until(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// Min/max -//------------------------------------------------------------------------------ - -// c_min_element() -// -// Container-based version of the `std::min_element()` function -// to return an iterator pointing to the element with the smallest value, using -// `operator<` to make the comparisons. -template -container_algorithm_internal::ContainerIter c_min_element( - Sequence& sequence) { - return std::min_element(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_min_element() for performing a `comp` comparison other than -// `operator<`. -template -container_algorithm_internal::ContainerIter c_min_element( - Sequence& sequence, LessThan&& comp) { - return std::min_element(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_max_element() -// -// Container-based version of the `std::max_element()` function -// to return an iterator pointing to the element with the largest value, using -// `operator<` to make the comparisons. -template -container_algorithm_internal::ContainerIter c_max_element( - Sequence& sequence) { - return std::max_element(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence)); -} - -// Overload of c_max_element() for performing a `comp` comparison other than -// `operator<`. -template -container_algorithm_internal::ContainerIter c_max_element( - Sequence& sequence, LessThan&& comp) { - return std::max_element(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(comp)); -} - -// c_minmax_element() -// -// Container-based version of the `std::minmax_element()` function -// to return a pair of iterators pointing to the elements containing the -// smallest and largest values, respectively, using `operator<` to make the -// comparisons. -template -container_algorithm_internal::ContainerIterPairType -c_minmax_element(C& c) { - return std::minmax_element(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_minmax_element() for performing `comp` comparisons other than -// `operator<`. -template -container_algorithm_internal::ContainerIterPairType -c_minmax_element(C& c, LessThan&& comp) { - return std::minmax_element(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// Lexicographical Comparisons -//------------------------------------------------------------------------------ - -// c_lexicographical_compare() -// -// Container-based version of the `std::lexicographical_compare()` -// function to lexicographically compare (e.g. 
sort words alphabetically) two -// container sequences. The comparison is performed using `operator<`. Note -// that capital letters ("A-Z") have ASCII values less than lowercase letters -// ("a-z"). -template -bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) { - return std::lexicographical_compare( - container_algorithm_internal::c_begin(sequence1), - container_algorithm_internal::c_end(sequence1), - container_algorithm_internal::c_begin(sequence2), - container_algorithm_internal::c_end(sequence2)); -} - -// Overload of c_lexicographical_compare() for performing a lexicographical -// comparison using a `comp` operator instead of `operator<`. -template -bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, - LessThan&& comp) { - return std::lexicographical_compare( - container_algorithm_internal::c_begin(sequence1), - container_algorithm_internal::c_end(sequence1), - container_algorithm_internal::c_begin(sequence2), - container_algorithm_internal::c_end(sequence2), - std::forward(comp)); -} - -// c_next_permutation() -// -// Container-based version of the `std::next_permutation()` function -// to rearrange a container's elements into the next lexicographically greater -// permutation. -template -bool c_next_permutation(C& c) { - return std::next_permutation(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_next_permutation() for performing a lexicographical -// comparison using a `comp` operator instead of `operator<`. -template -bool c_next_permutation(C& c, LessThan&& comp) { - return std::next_permutation(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -// c_prev_permutation() -// -// Container-based version of the `std::prev_permutation()` function -// to rearrange a container's elements into the next lexicographically lesser -// permutation. -template -bool c_prev_permutation(C& c) { - return std::prev_permutation(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c)); -} - -// Overload of c_prev_permutation() for performing a lexicographical -// comparison using a `comp` operator instead of `operator<`. -template -bool c_prev_permutation(C& c, LessThan&& comp) { - return std::prev_permutation(container_algorithm_internal::c_begin(c), - container_algorithm_internal::c_end(c), - std::forward(comp)); -} - -//------------------------------------------------------------------------------ -// algorithms -//------------------------------------------------------------------------------ - -// c_iota() -// -// Container-based version of the `std::iota()` function -// to compute successive values of `value`, as if incremented with `++value` -// after each element is written. and write them to the container. -template -void c_iota(Sequence& sequence, T&& value) { - std::iota(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(value)); -} -// c_accumulate() -// -// Container-based version of the `std::accumulate()` function -// to accumulate the element values of a container to `init` and return that -// accumulation by value. -// -// Note: Due to a language technicality this function has return type -// absl::decay_t. As a user of this function you can casually read -// this as "returns T by value" and assume it does the right thing. 
-template -decay_t c_accumulate(const Sequence& sequence, T&& init) { - return std::accumulate(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(init)); -} - -// Overload of c_accumulate() for using a binary operations other than -// addition for computing the accumulation. -template -decay_t c_accumulate(const Sequence& sequence, T&& init, - BinaryOp&& binary_op) { - return std::accumulate(container_algorithm_internal::c_begin(sequence), - container_algorithm_internal::c_end(sequence), - std::forward(init), - std::forward(binary_op)); -} - -// c_inner_product() -// -// Container-based version of the `std::inner_product()` function -// to compute the cumulative inner product of container element pairs. -// -// Note: Due to a language technicality this function has return type -// absl::decay_t. As a user of this function you can casually read -// this as "returns T by value" and assume it does the right thing. -template -decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, - T&& sum) { - return std::inner_product(container_algorithm_internal::c_begin(factors1), - container_algorithm_internal::c_end(factors1), - container_algorithm_internal::c_begin(factors2), - std::forward(sum)); -} - -// Overload of c_inner_product() for using binary operations other than -// `operator+` (for computing the accumulation) and `operator*` (for computing -// the product between the two container's element pair). -template -decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, - T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) { - return std::inner_product(container_algorithm_internal::c_begin(factors1), - container_algorithm_internal::c_end(factors1), - container_algorithm_internal::c_begin(factors2), - std::forward(sum), std::forward(op1), - std::forward(op2)); -} - -// c_adjacent_difference() -// -// Container-based version of the `std::adjacent_difference()` -// function to compute the difference between each element and the one preceding -// it and write it to an iterator. -template -OutputIt c_adjacent_difference(const InputSequence& input, - OutputIt output_first) { - return std::adjacent_difference(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), - output_first); -} - -// Overload of c_adjacent_difference() for using a binary operation other than -// subtraction to compute the adjacent difference. -template -OutputIt c_adjacent_difference(const InputSequence& input, - OutputIt output_first, BinaryOp&& op) { - return std::adjacent_difference(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), - output_first, std::forward(op)); -} - -// c_partial_sum() -// -// Container-based version of the `std::partial_sum()` function -// to compute the partial sum of the elements in a sequence and write them -// to an iterator. The partial sum is the sum of all element values so far in -// the sequence. -template -OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { - return std::partial_sum(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), - output_first); -} - -// Overload of c_partial_sum() for using a binary operation other than addition -// to compute the "partial sum". 
-template -OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, - BinaryOp&& op) { - return std::partial_sum(container_algorithm_internal::c_begin(input), - container_algorithm_internal::c_end(input), - output_first, std::forward(op)); -} - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_algorithm_internal + { + + // NOTE: it is important to defer to ADL lookup for building with C++ modules, + // especially for headers like which are not visible from this file + // but specialize std::begin and std::end. + using std::begin; + using std::end; + + // The type of the iterator given by begin(c) (possibly std::begin(c)). + // ContainerIter> gives vector::const_iterator, + // while ContainerIter> gives vector::iterator. + template + using ContainerIter = decltype(begin(std::declval())); + + // An MSVC bug involving template parameter substitution requires us to use + // decltype() here instead of just std::pair. + template + using ContainerIterPairType = + decltype(std::make_pair(ContainerIter(), ContainerIter())); + + template + using ContainerDifferenceType = + decltype(std::distance(std::declval>(), std::declval>())); + + template + using ContainerPointerType = + typename std::iterator_traits>::pointer; + + // container_algorithm_internal::c_begin and + // container_algorithm_internal::c_end are abbreviations for proper ADL + // lookup of std::begin and std::end, i.e. + // using std::begin; + // using std::end; + // std::foo(begin(c), end(c)); + // becomes + // std::foo(container_algorithm_internal::begin(c), + // container_algorithm_internal::end(c)); + // These are meant for internal use only. + + template + ContainerIter c_begin(C& c) + { + return begin(c); + } + + template + ContainerIter c_end(C& c) + { + return end(c); + } + + template + struct IsUnorderedContainer : std::false_type + { + }; + + template + struct IsUnorderedContainer< + std::unordered_map> : std::true_type + { + }; + + template + struct IsUnorderedContainer> : std::true_type + { + }; + + // container_algorithm_internal::c_size. It is meant for internal use only. + + template + auto c_size(C& c) -> decltype(c.size()) + { + return c.size(); + } + + template + constexpr std::size_t c_size(T (&)[N]) + { + return N; + } + + } // namespace container_algorithm_internal + + // PUBLIC API + + //------------------------------------------------------------------------------ + // Abseil algorithm.h functions + //------------------------------------------------------------------------------ + + // c_linear_search() + // + // Container-based version of absl::linear_search() for performing a linear + // search within a container. + template + bool c_linear_search(const C& c, EqualityComparable&& value) + { + return linear_search(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + //------------------------------------------------------------------------------ + // algorithms + //------------------------------------------------------------------------------ + + // c_distance() + // + // Container-based version of the `std::distance()` function to + // return the number of elements within a container. 
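+    // Example (illustrative sketch; assumes <list> is included):
+    //
+    //   std::list<int> l = {1, 2, 3};
+    //   auto n = absl::c_distance(l);  // n == 3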
+ template + container_algorithm_internal::ContainerDifferenceType c_distance( + const C& c + ) + { + return std::distance(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + //------------------------------------------------------------------------------ + // Non-modifying sequence operations + //------------------------------------------------------------------------------ + + // c_all_of() + // + // Container-based version of the `std::all_of()` function to + // test if all elements within a container satisfy a condition. + template + bool c_all_of(const C& c, Pred&& pred) + { + return std::all_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_any_of() + // + // Container-based version of the `std::any_of()` function to + // test if any element in a container fulfills a condition. + template + bool c_any_of(const C& c, Pred&& pred) + { + return std::any_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_none_of() + // + // Container-based version of the `std::none_of()` function to + // test if no elements in a container fulfill a condition. + template + bool c_none_of(const C& c, Pred&& pred) + { + return std::none_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_for_each() + // + // Container-based version of the `std::for_each()` function to + // apply a function to a container's elements. + template + decay_t c_for_each(C&& c, Function&& f) + { + return std::for_each(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(f)); + } + + // c_find() + // + // Container-based version of the `std::find()` function to find + // the first element containing the passed value within a container value. + template + container_algorithm_internal::ContainerIter c_find(C& c, T&& value) + { + return std::find(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + // c_find_if() + // + // Container-based version of the `std::find_if()` function to find + // the first element in a container matching the given condition. + template + container_algorithm_internal::ContainerIter c_find_if(C& c, Pred&& pred) + { + return std::find_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_find_if_not() + // + // Container-based version of the `std::find_if_not()` function to + // find the first element in a container not matching the given condition. + template + container_algorithm_internal::ContainerIter c_find_if_not(C& c, Pred&& pred) + { + return std::find_if_not(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_find_end() + // + // Container-based version of the `std::find_end()` function to + // find the last subsequence within a container. + template + container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence + ) + { + return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); + } + + // Overload of c_find_end() for using a predicate evaluation other than `==` as + // the function's test condition. 
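+    // Example (illustrative sketch; assumes <vector> and <cstdlib> are included):
+    //
+    //   std::vector<int> haystack = {1, -2, 3, 1, 2};
+    //   std::vector<int> needle = {1, 2};
+    //   auto it = absl::c_find_end(haystack, needle,
+    //                              [](int a, int b) { return std::abs(a) == std::abs(b); });
+    //   // `it` points at haystack[3], the start of the last matching subsequence.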
+ template + container_algorithm_internal::ContainerIter c_find_end( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred + ) + { + return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); + } + + // c_find_first_of() + // + // Container-based version of the `std::find_first_of()` function to + // find the first element within the container that is also within the options + // container. + template + container_algorithm_internal::ContainerIter c_find_first_of(C1& container, C2& options) + { + return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options)); + } + + // Overload of c_find_first_of() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_find_first_of( + C1& container, C2& options, BinaryPredicate&& pred + ) + { + return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options), std::forward(pred)); + } + + // c_adjacent_find() + // + // Container-based version of the `std::adjacent_find()` function to + // find equal adjacent elements within a container. + template + container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence + ) + { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_adjacent_find() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_adjacent_find( + Sequence& sequence, BinaryPredicate&& pred + ) + { + return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(pred)); + } + + // c_count() + // + // Container-based version of the `std::count()` function to count + // values that match within a container. + template + container_algorithm_internal::ContainerDifferenceType c_count( + const C& c, T&& value + ) + { + return std::count(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + // c_count_if() + // + // Container-based version of the `std::count_if()` function to + // count values matching a condition within a container. + template + container_algorithm_internal::ContainerDifferenceType c_count_if( + const C& c, Pred&& pred + ) + { + return std::count_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_mismatch() + // + // Container-based version of the `std::mismatch()` function to + // return the first element where two ordered containers differ. Applies `==` to + // the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
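+    // Example (illustrative sketch; assumes <vector> is included):
+    //
+    //   std::vector<int> a = {1, 2, 3, 4};
+    //   std::vector<int> b = {1, 2, 9};
+    //   auto p = absl::c_mismatch(a, b);  // *p.first == 3, *p.second == 9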
+ template + container_algorithm_internal::ContainerIterPairType + c_mismatch(C1& c1, C2& c2) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + // Negates equality because Cpp17EqualityComparable doesn't require clients + // to overload both `operator==` and `operator!=`. + if (!(*first1 == *first2)) + { + break; + } + } + + return std::make_pair(first1, first2); + } + + // Overload of c_mismatch() for using a predicate evaluation other than `==` as + // the function's test condition. Applies `pred`to the first N elements of `c1` + // and `c2`, where N = min(size(c1), size(c2)). + template + container_algorithm_internal::ContainerIterPairType + c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + if (!pred(*first1, *first2)) + { + break; + } + } + + return std::make_pair(first1, first2); + } + + // c_equal() + // + // Container-based version of the `std::equal()` function to + // test whether two containers are equal. + // + // NOTE: the semantics of c_equal() are slightly different than those of + // equal(): while the latter iterates over the second container only up to the + // size of the first container, c_equal() also checks whether the container + // sizes are equal. This better matches expectations about c_equal() based on + // its signature. + // + // Example: + // vector v1 = <1, 2, 3>; + // vector v2 = <1, 2, 3, 4>; + // equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true + // c_equal(v1, v2) returns false + + template + bool c_equal(const C1& c1, const C2& c2) + { + return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2))); + } + + // Overload of c_equal() for using a predicate evaluation other than `==` as + // the function's test condition. + template + bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) + { + return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), std::forward(pred))); + } + + // c_is_permutation() + // + // Container-based version of the `std::is_permutation()` function + // to test whether a container is a permutation of another. + template + bool c_is_permutation(const C1& c1, const C2& c2) + { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2)); + } + + // Overload of c_is_permutation() for using a predicate evaluation other than + // `==` as the function's test condition. 
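+    // Example (illustrative sketch; assumes <vector> and <cstdlib> are included):
+    //
+    //   std::vector<int> a = {1, 2, 3};
+    //   std::vector<int> b = {-3, 1, -2};
+    //   bool same = absl::c_is_permutation(a, b,
+    //                                      [](int x, int y) { return std::abs(x) == std::abs(y); });
+    //   // same == true: the containers hold the same values up to sign.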
+ template + bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) + { + using std::begin; + using std::end; + return c1.size() == c2.size() && + std::is_permutation(begin(c1), end(c1), begin(c2), std::forward(pred)); + } + + // c_search() + // + // Container-based version of the `std::search()` function to search + // a container for a subsequence. + template + container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence + ) + { + return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); + } + + // Overload of c_search() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_search( + Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred + ) + { + return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); + } + + // c_search_n() + // + // Container-based version of the `std::search_n()` function to + // search a container for the first sequence of N elements. + template + container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value + ) + { + return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value)); + } + + // Overload of c_search_n() for using a predicate evaluation other than + // `==` as the function's test condition. + template + container_algorithm_internal::ContainerIter c_search_n( + Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred + ) + { + return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value), std::forward(pred)); + } + + //------------------------------------------------------------------------------ + // Modifying sequence operations + //------------------------------------------------------------------------------ + + // c_copy() + // + // Container-based version of the `std::copy()` function to copy a + // container's elements into an iterator. + template + OutputIterator c_copy(const InputSequence& input, OutputIterator output) + { + return std::copy(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output); + } + + // c_copy_n() + // + // Container-based version of the `std::copy_n()` function to copy a + // container's first N elements into an iterator. + template + OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) + { + return std::copy_n(container_algorithm_internal::c_begin(input), n, output); + } + + // c_copy_if() + // + // Container-based version of the `std::copy_if()` function to copy + // a container's elements satisfying some condition into an iterator. + template + OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, Pred&& pred) + { + return std::copy_if(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(pred)); + } + + // c_copy_backward() + // + // Container-based version of the `std::copy_backward()` function to + // copy a container's elements in reverse order into an iterator. 
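+    // Example (illustrative sketch; assumes <vector> is included):
+    //
+    //   std::vector<int> src = {1, 2, 3};
+    //   std::vector<int> dst = {0, 0, 0, 0, 0};
+    //   absl::c_copy_backward(src, dst.end());
+    //   // dst == {0, 0, 1, 2, 3}: the copy is written ending at `dst.end()`.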
+ template + BidirectionalIterator c_copy_backward(const C& src, BidirectionalIterator dest) + { + return std::copy_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_move() + // + // Container-based version of the `std::move()` function to move + // a container's elements into an iterator. + template + OutputIterator c_move(C&& src, OutputIterator dest) + { + return std::move(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_move_backward() + // + // Container-based version of the `std::move_backward()` function to + // move a container's elements into an iterator in reverse order. + template + BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) + { + return std::move_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); + } + + // c_swap_ranges() + // + // Container-based version of the `std::swap_ranges()` function to + // swap a container's elements with another container's elements. Swaps the + // first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). + template + container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) + { + auto first1 = container_algorithm_internal::c_begin(c1); + auto last1 = container_algorithm_internal::c_end(c1); + auto first2 = container_algorithm_internal::c_begin(c2); + auto last2 = container_algorithm_internal::c_end(c2); + + using std::swap; + for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) + { + swap(*first1, *first2); + } + return first2; + } + + // c_transform() + // + // Container-based version of the `std::transform()` function to + // transform a container's elements using the unary operation, storing the + // result in an iterator pointing to the last transformed element in the output + // range. + template + OutputIterator c_transform(const InputSequence& input, OutputIterator output, UnaryOp&& unary_op) + { + return std::transform(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(unary_op)); + } + + // Overload of c_transform() for performing a transformation using a binary + // predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, + // where N = min(size(c1), size(c2)). + template + OutputIterator c_transform(const InputSequence1& input1, const InputSequence2& input2, OutputIterator output, BinaryOp&& binary_op) + { + auto first1 = container_algorithm_internal::c_begin(input1); + auto last1 = container_algorithm_internal::c_end(input1); + auto first2 = container_algorithm_internal::c_begin(input2); + auto last2 = container_algorithm_internal::c_end(input2); + for (; first1 != last1 && first2 != last2; + ++first1, (void)++first2, ++output) + { + *output = binary_op(*first1, *first2); + } + + return output; + } + + // c_replace() + // + // Container-based version of the `std::replace()` function to + // replace a container's elements of some value with a new value. The container + // is modified in place. + template + void c_replace(Sequence& sequence, const T& old_value, const T& new_value) + { + std::replace(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), old_value, new_value); + } + + // c_replace_if() + // + // Container-based version of the `std::replace_if()` function to + // replace a container's elements of some value with a new value based on some + // condition. 
The container is modified in place. + template + void c_replace_if(C& c, Pred&& pred, T&& new_value) + { + std::replace_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred), std::forward(new_value)); + } + + // c_replace_copy() + // + // Container-based version of the `std::replace_copy()` function to + // replace a container's elements of some value with a new value and return the + // results within an iterator. + template + OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, T&& new_value) + { + return std::replace_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(old_value), std::forward(new_value)); + } + + // c_replace_copy_if() + // + // Container-based version of the `std::replace_copy_if()` function + // to replace a container's elements of some value with a new value based on + // some condition, and return the results within an iterator. + template + OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, T&& new_value) + { + return std::replace_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred), std::forward(new_value)); + } + + // c_fill() + // + // Container-based version of the `std::fill()` function to fill a + // container with some value. + template + void c_fill(C& c, T&& value) + { + std::fill(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); + } + + // c_fill_n() + // + // Container-based version of the `std::fill_n()` function to fill + // the first N elements in a container with some value. + template + void c_fill_n(C& c, Size n, T&& value) + { + std::fill_n(container_algorithm_internal::c_begin(c), n, std::forward(value)); + } + + // c_generate() + // + // Container-based version of the `std::generate()` function to + // assign a container's elements to the values provided by the given generator. + template + void c_generate(C& c, Generator&& gen) + { + std::generate(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); + } + + // c_generate_n() + // + // Container-based version of the `std::generate_n()` function to + // assign a container's first N elements to the values provided by the given + // generator. + template + container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, Generator&& gen) + { + return std::generate_n(container_algorithm_internal::c_begin(c), n, std::forward(gen)); + } + + // Note: `c_xx()` container versions for `remove()`, `remove_if()`, + // and `unique()` are omitted, because it's not clear whether or not such + // functions should call erase on their supplied sequences afterwards. Either + // behavior would be surprising for a different set of users. + + // c_remove_copy() + // + // Container-based version of the `std::remove_copy()` function to + // copy a container's elements while removing any elements matching the given + // `value`. + template + OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) + { + return std::remove_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(value)); + } + + // c_remove_copy_if() + // + // Container-based version of the `std::remove_copy_if()` function + // to copy a container's elements while removing any elements matching the given + // condition. 
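+    // Example (illustrative sketch; assumes <vector> and <iterator> are included):
+    //
+    //   std::vector<int> v = {1, 2, 3, 4};
+    //   std::vector<int> odds;
+    //   absl::c_remove_copy_if(v, std::back_inserter(odds),
+    //                          [](int x) { return x % 2 == 0; });
+    //   // odds == {1, 3}; `v` itself is left unchanged.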
+ template + OutputIterator c_remove_copy_if(const C& c, OutputIterator result, Pred&& pred) + { + return std::remove_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); + } + + // c_unique_copy() + // + // Container-based version of the `std::unique_copy()` function to + // copy a container's elements while removing any elements containing duplicate + // values. + template + OutputIterator c_unique_copy(const C& c, OutputIterator result) + { + return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result); + } + + // Overload of c_unique_copy() for using a predicate evaluation other than + // `==` for comparing uniqueness of the element values. + template + OutputIterator c_unique_copy(const C& c, OutputIterator result, BinaryPredicate&& pred) + { + return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); + } + + // c_reverse() + // + // Container-based version of the `std::reverse()` function to + // reverse a container's elements. + template + void c_reverse(Sequence& sequence) + { + std::reverse(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // c_reverse_copy() + // + // Container-based version of the `std::reverse()` function to + // reverse a container's elements and write them to an iterator range. + template + OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) + { + return std::reverse_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), result); + } + + // c_rotate() + // + // Container-based version of the `std::rotate()` function to + // shift a container's elements leftward such that the `middle` element becomes + // the first element in the container. + template> + Iterator c_rotate(C& sequence, Iterator middle) + { + return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); + } + + // c_rotate_copy() + // + // Container-based version of the `std::rotate_copy()` function to + // shift a container's elements leftward such that the `middle` element becomes + // the first element in a new iterator range. + template + OutputIterator c_rotate_copy( + const C& sequence, + container_algorithm_internal::ContainerIter middle, + OutputIterator result + ) + { + return std::rotate_copy(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), result); + } + + // c_shuffle() + // + // Container-based version of the `std::shuffle()` function to + // randomly shuffle elements within the container using a `gen()` uniform random + // number generator. + template + void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) + { + std::shuffle(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); + } + + //------------------------------------------------------------------------------ + // Partition functions + //------------------------------------------------------------------------------ + + // c_is_partitioned() + // + // Container-based version of the `std::is_partitioned()` function + // to test whether all elements in the container for which `pred` returns `true` + // precede those for which `pred` is `false`. 
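+    // Example (illustrative sketch; assumes <vector> is included):
+    //
+    //   std::vector<int> v = {2, 4, 6, 1, 3};
+    //   bool ok = absl::c_is_partitioned(v, [](int x) { return x % 2 == 0; });
+    //   // ok == true: every even value precedes every odd value.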
+ template + bool c_is_partitioned(const C& c, Pred&& pred) + { + return std::is_partitioned(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_partition() + // + // Container-based version of the `std::partition()` function + // to rearrange all elements in a container in such a way that all elements for + // which `pred` returns `true` precede all those for which it returns `false`, + // returning an iterator to the first element of the second group. + template + container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) + { + return std::partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_stable_partition() + // + // Container-based version of the `std::stable_partition()` function + // to rearrange all elements in a container in such a way that all elements for + // which `pred` returns `true` precede all those for which it returns `false`, + // preserving the relative ordering between the two groups. The function returns + // an iterator to the first element of the second group. + template + container_algorithm_internal::ContainerIter c_stable_partition(C& c, Pred&& pred) + { + return std::stable_partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + // c_partition_copy() + // + // Container-based version of the `std::partition_copy()` function + // to partition a container's elements and return them into two iterators: one + // for which `pred` returns `true`, and one for which `pred` returns `false.` + + template + std::pair c_partition_copy( + const C& c, OutputIterator1 out_true, OutputIterator2 out_false, Pred&& pred + ) + { + return std::partition_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), out_true, out_false, std::forward(pred)); + } + + // c_partition_point() + // + // Container-based version of the `std::partition_point()` function + // to return the first element of an already partitioned container for which + // the given `pred` is not `true`. + template + container_algorithm_internal::ContainerIter c_partition_point(C& c, Pred&& pred) + { + return std::partition_point(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); + } + + //------------------------------------------------------------------------------ + // Sorting functions + //------------------------------------------------------------------------------ + + // c_sort() + // + // Container-based version of the `std::sort()` function + // to sort elements in ascending order of their values. + template + void c_sort(C& c) + { + std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_sort() for performing a `comp` comparison other than the + // default `operator<`. + template + void c_sort(C& c, LessThan&& comp) + { + std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_stable_sort() + // + // Container-based version of the `std::stable_sort()` function + // to sort elements in ascending order of their values, preserving the order + // of equivalents. 
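+    // Example (illustrative sketch; assumes <vector> and <string> are included):
+    //
+    //   std::vector<std::string> v = {"bb", "a", "cc"};
+    //   absl::c_stable_sort(v, [](const std::string& x, const std::string& y) {
+    //       return x.size() < y.size();
+    //   });
+    //   // v == {"a", "bb", "cc"}: "bb" stays ahead of "cc" because the two
+    //   // compare equal by length and the sort is stable.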
+ template + void c_stable_sort(C& c) + { + std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_stable_sort() for performing a `comp` comparison other than the + // default `operator<`. + template + void c_stable_sort(C& c, LessThan&& comp) + { + std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_is_sorted() + // + // Container-based version of the `std::is_sorted()` function + // to evaluate whether the given container is sorted in ascending order. + template + bool c_is_sorted(const C& c) + { + return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // c_is_sorted() overload for performing a `comp` comparison other than the + // default `operator<`. + template + bool c_is_sorted(const C& c, LessThan&& comp) + { + return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_partial_sort() + // + // Container-based version of the `std::partial_sort()` function + // to rearrange elements within a container such that elements before `middle` + // are sorted in ascending order. + template + void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle + ) + { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_partial_sort() for performing a `comp` comparison other than + // the default `operator<`. + template + void c_partial_sort( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter middle, + LessThan&& comp + ) + { + std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_partial_sort_copy() + // + // Container-based version of the `std::partial_sort_copy()` + // function to sort the elements in the given range `result` within the larger + // `sequence` in ascending order (and using `result` as the output parameter). + // At most min(result.last - result.first, sequence.last - sequence.first) + // elements from the sequence will be stored in the result. + template + container_algorithm_internal::ContainerIter + c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) + { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result)); + } + + // Overload of c_partial_sort_copy() for performing a `comp` comparison other + // than the default `operator<`. + template + container_algorithm_internal::ContainerIter + c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, LessThan&& comp) + { + return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result), std::forward(comp)); + } + + // c_is_sorted_until() + // + // Container-based version of the `std::is_sorted_until()` function + // to return the first element within a container that is not sorted in + // ascending order as an iterator. 
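+    // Example (illustrative sketch; assumes <vector> is included):
+    //
+    //   std::vector<int> v = {1, 2, 5, 3, 4};
+    //   auto it = absl::c_is_sorted_until(v);
+    //   // `it` points at the 3, the first element that breaks the ascending order.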
+ template + container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) + { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_is_sorted_until() for performing a `comp` comparison other than + // the default `operator<`. + template + container_algorithm_internal::ContainerIter c_is_sorted_until( + C& c, LessThan&& comp + ) + { + return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_nth_element() + // + // Container-based version of the `std::nth_element()` function + // to rearrange the elements within a container such that the `nth` element + // would be in that position in an ordered sequence; other elements may be in + // any order, except that all preceding `nth` will be less than that element, + // and all following `nth` will be greater than that element. + template + void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth + ) + { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_nth_element() for performing a `comp` comparison other than + // the default `operator<`. + template + void c_nth_element( + RandomAccessContainer& sequence, + container_algorithm_internal::ContainerIter nth, + LessThan&& comp + ) + { + std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + //------------------------------------------------------------------------------ + // Binary Search + //------------------------------------------------------------------------------ + + // c_lower_bound() + // + // Container-based version of the `std::lower_bound()` function + // to return an iterator pointing to the first element in a sorted container + // which does not compare less than `value`. + template + container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, T&& value + ) + { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); + } + + // Overload of c_lower_bound() for performing a `comp` comparison other than + // the default `operator<`. + template + container_algorithm_internal::ContainerIter c_lower_bound( + Sequence& sequence, T&& value, LessThan&& comp + ) + { + return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); + } + + // c_upper_bound() + // + // Container-based version of the `std::upper_bound()` function + // to return an iterator pointing to the first element in a sorted container + // which is greater than `value`. + template + container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, T&& value + ) + { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); + } + + // Overload of c_upper_bound() for performing a `comp` comparison other than + // the default `operator<`. 
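+    // Example (illustrative sketch; assumes <vector> and <functional> are included):
+    //
+    //   std::vector<int> v = {5, 4, 4, 2};  // sorted with std::greater<int>()
+    //   auto it = absl::c_upper_bound(v, 4, std::greater<int>());
+    //   // `it` points at the 2, the first element strictly less than 4.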
+ template + container_algorithm_internal::ContainerIter c_upper_bound( + Sequence& sequence, T&& value, LessThan&& comp + ) + { + return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); + } + + // c_equal_range() + // + // Container-based version of the `std::equal_range()` function + // to return an iterator pair pointing to the first and last elements in a + // sorted container which compare equal to `value`. + template + container_algorithm_internal::ContainerIterPairType + c_equal_range(Sequence& sequence, T&& value) + { + return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); + } + + // Overload of c_equal_range() for performing a `comp` comparison other than + // the default `operator<`. + template + container_algorithm_internal::ContainerIterPairType + c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) + { + return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); + } + + // c_binary_search() + // + // Container-based version of the `std::binary_search()` function + // to test if any element in the sorted container contains a value equivalent to + // 'value'. + template + bool c_binary_search(Sequence&& sequence, T&& value) + { + return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); + } + + // Overload of c_binary_search() for performing a `comp` comparison other than + // the default `operator<`. + template + bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) + { + return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); + } + + //------------------------------------------------------------------------------ + // Merge functions + //------------------------------------------------------------------------------ + + // c_merge() + // + // Container-based version of the `std::merge()` function + // to merge two sorted containers into a single sorted iterator. + template + OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) + { + return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result); + } + + // Overload of c_merge() for performing a `comp` comparison other than + // the default `operator<`. + template + OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, LessThan&& comp) + { + return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result, std::forward(comp)); + } + + // c_inplace_merge() + // + // Container-based version of the `std::inplace_merge()` function + // to merge a supplied iterator `middle` into a container. + template + void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle) + { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c)); + } + + // Overload of c_inplace_merge() for performing a merge using a `comp` other + // than `operator<`. 
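+    // Example (illustrative sketch; assumes <vector> and <functional> are included):
+    //
+    //   std::vector<int> v = {5, 3, 1, 6, 4, 2};  // two runs, each sorted descending
+    //   absl::c_inplace_merge(v, v.begin() + 3, std::greater<int>());
+    //   // v == {6, 5, 4, 3, 2, 1}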
+ template + void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle, LessThan&& comp) + { + std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_includes() + // + // Container-based version of the `std::includes()` function + // to test whether a sorted container `c1` entirely contains another sorted + // container `c2`. + template + bool c_includes(const C1& c1, const C2& c2) + { + return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2)); + } + + // Overload of c_includes() for performing a merge using a `comp` other than + // `operator<`. + template + bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) + { + return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), std::forward(comp)); + } + + // c_set_union() + // + // Container-based version of the `std::set_union()` function + // to return an iterator containing the union of two containers; duplicate + // values are not copied into the output. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) + { + return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); + } + + // Overload of c_set_union() for performing a merge using a `comp` other than + // `operator<`. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) + { + return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); + } + + // c_set_intersection() + // + // Container-based version of the `std::set_intersection()` function + // to return an iterator containing the intersection of two sorted containers. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) + { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using operator<. + assert(absl::c_is_sorted(c1)); + assert(absl::c_is_sorted(c2)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); + } + + // Overload of c_set_intersection() for performing a merge using a `comp` other + // than `operator<`. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) + { + // In debug builds, ensure that both containers are sorted with respect to the + // default comparator. std::set_intersection requires the containers be sorted + // using the same comparator. 
+ assert(absl::c_is_sorted(c1, comp)); + assert(absl::c_is_sorted(c2, comp)); + return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); + } + + // c_set_difference() + // + // Container-based version of the `std::set_difference()` function + // to return an iterator containing elements present in the first container but + // not in the second. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output) + { + return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); + } + + // Overload of c_set_difference() for performing a merge using a `comp` other + // than `operator<`. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) + { + return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); + } + + // c_set_symmetric_difference() + // + // Container-based version of the `std::set_symmetric_difference()` + // function to return an iterator containing elements present in either one + // container or the other, but not both. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output) + { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), + output + ); + } + + // Overload of c_set_symmetric_difference() for performing a merge using a + // `comp` other than `operator<`. + template::value, void>::type, typename = typename std::enable_if::value, void>::type> + OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) + { + return std::set_symmetric_difference( + container_algorithm_internal::c_begin(c1), + container_algorithm_internal::c_end(c1), + container_algorithm_internal::c_begin(c2), + container_algorithm_internal::c_end(c2), + output, + std::forward(comp) + ); + } + + //------------------------------------------------------------------------------ + // Heap functions + //------------------------------------------------------------------------------ + + // c_push_heap() + // + // Container-based version of the `std::push_heap()` function + // to push a value onto a container heap. + template + void c_push_heap(RandomAccessContainer& sequence) + { + std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_push_heap() for performing a push operation on a heap using a + // `comp` other than `operator<`. 
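+    // Illustrative usage sketch (not upstream; assumes <vector>, <functional>,
+    // and that the container already satisfies the heap property under `comp`):
+    //
+    //   std::vector<int> heap = {2, 5, 9};
+    //   absl::c_make_heap(heap, std::greater<int>());  // min-heap
+    //   heap.push_back(1);
+    //   absl::c_push_heap(heap, std::greater<int>());  // heap.front() == 1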
+ template + void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) + { + std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_pop_heap() + // + // Container-based version of the `std::pop_heap()` function + // to pop a value from a heap container. + template + void c_pop_heap(RandomAccessContainer& sequence) + { + std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_pop_heap() for performing a pop operation on a heap using a + // `comp` other than `operator<`. + template + void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) + { + std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_make_heap() + // + // Container-based version of the `std::make_heap()` function + // to make a container a heap. + template + void c_make_heap(RandomAccessContainer& sequence) + { + std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_make_heap() for performing heap comparisons using a + // `comp` other than `operator<` + template + void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) + { + std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_sort_heap() + // + // Container-based version of the `std::sort_heap()` function + // to sort a heap into ascending order (after which it is no longer a heap). + template + void c_sort_heap(RandomAccessContainer& sequence) + { + std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_sort_heap() for performing heap comparisons using a + // `comp` other than `operator<` + template + void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) + { + std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_is_heap() + // + // Container-based version of the `std::is_heap()` function + // to check whether the given container is a heap. + template + bool c_is_heap(const RandomAccessContainer& sequence) + { + return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_is_heap() for performing heap comparisons using a + // `comp` other than `operator<` + template + bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) + { + return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_is_heap_until() + // + // Container-based version of the `std::is_heap_until()` function + // to find the first element in a given container which is not in heap order. 
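+    // Illustrative usage sketch (not upstream; assumes <vector> is included):
+    //
+    //   std::vector<int> v = {9, 7, 8, 1, 6, 10};
+    //   auto it = absl::c_is_heap_until(v);  // points at 10, the first element
+    //                                        // breaking max-heap order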
+ template + container_algorithm_internal::ContainerIter + c_is_heap_until(RandomAccessContainer& sequence) + { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_is_heap_until() for performing heap comparisons using a + // `comp` other than `operator<` + template + container_algorithm_internal::ContainerIter + c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) + { + return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + //------------------------------------------------------------------------------ + // Min/max + //------------------------------------------------------------------------------ + + // c_min_element() + // + // Container-based version of the `std::min_element()` function + // to return an iterator pointing to the element with the smallest value, using + // `operator<` to make the comparisons. + template + container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence + ) + { + return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_min_element() for performing a `comp` comparison other than + // `operator<`. + template + container_algorithm_internal::ContainerIter c_min_element( + Sequence& sequence, LessThan&& comp + ) + { + return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_max_element() + // + // Container-based version of the `std::max_element()` function + // to return an iterator pointing to the element with the largest value, using + // `operator<` to make the comparisons. + template + container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence + ) + { + return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); + } + + // Overload of c_max_element() for performing a `comp` comparison other than + // `operator<`. + template + container_algorithm_internal::ContainerIter c_max_element( + Sequence& sequence, LessThan&& comp + ) + { + return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); + } + + // c_minmax_element() + // + // Container-based version of the `std::minmax_element()` function + // to return a pair of iterators pointing to the elements containing the + // smallest and largest values, respectively, using `operator<` to make the + // comparisons. + template + container_algorithm_internal::ContainerIterPairType + c_minmax_element(C& c) + { + return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_minmax_element() for performing `comp` comparisons other than + // `operator<`. 
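+    // Illustrative usage sketch (not upstream; assumes <vector> and <string>):
+    //
+    //   std::vector<std::string> words = {"bb", "a", "cccc"};
+    //   auto p = absl::c_minmax_element(
+    //       words, [](const std::string& x, const std::string& y) {
+    //         return x.size() < y.size();
+    //       });
+    //   // *p.first == "a", *p.second == "cccc"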
+ template + container_algorithm_internal::ContainerIterPairType + c_minmax_element(C& c, LessThan&& comp) + { + return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + //------------------------------------------------------------------------------ + // Lexicographical Comparisons + //------------------------------------------------------------------------------ + + // c_lexicographical_compare() + // + // Container-based version of the `std::lexicographical_compare()` + // function to lexicographically compare (e.g. sort words alphabetically) two + // container sequences. The comparison is performed using `operator<`. Note + // that capital letters ("A-Z") have ASCII values less than lowercase letters + // ("a-z"). + template + bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) + { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2) + ); + } + + // Overload of c_lexicographical_compare() for performing a lexicographical + // comparison using a `comp` operator instead of `operator<`. + template + bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, LessThan&& comp) + { + return std::lexicographical_compare( + container_algorithm_internal::c_begin(sequence1), + container_algorithm_internal::c_end(sequence1), + container_algorithm_internal::c_begin(sequence2), + container_algorithm_internal::c_end(sequence2), + std::forward(comp) + ); + } + + // c_next_permutation() + // + // Container-based version of the `std::next_permutation()` function + // to rearrange a container's elements into the next lexicographically greater + // permutation. + template + bool c_next_permutation(C& c) + { + return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_next_permutation() for performing a lexicographical + // comparison using a `comp` operator instead of `operator<`. + template + bool c_next_permutation(C& c, LessThan&& comp) + { + return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + // c_prev_permutation() + // + // Container-based version of the `std::prev_permutation()` function + // to rearrange a container's elements into the next lexicographically lesser + // permutation. + template + bool c_prev_permutation(C& c) + { + return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); + } + + // Overload of c_prev_permutation() for performing a lexicographical + // comparison using a `comp` operator instead of `operator<`. + template + bool c_prev_permutation(C& c, LessThan&& comp) + { + return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); + } + + //------------------------------------------------------------------------------ + // algorithms + //------------------------------------------------------------------------------ + + // c_iota() + // + // Container-based version of the `std::iota()` function + // to compute successive values of `value`, as if incremented with `++value` + // after each element is written. and write them to the container. 
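+    // Illustrative usage sketch (not upstream; assumes <vector> is included):
+    //
+    //   std::vector<int> v(5);
+    //   absl::c_iota(v, 1);  // v == {1, 2, 3, 4, 5}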
+ template + void c_iota(Sequence& sequence, T&& value) + { + std::iota(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); + } + // c_accumulate() + // + // Container-based version of the `std::accumulate()` function + // to accumulate the element values of a container to `init` and return that + // accumulation by value. + // + // Note: Due to a language technicality this function has return type + // absl::decay_t. As a user of this function you can casually read + // this as "returns T by value" and assume it does the right thing. + template + decay_t c_accumulate(const Sequence& sequence, T&& init) + { + return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init)); + } + + // Overload of c_accumulate() for using a binary operations other than + // addition for computing the accumulation. + template + decay_t c_accumulate(const Sequence& sequence, T&& init, BinaryOp&& binary_op) + { + return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init), std::forward(binary_op)); + } + + // c_inner_product() + // + // Container-based version of the `std::inner_product()` function + // to compute the cumulative inner product of container element pairs. + // + // Note: Due to a language technicality this function has return type + // absl::decay_t. As a user of this function you can casually read + // this as "returns T by value" and assume it does the right thing. + template + decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum) + { + return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum)); + } + + // Overload of c_inner_product() for using binary operations other than + // `operator+` (for computing the accumulation) and `operator*` (for computing + // the product between the two container's element pair). + template + decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) + { + return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum), std::forward(op1), std::forward(op2)); + } + + // c_adjacent_difference() + // + // Container-based version of the `std::adjacent_difference()` + // function to compute the difference between each element and the one preceding + // it and write it to an iterator. + template + OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first) + { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); + } + + // Overload of c_adjacent_difference() for using a binary operation other than + // subtraction to compute the adjacent difference. 
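+    // Illustrative usage sketch (not upstream; assumes <vector> and <functional>):
+    //
+    //   std::vector<int> in = {1, 2, 4, 8};
+    //   std::vector<int> out(in.size());
+    //   absl::c_adjacent_difference(in, out.begin(), std::plus<int>());
+    //   // out == {1, 3, 6, 12}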
+ template + OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first, BinaryOp&& op) + { + return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); + } + + // c_partial_sum() + // + // Container-based version of the `std::partial_sum()` function + // to compute the partial sum of the elements in a sequence and write them + // to an iterator. The partial sum is the sum of all element values so far in + // the sequence. + template + OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) + { + return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); + } + + // Overload of c_partial_sum() for using a binary operation other than addition + // to compute the "partial sum". + template + OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, BinaryOp&& op) + { + return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); + } + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_ALGORITHM_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/attributes.h b/CAPI/cpp/grpc/include/absl/base/attributes.h index e4e7a3d..9908348 100644 --- a/CAPI/cpp/grpc/include/absl/base/attributes.h +++ b/CAPI/cpp/grpc/include/absl/base/attributes.h @@ -85,9 +85,9 @@ // should be counted from two, not one." #if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ - __attribute__((__format__(__printf__, string_index, first_to_check))) + __attribute__((__format__(__printf__, string_index, first_to_check))) #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ - __attribute__((__format__(__scanf__, string_index, first_to_check))) + __attribute__((__format__(__scanf__, string_index, first_to_check))) #else #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) @@ -122,7 +122,7 @@ #elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \ - __attribute__((optimize("no-optimize-sibling-calls"))) + __attribute__((optimize("no-optimize-sibling-calls"))) #else #define ABSL_ATTRIBUTE_NO_TAIL_CALL #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 @@ -136,9 +136,8 @@ // for further information. // The MinGW compiler doesn't complain about the weak attribute until the link // step, presumably because Windows doesn't use ELF binaries. 
-#if (ABSL_HAVE_ATTRIBUTE(weak) || \ - (defined(__GNUC__) && !defined(__clang__))) && \ - (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ +#if (ABSL_HAVE_ATTRIBUTE(weak) || (defined(__GNUC__) && !defined(__clang__))) && \ + (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ !defined(__MINGW32__) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) @@ -253,10 +252,10 @@ // https://gcc.gnu.org/gcc-4.9/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ - __attribute__((no_sanitize_undefined)) + __attribute__((no_sanitize_undefined)) #elif ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ - __attribute__((no_sanitize("undefined"))) + __attribute__((no_sanitize("undefined"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED #endif @@ -277,7 +276,7 @@ // See https://clang.llvm.org/docs/SafeStack.html for details. #if ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ - __attribute__((no_sanitize("safe-stack"))) + __attribute__((no_sanitize("safe-stack"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK #endif @@ -297,8 +296,7 @@ // a prerequisite. Labeled sections are not supported on Darwin/iOS. #ifdef ABSL_HAVE_ATTRIBUTE_SECTION #error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set -#elif (ABSL_HAVE_ATTRIBUTE(section) || \ - (defined(__GNUC__) && !defined(__clang__))) && \ +#elif (ABSL_HAVE_ATTRIBUTE(section) || (defined(__GNUC__) && !defined(__clang__))) && \ !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK #define ABSL_HAVE_ATTRIBUTE_SECTION 1 @@ -312,7 +310,7 @@ // #ifndef ABSL_ATTRIBUTE_SECTION #define ABSL_ATTRIBUTE_SECTION(name) \ - __attribute__((section(#name))) __attribute__((noinline)) + __attribute__((section(#name))) __attribute__((noinline)) #endif // ABSL_ATTRIBUTE_SECTION_VARIABLE @@ -341,9 +339,9 @@ // a no-op on ELF but not on Mach-O. // #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS -#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ - extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ - extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK +#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ + extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK #endif #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) @@ -359,9 +357,9 @@ // link. 
// #define ABSL_ATTRIBUTE_SECTION_START(name) \ - (reinterpret_cast(__start_##name)) + (reinterpret_cast(__start_##name)) #define ABSL_ATTRIBUTE_SECTION_STOP(name) \ - (reinterpret_cast(__stop_##name)) + (reinterpret_cast(__stop_##name)) #else // !ABSL_HAVE_ATTRIBUTE_SECTION @@ -373,8 +371,8 @@ #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) -#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast(0)) -#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast(0)) +#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast(0)) +#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast(0)) #endif // ABSL_ATTRIBUTE_SECTION @@ -385,7 +383,7 @@ (defined(__GNUC__) && !defined(__clang__)) #if defined(__i386__) #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ - __attribute__((force_align_arg_pointer)) + __attribute__((force_align_arg_pointer)) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) #elif defined(__x86_64__) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) @@ -505,7 +503,7 @@ #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #define ABSL_XRAY_LOG_ARGS(N) \ - [[clang::xray_always_instrument, clang::xray_log_args(N)]] + [[clang::xray_always_instrument, clang::xray_log_args(N)]] #else #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #endif @@ -639,8 +637,9 @@ #define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] #else #define ABSL_FALLTHROUGH_INTENDED \ - do { \ - } while (0) + do \ + { \ + } while (0) #endif // ABSL_DEPRECATED() diff --git a/CAPI/cpp/grpc/include/absl/base/call_once.h b/CAPI/cpp/grpc/include/absl/base/call_once.h index 96109f5..e520b89 100644 --- a/CAPI/cpp/grpc/include/absl/base/call_once.h +++ b/CAPI/cpp/grpc/include/absl/base/call_once.h @@ -40,180 +40,195 @@ #include "absl/base/optimization.h" #include "absl/base/port.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -class once_flag; - -namespace base_internal { -std::atomic* ControlWord(absl::once_flag* flag); -} // namespace base_internal - -// call_once() -// -// For all invocations using a given `once_flag`, invokes a given `fn` exactly -// once across all threads. The first call to `call_once()` with a particular -// `once_flag` argument (that does not throw an exception) will run the -// specified function with the provided `args`; other calls with the same -// `once_flag` argument will not run the function, but will wait -// for the provided function to finish running (if it is still running). -// -// This mechanism provides a safe, simple, and fast mechanism for one-time -// initialization in a multi-threaded process. -// -// Example: -// -// class MyInitClass { -// public: -// ... -// mutable absl::once_flag once_; -// -// MyInitClass* init() const { -// absl::call_once(once_, &MyInitClass::Init, this); -// return ptr_; -// } -// -template -void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); - -// once_flag -// -// Objects of this type are used to distinguish calls to `call_once()` and -// ensure the provided function is only invoked once across all threads. This -// type is not copyable or movable. However, it has a `constexpr` -// constructor, and is safe to use as a namespace-scoped global variable. 
-class once_flag { - public: - constexpr once_flag() : control_(0) {} - once_flag(const once_flag&) = delete; - once_flag& operator=(const once_flag&) = delete; - - private: - friend std::atomic* base_internal::ControlWord(once_flag* flag); - std::atomic control_; -}; - -//------------------------------------------------------------------------------ -// End of public interfaces. -// Implementation details follow. -//------------------------------------------------------------------------------ - -namespace base_internal { - -// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to -// initialize entities used by the scheduler implementation. -template -void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); - -// Disables scheduling while on stack when scheduling mode is non-cooperative. -// No effect for cooperative scheduling modes. -class SchedulingHelper { - public: - explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) { - if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { - guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); - } - } - - ~SchedulingHelper() { - if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { - base_internal::SchedulingGuard::EnableRescheduling(guard_result_); - } - } - - private: - base_internal::SchedulingMode mode_; - bool guard_result_; -}; - -// Bit patterns for call_once state machine values. Internal implementation -// detail, not for use by clients. -// -// The bit patterns are arbitrarily chosen from unlikely values, to aid in -// debugging. However, kOnceInit must be 0, so that a zero-initialized -// once_flag will be valid for immediate use. -enum { - kOnceInit = 0, - kOnceRunning = 0x65C2937B, - kOnceWaiter = 0x05A308D2, - // A very small constant is chosen for kOnceDone so that it fit in a single - // compare with immediate instruction for most common ISAs. This is verified - // for x86, POWER and ARM. - kOnceDone = 221, // Random Number -}; - -template -ABSL_ATTRIBUTE_NOINLINE -void CallOnceImpl(std::atomic* control, - base_internal::SchedulingMode scheduling_mode, Callable&& fn, - Args&&... args) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + class once_flag; + + namespace base_internal + { + std::atomic* ControlWord(absl::once_flag* flag); + } // namespace base_internal + + // call_once() + // + // For all invocations using a given `once_flag`, invokes a given `fn` exactly + // once across all threads. The first call to `call_once()` with a particular + // `once_flag` argument (that does not throw an exception) will run the + // specified function with the provided `args`; other calls with the same + // `once_flag` argument will not run the function, but will wait + // for the provided function to finish running (if it is still running). + // + // This mechanism provides a safe, simple, and fast mechanism for one-time + // initialization in a multi-threaded process. + // + // Example: + // + // class MyInitClass { + // public: + // ... + // mutable absl::once_flag once_; + // + // MyInitClass* init() const { + // absl::call_once(once_, &MyInitClass::Init, this); + // return ptr_; + // } + // + template + void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); + + // once_flag + // + // Objects of this type are used to distinguish calls to `call_once()` and + // ensure the provided function is only invoked once across all threads. This + // type is not copyable or movable. 
However, it has a `constexpr` + // constructor, and is safe to use as a namespace-scoped global variable. + class once_flag + { + public: + constexpr once_flag() : + control_(0) + { + } + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + friend std::atomic* base_internal::ControlWord(once_flag* flag); + std::atomic control_; + }; + + //------------------------------------------------------------------------------ + // End of public interfaces. + // Implementation details follow. + //------------------------------------------------------------------------------ + + namespace base_internal + { + + // Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to + // initialize entities used by the scheduler implementation. + template + void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); + + // Disables scheduling while on stack when scheduling mode is non-cooperative. + // No effect for cooperative scheduling modes. + class SchedulingHelper + { + public: + explicit SchedulingHelper(base_internal::SchedulingMode mode) : + mode_(mode) + { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) + { + guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); + } + } + + ~SchedulingHelper() + { + if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) + { + base_internal::SchedulingGuard::EnableRescheduling(guard_result_); + } + } + + private: + base_internal::SchedulingMode mode_; + bool guard_result_; + }; + + // Bit patterns for call_once state machine values. Internal implementation + // detail, not for use by clients. + // + // The bit patterns are arbitrarily chosen from unlikely values, to aid in + // debugging. However, kOnceInit must be 0, so that a zero-initialized + // once_flag will be valid for immediate use. + enum + { + kOnceInit = 0, + kOnceRunning = 0x65C2937B, + kOnceWaiter = 0x05A308D2, + // A very small constant is chosen for kOnceDone so that it fit in a single + // compare with immediate instruction for most common ISAs. This is verified + // for x86, POWER and ARM. + kOnceDone = 221, // Random Number + }; + + template + ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(std::atomic* control, base_internal::SchedulingMode scheduling_mode, Callable&& fn, Args&&... args) + { #ifndef NDEBUG - { - uint32_t old_control = control->load(std::memory_order_relaxed); - if (old_control != kOnceInit && - old_control != kOnceRunning && - old_control != kOnceWaiter && - old_control != kOnceDone) { - ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", - static_cast(old_control)); // NOLINT - } - } + { + uint32_t old_control = control->load(std::memory_order_relaxed); + if (old_control != kOnceInit && + old_control != kOnceRunning && + old_control != kOnceWaiter && + old_control != kOnceDone) + { + ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", + static_cast(old_control)); // NOLINT + } + } #endif // NDEBUG - static const base_internal::SpinLockWaitTransition trans[] = { - {kOnceInit, kOnceRunning, true}, - {kOnceRunning, kOnceWaiter, false}, - {kOnceDone, kOnceDone, true}}; - - // Must do this before potentially modifying control word's state. - base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); - // Short circuit the simplest case to avoid procedure call overhead. - // The base_internal::SpinLockWait() call returns either kOnceInit or - // kOnceDone. 
If it returns kOnceDone, it must have loaded the control word - // with std::memory_order_acquire and seen a value of kOnceDone. - uint32_t old_control = kOnceInit; - if (control->compare_exchange_strong(old_control, kOnceRunning, - std::memory_order_relaxed) || - base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, - scheduling_mode) == kOnceInit) { - base_internal::invoke(std::forward(fn), - std::forward(args)...); - old_control = - control->exchange(base_internal::kOnceDone, std::memory_order_release); - if (old_control == base_internal::kOnceWaiter) { - base_internal::SpinLockWake(control, true); + static const base_internal::SpinLockWaitTransition trans[] = { + {kOnceInit, kOnceRunning, true}, + {kOnceRunning, kOnceWaiter, false}, + {kOnceDone, kOnceDone, true}}; + + // Must do this before potentially modifying control word's state. + base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); + // Short circuit the simplest case to avoid procedure call overhead. + // The base_internal::SpinLockWait() call returns either kOnceInit or + // kOnceDone. If it returns kOnceDone, it must have loaded the control word + // with std::memory_order_acquire and seen a value of kOnceDone. + uint32_t old_control = kOnceInit; + if (control->compare_exchange_strong(old_control, kOnceRunning, std::memory_order_relaxed) || + base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, scheduling_mode) == kOnceInit) + { + base_internal::invoke(std::forward(fn), std::forward(args)...); + old_control = + control->exchange(base_internal::kOnceDone, std::memory_order_release); + if (old_control == base_internal::kOnceWaiter) + { + base_internal::SpinLockWake(control, true); + } + } // else *control is already kOnceDone + } + + inline std::atomic* ControlWord(once_flag* flag) + { + return &flag->control_; + } + + template + void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) + { + std::atomic* once = base_internal::ControlWord(flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) + { + base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, std::forward(fn), std::forward(args)...); + } + } + + } // namespace base_internal + + template + void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) + { + std::atomic* once = base_internal::ControlWord(&flag); + uint32_t s = once->load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) + { + base_internal::CallOnceImpl( + once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, std::forward(fn), std::forward(args)... + ); + } } - } // else *control is already kOnceDone -} - -inline std::atomic* ControlWord(once_flag* flag) { - return &flag->control_; -} - -template -void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) { - std::atomic* once = base_internal::ControlWord(flag); - uint32_t s = once->load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { - base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, - std::forward(fn), - std::forward(args)...); - } -} - -} // namespace base_internal - -template -void call_once(absl::once_flag& flag, Callable&& fn, Args&&... 
args) { - std::atomic* once = base_internal::ControlWord(&flag); - uint32_t s = once->load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { - base_internal::CallOnceImpl( - once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, - std::forward(fn), std::forward(args)...); - } -} - -ABSL_NAMESPACE_END + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CALL_ONCE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/casts.h b/CAPI/cpp/grpc/include/absl/base/casts.h index b99adb0..2ebd9e4 100644 --- a/CAPI/cpp/grpc/include/absl/base/casts.h +++ b/CAPI/cpp/grpc/include/absl/base/casts.h @@ -31,68 +31,70 @@ #if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L #include // For std::bit_cast. -#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L #include "absl/base/internal/identity.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// implicit_cast() -// -// Performs an implicit conversion between types following the language -// rules for implicit conversion; if an implicit conversion is otherwise -// allowed by the language in the given context, this function performs such an -// implicit conversion. -// -// Example: -// -// // If the context allows implicit conversion: -// From from; -// To to = from; -// -// // Such code can be replaced by: -// implicit_cast(from); -// -// An `implicit_cast()` may also be used to annotate numeric type conversions -// that, although safe, may produce compiler warnings (such as `long` to `int`). -// Additionally, an `implicit_cast()` is also useful within return statements to -// indicate a specific implicit conversion is being undertaken. -// -// Example: -// -// return implicit_cast(size_in_bytes) / capacity_; -// -// Annotating code with `implicit_cast()` allows you to explicitly select -// particular overloads and template instantiations, while providing a safer -// cast than `reinterpret_cast()` or `static_cast()`. -// -// Additionally, an `implicit_cast()` can be used to allow upcasting within a -// type hierarchy where incorrect use of `static_cast()` could accidentally -// allow downcasting. -// -// Finally, an `implicit_cast()` can be used to perform implicit conversions -// from unrelated types that otherwise couldn't be implicitly cast directly; -// C++ will normally only implicitly cast "one step" in such conversions. -// -// That is, if C is a type which can be implicitly converted to B, with B being -// a type that can be implicitly converted to A, an `implicit_cast()` can be -// used to convert C to B (which the compiler can then implicitly convert to A -// using language rules). -// -// Example: -// -// // Assume an object C is convertible to B, which is implicitly convertible -// // to A -// A a = implicit_cast(C); -// -// Such implicit cast chaining may be useful within template logic. -template -constexpr To implicit_cast(typename absl::internal::identity_t to) { - return to; -} + // implicit_cast() + // + // Performs an implicit conversion between types following the language + // rules for implicit conversion; if an implicit conversion is otherwise + // allowed by the language in the given context, this function performs such an + // implicit conversion. 
+ // + // Example: + // + // // If the context allows implicit conversion: + // From from; + // To to = from; + // + // // Such code can be replaced by: + // implicit_cast(from); + // + // An `implicit_cast()` may also be used to annotate numeric type conversions + // that, although safe, may produce compiler warnings (such as `long` to `int`). + // Additionally, an `implicit_cast()` is also useful within return statements to + // indicate a specific implicit conversion is being undertaken. + // + // Example: + // + // return implicit_cast(size_in_bytes) / capacity_; + // + // Annotating code with `implicit_cast()` allows you to explicitly select + // particular overloads and template instantiations, while providing a safer + // cast than `reinterpret_cast()` or `static_cast()`. + // + // Additionally, an `implicit_cast()` can be used to allow upcasting within a + // type hierarchy where incorrect use of `static_cast()` could accidentally + // allow downcasting. + // + // Finally, an `implicit_cast()` can be used to perform implicit conversions + // from unrelated types that otherwise couldn't be implicitly cast directly; + // C++ will normally only implicitly cast "one step" in such conversions. + // + // That is, if C is a type which can be implicitly converted to B, with B being + // a type that can be implicitly converted to A, an `implicit_cast()` can be + // used to convert C to B (which the compiler can then implicitly convert to A + // using language rules). + // + // Example: + // + // // Assume an object C is convertible to B, which is implicitly convertible + // // to A + // A a = implicit_cast(C); + // + // Such implicit cast chaining may be useful within template logic. + template + constexpr To implicit_cast(typename absl::internal::identity_t to) + { + return to; + } // bit_cast() // @@ -145,36 +147,33 @@ constexpr To implicit_cast(typename absl::internal::identity_t to) { // `std::bit_cast`. 
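// Illustrative usage sketch (not upstream; assumes <cstdint> is included):
//
//   float f = 1.5f;
//   uint32_t bits = absl::bit_cast<uint32_t>(f);  // bits == 0x3FC00000
//   float back = absl::bit_cast<float>(bits);     // back == 1.5f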
#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L -using std::bit_cast; + using std::bit_cast; #else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L -template ::value && - type_traits_internal::is_trivially_copyable::value + template::value && type_traits_internal::is_trivially_copyable::value #if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) - && std::is_default_constructible::value + && std::is_default_constructible::value #endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) - , - int>::type = 0> + , + int>::type = 0> #if ABSL_HAVE_BUILTIN(__builtin_bit_cast) -inline constexpr Dest bit_cast(const Source& source) { - return __builtin_bit_cast(Dest, source); -} -#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) -inline Dest bit_cast(const Source& source) { - Dest dest; - memcpy(static_cast(std::addressof(dest)), - static_cast(std::addressof(source)), sizeof(dest)); - return dest; -} + inline constexpr Dest bit_cast(const Source& source) + { + return __builtin_bit_cast(Dest, source); + } +#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) + inline Dest bit_cast(const Source& source) + { + Dest dest; + memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); + return dest; + } #endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) #endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CASTS_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/config.h b/CAPI/cpp/grpc/include/absl/base/config.h index 705ecea..3c5dab8 100644 --- a/CAPI/cpp/grpc/include/absl/base/config.h +++ b/CAPI/cpp/grpc/include/absl/base/config.h @@ -151,18 +151,12 @@ #if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ - ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) -static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', - "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " - "not be empty."); -static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || - ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || - ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || - ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || - ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', - "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " - "be changed to a new, unique identifier name."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "not be empty."); +static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); #endif @@ -171,14 +165,15 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_NAMESPACE_END #define ABSL_INTERNAL_C_SYMBOL(x) x #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 -#define ABSL_NAMESPACE_BEGIN \ - inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { +#define ABSL_NAMESPACE_BEGIN \ + inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME \ + { #define ABSL_NAMESPACE_END } #define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v #define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ - ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) + 
ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) #define ABSL_INTERNAL_C_SYMBOL(x) \ - ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) + ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) #else #error options.h is misconfigured. #endif @@ -212,14 +207,14 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ - (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) + (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) #else #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 #endif #if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ - (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) + (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) #else #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 #endif @@ -336,8 +331,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_HAVE_INTRINSIC_INT128 #error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set #elif defined(__SIZEOF_INT128__) -#if (defined(__clang__) && !defined(_WIN32)) || \ - (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) #define ABSL_HAVE_INTRINSIC_INT128 1 #elif defined(__CUDACC__) @@ -511,8 +506,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." #endif -#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ - __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define ABSL_IS_LITTLE_ENDIAN 1 #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ @@ -721,8 +715,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_INTERNAL_MANGLED_NS "absl" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" #else -#define ABSL_INTERNAL_MANGLED_NS \ - ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl" +#define ABSL_INTERNAL_MANGLED_NS \ + ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) \ + "@absl" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" #endif #endif diff --git a/CAPI/cpp/grpc/include/absl/base/const_init.h b/CAPI/cpp/grpc/include/absl/base/const_init.h index 16520b6..4a0ef13 100644 --- a/CAPI/cpp/grpc/include/absl/base/const_init.h +++ b/CAPI/cpp/grpc/include/absl/base/const_init.h @@ -63,14 +63,16 @@ // The absl::kConstInit tag should only be used to define objects with static // or thread_local storage duration. 
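// Illustrative usage sketch (not upstream; `MyType` is a hypothetical type with
// a constexpr constructor taking absl::ConstInitType, as absl::Mutex has):
//
//   ABSL_CONST_INIT static MyType obj(absl::kConstInit);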
-namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -enum ConstInitType { - kConstInit, -}; + enum ConstInitType + { + kConstInit, + }; -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CONST_INIT_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h index 3ea7c15..82db73b 100644 --- a/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h +++ b/CAPI/cpp/grpc/include/absl/base/dynamic_annotations.h @@ -90,12 +90,14 @@ // Read/write annotations are enabled in Annotalysis mode; disabled otherwise. #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ - ABSL_INTERNAL_ANNOTALYSIS_ENABLED + ABSL_INTERNAL_ANNOTALYSIS_ENABLED #endif // ABSL_HAVE_THREAD_SANITIZER #ifdef __cplusplus -#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_BEGIN_EXTERN_C \ + extern "C" \ + { #define ABSL_INTERNAL_END_EXTERN_C } // extern "C" #define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F #define ABSL_INTERNAL_STATIC_INLINE inline @@ -123,29 +125,30 @@ // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // point where `pointer` has been allocated, preferably close to the point // where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. -#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ - (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) +#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) // Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to // the memory range [`address`, `address`+`size`). #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ - (__FILE__, __LINE__, address, size, description) + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) // Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. // This annotation could be useful if you want to skip expensive race analysis // during some period of program execution, e.g. during initialization. -#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ - (__FILE__, __LINE__, enable) +#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) // ------------------------------------------------------------- // Annotations useful for debugging. // Report the current thread `name` to a race detector. -#define ABSL_ANNOTATE_THREAD_NAME(name) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) +#define ABSL_ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \ + (__FILE__, __LINE__, name) // ------------------------------------------------------------- // Annotations useful when implementing locks. They are not normally needed by @@ -153,66 +156,62 @@ // object. // Report that a lock has been created at address `lock`. 
-#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) +#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \ + (__FILE__, __LINE__, lock) // Report that a linker initialized lock has been created at address `lock`. #ifdef ABSL_HAVE_THREAD_SANITIZER -#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ - (__FILE__, __LINE__, lock) +#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) #else #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ - ABSL_ANNOTATE_RWLOCK_CREATE(lock) + ABSL_ANNOTATE_RWLOCK_CREATE(lock) #endif // Report that the lock at address `lock` is about to be destroyed. -#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) +#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \ + (__FILE__, __LINE__, lock) // Report that the lock at address `lock` has been acquired. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. -#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ - (__FILE__, __LINE__, lock, is_w) +#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) // Report that the lock at address `lock` is about to be released. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. -#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ - (__FILE__, __LINE__, lock, is_w) +#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) // Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. -#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ - namespace { \ - class static_var##_annotator { \ - public: \ - static_var##_annotator() { \ - ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ - #static_var ": " description); \ - } \ - }; \ - static static_var##_annotator the##static_var##_annotator; \ - } // namespace +#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace \ + { \ + class static_var##_annotator \ + { \ + public: \ + static_var##_annotator() \ + { \ + ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. 
ABSL_INTERNAL_BEGIN_EXTERN_C -void AnnotateRWLockCreate(const char* file, int line, - const volatile void* lock); -void AnnotateRWLockCreateStatic(const char* file, int line, - const volatile void* lock); -void AnnotateRWLockDestroy(const char* file, int line, - const volatile void* lock); -void AnnotateRWLockAcquired(const char* file, int line, - const volatile void* lock, long is_w); // NOLINT -void AnnotateRWLockReleased(const char* file, int line, - const volatile void* lock, long is_w); // NOLINT -void AnnotateBenignRace(const char* file, int line, - const volatile void* address, const char* description); -void AnnotateBenignRaceSized(const char* file, int line, - const volatile void* address, size_t size, - const char* description); +void AnnotateRWLockCreate(const char* file, int line, const volatile void* lock); +void AnnotateRWLockCreateStatic(const char* file, int line, const volatile void* lock); +void AnnotateRWLockDestroy(const char* file, int line, const volatile void* lock); +void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w); // NOLINT +void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w); // NOLINT +void AnnotateBenignRace(const char* file, int line, const volatile void* address, const char* description); +void AnnotateBenignRaceSized(const char* file, int line, const volatile void* address, size_t size, const char* description); void AnnotateThreadName(const char* file, int line, const char* name); void AnnotateEnableRaceDetection(const char* file, int line, int enable); ABSL_INTERNAL_END_EXTERN_C @@ -240,25 +239,27 @@ ABSL_INTERNAL_END_EXTERN_C #include #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ - __msan_unpoison(address, size) + __msan_unpoison(address, size) #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ - __msan_allocated_memory(address, size) + __msan_allocated_memory(address, size) #else // !defined(ABSL_HAVE_MEMORY_SANITIZER) // TODO(rogeeff): remove this branch #ifdef ABSL_HAVE_THREAD_SANITIZER #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ - do { \ - (void)(address); \ - (void)(size); \ - } while (0) + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ - do { \ - (void)(address); \ - (void)(size); \ - } while (0) + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) #else #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty @@ -274,9 +275,9 @@ ABSL_INTERNAL_END_EXTERN_C #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ - __attribute((exclusive_lock_function("*"))) + __attribute((exclusive_lock_function("*"))) #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ - __attribute((unlock_function("*"))) + __attribute((unlock_function("*"))) #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) @@ -297,22 +298,21 @@ ABSL_INTERNAL_END_EXTERN_C // ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey // reads, while still checking other reads and all writes. // See also ABSL_ANNOTATE_UNPROTECTED_READ. -#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ - (__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) // Stop ignoring reads. 
-#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ - (__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. ABSL_INTERNAL_BEGIN_EXTERN_C void AnnotateIgnoreReadsBegin(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; -void AnnotateIgnoreReadsEnd(const char* file, - int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; +void AnnotateIgnoreReadsEnd(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; ABSL_INTERNAL_END_EXTERN_C #elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) @@ -324,23 +324,31 @@ ABSL_INTERNAL_END_EXTERN_C // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. -#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED( \ - ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ - () +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin) \ + ) \ + () -#define ABSL_ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED( \ - ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ - () +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd) \ + ) \ + () ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( - AbslInternalAnnotateIgnoreReadsBegin)() - ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} + AbslInternalAnnotateIgnoreReadsBegin +)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE +{ +} ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( - AbslInternalAnnotateIgnoreReadsEnd)() - ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} + AbslInternalAnnotateIgnoreReadsEnd +)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE +{ +} #else @@ -355,12 +363,14 @@ ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( #if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 // Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. -#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \ + (__FILE__, __LINE__) // Stop ignoring writes. -#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \ + (__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. @@ -391,37 +401,42 @@ ABSL_INTERNAL_END_EXTERN_C // Start ignoring all memory accesses (both reads and writes). #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ - do { \ - ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ - ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ - } while (0) + do \ + { \ + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ + ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) // Stop ignoring both reads and writes. 
#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ - do { \ - ABSL_ANNOTATE_IGNORE_WRITES_END(); \ - ABSL_ANNOTATE_IGNORE_READS_END(); \ - } while (0) + do \ + { \ + ABSL_ANNOTATE_IGNORE_WRITES_END(); \ + ABSL_ANNOTATE_IGNORE_READS_END(); \ + } while (0) #ifdef __cplusplus // ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. #define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ - absl::base_internal::AnnotateUnprotectedRead(x) - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -template -inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT - ABSL_ANNOTATE_IGNORE_READS_BEGIN(); - T res = x; - ABSL_ANNOTATE_IGNORE_READS_END(); - return res; -} - -} // namespace base_internal -ABSL_NAMESPACE_END + absl::base_internal::AnnotateUnprotectedRead(x) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + template + inline T AnnotateUnprotectedRead(const volatile T& x) + { // NOLINT + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ABSL_ANNOTATE_IGNORE_READS_END(); + return res; + } + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif @@ -443,11 +458,12 @@ ABSL_NAMESPACE_END #include #define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ - __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) #define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ - struct { \ - alignas(8) char x[8]; \ - } name + struct \ + { \ + alignas(8) char x[8]; \ + } name #else diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h index ae21cd7..8cef0e1 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook.h @@ -35,12 +35,14 @@ #define ABSL_HAVE_WORKING_ATOMIC_POINTER 1 #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -template -class AtomicHook; + template + class AtomicHook; // To workaround AtomicHook not being constant-initializable on some platforms, // prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES` @@ -51,150 +53,173 @@ class AtomicHook; #define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES #endif -// `AtomicHook` is a helper class, templatized on a raw function pointer type, -// for implementing Abseil customization hooks. It is a callable object that -// dispatches to the registered hook. Objects of type `AtomicHook` must have -// static or thread storage duration. -// -// A default constructed object performs a no-op (and returns a default -// constructed object) if no hook has been registered. -// -// Hooks can be pre-registered via constant initialization, for example: -// -// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook -// my_hook(DefaultAction); -// -// and then changed at runtime via a call to `Store()`. -// -// Reads and writes guarantee memory_order_acquire/memory_order_release -// semantics. -template -class AtomicHook { - public: - using FnPtr = ReturnType (*)(Args...); - - // Constructs an object that by default performs a no-op (and - // returns a default constructed object) when no hook as been registered. - constexpr AtomicHook() : AtomicHook(DummyFunction) {} - - // Constructs an object that by default dispatches to/returns the - // pre-registered default_fn when no hook has been registered at runtime. 
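// A minimal usage sketch for the ABSL_ANNOTATE_UNPROTECTED_READ macro defined
// above, assuming this hunk belongs to the public "absl/base/dynamic_annotations.h"
// header; the flag and function names below are illustrative, not from the diff.
#include "absl/base/dynamic_annotations.h"

namespace example {

static bool g_stop_requested = false;  // written by another thread without a lock

inline bool StopRequested() {
  // Tells a race detector that this unsynchronized read is intentional while
  // every other access to g_stop_requested is still checked.
  return ABSL_ANNOTATE_UNPROTECTED_READ(g_stop_requested);
}

}  // namespace example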
+ // `AtomicHook` is a helper class, templatized on a raw function pointer type, + // for implementing Abseil customization hooks. It is a callable object that + // dispatches to the registered hook. Objects of type `AtomicHook` must have + // static or thread storage duration. + // + // A default constructed object performs a no-op (and returns a default + // constructed object) if no hook has been registered. + // + // Hooks can be pre-registered via constant initialization, for example: + // + // ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook + // my_hook(DefaultAction); + // + // and then changed at runtime via a call to `Store()`. + // + // Reads and writes guarantee memory_order_acquire/memory_order_release + // semantics. + template + class AtomicHook + { + public: + using FnPtr = ReturnType (*)(Args...); + + // Constructs an object that by default performs a no-op (and + // returns a default constructed object) when no hook as been registered. + constexpr AtomicHook() : + AtomicHook(DummyFunction) + { + } + + // Constructs an object that by default dispatches to/returns the + // pre-registered default_fn when no hook has been registered at runtime. #if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT - explicit constexpr AtomicHook(FnPtr default_fn) - : hook_(default_fn), default_fn_(default_fn) {} + explicit constexpr AtomicHook(FnPtr default_fn) : + hook_(default_fn), + default_fn_(default_fn) + { + } #elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT - explicit constexpr AtomicHook(FnPtr default_fn) - : hook_(kUninitialized), default_fn_(default_fn) {} + explicit constexpr AtomicHook(FnPtr default_fn) : + hook_(kUninitialized), + default_fn_(default_fn) + { + } #else - // As of January 2020, on all known versions of MSVC this constructor runs in - // the global constructor sequence. If `Store()` is called by a dynamic - // initializer, we want to preserve the value, even if this constructor runs - // after the call to `Store()`. If not, `hook_` will be - // zero-initialized by the linker and we have no need to set it. - // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html - explicit constexpr AtomicHook(FnPtr default_fn) - : /* hook_(deliberately omitted), */ default_fn_(default_fn) { - static_assert(kUninitialized == 0, "here we rely on zero-initialization"); - } + // As of January 2020, on all known versions of MSVC this constructor runs in + // the global constructor sequence. If `Store()` is called by a dynamic + // initializer, we want to preserve the value, even if this constructor runs + // after the call to `Store()`. If not, `hook_` will be + // zero-initialized by the linker and we have no need to set it. + // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html + explicit constexpr AtomicHook(FnPtr default_fn) : + /* hook_(deliberately omitted), */ default_fn_(default_fn) + { + static_assert(kUninitialized == 0, "here we rely on zero-initialization"); + } #endif - // Stores the provided function pointer as the value for this hook. - // - // This is intended to be called once. Multiple calls are legal only if the - // same function pointer is provided for each call. The store is implemented - // as a memory_order_release operation, and read accesses are implemented as - // memory_order_acquire. 
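// A brief sketch of how the AtomicHook described above is typically wired up:
// the template argument list has been stripped from this diff, so the
// `void (*)(const char*)` spelling below is an assumption, and the hook and
// function names are illustrative only.
#include "absl/base/internal/atomic_hook.h"

namespace example {

inline void DefaultLog(const char*) {}  // assumed default implementation

// Pre-registered via constant initialization, as the comment above suggests.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static
    absl::base_internal::AtomicHook<void (*)(const char*)> log_hook(DefaultLog);

inline void SetLogHook(void (*fn)(const char*)) {
  log_hook.Store(fn);  // release store; intended to be called at most once
}

inline void Log(const char* msg) {
  log_hook(msg);  // acquire load, then dispatch to the registered hook
}

}  // namespace example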
- void Store(FnPtr fn) { - bool success = DoStore(fn); - static_cast(success); - assert(success); - } - - // Invokes the registered callback. If no callback has yet been registered, a - // default-constructed object of the appropriate type is returned instead. - template - ReturnType operator()(CallArgs&&... args) const { - return DoLoad()(std::forward(args)...); - } - - // Returns the registered callback, or nullptr if none has been registered. - // Useful if client code needs to conditionalize behavior based on whether a - // callback was registered. - // - // Note that atomic_hook.Load()() and atomic_hook() have different semantics: - // operator()() will perform a no-op if no callback was registered, while - // Load()() will dereference a null function pointer. Prefer operator()() to - // Load()() unless you must conditionalize behavior on whether a hook was - // registered. - FnPtr Load() const { - FnPtr ptr = DoLoad(); - return (ptr == DummyFunction) ? nullptr : ptr; - } - - private: - static ReturnType DummyFunction(Args...) { - return ReturnType(); - } - - // Current versions of MSVC (as of September 2017) have a broken - // implementation of std::atomic: Its constructor attempts to do the - // equivalent of a reinterpret_cast in a constexpr context, which is not - // allowed. - // - // This causes an issue when building with LLVM under Windows. To avoid this, - // we use a less-efficient, intptr_t-based implementation on Windows. + // Stores the provided function pointer as the value for this hook. + // + // This is intended to be called once. Multiple calls are legal only if the + // same function pointer is provided for each call. The store is implemented + // as a memory_order_release operation, and read accesses are implemented as + // memory_order_acquire. + void Store(FnPtr fn) + { + bool success = DoStore(fn); + static_cast(success); + assert(success); + } + + // Invokes the registered callback. If no callback has yet been registered, a + // default-constructed object of the appropriate type is returned instead. + template + ReturnType operator()(CallArgs&&... args) const + { + return DoLoad()(std::forward(args)...); + } + + // Returns the registered callback, or nullptr if none has been registered. + // Useful if client code needs to conditionalize behavior based on whether a + // callback was registered. + // + // Note that atomic_hook.Load()() and atomic_hook() have different semantics: + // operator()() will perform a no-op if no callback was registered, while + // Load()() will dereference a null function pointer. Prefer operator()() to + // Load()() unless you must conditionalize behavior on whether a hook was + // registered. + FnPtr Load() const + { + FnPtr ptr = DoLoad(); + return (ptr == DummyFunction) ? nullptr : ptr; + } + + private: + static ReturnType DummyFunction(Args...) + { + return ReturnType(); + } + + // Current versions of MSVC (as of September 2017) have a broken + // implementation of std::atomic: Its constructor attempts to do the + // equivalent of a reinterpret_cast in a constexpr context, which is not + // allowed. + // + // This causes an issue when building with LLVM under Windows. To avoid this, + // we use a less-efficient, intptr_t-based implementation on Windows. #if ABSL_HAVE_WORKING_ATOMIC_POINTER - // Return the stored value, or DummyFunction if no value has been stored. - FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); } - - // Store the given value. 
Returns false if a different value was already - // stored to this object. - bool DoStore(FnPtr fn) { - assert(fn); - FnPtr expected = default_fn_; - const bool store_succeeded = hook_.compare_exchange_strong( - expected, fn, std::memory_order_acq_rel, std::memory_order_acquire); - const bool same_value_already_stored = (expected == fn); - return store_succeeded || same_value_already_stored; - } - - std::atomic hook_; + // Return the stored value, or DummyFunction if no value has been stored. + FnPtr DoLoad() const + { + return hook_.load(std::memory_order_acquire); + } + + // Store the given value. Returns false if a different value was already + // stored to this object. + bool DoStore(FnPtr fn) + { + assert(fn); + FnPtr expected = default_fn_; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, fn, std::memory_order_acq_rel, std::memory_order_acquire + ); + const bool same_value_already_stored = (expected == fn); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; #else // !ABSL_HAVE_WORKING_ATOMIC_POINTER - // Use a sentinel value unlikely to be the address of an actual function. - static constexpr intptr_t kUninitialized = 0; - - static_assert(sizeof(intptr_t) >= sizeof(FnPtr), - "intptr_t can't contain a function pointer"); - - FnPtr DoLoad() const { - const intptr_t value = hook_.load(std::memory_order_acquire); - if (value == kUninitialized) { - return default_fn_; - } - return reinterpret_cast(value); - } - - bool DoStore(FnPtr fn) { - assert(fn); - const auto value = reinterpret_cast(fn); - intptr_t expected = kUninitialized; - const bool store_succeeded = hook_.compare_exchange_strong( - expected, value, std::memory_order_acq_rel, std::memory_order_acquire); - const bool same_value_already_stored = (expected == value); - return store_succeeded || same_value_already_stored; - } - - std::atomic hook_; + // Use a sentinel value unlikely to be the address of an actual function. 
+ static constexpr intptr_t kUninitialized = 0; + + static_assert(sizeof(intptr_t) >= sizeof(FnPtr), "intptr_t can't contain a function pointer"); + + FnPtr DoLoad() const + { + const intptr_t value = hook_.load(std::memory_order_acquire); + if (value == kUninitialized) + { + return default_fn_; + } + return reinterpret_cast(value); + } + + bool DoStore(FnPtr fn) + { + assert(fn); + const auto value = reinterpret_cast(fn); + intptr_t expected = kUninitialized; + const bool store_succeeded = hook_.compare_exchange_strong( + expected, value, std::memory_order_acq_rel, std::memory_order_acquire + ); + const bool same_value_already_stored = (expected == value); + return store_succeeded || same_value_already_stored; + } + + std::atomic hook_; #endif - const FnPtr default_fn_; -}; + const FnPtr default_fn_; + }; #undef ABSL_HAVE_WORKING_ATOMIC_POINTER #undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h index 3e72b49..1df0513 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/atomic_hook_test_helper.h @@ -17,18 +17,20 @@ #include "absl/base/internal/atomic_hook.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace atomic_hook_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace atomic_hook_internal + { -using VoidF = void (*)(); -extern absl::base_internal::AtomicHook func; -extern int default_func_calls; -void DefaultFunc(); -void RegisterFunc(VoidF func); + using VoidF = void (*)(); + extern absl::base_internal::AtomicHook func; + extern int default_func_calls; + void DefaultFunc(); + void RegisterFunc(VoidF func); -} // namespace atomic_hook_internal -ABSL_NAMESPACE_END + } // namespace atomic_hook_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h index 9704e38..9656963 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/cycleclock.h @@ -49,111 +49,120 @@ #include "absl/base/config.h" #include "absl/base/internal/unscaledcycleclock.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -using CycleClockSourceFunc = int64_t (*)(); - -// ----------------------------------------------------------------------------- -// CycleClock -// ----------------------------------------------------------------------------- -class CycleClock { - public: - // CycleClock::Now() - // - // Returns the value of a cycle counter that counts at a rate that is - // approximately constant. - static int64_t Now(); - - // CycleClock::Frequency() - // - // Returns the amount by which `CycleClock::Now()` increases per second. Note - // that this value may not necessarily match the core CPU clock frequency. 
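// A small sketch of how CycleClock::Now() and CycleClock::Frequency() from
// this header are commonly combined to turn a cycle-counter delta into
// seconds; the helper names here are illustrative only.
#include <cstdint>

#include "absl/base/internal/cycleclock.h"

namespace example {

inline int64_t ReadCycleCounter() {
  return absl::base_internal::CycleClock::Now();
}

inline double ElapsedSeconds(int64_t start_cycles, int64_t end_cycles) {
  // Frequency() reports how much Now() advances per second, so dividing the
  // cycle delta by it converts the (possibly scaled) counter into seconds.
  return static_cast<double>(end_cycles - start_cycles) /
         absl::base_internal::CycleClock::Frequency();
}

}  // namespace example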
- static double Frequency(); - - private: +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + using CycleClockSourceFunc = int64_t (*)(); + + // ----------------------------------------------------------------------------- + // CycleClock + // ----------------------------------------------------------------------------- + class CycleClock + { + public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + + private: #if ABSL_USE_UNSCALED_CYCLECLOCK - static CycleClockSourceFunc LoadCycleClockSource(); + static CycleClockSourceFunc LoadCycleClockSource(); #ifdef NDEBUG #ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY - // Not debug mode and the UnscaledCycleClock frequency is the CPU - // frequency. Scale the CycleClock to prevent overflow if someone - // tries to represent the time as cycles since the Unix epoch. - static constexpr int32_t kShift = 1; + // Not debug mode and the UnscaledCycleClock frequency is the CPU + // frequency. Scale the CycleClock to prevent overflow if someone + // tries to represent the time as cycles since the Unix epoch. + static constexpr int32_t kShift = 1; #else - // Not debug mode and the UnscaledCycleClock isn't operating at the - // raw CPU frequency. There is no need to do any scaling, so don't - // needlessly sacrifice precision. - static constexpr int32_t kShift = 0; + // Not debug mode and the UnscaledCycleClock isn't operating at the + // raw CPU frequency. There is no need to do any scaling, so don't + // needlessly sacrifice precision. + static constexpr int32_t kShift = 0; #endif #else // NDEBUG - // In debug mode use a different shift to discourage depending on a - // particular shift value. - static constexpr int32_t kShift = 2; + // In debug mode use a different shift to discourage depending on a + // particular shift value. + static constexpr int32_t kShift = 2; #endif // NDEBUG - static constexpr double kFrequencyScale = 1.0 / (1 << kShift); - ABSL_CONST_INIT static std::atomic cycle_clock_source_; + static constexpr double kFrequencyScale = 1.0 / (1 << kShift); + ABSL_CONST_INIT static std::atomic cycle_clock_source_; #endif // ABSL_USE_UNSCALED_CYCLECLOC - CycleClock() = delete; // no instances - CycleClock(const CycleClock&) = delete; - CycleClock& operator=(const CycleClock&) = delete; - - friend class CycleClockSource; -}; - -class CycleClockSource { - private: - // CycleClockSource::Register() - // - // Register a function that provides an alternate source for the unscaled CPU - // cycle count value. The source function must be async signal safe, must not - // call CycleClock::Now(), and must have a frequency that matches that of the - // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use - // the default source. - static void Register(CycleClockSourceFunc source); -}; + CycleClock() = delete; // no instances + CycleClock(const CycleClock&) = delete; + CycleClock& operator=(const CycleClock&) = delete; + + friend class CycleClockSource; + }; + + class CycleClockSource + { + private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. 
The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); + }; #if ABSL_USE_UNSCALED_CYCLECLOCK -inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { + inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() + { #if !defined(__x86_64__) - // Optimize for the common case (no callback) by first doing a relaxed load; - // this is significantly faster on non-x86 platforms. - if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { - return nullptr; - } + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) + { + return nullptr; + } #endif // !defined(__x86_64__) - // This corresponds to the store(std::memory_order_release) in - // CycleClockSource::Register, and makes sure that any updates made prior to - // registering the callback are visible to this thread before the callback - // is invoked. - return cycle_clock_source_.load(std::memory_order_acquire); -} + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback + // is invoked. + return cycle_clock_source_.load(std::memory_order_acquire); + } // Accessing globals in inlined code in Window DLLs is problematic. #ifndef _WIN32 -inline int64_t CycleClock::Now() { - auto fn = LoadCycleClockSource(); - if (fn == nullptr) { - return base_internal::UnscaledCycleClock::Now() >> kShift; - } - return fn() >> kShift; -} + inline int64_t CycleClock::Now() + { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) + { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; + } #endif -inline double CycleClock::Frequency() { - return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); -} + inline double CycleClock::Frequency() + { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); + } #endif // ABSL_USE_UNSCALED_CYCLECLOCK -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h index e492bb0..4d7476b 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/direct_mmap.h @@ -65,14 +65,16 @@ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); #define SYS_mmap2 __NR_mmap2 #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -// Platform specific logic extracted from -// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h -inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, - off64_t offset) noexcept { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Platform specific logic extracted from + // https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h + inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off64_t offset) noexcept + { #if 
defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ defined(__m68k__) || defined(__sh__) || \ (defined(__hppa__) && !defined(__LP64__)) || \ @@ -81,37 +83,39 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, (defined(__riscv) && __riscv_xlen == 32) || \ (defined(__s390__) && !defined(__s390x__)) || \ (defined(__sparc__) && !defined(__arch64__)) - // On these architectures, implement mmap with mmap2. - static int pagesize = 0; - if (pagesize == 0) { + // On these architectures, implement mmap with mmap2. + static int pagesize = 0; + if (pagesize == 0) + { #if defined(__wasm__) || defined(__asmjs__) - pagesize = getpagesize(); + pagesize = getpagesize(); #else - pagesize = sysconf(_SC_PAGESIZE); + pagesize = sysconf(_SC_PAGESIZE); #endif - } - if (offset < 0 || offset % pagesize != 0) { - errno = EINVAL; - return MAP_FAILED; - } + } + if (offset < 0 || offset % pagesize != 0) + { + errno = EINVAL; + return MAP_FAILED; + } #ifdef __BIONIC__ - // SYS_mmap2 has problems on Android API level <= 16. - // Workaround by invoking __mmap2() instead. - return __mmap2(start, length, prot, flags, fd, offset / pagesize); + // SYS_mmap2 has problems on Android API level <= 16. + // Workaround by invoking __mmap2() instead. + return __mmap2(start, length, prot, flags, fd, offset / pagesize); #else - return reinterpret_cast( - syscall(SYS_mmap2, start, length, prot, flags, fd, - static_cast(offset / pagesize))); + return reinterpret_cast( + syscall(SYS_mmap2, start, length, prot, flags, fd, static_cast(offset / pagesize)) + ); #endif #elif defined(__s390x__) - // On s390x, mmap() arguments are passed in memory. - unsigned long buf[6] = {reinterpret_cast(start), // NOLINT - static_cast(length), // NOLINT - static_cast(prot), // NOLINT - static_cast(flags), // NOLINT - static_cast(fd), // NOLINT - static_cast(offset)}; // NOLINT - return reinterpret_cast(syscall(SYS_mmap, buf)); + // On s390x, mmap() arguments are passed in memory. + unsigned long buf[6] = {reinterpret_cast(start), // NOLINT + static_cast(length), // NOLINT + static_cast(prot), // NOLINT + static_cast(flags), // NOLINT + static_cast(fd), // NOLINT + static_cast(offset)}; // NOLINT + return reinterpret_cast(syscall(SYS_mmap, buf)); #elif defined(__x86_64__) // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. // We need to explicitly cast to an unsigned 64 bit type to avoid implicit @@ -120,24 +124,25 @@ inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, // to an integer of a different size. We also need to make sure __off64_t // isn't truncated to 32-bits under x32. #define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) - return reinterpret_cast( - syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), - MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), - MMAP_SYSCALL_ARG(fd), static_cast(offset))); + return reinterpret_cast( + syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), MMAP_SYSCALL_ARG(fd), static_cast(offset)) + ); #undef MMAP_SYSCALL_ARG #else // Remaining 64-bit aritectures. 
- static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); - return reinterpret_cast( - syscall(SYS_mmap, start, length, prot, flags, fd, offset)); + static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); + return reinterpret_cast( + syscall(SYS_mmap, start, length, prot, flags, fd, offset) + ); #endif -} + } -inline int DirectMunmap(void* start, size_t length) { - return static_cast(syscall(SYS_munmap, start, length)); -} + inline int DirectMunmap(void* start, size_t length) + { + return static_cast(syscall(SYS_munmap, start, length)); + } -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #else // !__linux__ @@ -145,21 +150,24 @@ ABSL_NAMESPACE_END // For non-linux platforms where we have mmap, just dispatch directly to the // actual mmap()/munmap() methods. -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, - off_t offset) { - return mmap(start, length, prot, flags, fd, offset); -} + inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off_t offset) + { + return mmap(start, length, prot, flags, fd, offset); + } -inline int DirectMunmap(void* start, size_t length) { - return munmap(start, length); -} + inline int DirectMunmap(void* start, size_t length) + { + return munmap(start, length); + } -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // __linux__ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h index b23c5ec..4fb5e13 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/dynamic_annotations.h @@ -82,10 +82,10 @@ // ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ - defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) // Read/write annotations are enabled in Annotalysis mode; disabled otherwise. #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ - ABSL_INTERNAL_ANNOTALYSIS_ENABLED + ABSL_INTERNAL_ANNOTALYSIS_ENABLED #endif // Memory annotations are also made available to LLVM's Memory Sanitizer @@ -98,7 +98,9 @@ #endif #ifdef __cplusplus -#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_BEGIN_EXTERN_C \ + extern "C" \ + { #define ABSL_INTERNAL_END_EXTERN_C } // extern "C" #define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F #define ABSL_INTERNAL_STATIC_INLINE inline @@ -123,29 +125,30 @@ // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // point where `pointer` has been allocated, preferably close to the point // where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. -#define ANNOTATE_BENIGN_RACE(pointer, description) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ - (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) +#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) // Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to // the memory range [`address`, `address`+`size`). 
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ - (__FILE__, __LINE__, address, size, description) + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) // Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. // This annotation could be useful if you want to skip expensive race analysis // during some period of program execution, e.g. during initialization. -#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ - (__FILE__, __LINE__, enable) +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) // ------------------------------------------------------------- // Annotations useful for debugging. // Report the current thread `name` to a race detector. -#define ANNOTATE_THREAD_NAME(name) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName) \ + (__FILE__, __LINE__, name) // ------------------------------------------------------------- // Annotations useful when implementing locks. They are not normally needed by @@ -153,46 +156,50 @@ // object. // Report that a lock has been created at address `lock`. -#define ANNOTATE_RWLOCK_CREATE(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate) \ + (__FILE__, __LINE__, lock) // Report that a linker initialized lock has been created at address `lock`. #ifdef ABSL_HAVE_THREAD_SANITIZER -#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ - (__FILE__, __LINE__, lock) +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) #else #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) #endif // Report that the lock at address `lock` is about to be destroyed. -#define ANNOTATE_RWLOCK_DESTROY(lock) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) +#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy) \ + (__FILE__, __LINE__, lock) // Report that the lock at address `lock` has been acquired. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. -#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ - (__FILE__, __LINE__, lock, is_w) +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) // Report that the lock at address `lock` is about to be released. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. -#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ - (__FILE__, __LINE__, lock, is_w) +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) // Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. 
-#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ - namespace { \ - class static_var##_annotator { \ - public: \ - static_var##_annotator() { \ - ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ - #static_var ": " description); \ - } \ - }; \ - static static_var##_annotator the##static_var##_annotator; \ - } // namespace +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace \ + { \ + class static_var##_annotator \ + { \ + public: \ + static_var##_annotator() \ + { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace #else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 @@ -217,24 +224,26 @@ #include #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ - __msan_unpoison(address, size) + __msan_unpoison(address, size) #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ - __msan_allocated_memory(address, size) + __msan_allocated_memory(address, size) #else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 #if DYNAMIC_ANNOTATIONS_ENABLED == 1 #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ - do { \ - (void)(address); \ - (void)(size); \ - } while (0) + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ - do { \ - (void)(address); \ - (void)(size); \ - } while (0) + do \ + { \ + (void)(address); \ + (void)(size); \ + } while (0) #else #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty @@ -248,9 +257,9 @@ #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ - __attribute((exclusive_lock_function("*"))) + __attribute((exclusive_lock_function("*"))) #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ - __attribute((unlock_function("*"))) + __attribute((unlock_function("*"))) #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) @@ -268,12 +277,14 @@ // ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey // reads, while still checking other reads and all writes. // See also ANNOTATE_UNPROTECTED_READ. -#define ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ + (__FILE__, __LINE__) // Stop ignoring reads. -#define ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ + (__FILE__, __LINE__) #elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) @@ -284,11 +295,13 @@ // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. -#define ANNOTATE_IGNORE_READS_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin) \ + () -#define ANNOTATE_IGNORE_READS_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd) \ + () #else @@ -303,12 +316,14 @@ #if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 // Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. 
-#define ANNOTATE_IGNORE_WRITES_BEGIN() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin) \ + (__FILE__, __LINE__) // Stop ignoring writes. -#define ANNOTATE_IGNORE_WRITES_END() \ - ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd) \ + (__FILE__, __LINE__) #else @@ -332,22 +347,24 @@ // Start ignoring all memory accesses (both reads and writes). #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ - do { \ - ANNOTATE_IGNORE_READS_BEGIN(); \ - ANNOTATE_IGNORE_WRITES_BEGIN(); \ - } while (0) + do \ + { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) // Stop ignoring both reads and writes. #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ - do { \ - ANNOTATE_IGNORE_WRITES_END(); \ - ANNOTATE_IGNORE_READS_END(); \ - } while (0) + do \ + { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) #ifdef __cplusplus // ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. #define ANNOTATE_UNPROTECTED_READ(x) \ - absl::base_internal::AnnotateUnprotectedRead(x) + absl::base_internal::AnnotateUnprotectedRead(x) #endif @@ -369,11 +386,12 @@ #include #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ - __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) -#define ADDRESS_SANITIZER_REDZONE(name) \ - struct { \ - char x[8] __attribute__((aligned(8))); \ - } name + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct \ + { \ + char x[8] __attribute__((aligned(8))); \ + } name #else diff --git a/CAPI/cpp/grpc/include/absl/base/internal/endian.h b/CAPI/cpp/grpc/include/absl/base/internal/endian.h index 50747d7..4079f84 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/endian.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/endian.h @@ -24,66 +24,77 @@ #include "absl/base/internal/unaligned_access.h" #include "absl/base/port.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -inline uint64_t gbswap_64(uint64_t host_int) { + inline uint64_t gbswap_64(uint64_t host_int) + { #if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) - return __builtin_bswap64(host_int); + return __builtin_bswap64(host_int); #elif defined(_MSC_VER) - return _byteswap_uint64(host_int); + return _byteswap_uint64(host_int); #else - return (((host_int & uint64_t{0xFF}) << 56) | - ((host_int & uint64_t{0xFF00}) << 40) | - ((host_int & uint64_t{0xFF0000}) << 24) | - ((host_int & uint64_t{0xFF000000}) << 8) | - ((host_int & uint64_t{0xFF00000000}) >> 8) | - ((host_int & uint64_t{0xFF0000000000}) >> 24) | - ((host_int & uint64_t{0xFF000000000000}) >> 40) | - ((host_int & uint64_t{0xFF00000000000000}) >> 56)); + return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | ((host_int & uint64_t{0xFF0000}) << 24) | ((host_int & uint64_t{0xFF000000}) << 8) | ((host_int & uint64_t{0xFF00000000}) >> 8) | ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56)); #endif -} + } -inline uint32_t gbswap_32(uint32_t host_int) { + inline uint32_t gbswap_32(uint32_t host_int) + { #if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) - return 
__builtin_bswap32(host_int); + return __builtin_bswap32(host_int); #elif defined(_MSC_VER) - return _byteswap_ulong(host_int); + return _byteswap_ulong(host_int); #else - return (((host_int & uint32_t{0xFF}) << 24) | - ((host_int & uint32_t{0xFF00}) << 8) | - ((host_int & uint32_t{0xFF0000}) >> 8) | - ((host_int & uint32_t{0xFF000000}) >> 24)); + return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | ((host_int & uint32_t{0xFF0000}) >> 8) | ((host_int & uint32_t{0xFF000000}) >> 24)); #endif -} + } -inline uint16_t gbswap_16(uint16_t host_int) { + inline uint16_t gbswap_16(uint16_t host_int) + { #if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) - return __builtin_bswap16(host_int); + return __builtin_bswap16(host_int); #elif defined(_MSC_VER) - return _byteswap_ushort(host_int); + return _byteswap_ushort(host_int); #else - return (((host_int & uint16_t{0xFF}) << 8) | - ((host_int & uint16_t{0xFF00}) >> 8)); + return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8)); #endif -} + } #ifdef ABSL_IS_LITTLE_ENDIAN -// Portable definitions for htonl (host-to-network) and friends on little-endian -// architectures. -inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } -inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } -inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + // Portable definitions for htonl (host-to-network) and friends on little-endian + // architectures. + inline uint16_t ghtons(uint16_t x) + { + return gbswap_16(x); + } + inline uint32_t ghtonl(uint32_t x) + { + return gbswap_32(x); + } + inline uint64_t ghtonll(uint64_t x) + { + return gbswap_64(x); + } #elif defined ABSL_IS_BIG_ENDIAN -// Portable definitions for htonl (host-to-network) etc on big-endian -// architectures. These definitions are simpler since the host byte order is the -// same as network byte order. -inline uint16_t ghtons(uint16_t x) { return x; } -inline uint32_t ghtonl(uint32_t x) { return x; } -inline uint64_t ghtonll(uint64_t x) { return x; } + // Portable definitions for htonl (host-to-network) etc on big-endian + // architectures. These definitions are simpler since the host byte order is the + // same as network byte order. + inline uint16_t ghtons(uint16_t x) + { + return x; + } + inline uint32_t ghtonl(uint32_t x) + { + return x; + } + inline uint64_t ghtonll(uint64_t x) + { + return x; + } #else #error \ @@ -91,192 +102,371 @@ inline uint64_t ghtonll(uint64_t x) { return x; } "ABSL_IS_LITTLE_ENDIAN must be defined" #endif // byte order -inline uint16_t gntohs(uint16_t x) { return ghtons(x); } -inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } -inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } - -// Utilities to convert numbers between the current hosts's native byte -// order and little-endian byte order -// -// Load/Store methods are alignment safe -namespace little_endian { + inline uint16_t gntohs(uint16_t x) + { + return ghtons(x); + } + inline uint32_t gntohl(uint32_t x) + { + return ghtonl(x); + } + inline uint64_t gntohll(uint64_t x) + { + return ghtonll(x); + } + + // Utilities to convert numbers between the current hosts's native byte + // order and little-endian byte order + // + // Load/Store methods are alignment safe + namespace little_endian + { // Conversion functions. 
#ifdef ABSL_IS_LITTLE_ENDIAN -inline uint16_t FromHost16(uint16_t x) { return x; } -inline uint16_t ToHost16(uint16_t x) { return x; } - -inline uint32_t FromHost32(uint32_t x) { return x; } -inline uint32_t ToHost32(uint32_t x) { return x; } - -inline uint64_t FromHost64(uint64_t x) { return x; } -inline uint64_t ToHost64(uint64_t x) { return x; } - -inline constexpr bool IsLittleEndian() { return true; } + inline uint16_t FromHost16(uint16_t x) + { + return x; + } + inline uint16_t ToHost16(uint16_t x) + { + return x; + } + + inline uint32_t FromHost32(uint32_t x) + { + return x; + } + inline uint32_t ToHost32(uint32_t x) + { + return x; + } + + inline uint64_t FromHost64(uint64_t x) + { + return x; + } + inline uint64_t ToHost64(uint64_t x) + { + return x; + } + + inline constexpr bool IsLittleEndian() + { + return true; + } #elif defined ABSL_IS_BIG_ENDIAN -inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } -inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } - -inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } -inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } - -inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } -inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } - -inline constexpr bool IsLittleEndian() { return false; } + inline uint16_t FromHost16(uint16_t x) + { + return gbswap_16(x); + } + inline uint16_t ToHost16(uint16_t x) + { + return gbswap_16(x); + } + + inline uint32_t FromHost32(uint32_t x) + { + return gbswap_32(x); + } + inline uint32_t ToHost32(uint32_t x) + { + return gbswap_32(x); + } + + inline uint64_t FromHost64(uint64_t x) + { + return gbswap_64(x); + } + inline uint64_t ToHost64(uint64_t x) + { + return gbswap_64(x); + } + + inline constexpr bool IsLittleEndian() + { + return false; + } #endif /* ENDIAN */ -inline uint8_t FromHost(uint8_t x) { return x; } -inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } -inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } -inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } -inline uint8_t ToHost(uint8_t x) { return x; } -inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } -inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } -inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } - -inline int8_t FromHost(int8_t x) { return x; } -inline int16_t FromHost(int16_t x) { - return bit_cast(FromHost16(bit_cast(x))); -} -inline int32_t FromHost(int32_t x) { - return bit_cast(FromHost32(bit_cast(x))); -} -inline int64_t FromHost(int64_t x) { - return bit_cast(FromHost64(bit_cast(x))); -} -inline int8_t ToHost(int8_t x) { return x; } -inline int16_t ToHost(int16_t x) { - return bit_cast(ToHost16(bit_cast(x))); -} -inline int32_t ToHost(int32_t x) { - return bit_cast(ToHost32(bit_cast(x))); -} -inline int64_t ToHost(int64_t x) { - return bit_cast(ToHost64(bit_cast(x))); -} - -// Functions to do unaligned loads and stores in little-endian order. 
-inline uint16_t Load16(const void *p) { - return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); -} - -inline void Store16(void *p, uint16_t v) { - ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); -} - -inline uint32_t Load32(const void *p) { - return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); -} - -inline void Store32(void *p, uint32_t v) { - ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); -} - -inline uint64_t Load64(const void *p) { - return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); -} - -inline void Store64(void *p, uint64_t v) { - ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); -} - -} // namespace little_endian - -// Utilities to convert numbers between the current hosts's native byte -// order and big-endian byte order (same as network byte order) -// -// Load/Store methods are alignment safe -namespace big_endian { + inline uint8_t FromHost(uint8_t x) + { + return x; + } + inline uint16_t FromHost(uint16_t x) + { + return FromHost16(x); + } + inline uint32_t FromHost(uint32_t x) + { + return FromHost32(x); + } + inline uint64_t FromHost(uint64_t x) + { + return FromHost64(x); + } + inline uint8_t ToHost(uint8_t x) + { + return x; + } + inline uint16_t ToHost(uint16_t x) + { + return ToHost16(x); + } + inline uint32_t ToHost(uint32_t x) + { + return ToHost32(x); + } + inline uint64_t ToHost(uint64_t x) + { + return ToHost64(x); + } + + inline int8_t FromHost(int8_t x) + { + return x; + } + inline int16_t FromHost(int16_t x) + { + return bit_cast(FromHost16(bit_cast(x))); + } + inline int32_t FromHost(int32_t x) + { + return bit_cast(FromHost32(bit_cast(x))); + } + inline int64_t FromHost(int64_t x) + { + return bit_cast(FromHost64(bit_cast(x))); + } + inline int8_t ToHost(int8_t x) + { + return x; + } + inline int16_t ToHost(int16_t x) + { + return bit_cast(ToHost16(bit_cast(x))); + } + inline int32_t ToHost(int32_t x) + { + return bit_cast(ToHost32(bit_cast(x))); + } + inline int64_t ToHost(int64_t x) + { + return bit_cast(ToHost64(bit_cast(x))); + } + + // Functions to do unaligned loads and stores in little-endian order. 
+ inline uint16_t Load16(const void* p) + { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); + } + + inline void Store16(void* p, uint16_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); + } + + inline uint32_t Load32(const void* p) + { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); + } + + inline void Store32(void* p, uint32_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); + } + + inline uint64_t Load64(const void* p) + { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); + } + + inline void Store64(void* p, uint64_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); + } + + } // namespace little_endian + + // Utilities to convert numbers between the current hosts's native byte + // order and big-endian byte order (same as network byte order) + // + // Load/Store methods are alignment safe + namespace big_endian + { #ifdef ABSL_IS_LITTLE_ENDIAN -inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } -inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } - -inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } -inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } - -inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } -inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } - -inline constexpr bool IsLittleEndian() { return true; } + inline uint16_t FromHost16(uint16_t x) + { + return gbswap_16(x); + } + inline uint16_t ToHost16(uint16_t x) + { + return gbswap_16(x); + } + + inline uint32_t FromHost32(uint32_t x) + { + return gbswap_32(x); + } + inline uint32_t ToHost32(uint32_t x) + { + return gbswap_32(x); + } + + inline uint64_t FromHost64(uint64_t x) + { + return gbswap_64(x); + } + inline uint64_t ToHost64(uint64_t x) + { + return gbswap_64(x); + } + + inline constexpr bool IsLittleEndian() + { + return true; + } #elif defined ABSL_IS_BIG_ENDIAN -inline uint16_t FromHost16(uint16_t x) { return x; } -inline uint16_t ToHost16(uint16_t x) { return x; } - -inline uint32_t FromHost32(uint32_t x) { return x; } -inline uint32_t ToHost32(uint32_t x) { return x; } - -inline uint64_t FromHost64(uint64_t x) { return x; } -inline uint64_t ToHost64(uint64_t x) { return x; } - -inline constexpr bool IsLittleEndian() { return false; } + inline uint16_t FromHost16(uint16_t x) + { + return x; + } + inline uint16_t ToHost16(uint16_t x) + { + return x; + } + + inline uint32_t FromHost32(uint32_t x) + { + return x; + } + inline uint32_t ToHost32(uint32_t x) + { + return x; + } + + inline uint64_t FromHost64(uint64_t x) + { + return x; + } + inline uint64_t ToHost64(uint64_t x) + { + return x; + } + + inline constexpr bool IsLittleEndian() + { + return false; + } #endif /* ENDIAN */ -inline uint8_t FromHost(uint8_t x) { return x; } -inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } -inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } -inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } -inline uint8_t ToHost(uint8_t x) { return x; } -inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } -inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } -inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } - -inline int8_t FromHost(int8_t x) { return x; } -inline int16_t FromHost(int16_t x) { - return bit_cast(FromHost16(bit_cast(x))); -} -inline int32_t FromHost(int32_t x) { - return bit_cast(FromHost32(bit_cast(x))); -} -inline int64_t FromHost(int64_t x) { - return bit_cast(FromHost64(bit_cast(x))); -} -inline int8_t ToHost(int8_t x) { return x; } -inline int16_t 
ToHost(int16_t x) { - return bit_cast(ToHost16(bit_cast(x))); -} -inline int32_t ToHost(int32_t x) { - return bit_cast(ToHost32(bit_cast(x))); -} -inline int64_t ToHost(int64_t x) { - return bit_cast(ToHost64(bit_cast(x))); -} - -// Functions to do unaligned loads and stores in big-endian order. -inline uint16_t Load16(const void *p) { - return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); -} - -inline void Store16(void *p, uint16_t v) { - ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); -} - -inline uint32_t Load32(const void *p) { - return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); -} - -inline void Store32(void *p, uint32_t v) { - ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); -} - -inline uint64_t Load64(const void *p) { - return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); -} - -inline void Store64(void *p, uint64_t v) { - ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); -} - -} // namespace big_endian - -ABSL_NAMESPACE_END + inline uint8_t FromHost(uint8_t x) + { + return x; + } + inline uint16_t FromHost(uint16_t x) + { + return FromHost16(x); + } + inline uint32_t FromHost(uint32_t x) + { + return FromHost32(x); + } + inline uint64_t FromHost(uint64_t x) + { + return FromHost64(x); + } + inline uint8_t ToHost(uint8_t x) + { + return x; + } + inline uint16_t ToHost(uint16_t x) + { + return ToHost16(x); + } + inline uint32_t ToHost(uint32_t x) + { + return ToHost32(x); + } + inline uint64_t ToHost(uint64_t x) + { + return ToHost64(x); + } + + inline int8_t FromHost(int8_t x) + { + return x; + } + inline int16_t FromHost(int16_t x) + { + return bit_cast(FromHost16(bit_cast(x))); + } + inline int32_t FromHost(int32_t x) + { + return bit_cast(FromHost32(bit_cast(x))); + } + inline int64_t FromHost(int64_t x) + { + return bit_cast(FromHost64(bit_cast(x))); + } + inline int8_t ToHost(int8_t x) + { + return x; + } + inline int16_t ToHost(int16_t x) + { + return bit_cast(ToHost16(bit_cast(x))); + } + inline int32_t ToHost(int32_t x) + { + return bit_cast(ToHost32(bit_cast(x))); + } + inline int64_t ToHost(int64_t x) + { + return bit_cast(ToHost64(bit_cast(x))); + } + + // Functions to do unaligned loads and stores in big-endian order. + inline uint16_t Load16(const void* p) + { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); + } + + inline void Store16(void* p, uint16_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); + } + + inline uint32_t Load32(const void* p) + { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); + } + + inline void Store32(void* p, uint32_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); + } + + inline uint64_t Load64(const void* p) + { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); + } + + inline void Store64(void* p, uint64_t v) + { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); + } + + } // namespace big_endian + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h index 251de51..4140bef 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/errno_saver.h @@ -19,25 +19,37 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -// `ErrnoSaver` captures the value of `errno` upon construction and restores it -// upon deletion. It is used in low-level code and must be super fast. Do not -// add instrumentation, even in debug modes. 
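// A short sketch of the little_endian helpers shown earlier in this header:
// serializing a 32-bit value into a byte buffer and reading it back,
// independent of the host's byte order. Buffer handling is illustrative only.
#include <cstdint>

#include "absl/base/internal/endian.h"

namespace example {

inline void PutU32(char* dst, uint32_t v) {
  // Alignment-safe store of v in little-endian byte order.
  absl::little_endian::Store32(dst, v);
}

inline uint32_t GetU32(const char* src) {
  // Alignment-safe load that converts back to the host's byte order.
  return absl::little_endian::Load32(src);
}

}  // namespace example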
-class ErrnoSaver { - public: - ErrnoSaver() : saved_errno_(errno) {} - ~ErrnoSaver() { errno = saved_errno_; } - int operator()() const { return saved_errno_; } - - private: - const int saved_errno_; -}; - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // `ErrnoSaver` captures the value of `errno` upon construction and restores it + // upon deletion. It is used in low-level code and must be super fast. Do not + // add instrumentation, even in debug modes. + class ErrnoSaver + { + public: + ErrnoSaver() : + saved_errno_(errno) + { + } + ~ErrnoSaver() + { + errno = saved_errno_; + } + int operator()() const + { + return saved_errno_; + } + + private: + const int saved_errno_; + }; + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h index 77a5aec..741a8c9 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_safety_testing.h @@ -38,1069 +38,1305 @@ #include "absl/strings/substitute.h" #include "absl/utility/utility.h" -namespace testing { - -enum class TypeSpec; -enum class AllocSpec; - -constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { - using T = absl::underlying_type_t; - return static_cast(static_cast(a) | static_cast(b)); -} - -constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { - using T = absl::underlying_type_t; - return static_cast(static_cast(a) & static_cast(b)); -} - -constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { - using T = absl::underlying_type_t; - return static_cast(static_cast(a) | static_cast(b)); -} - -constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { - using T = absl::underlying_type_t; - return static_cast(static_cast(a) & static_cast(b)); -} - -namespace exceptions_internal { - -std::string GetSpecString(TypeSpec); -std::string GetSpecString(AllocSpec); - -struct NoThrowTag {}; -struct StrongGuaranteeTagType {}; - -// A simple exception class. We throw this so that test code can catch -// exceptions specifically thrown by ThrowingValue. -class TestException { - public: - explicit TestException(absl::string_view msg) : msg_(msg) {} - virtual ~TestException() {} - virtual const char* what() const noexcept { return msg_.c_str(); } - - private: - std::string msg_; -}; - -// TestBadAllocException exists because allocation functions must throw an -// exception which can be caught by a handler of std::bad_alloc. We use a child -// class of std::bad_alloc so we can customise the error message, and also -// derive from TestException so we don't accidentally end up catching an actual -// bad_alloc exception in TestExceptionSafety. 
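// A minimal sketch of the ErrnoSaver RAII helper documented above: the caller
// is guaranteed that errno is unchanged across an internal call that may
// clobber it. The wrapped work is a stand-in, not real code from this diff.
#include <cerrno>

#include "absl/base/internal/errno_saver.h"

namespace example {

inline void WorkThatMayClobberErrno() { errno = 0; /* stand-in for real work */ }

inline void WorkPreservingErrno() {
  absl::base_internal::ErrnoSaver saved;  // captures errno now
  WorkThatMayClobberErrno();
  // saved() still returns the captured value; the destructor restores errno.
}

}  // namespace example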
-class TestBadAllocException : public std::bad_alloc, public TestException { - public: - explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} - using TestException::what; -}; - -extern int countdown; - -// Allows the countdown variable to be set manually (defaulting to the initial -// value of 0) -inline void SetCountdown(int i = 0) { countdown = i; } -// Sets the countdown to the terminal value -1 -inline void UnsetCountdown() { SetCountdown(-1); } - -void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); - -testing::AssertionResult FailureMessage(const TestException& e, - int countdown) noexcept; - -struct TrackedAddress { - bool is_alive; - std::string description; -}; - -// Inspects the constructions and destructions of anything inheriting from -// TrackedObject. This allows us to safely "leak" TrackedObjects, as -// ConstructorTracker will destroy everything left over in its destructor. -class ConstructorTracker { - public: - explicit ConstructorTracker(int count) : countdown_(count) { - assert(current_tracker_instance_ == nullptr); - current_tracker_instance_ = this; - } - - ~ConstructorTracker() { - assert(current_tracker_instance_ == this); - current_tracker_instance_ = nullptr; - - for (auto& it : address_map_) { - void* address = it.first; - TrackedAddress& tracked_address = it.second; - if (tracked_address.is_alive) { - ADD_FAILURE() << ErrorMessage(address, tracked_address.description, - countdown_, "Object was not destroyed."); - } - } - } - - static void ObjectConstructed(void* address, std::string description) { - if (!CurrentlyTracking()) return; - - TrackedAddress& tracked_address = - current_tracker_instance_->address_map_[address]; - if (tracked_address.is_alive) { - ADD_FAILURE() << ErrorMessage( - address, tracked_address.description, - current_tracker_instance_->countdown_, - "Object was re-constructed. Current object was constructed by " + - description); - } - tracked_address = {true, std::move(description)}; - } +namespace testing +{ - static void ObjectDestructed(void* address) { - if (!CurrentlyTracking()) return; + enum class TypeSpec; + enum class AllocSpec; - auto it = current_tracker_instance_->address_map_.find(address); - // Not tracked. Ignore. 
- if (it == current_tracker_instance_->address_map_.end()) return; + constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); + } - TrackedAddress& tracked_address = it->second; - if (!tracked_address.is_alive) { - ADD_FAILURE() << ErrorMessage(address, tracked_address.description, - current_tracker_instance_->countdown_, - "Object was re-destroyed."); + constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); } - tracked_address.is_alive = false; - } - - private: - static bool CurrentlyTracking() { - return current_tracker_instance_ != nullptr; - } - - static std::string ErrorMessage(void* address, - const std::string& address_description, - int countdown, - const std::string& error_description) { - return absl::Substitute( - "With coundtown at $0:\n" - " $1\n" - " Object originally constructed by $2\n" - " Object address: $3\n", - countdown, error_description, address_description, address); - } - - std::unordered_map address_map_; - int countdown_; - - static ConstructorTracker* current_tracker_instance_; -}; - -class TrackedObject { - public: - TrackedObject(const TrackedObject&) = delete; - TrackedObject(TrackedObject&&) = delete; - - protected: - explicit TrackedObject(std::string description) { - ConstructorTracker::ObjectConstructed(this, std::move(description)); - } - - ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } -}; -} // namespace exceptions_internal - -extern exceptions_internal::NoThrowTag nothrow_ctor; - -extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; - -// A test class which is convertible to bool. The conversion can be -// instrumented to throw at a controlled time. -class ThrowingBool { - public: - ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit) - operator bool() const { // NOLINT - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return b_; - } - - private: - bool b_; -}; - -/* - * Configuration enum for the ThrowingValue type that defines behavior for the - * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer - * constructor from throwing. - * - * kEverythingThrows: Every operation can throw an exception - * kNoThrowCopy: Copy construction and copy assignment will not throw - * kNoThrowMove: Move construction and move assignment will not throw - * kNoThrowNew: Overloaded operators new and new[] will not throw - */ -enum class TypeSpec { - kEverythingThrows = 0, - kNoThrowCopy = 1, - kNoThrowMove = 1 << 1, - kNoThrowNew = 1 << 2, -}; - -/* - * A testing class instrumented to throw an exception at a controlled time. - * - * ThrowingValue implements a slightly relaxed version of the Regular concept -- - * that is it's a value type with the expected semantics. It also implements - * arithmetic operations. It doesn't implement member and pointer operators - * like operator-> or operator[]. - * - * ThrowingValue can be instrumented to have certain operations be noexcept by - * using compile-time bitfield template arguments. 
That is, to make an - * ThrowingValue which has noexcept move construction/assignment and noexcept - * copy construction/assignment, use the following: - * ThrowingValue my_thrwr{val}; - */ -template -class ThrowingValue : private exceptions_internal::TrackedObject { - static constexpr bool IsSpecified(TypeSpec spec) { - return static_cast(Spec & spec); - } - - static constexpr int kDefaultValue = 0; - static constexpr int kBadValue = 938550620; - - public: - ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ = kDefaultValue; - } - - ThrowingValue(const ThrowingValue& other) noexcept( - IsSpecified(TypeSpec::kNoThrowCopy)) - : TrackedObject(GetInstanceString(other.dummy_)) { - if (!IsSpecified(TypeSpec::kNoThrowCopy)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + + constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); } - dummy_ = other.dummy_; - } - - ThrowingValue(ThrowingValue&& other) noexcept( - IsSpecified(TypeSpec::kNoThrowMove)) - : TrackedObject(GetInstanceString(other.dummy_)) { - if (!IsSpecified(TypeSpec::kNoThrowMove)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + + constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) + { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); } - dummy_ = other.dummy_; - } - explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ = i; - } + namespace exceptions_internal + { + + std::string GetSpecString(TypeSpec); + std::string GetSpecString(AllocSpec); + + struct NoThrowTag + { + }; + struct StrongGuaranteeTagType + { + }; + + // A simple exception class. We throw this so that test code can catch + // exceptions specifically thrown by ThrowingValue. + class TestException + { + public: + explicit TestException(absl::string_view msg) : + msg_(msg) + { + } + virtual ~TestException() + { + } + virtual const char* what() const noexcept + { + return msg_.c_str(); + } + + private: + std::string msg_; + }; + + // TestBadAllocException exists because allocation functions must throw an + // exception which can be caught by a handler of std::bad_alloc. We use a child + // class of std::bad_alloc so we can customise the error message, and also + // derive from TestException so we don't accidentally end up catching an actual + // bad_alloc exception in TestExceptionSafety. + class TestBadAllocException : public std::bad_alloc, public TestException + { + public: + explicit TestBadAllocException(absl::string_view msg) : + TestException(msg) + { + } + using TestException::what; + }; + + extern int countdown; + + // Allows the countdown variable to be set manually (defaulting to the initial + // value of 0) + inline void SetCountdown(int i = 0) + { + countdown = i; + } + // Sets the countdown to the terminal value -1 + inline void UnsetCountdown() + { + SetCountdown(-1); + } - ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept - : TrackedObject(GetInstanceString(i)), dummy_(i) {} + void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + + testing::AssertionResult FailureMessage(const TestException& e, int countdown) noexcept; + + struct TrackedAddress + { + bool is_alive; + std::string description; + }; + + // Inspects the constructions and destructions of anything inheriting from + // TrackedObject. 
This allows us to safely "leak" TrackedObjects, as + // ConstructorTracker will destroy everything left over in its destructor. + class ConstructorTracker + { + public: + explicit ConstructorTracker(int count) : + countdown_(count) + { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() + { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto& it : address_map_) + { + void* address = it.first; + TrackedAddress& tracked_address = it.second; + if (tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void* address, std::string description) + { + if (!CurrentlyTracking()) + return; + + TrackedAddress& tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-constructed. Current object was constructed by " + description + ); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void* address) + { + if (!CurrentlyTracking()) + return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. + if (it == current_tracker_instance_->address_map_.end()) + return; + + TrackedAddress& tracked_address = it->second; + if (!tracked_address.is_alive) + { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-destroyed."); + } + tracked_address.is_alive = false; + } + + private: + static bool CurrentlyTracking() + { + return current_tracker_instance_ != nullptr; + } + + static std::string ErrorMessage(void* address, const std::string& address_description, int countdown, const std::string& error_description) + { + return absl::Substitute( + "With coundtown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, + error_description, + address_description, + address + ); + } + + std::unordered_map address_map_; + int countdown_; + + static ConstructorTracker* current_tracker_instance_; + }; + + class TrackedObject + { + public: + TrackedObject(const TrackedObject&) = delete; + TrackedObject(TrackedObject&&) = delete; + + protected: + explicit TrackedObject(std::string description) + { + ConstructorTracker::ObjectConstructed(this, std::move(description)); + } + + ~TrackedObject() noexcept + { + ConstructorTracker::ObjectDestructed(this); + } + }; + } // namespace exceptions_internal + + extern exceptions_internal::NoThrowTag nothrow_ctor; + + extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; + + // A test class which is convertible to bool. The conversion can be + // instrumented to throw at a controlled time. + class ThrowingBool + { + public: + ThrowingBool(bool b) noexcept : + b_(b) + { + } // NOLINT(runtime/explicit) + operator bool() const + { // NOLINT + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return b_; + } - // absl expects nothrow destructors - ~ThrowingValue() noexcept = default; + private: + bool b_; + }; + + /* + * Configuration enum for the ThrowingValue type that defines behavior for the + * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer + * constructor from throwing. 
+ * + * kEverythingThrows: Every operation can throw an exception + * kNoThrowCopy: Copy construction and copy assignment will not throw + * kNoThrowMove: Move construction and move assignment will not throw + * kNoThrowNew: Overloaded operators new and new[] will not throw + */ + enum class TypeSpec + { + kEverythingThrows = 0, + kNoThrowCopy = 1, + kNoThrowMove = 1 << 1, + kNoThrowNew = 1 << 2, + }; + + /* + * A testing class instrumented to throw an exception at a controlled time. + * + * ThrowingValue implements a slightly relaxed version of the Regular concept -- + * that is it's a value type with the expected semantics. It also implements + * arithmetic operations. It doesn't implement member and pointer operators + * like operator-> or operator[]. + * + * ThrowingValue can be instrumented to have certain operations be noexcept by + * using compile-time bitfield template arguments. That is, to make an + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue my_thrwr{val}; + */ + template + class ThrowingValue : private exceptions_internal::TrackedObject + { + static constexpr bool IsSpecified(TypeSpec spec) + { + return static_cast(Spec & spec); + } - ThrowingValue& operator=(const ThrowingValue& other) noexcept( - IsSpecified(TypeSpec::kNoThrowCopy)) { - dummy_ = kBadValue; - if (!IsSpecified(TypeSpec::kNoThrowCopy)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - } - dummy_ = other.dummy_; - return *this; - } - - ThrowingValue& operator=(ThrowingValue&& other) noexcept( - IsSpecified(TypeSpec::kNoThrowMove)) { - dummy_ = kBadValue; - if (!IsSpecified(TypeSpec::kNoThrowMove)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - } - dummy_ = other.dummy_; - return *this; - } - - // Arithmetic Operators - ThrowingValue operator+(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); - } - - ThrowingValue operator+() const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_, nothrow_ctor); - } - - ThrowingValue operator-(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); - } - - ThrowingValue operator-() const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(-dummy_, nothrow_ctor); - } - - ThrowingValue& operator++() { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - ++dummy_; - return *this; - } - - ThrowingValue operator++(int) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - auto out = ThrowingValue(dummy_, nothrow_ctor); - ++dummy_; - return out; - } - - ThrowingValue& operator--() { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - --dummy_; - return *this; - } - - ThrowingValue operator--(int) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - auto out = ThrowingValue(dummy_, nothrow_ctor); - --dummy_; - return out; - } - - ThrowingValue operator*(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); - } - - ThrowingValue operator/(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); - } - - ThrowingValue operator%(const ThrowingValue& other) const { - 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); - } - - ThrowingValue operator<<(int shift) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ << shift, nothrow_ctor); - } - - ThrowingValue operator>>(int shift) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ >> shift, nothrow_ctor); - } - - // Comparison Operators - // NOTE: We use `ThrowingBool` instead of `bool` because most STL - // types/containers requires T to be convertible to bool. - friend ThrowingBool operator==(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ == b.dummy_; - } - friend ThrowingBool operator!=(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ != b.dummy_; - } - friend ThrowingBool operator<(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ < b.dummy_; - } - friend ThrowingBool operator<=(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ <= b.dummy_; - } - friend ThrowingBool operator>(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ > b.dummy_; - } - friend ThrowingBool operator>=(const ThrowingValue& a, - const ThrowingValue& b) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return a.dummy_ >= b.dummy_; - } - - // Logical Operators - ThrowingBool operator!() const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return !dummy_; - } - - ThrowingBool operator&&(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return dummy_ && other.dummy_; - } - - ThrowingBool operator||(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return dummy_ || other.dummy_; - } - - // Bitwise Logical Operators - ThrowingValue operator~() const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(~dummy_, nothrow_ctor); - } - - ThrowingValue operator&(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); - } - - ThrowingValue operator|(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); - } - - ThrowingValue operator^(const ThrowingValue& other) const { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); - } - - // Compound Assignment operators - ThrowingValue& operator+=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ += other.dummy_; - return *this; - } - - ThrowingValue& operator-=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ -= other.dummy_; - return *this; - } - - ThrowingValue& operator*=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ *= other.dummy_; - return *this; - } - - ThrowingValue& operator/=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ /= other.dummy_; - return *this; - } - - 
ThrowingValue& operator%=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ %= other.dummy_; - return *this; - } - - ThrowingValue& operator&=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ &= other.dummy_; - return *this; - } - - ThrowingValue& operator|=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ |= other.dummy_; - return *this; - } - - ThrowingValue& operator^=(const ThrowingValue& other) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ ^= other.dummy_; - return *this; - } - - ThrowingValue& operator<<=(int shift) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ <<= shift; - return *this; - } - - ThrowingValue& operator>>=(int shift) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ >>= shift; - return *this; - } - - // Pointer operators - void operator&() const = delete; // NOLINT(runtime/operator) - - // Stream operators - friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return os << GetInstanceString(tv.dummy_); - } - - friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - return is; - } - - // Memory management operators - static void* operator new(size_t s) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new(s); - } + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; - static void* operator new[](size_t s) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new[](s); - } - - template - static void* operator new(size_t s, Args&&... args) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new(s, std::forward(args)...); - } - - template - static void* operator new[](size_t s, Args&&... args) noexcept( - IsSpecified(TypeSpec::kNoThrowNew)) { - if (!IsSpecified(TypeSpec::kNoThrowNew)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); - } - return ::operator new[](s, std::forward(args)...); - } - - // Abseil doesn't support throwing overloaded operator delete. These are - // provided so a throwing operator-new can clean up after itself. - void operator delete(void* p) noexcept { ::operator delete(p); } - - template - void operator delete(void* p, Args&&... args) noexcept { - ::operator delete(p, std::forward(args)...); - } - - void operator delete[](void* p) noexcept { return ::operator delete[](p); } - - template - void operator delete[](void* p, Args&&... args) noexcept { - return ::operator delete[](p, std::forward(args)...); - } - - // Non-standard access to the actual contained value. No need for this to - // throw. 
- int& Get() noexcept { return dummy_; } - const int& Get() const noexcept { return dummy_; } - - private: - static std::string GetInstanceString(int dummy) { - return absl::StrCat("ThrowingValue<", - exceptions_internal::GetSpecString(Spec), ">(", dummy, - ")"); - } - - int dummy_; -}; -// While not having to do with exceptions, explicitly delete comma operator, to -// make sure we don't use it on user-supplied types. -template -void operator,(const ThrowingValue&, T&&) = delete; -template -void operator,(T&&, const ThrowingValue&) = delete; - -/* - * Configuration enum for the ThrowingAllocator type that defines behavior for - * the lifetime of the instance. - * - * kEverythingThrows: Calls to the member functions may throw - * kNoThrowAllocate: Calls to the member functions will not throw - */ -enum class AllocSpec { - kEverythingThrows = 0, - kNoThrowAllocate = 1, -}; - -/* - * An allocator type which is instrumented to throw at a controlled time, or not - * to throw, using AllocSpec. The supported settings are the default of every - * function which is allowed to throw in a conforming allocator possibly - * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS - * configuration macro. - */ -template -class ThrowingAllocator : private exceptions_internal::TrackedObject { - static constexpr bool IsSpecified(AllocSpec spec) { - return static_cast(Spec & spec); - } - - public: - using pointer = T*; - using const_pointer = const T*; - using reference = T&; - using const_reference = const T&; - using void_pointer = void*; - using const_void_pointer = const void*; - using value_type = T; - using size_type = size_t; - using difference_type = ptrdiff_t; - - using is_nothrow = - std::integral_constant; - using propagate_on_container_copy_assignment = std::true_type; - using propagate_on_container_move_assignment = std::true_type; - using propagate_on_container_swap = std::true_type; - using is_always_equal = std::false_type; - - ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { - exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); - dummy_ = std::make_shared(next_id_++); - } - - template - ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT - : TrackedObject(GetInstanceString(*other.State())), - dummy_(other.State()) {} - - // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of - // allocator shall not exit via an exception, thus they are marked noexcept. 
- ThrowingAllocator(const ThrowingAllocator& other) noexcept - : TrackedObject(GetInstanceString(*other.State())), - dummy_(other.State()) {} - - template - ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT - : TrackedObject(GetInstanceString(*other.State())), - dummy_(std::move(other.State())) {} - - ThrowingAllocator(ThrowingAllocator&& other) noexcept - : TrackedObject(GetInstanceString(*other.State())), - dummy_(std::move(other.State())) {} - - ~ThrowingAllocator() noexcept = default; - - ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { - dummy_ = other.State(); - return *this; - } - - template - ThrowingAllocator& operator=( - const ThrowingAllocator& other) noexcept { - dummy_ = other.State(); - return *this; - } - - template - ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept { - dummy_ = std::move(other.State()); - return *this; - } - - template - struct rebind { - using other = ThrowingAllocator; - }; - - pointer allocate(size_type n) noexcept( - IsSpecified(AllocSpec::kNoThrowAllocate)) { - ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); - return static_cast(::operator new(n * sizeof(T))); - } - - pointer allocate(size_type n, const_void_pointer) noexcept( - IsSpecified(AllocSpec::kNoThrowAllocate)) { - return allocate(n); - } - - void deallocate(pointer ptr, size_type) noexcept { - ReadState(); - ::operator delete(static_cast(ptr)); - } - - template - void construct(U* ptr, Args&&... args) noexcept( - IsSpecified(AllocSpec::kNoThrowAllocate)) { - ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); - ::new (static_cast(ptr)) U(std::forward(args)...); - } - - template - void destroy(U* p) noexcept { - ReadState(); - p->~U(); - } - - size_type max_size() const noexcept { - return (std::numeric_limits::max)() / sizeof(value_type); - } - - ThrowingAllocator select_on_container_copy_construction() noexcept( - IsSpecified(AllocSpec::kNoThrowAllocate)) { - ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); - return *this; - } - - template - bool operator==(const ThrowingAllocator& other) const noexcept { - return dummy_ == other.dummy_; - } - - template - bool operator!=(const ThrowingAllocator& other) const noexcept { - return dummy_ != other.dummy_; - } - - template - friend class ThrowingAllocator; - - private: - static std::string GetInstanceString(int dummy) { - return absl::StrCat("ThrowingAllocator<", - exceptions_internal::GetSpecString(Spec), ">(", dummy, - ")"); - } - - const std::shared_ptr& State() const { return dummy_; } - std::shared_ptr& State() { return dummy_; } - - void ReadState() { - // we know that this will never be true, but the compiler doesn't, so this - // should safely force a read of the value. - if (*dummy_ < 0) std::abort(); - } - - void ReadStateAndMaybeThrow(absl::string_view msg) const { - if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { - exceptions_internal::MaybeThrow( - absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); - } - } - - static int next_id_; - std::shared_ptr dummy_; -}; - -template -int ThrowingAllocator::next_id_ = 0; - -// Tests for resource leaks by attempting to construct a T using args repeatedly -// until successful, using the countdown method. Side effects can then be -// tested for resource leaks. -template -void TestThrowingCtor(Args&&... 
args) { - struct Cleanup { - ~Cleanup() { exceptions_internal::UnsetCountdown(); } - } c; - for (int count = 0;; ++count) { - exceptions_internal::ConstructorTracker ct(count); - exceptions_internal::SetCountdown(count); - try { - T temp(std::forward(args)...); - static_cast(temp); - break; - } catch (const exceptions_internal::TestException&) { + public: + ThrowingValue() : + TrackedObject(GetInstanceString(kDefaultValue)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy) + ) : + TrackedObject(GetInstanceString(other.dummy_)) + { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove) + ) : + TrackedObject(GetInstanceString(other.dummy_)) + { + if (!IsSpecified(TypeSpec::kNoThrowMove)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : + TrackedObject(GetInstanceString(i)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : + TrackedObject(GetInstanceString(i)), + dummy_(i) + { + } + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue& operator=(const ThrowingValue& other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy) + ) + { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue& operator=(ThrowingValue&& other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove) + ) + { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue operator-() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue& operator++() + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue& operator--() + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + } + + ThrowingValue operator/(const 
ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers requires T to be convertible to bool. + friend ThrowingBool operator==(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue& a, const ThrowingValue& b) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue& other) const + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue& operator+=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue& operator-=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue& operator*=(const ThrowingValue& other) + { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue& operator/=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue& operator%=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue& operator&=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue& operator|=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue& operator^=(const ThrowingValue& other) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue& operator<<=(int shift) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue& operator>>=(int shift) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream& operator>>(std::istream& is, const ThrowingValue&) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + static void* operator new(size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void* operator new[](size_t s) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + + template + static void* operator new(size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward(args)...); + } + + template + static void* operator new[](size_t s, Args&&... args) noexcept( + IsSpecified(TypeSpec::kNoThrowNew) + ) + { + if (!IsSpecified(TypeSpec::kNoThrowNew)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + void operator delete(void* p) noexcept + { + ::operator delete(p); + } + + template + void operator delete(void* p, Args&&... args) noexcept + { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void* p) noexcept + { + return ::operator delete[](p); + } + + template + void operator delete[](void* p, Args&&... args) noexcept + { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. 
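// A brief usage sketch (the lambda and names below are illustrative, not part
// of this header): a contract callback can inspect the wrapped value through
// Get() without triggering MaybeThrow.
//
//   using TV = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
//   auto non_negative = [](TV* tv) {
//     return testing::AssertionResult(tv->Get() >= 0);
//   };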
+ int& Get() noexcept + { + return dummy_; + } + const int& Get() const noexcept + { + return dummy_; + } + + private: + static std::string GetInstanceString(int dummy) + { + return absl::StrCat("ThrowingValue<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); + } + + int dummy_; + }; + // While not having to do with exceptions, explicitly delete comma operator, to + // make sure we don't use it on user-supplied types. + template + void operator,(const ThrowingValue&, T&&) = delete; + template + void operator,(T&&, const ThrowingValue&) = delete; + + /* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. + * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ + enum class AllocSpec + { + kEverythingThrows = 0, + kNoThrowAllocate = 1, + }; + + /* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. + */ + template + class ThrowingAllocator : private exceptions_internal::TrackedObject + { + static constexpr bool IsSpecified(AllocSpec spec) + { + return static_cast(Spec & spec); + } + + public: + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using void_pointer = void*; + using const_void_pointer = const void*; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : + TrackedObject(GetInstanceString(next_id_)) + { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared(next_id_++); + } + + template + ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) + { + } + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. 
+ ThrowingAllocator(const ThrowingAllocator& other) noexcept + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) + { + } + + template + ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) + { + } + + ThrowingAllocator(ThrowingAllocator&& other) noexcept + : + TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) + { + } + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept + { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=( + const ThrowingAllocator& other + ) noexcept + { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept + { + dummy_ = std::move(other.State()); + return *this; + } + + template + struct rebind + { + using other = ThrowingAllocator; + }; + + pointer allocate(size_type n) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept + { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void construct(U* ptr, Args&&... args) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template + void destroy(U* p) noexcept + { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept + { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate) + ) + { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return *this; + } + + template + bool operator==(const ThrowingAllocator& other) const noexcept + { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator& other) const noexcept + { + return dummy_ != other.dummy_; + } + + template + friend class ThrowingAllocator; + + private: + static std::string GetInstanceString(int dummy) + { + return absl::StrCat("ThrowingAllocator<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); + } + + const std::shared_ptr& State() const + { + return dummy_; + } + std::shared_ptr& State() + { + return dummy_; + } + + void ReadState() + { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) + std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const + { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) + { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg) + ); + } + } + + static int next_id_; + std::shared_ptr dummy_; + }; + + template + int ThrowingAllocator::next_id_ = 0; + + // Tests for resource leaks by attempting to construct a T using args repeatedly + // until successful, using the countdown method. Side effects can then be + // tested for resource leaks. + template + void TestThrowingCtor(Args&&... 
args) + { + struct Cleanup + { + ~Cleanup() + { + exceptions_internal::UnsetCountdown(); + } + } c; + for (int count = 0;; ++count) + { + exceptions_internal::ConstructorTracker ct(count); + exceptions_internal::SetCountdown(count); + try + { + T temp(std::forward(args)...); + static_cast(temp); + break; + } + catch (const exceptions_internal::TestException&) + { + } + } } - } -} - -// Tests the nothrow guarantee of the provided nullary operation. If the an -// exception is thrown, the result will be AssertionFailure(). Otherwise, it -// will be AssertionSuccess(). -template -testing::AssertionResult TestNothrowOp(const Operation& operation) { - struct Cleanup { - Cleanup() { exceptions_internal::SetCountdown(); } - ~Cleanup() { exceptions_internal::UnsetCountdown(); } - } c; - try { - operation(); - return testing::AssertionSuccess(); - } catch (const exceptions_internal::TestException&) { - return testing::AssertionFailure() - << "TestException thrown during call to operation() when nothrow " - "guarantee was expected."; - } catch (...) { - return testing::AssertionFailure() - << "Unknown exception thrown during call to operation() when " - "nothrow guarantee was expected."; - } -} - -namespace exceptions_internal { - -// Dummy struct for ExceptionSafetyTestBuilder<> partial state. -struct UninitializedT {}; - -template -class DefaultFactory { - public: - explicit DefaultFactory(const T& t) : t_(t) {} - std::unique_ptr operator()() const { return absl::make_unique(t_); } - - private: - T t_; -}; - -template -using EnableIfTestable = typename absl::enable_if_t< - LazyContractsCount != 0 && - !std::is_same::value && - !std::is_same::value>; - -template -class ExceptionSafetyTestBuilder; - -} // namespace exceptions_internal - -/* - * Constructs an empty ExceptionSafetyTestBuilder. All - * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation - * methods return new instances of ExceptionSafetyTestBuilder. - * - * In order to test a T for exception safety, a factory for that T, a testable - * operation, and at least one contract callback returning an assertion - * result must be applied using the respective methods. - */ -exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); - -namespace exceptions_internal { -template -struct IsUniquePtr : std::false_type {}; - -template -struct IsUniquePtr> : std::true_type {}; - -template -struct FactoryPtrTypeHelper { - using type = decltype(std::declval()()); - - static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); -}; - -template -using FactoryPtrType = typename FactoryPtrTypeHelper::type; - -template -using FactoryElementType = typename FactoryPtrType::element_type; - -template -class ExceptionSafetyTest { - using Factory = std::function()>; - using Operation = std::function; - using Contract = std::function; - - public: - template - explicit ExceptionSafetyTest(const Factory& f, const Operation& op, - const Contracts&... contracts) - : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {} - - AssertionResult Test() const { - for (int count = 0;; ++count) { - exceptions_internal::ConstructorTracker ct(count); - - for (const auto& contract : contracts_) { - auto t_ptr = factory_(); - try { - SetCountdown(count); - operation_(t_ptr.get()); - // Unset for the case that the operation throws no exceptions, which - // would leave the countdown set and break the *next* exception safety - // test after this one. 
- UnsetCountdown(); - return AssertionSuccess(); - } catch (const exceptions_internal::TestException& e) { - if (!contract(t_ptr.get())) { - return AssertionFailure() << e.what() << " failed contract check"; - } - } - } + + // Tests the nothrow guarantee of the provided nullary operation. If the an + // exception is thrown, the result will be AssertionFailure(). Otherwise, it + // will be AssertionSuccess(). + template + testing::AssertionResult TestNothrowOp(const Operation& operation) + { + struct Cleanup + { + Cleanup() + { + exceptions_internal::SetCountdown(); + } + ~Cleanup() + { + exceptions_internal::UnsetCountdown(); + } + } c; + try + { + operation(); + return testing::AssertionSuccess(); + } + catch (const exceptions_internal::TestException&) + { + return testing::AssertionFailure() + << "TestException thrown during call to operation() when nothrow " + "guarantee was expected."; + } + catch (...) + { + return testing::AssertionFailure() + << "Unknown exception thrown during call to operation() when " + "nothrow guarantee was expected."; + } } - } - - private: - template - Contract WrapContract(const ContractFn& contract) { - return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); }; - } - - Contract WrapContract(StrongGuaranteeTagType) { - return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); }; - } - - Factory factory_; - Operation operation_; - std::vector contracts_; -}; - -/* - * Builds a tester object that tests if performing a operation on a T follows - * exception safety guarantees. Verification is done via contract assertion - * callbacks applied to T instances post-throw. - * - * Template parameters for ExceptionSafetyTestBuilder: - * - * - Factory: The factory object (passed in via tester.WithFactory(...) or - * tester.WithInitialValue(...)) must be invocable with the signature - * `std::unique_ptr operator()() const` where T is the type being tested. - * It is used for reliably creating identical T instances to test on. - * - * - Operation: The operation object (passsed in via tester.WithOperation(...) - * or tester.Test(...)) must be invocable with the signature - * `void operator()(T*) const` where T is the type being tested. It is used - * for performing steps on a T instance that may throw and that need to be - * checked for exception safety. Each call to the operation will receive a - * fresh T instance so it's free to modify and destroy the T instances as it - * pleases. - * - * - Contracts...: The contract assertion callback objects (passed in via - * tester.WithContracts(...)) must be invocable with the signature - * `testing::AssertionResult operator()(T*) const` where T is the type being - * tested. Contract assertion callbacks are provided T instances post-throw. - * They must return testing::AssertionSuccess when the type contracts of the - * provided T instance hold. If the type contracts of the T instance do not - * hold, they must return testing::AssertionFailure. Execution order of - * Contracts... is unspecified. They will each individually get a fresh T - * instance so they are free to modify and destroy the T instances as they - * please. - */ -template -class ExceptionSafetyTestBuilder { - public: - /* - * Returns a new ExceptionSafetyTestBuilder with an included T factory based - * on the provided T instance. The existing factory will not be included in - * the newly created tester instance. The created factory returns a new T - * instance by copy-constructing the provided const T& t. 
- * - * Preconditions for tester.WithInitialValue(const T& t): - * - * - The const T& t object must be copy-constructible where T is the type - * being tested. For non-copy-constructible objects, use the method - * tester.WithFactory(...). - */ - template - ExceptionSafetyTestBuilder, Operation, Contracts...> - WithInitialValue(const T& t) const { - return WithFactory(DefaultFactory(t)); - } - - /* - * Returns a new ExceptionSafetyTestBuilder with the provided T factory - * included. The existing factory will not be included in the newly-created - * tester instance. This method is intended for use with types lacking a copy - * constructor. Types that can be copy-constructed should instead use the - * method tester.WithInitialValue(...). - */ - template - ExceptionSafetyTestBuilder, Operation, Contracts...> - WithFactory(const NewFactory& new_factory) const { - return {new_factory, operation_, contracts_}; - } - - /* - * Returns a new ExceptionSafetyTestBuilder with the provided testable - * operation included. The existing operation will not be included in the - * newly created tester. - */ - template - ExceptionSafetyTestBuilder, Contracts...> - WithOperation(const NewOperation& new_operation) const { - return {factory_, new_operation, contracts_}; - } - - /* - * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... - * combined with the Contracts... that were already included in the instance - * on which the method was called. Contracts... cannot be removed or replaced - * once added to an ExceptionSafetyTestBuilder instance. A fresh object must - * be created in order to get an empty Contracts... list. - * - * In addition to passing in custom contract assertion callbacks, this method - * accepts `testing::strong_guarantee` as an argument which checks T instances - * post-throw against freshly created T instances via operator== to verify - * that any state changes made during the execution of the operation were - * properly rolled back. - */ - template - ExceptionSafetyTestBuilder...> - WithContracts(const MoreContracts&... more_contracts) const { - return { - factory_, operation_, - std::tuple_cat(contracts_, std::tuple...>( - more_contracts...))}; - } - - /* - * Returns a testing::AssertionResult that is the reduced result of the - * exception safety algorithm. The algorithm short circuits and returns - * AssertionFailure after the first contract callback returns an - * AssertionFailure. Otherwise, if all contract callbacks return an - * AssertionSuccess, the reduced result is AssertionSuccess. - * - * The passed-in testable operation will not be saved in a new tester instance - * nor will it modify/replace the existing tester instance. This is useful - * when each operation being tested is unique and does not need to be reused. - * - * Preconditions for tester.Test(const NewOperation& new_operation): - * - * - May only be called after at least one contract assertion callback and a - * factory or initial value have been provided. - */ - template < - typename NewOperation, - typename = EnableIfTestable> - testing::AssertionResult Test(const NewOperation& new_operation) const { - return TestImpl(new_operation, absl::index_sequence_for()); - } - - /* - * Returns a testing::AssertionResult that is the reduced result of the - * exception safety algorithm. The algorithm short circuits and returns - * AssertionFailure after the first contract callback returns an - * AssertionFailure. 
Otherwise, if all contract callbacks return an - * AssertionSuccess, the reduced result is AssertionSuccess. - * - * Preconditions for tester.Test(): - * - * - May only be called after at least one contract assertion callback, a - * factory or initial value and a testable operation have been provided. - */ - template < - typename LazyOperation = Operation, - typename = EnableIfTestable> - testing::AssertionResult Test() const { - return Test(operation_); - } - - private: - template - friend class ExceptionSafetyTestBuilder; - - friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); - - ExceptionSafetyTestBuilder() {} - - ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, - const std::tuple& i) - : factory_(f), operation_(o), contracts_(i) {} - - template - testing::AssertionResult TestImpl(SelectedOperation selected_operation, - absl::index_sequence) const { - return ExceptionSafetyTest>( - factory_, selected_operation, std::get(contracts_)...) - .Test(); - } - - Factory factory_; - Operation operation_; - std::tuple contracts_; -}; - -} // namespace exceptions_internal + + namespace exceptions_internal + { + + // Dummy struct for ExceptionSafetyTestBuilder<> partial state. + struct UninitializedT + { + }; + + template + class DefaultFactory + { + public: + explicit DefaultFactory(const T& t) : + t_(t) + { + } + std::unique_ptr operator()() const + { + return absl::make_unique(t_); + } + + private: + T t_; + }; + + template + using EnableIfTestable = typename absl::enable_if_t< + LazyContractsCount != 0 && + !std::is_same::value && + !std::is_same::value>; + + template + class ExceptionSafetyTestBuilder; + + } // namespace exceptions_internal + + /* + * Constructs an empty ExceptionSafetyTestBuilder. All + * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation + * methods return new instances of ExceptionSafetyTestBuilder. + * + * In order to test a T for exception safety, a factory for that T, a testable + * operation, and at least one contract callback returning an assertion + * result must be applied using the respective methods. + */ + exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); + + namespace exceptions_internal + { + template + struct IsUniquePtr : std::false_type + { + }; + + template + struct IsUniquePtr> : std::true_type + { + }; + + template + struct FactoryPtrTypeHelper + { + using type = decltype(std::declval()()); + + static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); + }; + + template + using FactoryPtrType = typename FactoryPtrTypeHelper::type; + + template + using FactoryElementType = typename FactoryPtrType::element_type; + + template + class ExceptionSafetyTest + { + using Factory = std::function()>; + using Operation = std::function; + using Contract = std::function; + + public: + template + explicit ExceptionSafetyTest(const Factory& f, const Operation& op, const Contracts&... contracts) : + factory_(f), + operation_(op), + contracts_{WrapContract(contracts)...} + { + } + + AssertionResult Test() const + { + for (int count = 0;; ++count) + { + exceptions_internal::ConstructorTracker ct(count); + + for (const auto& contract : contracts_) + { + auto t_ptr = factory_(); + try + { + SetCountdown(count); + operation_(t_ptr.get()); + // Unset for the case that the operation throws no exceptions, which + // would leave the countdown set and break the *next* exception safety + // test after this one. 
+ UnsetCountdown(); + return AssertionSuccess(); + } + catch (const exceptions_internal::TestException& e) + { + if (!contract(t_ptr.get())) + { + return AssertionFailure() << e.what() << " failed contract check"; + } + } + } + } + } + + private: + template + Contract WrapContract(const ContractFn& contract) + { + return [contract](T* t_ptr) + { return AssertionResult(contract(t_ptr)); }; + } + + Contract WrapContract(StrongGuaranteeTagType) + { + return [this](T* t_ptr) + { return AssertionResult(*factory_() == *t_ptr); }; + } + + Factory factory_; + Operation operation_; + std::vector contracts_; + }; + + /* + * Builds a tester object that tests if performing a operation on a T follows + * exception safety guarantees. Verification is done via contract assertion + * callbacks applied to T instances post-throw. + * + * Template parameters for ExceptionSafetyTestBuilder: + * + * - Factory: The factory object (passed in via tester.WithFactory(...) or + * tester.WithInitialValue(...)) must be invocable with the signature + * `std::unique_ptr operator()() const` where T is the type being tested. + * It is used for reliably creating identical T instances to test on. + * + * - Operation: The operation object (passsed in via tester.WithOperation(...) + * or tester.Test(...)) must be invocable with the signature + * `void operator()(T*) const` where T is the type being tested. It is used + * for performing steps on a T instance that may throw and that need to be + * checked for exception safety. Each call to the operation will receive a + * fresh T instance so it's free to modify and destroy the T instances as it + * pleases. + * + * - Contracts...: The contract assertion callback objects (passed in via + * tester.WithContracts(...)) must be invocable with the signature + * `testing::AssertionResult operator()(T*) const` where T is the type being + * tested. Contract assertion callbacks are provided T instances post-throw. + * They must return testing::AssertionSuccess when the type contracts of the + * provided T instance hold. If the type contracts of the T instance do not + * hold, they must return testing::AssertionFailure. Execution order of + * Contracts... is unspecified. They will each individually get a fresh T + * instance so they are free to modify and destroy the T instances as they + * please. + */ + template + class ExceptionSafetyTestBuilder + { + public: + /* + * Returns a new ExceptionSafetyTestBuilder with an included T factory based + * on the provided T instance. The existing factory will not be included in + * the newly created tester instance. The created factory returns a new T + * instance by copy-constructing the provided const T& t. + * + * Preconditions for tester.WithInitialValue(const T& t): + * + * - The const T& t object must be copy-constructible where T is the type + * being tested. For non-copy-constructible objects, use the method + * tester.WithFactory(...). + */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithInitialValue(const T& t) const + { + return WithFactory(DefaultFactory(t)); + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided T factory + * included. The existing factory will not be included in the newly-created + * tester instance. This method is intended for use with types lacking a copy + * constructor. Types that can be copy-constructed should instead use the + * method tester.WithInitialValue(...). 
+ */ + template + ExceptionSafetyTestBuilder, Operation, Contracts...> + WithFactory(const NewFactory& new_factory) const + { + return {new_factory, operation_, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided testable + * operation included. The existing operation will not be included in the + * newly created tester. + */ + template + ExceptionSafetyTestBuilder, Contracts...> + WithOperation(const NewOperation& new_operation) const + { + return {factory_, new_operation, contracts_}; + } + + /* + * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... + * combined with the Contracts... that were already included in the instance + * on which the method was called. Contracts... cannot be removed or replaced + * once added to an ExceptionSafetyTestBuilder instance. A fresh object must + * be created in order to get an empty Contracts... list. + * + * In addition to passing in custom contract assertion callbacks, this method + * accepts `testing::strong_guarantee` as an argument which checks T instances + * post-throw against freshly created T instances via operator== to verify + * that any state changes made during the execution of the operation were + * properly rolled back. + */ + template + ExceptionSafetyTestBuilder...> + WithContracts(const MoreContracts&... more_contracts) const + { + return { + factory_, operation_, std::tuple_cat(contracts_, std::tuple...>(more_contracts...))}; + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * The passed-in testable operation will not be saved in a new tester instance + * nor will it modify/replace the existing tester instance. This is useful + * when each operation being tested is unique and does not need to be reused. + * + * Preconditions for tester.Test(const NewOperation& new_operation): + * + * - May only be called after at least one contract assertion callback and a + * factory or initial value have been provided. + */ + template< + typename NewOperation, + typename = EnableIfTestable> + testing::AssertionResult Test(const NewOperation& new_operation) const + { + return TestImpl(new_operation, absl::index_sequence_for()); + } + + /* + * Returns a testing::AssertionResult that is the reduced result of the + * exception safety algorithm. The algorithm short circuits and returns + * AssertionFailure after the first contract callback returns an + * AssertionFailure. Otherwise, if all contract callbacks return an + * AssertionSuccess, the reduced result is AssertionSuccess. + * + * Preconditions for tester.Test(): + * + * - May only be called after at least one contract assertion callback, a + * factory or initial value and a testable operation have been provided. 
+ */ + template< + typename LazyOperation = Operation, + typename = EnableIfTestable> + testing::AssertionResult Test() const + { + return Test(operation_); + } + + private: + template + friend class ExceptionSafetyTestBuilder; + + friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); + + ExceptionSafetyTestBuilder() + { + } + + ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, const std::tuple& i) : + factory_(f), + operation_(o), + contracts_(i) + { + } + + template + testing::AssertionResult TestImpl(SelectedOperation selected_operation, absl::index_sequence) const + { + return ExceptionSafetyTest>( + factory_, selected_operation, std::get(contracts_)... + ) + .Test(); + } + + Factory factory_; + Operation operation_; + std::tuple contracts_; + }; + + } // namespace exceptions_internal } // namespace testing diff --git a/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h index 01b5465..4ac46ad 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/exception_testing.h @@ -26,16 +26,16 @@ #ifdef ABSL_HAVE_EXCEPTIONS #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ - EXPECT_THROW(expr, exception_t) + EXPECT_THROW(expr, exception_t) #elif defined(__ANDROID__) // Android asserts do not log anywhere that gtest can currently inspect. // So we expect exit, but cannot match the message. #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ - EXPECT_DEATH(expr, ".*") + EXPECT_DEATH(expr, ".*") #else #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ - EXPECT_DEATH_IF_SUPPORTED(expr, text) + EXPECT_DEATH_IF_SUPPORTED(expr, text) #endif diff --git a/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h index a547b3a..6f199d1 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/fast_type_id.h @@ -19,32 +19,36 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -template -struct FastTypeTag { - constexpr static char dummy_var = 0; -}; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + template + struct FastTypeTag + { + constexpr static char dummy_var = 0; + }; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -template -constexpr char FastTypeTag::dummy_var; + template + constexpr char FastTypeTag::dummy_var; #endif -// FastTypeId() evaluates at compile/link-time to a unique pointer for the -// passed-in type. These are meant to be good match for keys into maps or -// straight up comparisons. -using FastTypeIdType = const void*; + // FastTypeId() evaluates at compile/link-time to a unique pointer for the + // passed-in type. These are meant to be good match for keys into maps or + // straight up comparisons. 
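// Illustrative usage sketch (not part of this patch): FastTypeId<T>() yields a
// stable per-type pointer, so equal types compare equal, distinct types compare
// unequal, and the result can key a map. Foo, Bar and FastTypeIdSketch() below
// are hypothetical names introduced only for this example.
#include <cassert>
#include <map>
#include "absl/base/internal/fast_type_id.h"

struct Foo {};
struct Bar {};

inline void FastTypeIdSketch() {
  using absl::base_internal::FastTypeId;
  using absl::base_internal::FastTypeIdType;

  assert(FastTypeId<Foo>() == FastTypeId<Foo>());  // same type -> same id
  assert(FastTypeId<Foo>() != FastTypeId<Bar>());  // distinct types differ

  std::map<FastTypeIdType, const char*> names;     // id used as a map key
  names[FastTypeId<Foo>()] = "Foo";
  names[FastTypeId<Bar>()] = "Bar";
  assert(names.size() == 2);
}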
+ using FastTypeIdType = const void*; -template -constexpr inline FastTypeIdType FastTypeId() { - return &FastTypeTag::dummy_var; -} + template + constexpr inline FastTypeIdType FastTypeId() + { + return &FastTypeTag::dummy_var; + } -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h index 1dba809..4249bc2 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/hide_ptr.h @@ -19,33 +19,38 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -// Arbitrary value with high bits set. Xor'ing with it is unlikely -// to map one valid pointer to another valid pointer. -constexpr uintptr_t HideMask() { - return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; -} - -// Hide a pointer from the leak checker. For internal use only. -// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr -// and all objects reachable from ptr to be ignored by the leak checker. -template -inline uintptr_t HidePtr(T* ptr) { - return reinterpret_cast(ptr) ^ HideMask(); -} - -// Return a pointer that has been hidden from the leak checker. -// For internal use only. -template -inline T* UnhidePtr(uintptr_t hidden) { - return reinterpret_cast(hidden ^ HideMask()); -} - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // Arbitrary value with high bits set. Xor'ing with it is unlikely + // to map one valid pointer to another valid pointer. + constexpr uintptr_t HideMask() + { + return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; + } + + // Hide a pointer from the leak checker. For internal use only. + // Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr + // and all objects reachable from ptr to be ignored by the leak checker. + template + inline uintptr_t HidePtr(T* ptr) + { + return reinterpret_cast(ptr) ^ HideMask(); + } + + // Return a pointer that has been hidden from the leak checker. + // For internal use only. 
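// Illustrative round-trip sketch (not part of this patch): hiding a pointer and
// then unhiding it recovers the original address, while the intermediate
// uintptr_t no longer looks like a pointer to the leak checker. Node and
// HidePtrSketch() are hypothetical names for this example only.
#include <cassert>
#include <cstdint>
#include "absl/base/internal/hide_ptr.h"

struct Node { int payload = 42; };

inline void HidePtrSketch() {
  static Node node;
  std::uintptr_t hidden = absl::base_internal::HidePtr(&node);     // ptr ^ HideMask()
  Node* recovered = absl::base_internal::UnhidePtr<Node>(hidden);  // xor again
  assert(recovered == &node);
  assert(recovered->payload == 42);
}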
+ template + inline T* UnhidePtr(uintptr_t hidden) + { + return reinterpret_cast(hidden ^ HideMask()); + } + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/identity.h b/CAPI/cpp/grpc/include/absl/base/internal/identity.h index a3154ed..d62e556 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/identity.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/identity.h @@ -18,20 +18,23 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace internal + { -template -struct identity { - typedef T type; -}; + template + struct identity + { + typedef T type; + }; -template -using identity_t = typename identity::type; + template + using identity_t = typename identity::type; -} // namespace internal -ABSL_NAMESPACE_END + } // namespace internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h index 130d8c2..1ebd4b7 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable.h @@ -68,15 +68,15 @@ // types, etc.. #if defined(__clang__) #define ABSL_INTERNAL_EXTERN_DECL(type, name) \ - extern const ::absl::internal::identity_t name; + extern const ::absl::internal::identity_t name; #else // Otherwise, just define the macro to do nothing. #define ABSL_INTERNAL_EXTERN_DECL(type, name) #endif // defined(__clang__) // See above comment at top of file for details. #define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ - ABSL_INTERNAL_EXTERN_DECL(type, name) \ - inline constexpr ::absl::internal::identity_t name = init + ABSL_INTERNAL_EXTERN_DECL(type, name) \ + inline constexpr ::absl::internal::identity_t name = init #else @@ -86,21 +86,21 @@ // identity_t is used here so that the const and name are in the // appropriate place for pointer types, reference types, function pointer // types, etc.. 
-#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ - template \ - struct AbslInternalInlineVariableHolder##name { \ - static constexpr ::absl::internal::identity_t kInstance = init; \ - }; \ - \ - template \ - constexpr ::absl::internal::identity_t \ - AbslInternalInlineVariableHolder##name::kInstance; \ - \ - static constexpr const ::absl::internal::identity_t& \ - name = /* NOLINT */ \ - AbslInternalInlineVariableHolder##name<>::kInstance; \ - static_assert(sizeof(void (*)(decltype(name))) != 0, \ - "Silence unused variable warnings.") +#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template \ + struct AbslInternalInlineVariableHolder##name \ + { \ + static constexpr ::absl::internal::identity_t kInstance = init; \ + }; \ + \ + template \ + constexpr ::absl::internal::identity_t \ + AbslInternalInlineVariableHolder##name::kInstance; \ + \ + static constexpr const ::absl::internal::identity_t& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, "Silence unused variable warnings.") #endif // __cpp_inline_variables diff --git a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h index 3856b9f..a9878f5 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/inline_variable_testing.h @@ -17,30 +17,33 @@ #include "absl/base/internal/inline_variable.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace inline_variable_testing_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace inline_variable_testing_internal + { -struct Foo { - int value = 5; -}; + struct Foo + { + int value = 5; + }; -ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); -ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); + ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); -ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); -ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); + ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); + ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); -ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr); + ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), inline_variable_fun_ptr, nullptr); -const Foo& get_foo_a(); -const Foo& get_foo_b(); + const Foo& get_foo_a(); + const Foo& get_foo_b(); -const int& get_int_a(); -const int& get_int_b(); + const int& get_int_a(); + const int& get_int_b(); -} // namespace inline_variable_testing_internal -ABSL_NAMESPACE_END + } // namespace inline_variable_testing_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/invoke.h b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h index 643c2a4..2316378 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/invoke.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/invoke.h @@ -43,16 +43,18 @@ #include -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -using std::invoke; -using std::invoke_result_t; -using std::is_invocable_r; - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + using std::invoke; + using std::invoke_result_t; + using std::is_invocable_r; + + } // namespace 
base_internal + ABSL_NAMESPACE_END } // namespace absl #else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L @@ -66,42 +68,48 @@ ABSL_NAMESPACE_END // The following code is internal implementation detail. See the comment at the // top of this file for the API documentation. -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -// The five classes below each implement one of the clauses from the definition -// of INVOKE. The inner class template Accept checks whether the -// clause is applicable; static function template Invoke(f, args...) does the -// invocation. -// -// By separating the clause selection logic from invocation we make sure that -// Invoke() does exactly what the standard says. - -template -struct StrippedAccept { - template - struct Accept : Derived::template AcceptImpl::type>::type...> {}; -}; - -// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T -// and t1 is an object of type T or a reference to an object of type T or a -// reference to an object of a type derived from T. -struct MemFunAndRef : StrippedAccept { - template - struct AcceptImpl : std::false_type {}; - - template - struct AcceptImpl - : std::integral_constant::value && - absl::is_function::value> { - }; - - template - static decltype((std::declval().* - std::declval())(std::declval()...)) - Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + // The five classes below each implement one of the clauses from the definition + // of INVOKE. The inner class template Accept checks whether the + // clause is applicable; static function template Invoke(f, args...) does the + // invocation. + // + // By separating the clause selection logic from invocation we make sure that + // Invoke() does exactly what the standard says. + + template + struct StrippedAccept + { + template + struct Accept : Derived::template AcceptImpl::type>::type...> + { + }; + }; + + // (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T + // and t1 is an object of type T or a reference to an object of type T or a + // reference to an object of a type derived from T. + struct MemFunAndRef : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && absl::is_function::value> + { + }; + + template + static decltype((std::declval().*std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) + { // Ignore bogus GCC warnings on this line. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) @@ -109,131 +117,149 @@ struct MemFunAndRef : StrippedAccept { #pragma GCC diagnostic ignored "-Warray-bounds" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif - return (std::forward(obj).* - std::forward(mem_fun))(std::forward(args)...); + return (std::forward(obj).*std::forward(mem_fun))(std::forward(args)...); #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) #pragma GCC diagnostic pop #endif - } -}; - -// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a -// class T and t1 is not one of the types described in the previous item. 
-struct MemFunAndPtr : StrippedAccept { - template - struct AcceptImpl : std::false_type {}; - - template - struct AcceptImpl - : std::integral_constant::value && - absl::is_function::value> { - }; - - template - static decltype(((*std::declval()).* - std::declval())(std::declval()...)) - Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { - return ((*std::forward(ptr)).* - std::forward(mem_fun))(std::forward(args)...); - } -}; - -// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is -// an object of type T or a reference to an object of type T or a reference -// to an object of a type derived from T. -struct DataMemAndRef : StrippedAccept { - template - struct AcceptImpl : std::false_type {}; - - template - struct AcceptImpl - : std::integral_constant::value && - !absl::is_function::value> {}; - - template - static decltype(std::declval().*std::declval()) Invoke( - DataMem&& data_mem, Ref&& ref) { - return std::forward(ref).*std::forward(data_mem); - } -}; - -// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 -// is not one of the types described in the previous item. -struct DataMemAndPtr : StrippedAccept { - template - struct AcceptImpl : std::false_type {}; - - template - struct AcceptImpl - : std::integral_constant::value && - !absl::is_function::value> {}; - - template - static decltype((*std::declval()).*std::declval()) Invoke( - DataMem&& data_mem, Ptr&& ptr) { - return (*std::forward(ptr)).*std::forward(data_mem); - } -}; - -// f(t1, t2, ..., tN) in all other cases. -struct Callable { - // Callable doesn't have Accept because it's the last clause that gets picked - // when none of the previous clauses are applicable. - template - static decltype(std::declval()(std::declval()...)) Invoke( - F&& f, Args&&... args) { - return std::forward(f)(std::forward(args)...); - } -}; - -// Resolves to the first matching clause. -template -struct Invoker { - typedef typename std::conditional< - MemFunAndRef::Accept::value, MemFunAndRef, - typename std::conditional< - MemFunAndPtr::Accept::value, MemFunAndPtr, - typename std::conditional< - DataMemAndRef::Accept::value, DataMemAndRef, - typename std::conditional::value, - DataMemAndPtr, Callable>::type>::type>:: - type>::type type; -}; - -// The result type of Invoke. -template -using invoke_result_t = decltype(Invoker::type::Invoke( - std::declval(), std::declval()...)); - -// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section -// [func.require] of the C++ standard. -template -invoke_result_t invoke(F&& f, Args&&... args) { - return Invoker::type::Invoke(std::forward(f), - std::forward(args)...); -} - -template -struct IsInvocableRImpl : std::false_type {}; - -template -struct IsInvocableRImpl< - absl::void_t >, R, F, - Args...> - : std::integral_constant< - bool, - std::is_convertible, - R>::value || - std::is_void::value> {}; - -// Type trait whose member `value` is true if invoking `F` with `Args` is valid, -// and either the return type is convertible to `R`, or `R` is void. -// C++11-compatible version of `std::is_invocable_r`. -template -using is_invocable_r = IsInvocableRImpl; - -} // namespace base_internal -ABSL_NAMESPACE_END + } + }; + + // ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a + // class T and t1 is not one of the types described in the previous item. 
+ struct MemFunAndPtr : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && absl::is_function::value> + { + }; + + template + static decltype(((*std::declval()).*std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) + { + return ((*std::forward(ptr)).*std::forward(mem_fun))(std::forward(args)...); + } + }; + + // t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is + // an object of type T or a reference to an object of type T or a reference + // to an object of a type derived from T. + struct DataMemAndRef : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> + { + }; + + template + static decltype(std::declval().*std::declval()) Invoke( + DataMem&& data_mem, Ref&& ref + ) + { + return std::forward(ref).*std::forward(data_mem); + } + }; + + // (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 + // is not one of the types described in the previous item. + struct DataMemAndPtr : StrippedAccept + { + template + struct AcceptImpl : std::false_type + { + }; + + template + struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> + { + }; + + template + static decltype((*std::declval()).*std::declval()) Invoke( + DataMem&& data_mem, Ptr&& ptr + ) + { + return (*std::forward(ptr)).*std::forward(data_mem); + } + }; + + // f(t1, t2, ..., tN) in all other cases. + struct Callable + { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template + static decltype(std::declval()(std::declval()...)) Invoke( + F&& f, Args&&... args + ) + { + return std::forward(f)(std::forward(args)...); + } + }; + + // Resolves to the first matching clause. + template + struct Invoker + { + typedef typename std::conditional< + MemFunAndRef::Accept::value, + MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept::value, + MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept::value, + DataMemAndRef, + typename std::conditional::value, DataMemAndPtr, Callable>::type>::type>:: + type>::type type; + }; + + // The result type of Invoke. + template + using invoke_result_t = decltype(Invoker::type::Invoke( + std::declval(), std::declval()... + )); + + // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section + // [func.require] of the C++ standard. + template + invoke_result_t invoke(F&& f, Args&&... args) + { + return Invoker::type::Invoke(std::forward(f), std::forward(args)...); + } + + template + struct IsInvocableRImpl : std::false_type + { + }; + + template + struct IsInvocableRImpl< + absl::void_t>, + R, + F, + Args...> : std::integral_constant, R>::value || std::is_void::value> + { + }; + + // Type trait whose member `value` is true if invoking `F` with `Args` is valid, + // and either the return type is convertible to `R`, or `R` is void. + // C++11-compatible version of `std::is_invocable_r`. 
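// Illustrative sketch (not part of this patch): base_internal::invoke applies
// the INVOKE rules above, so one call syntax covers member functions, data
// members and ordinary callables, and is_invocable_r reports callability at
// compile time. Accumulator and InvokeSketch() are hypothetical names.
#include <cassert>
#include "absl/base/internal/invoke.h"

struct Accumulator {
  int total = 0;
  int Add(int x) { return total += x; }
};

inline void InvokeSketch() {
  namespace bi = absl::base_internal;
  Accumulator acc;

  bi::invoke(&Accumulator::Add, acc, 5);             // (t1.*f)(args...) clause
  bi::invoke(&Accumulator::Add, &acc, 7);            // ((*t1).*f)(args...) clause
  int total = bi::invoke(&Accumulator::total, acc);  // data-member clause
  assert(total == 12);

  auto twice = [](int x) { return 2 * x; };
  assert(bi::invoke(twice, 4) == 8);                 // plain callable clause

  static_assert(bi::is_invocable_r<int, decltype(twice), int>::value,
                "twice can be called with an int and yields an int");
}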
+ template + using is_invocable_r = IsInvocableRImpl; + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h index db91951..95a2d1f 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_alloc.h @@ -54,73 +54,77 @@ #include "absl/base/port.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -class LowLevelAlloc { - public: - struct Arena; // an arena from which memory may be allocated - - // Returns a pointer to a block of at least "request" bytes - // that have been newly allocated from the specific arena. - // for Alloc() call the DefaultArena() is used. - // Returns 0 if passed request==0. - // Does not return 0 under other circumstances; it crashes if memory - // is not available. - static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); - static void *AllocWithArena(size_t request, Arena *arena) - ABSL_ATTRIBUTE_SECTION(malloc_hook); - - // Deallocates a region of memory that was previously allocated with - // Alloc(). Does nothing if passed 0. "s" must be either 0, - // or must have been returned from a call to Alloc() and not yet passed to - // Free() since that call to Alloc(). The space is returned to the arena - // from which it was allocated. - static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook); - - // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free - // are to put all callers of MallocHook::Invoke* in this module - // into special section, - // so that MallocHook::GetCallerStackTrace can function accurately. - - // Create a new arena. - // The root metadata for the new arena is allocated in the - // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. - // These values may be ored into flags: - enum { - // Report calls to Alloc() and Free() via the MallocHook interface. - // Set in the DefaultArena. - kCallMallocHook = 0x0001, +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class LowLevelAlloc + { + public: + struct Arena; // an arena from which memory may be allocated + + // Returns a pointer to a block of at least "request" bytes + // that have been newly allocated from the specific arena. + // for Alloc() call the DefaultArena() is used. + // Returns 0 if passed request==0. + // Does not return 0 under other circumstances; it crashes if memory + // is not available. + static void* Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); + static void* AllocWithArena(size_t request, Arena* arena) + ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // Deallocates a region of memory that was previously allocated with + // Alloc(). Does nothing if passed 0. "s" must be either 0, + // or must have been returned from a call to Alloc() and not yet passed to + // Free() since that call to Alloc(). The space is returned to the arena + // from which it was allocated. + static void Free(void* s) ABSL_ATTRIBUTE_SECTION(malloc_hook); + + // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free + // are to put all callers of MallocHook::Invoke* in this module + // into special section, + // so that MallocHook::GetCallerStackTrace can function accurately. + + // Create a new arena. + // The root metadata for the new arena is allocated in the + // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. 
+ // These values may be ored into flags: + enum + { + // Report calls to Alloc() and Free() via the MallocHook interface. + // Set in the DefaultArena. + kCallMallocHook = 0x0001, #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING - // Make calls to Alloc(), Free() be async-signal-safe. Not set in - // DefaultArena(). Not supported on all platforms. - kAsyncSignalSafe = 0x0002, + // Make calls to Alloc(), Free() be async-signal-safe. Not set in + // DefaultArena(). Not supported on all platforms. + kAsyncSignalSafe = 0x0002, #endif - }; - // Construct a new arena. The allocation of the underlying metadata honors - // the provided flags. For example, the call NewArena(kAsyncSignalSafe) - // is itself async-signal-safe, as well as generatating an arena that provides - // async-signal-safe Alloc/Free. - static Arena *NewArena(int32_t flags); - - // Destroys an arena allocated by NewArena and returns true, - // provided no allocated blocks remain in the arena. - // If allocated blocks remain in the arena, does nothing and - // returns false. - // It is illegal to attempt to destroy the DefaultArena(). - static bool DeleteArena(Arena *arena); - - // The default arena that always exists. - static Arena *DefaultArena(); - - private: - LowLevelAlloc(); // no instances -}; - -} // namespace base_internal -ABSL_NAMESPACE_END + }; + // Construct a new arena. The allocation of the underlying metadata honors + // the provided flags. For example, the call NewArena(kAsyncSignalSafe) + // is itself async-signal-safe, as well as generatating an arena that provides + // async-signal-safe Alloc/Free. + static Arena* NewArena(int32_t flags); + + // Destroys an arena allocated by NewArena and returns true, + // provided no allocated blocks remain in the arena. + // If allocated blocks remain in the arena, does nothing and + // returns false. + // It is illegal to attempt to destroy the DefaultArena(). + static bool DeleteArena(Arena* arena); + + // The default arena that always exists. + static Arena* DefaultArena(); + + private: + LowLevelAlloc(); // no instances + }; + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h index 9baccc0..ed2f311 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/low_level_scheduling.h @@ -28,107 +28,125 @@ extern "C" bool __google_disable_rescheduling(void); extern "C" void __google_enable_rescheduling(bool disable_result); -namespace absl { -ABSL_NAMESPACE_BEGIN -class CondVar; -class Mutex; - -namespace synchronization_internal { -int MutexDelay(int32_t c, int mode); -} // namespace synchronization_internal - -namespace base_internal { - -class SchedulingHelper; // To allow use of SchedulingGuard. -class SpinLock; // To allow use of SchedulingGuard. - -// SchedulingGuard -// Provides guard semantics that may be used to disable cooperative rescheduling -// of the calling thread within specific program blocks. This is used to -// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative -// scheduling depends on. 
-// -// Domain implementations capable of rescheduling in reaction to involuntary -// kernel thread actions (e.g blocking due to a pagefault or syscall) must -// guarantee that an annotated thread is not allowed to (cooperatively) -// reschedule until the annotated region is complete. -// -// It is an error to attempt to use a cooperatively scheduled resource (e.g. -// Mutex) within a rescheduling-disabled region. -// -// All methods are async-signal safe. -class SchedulingGuard { - public: - // Returns true iff the calling thread may be cooperatively rescheduled. - static bool ReschedulingIsAllowed(); - SchedulingGuard(const SchedulingGuard&) = delete; - SchedulingGuard& operator=(const SchedulingGuard&) = delete; - - private: - // Disable cooperative rescheduling of the calling thread. It may still - // initiate scheduling operations (e.g. wake-ups), however, it may not itself - // reschedule. Nestable. The returned result is opaque, clients should not - // attempt to interpret it. - // REQUIRES: Result must be passed to a pairing EnableScheduling(). - static bool DisableRescheduling(); - - // Marks the end of a rescheduling disabled region, previously started by - // DisableRescheduling(). - // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). - static void EnableRescheduling(bool disable_result); - - // A scoped helper for {Disable, Enable}Rescheduling(). - // REQUIRES: destructor must run in same thread as constructor. - struct ScopedDisable { - ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); } - ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); } - - bool disabled; - }; - - // A scoped helper to enable rescheduling temporarily. - // REQUIRES: destructor must run in same thread as constructor. - class ScopedEnable { - public: - ScopedEnable(); - ~ScopedEnable(); - - private: - int scheduling_disabled_depth_; - }; - - // Access to SchedulingGuard is explicitly permitted. - friend class absl::CondVar; - friend class absl::Mutex; - friend class SchedulingHelper; - friend class SpinLock; - friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); -}; - -//------------------------------------------------------------------------------ -// End of public interfaces. -//------------------------------------------------------------------------------ - -inline bool SchedulingGuard::ReschedulingIsAllowed() { - return false; -} - -inline bool SchedulingGuard::DisableRescheduling() { - return false; -} - -inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { - return; -} - -inline SchedulingGuard::ScopedEnable::ScopedEnable() - : scheduling_disabled_depth_(0) {} -inline SchedulingGuard::ScopedEnable::~ScopedEnable() { - ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); -} - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + class CondVar; + class Mutex; + + namespace synchronization_internal + { + int MutexDelay(int32_t c, int mode); + } // namespace synchronization_internal + + namespace base_internal + { + + class SchedulingHelper; // To allow use of SchedulingGuard. + class SpinLock; // To allow use of SchedulingGuard. + + // SchedulingGuard + // Provides guard semantics that may be used to disable cooperative rescheduling + // of the calling thread within specific program blocks. This is used to + // protect resources (e.g. low-level SpinLocks or Domain code) that cooperative + // scheduling depends on. 
+ // + // Domain implementations capable of rescheduling in reaction to involuntary + // kernel thread actions (e.g blocking due to a pagefault or syscall) must + // guarantee that an annotated thread is not allowed to (cooperatively) + // reschedule until the annotated region is complete. + // + // It is an error to attempt to use a cooperatively scheduled resource (e.g. + // Mutex) within a rescheduling-disabled region. + // + // All methods are async-signal safe. + class SchedulingGuard + { + public: + // Returns true iff the calling thread may be cooperatively rescheduled. + static bool ReschedulingIsAllowed(); + SchedulingGuard(const SchedulingGuard&) = delete; + SchedulingGuard& operator=(const SchedulingGuard&) = delete; + + private: + // Disable cooperative rescheduling of the calling thread. It may still + // initiate scheduling operations (e.g. wake-ups), however, it may not itself + // reschedule. Nestable. The returned result is opaque, clients should not + // attempt to interpret it. + // REQUIRES: Result must be passed to a pairing EnableScheduling(). + static bool DisableRescheduling(); + + // Marks the end of a rescheduling disabled region, previously started by + // DisableRescheduling(). + // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). + static void EnableRescheduling(bool disable_result); + + // A scoped helper for {Disable, Enable}Rescheduling(). + // REQUIRES: destructor must run in same thread as constructor. + struct ScopedDisable + { + ScopedDisable() + { + disabled = SchedulingGuard::DisableRescheduling(); + } + ~ScopedDisable() + { + SchedulingGuard::EnableRescheduling(disabled); + } + + bool disabled; + }; + + // A scoped helper to enable rescheduling temporarily. + // REQUIRES: destructor must run in same thread as constructor. + class ScopedEnable + { + public: + ScopedEnable(); + ~ScopedEnable(); + + private: + int scheduling_disabled_depth_; + }; + + // Access to SchedulingGuard is explicitly permitted. + friend class absl::CondVar; + friend class absl::Mutex; + friend class SchedulingHelper; + friend class SpinLock; + friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); + }; + + //------------------------------------------------------------------------------ + // End of public interfaces. + //------------------------------------------------------------------------------ + + inline bool SchedulingGuard::ReschedulingIsAllowed() + { + return false; + } + + inline bool SchedulingGuard::DisableRescheduling() + { + return false; + } + + inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) + { + return; + } + + inline SchedulingGuard::ScopedEnable::ScopedEnable() : + scheduling_disabled_depth_(0) + { + } + inline SchedulingGuard::ScopedEnable::~ScopedEnable() + { + ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); + } + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h index 0641928..4176d0b 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/prefetch.h @@ -68,71 +68,89 @@ // // SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. 
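// Illustrative sketch (not part of this patch): issuing a prefetch a fixed
// distance ahead of a linear scan. The call is purely a hint; on platforms
// without prefetch support the functions below compile to empty inline stubs.
// SumWithPrefetch() and kPrefetchDistance are hypothetical names.
#include <cstddef>
#include "absl/base/internal/prefetch.h"

inline long SumWithPrefetch(const int* data, std::size_t n) {
  constexpr std::size_t kPrefetchDistance = 16;  // elements ahead of the cursor
  long total = 0;
  for (std::size_t i = 0; i < n; ++i) {
    if (i + kPrefetchDistance < n) {
      absl::base_internal::PrefetchT0(data + i + kPrefetchDistance);
    }
    total += data[i];
  }
  return total;
}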
// -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -void PrefetchT0(const void* addr); -void PrefetchT1(const void* addr); -void PrefetchT2(const void* addr); -void PrefetchNta(const void* addr); + void PrefetchT0(const void* addr); + void PrefetchT1(const void* addr); + void PrefetchT2(const void* addr); + void PrefetchNta(const void* addr); -// Implementation details follow. + // Implementation details follow. #if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) #define ABSL_INTERNAL_HAVE_PREFETCH 1 -// See __builtin_prefetch: -// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. -// -// These functions speculatively load for read only. This is -// safe for all currently supported platforms. However, prefetch for -// store may have problems depending on the target platform. -// -inline void PrefetchT0(const void* addr) { - // Note: this uses prefetcht0 on Intel. - __builtin_prefetch(addr, 0, 3); -} -inline void PrefetchT1(const void* addr) { - // Note: this uses prefetcht1 on Intel. - __builtin_prefetch(addr, 0, 2); -} -inline void PrefetchT2(const void* addr) { - // Note: this uses prefetcht2 on Intel. - __builtin_prefetch(addr, 0, 1); -} -inline void PrefetchNta(const void* addr) { - // Note: this uses prefetchtnta on Intel. - __builtin_prefetch(addr, 0, 0); -} + // See __builtin_prefetch: + // https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. + // + // These functions speculatively load for read only. This is + // safe for all currently supported platforms. However, prefetch for + // store may have problems depending on the target platform. + // + inline void PrefetchT0(const void* addr) + { + // Note: this uses prefetcht0 on Intel. + __builtin_prefetch(addr, 0, 3); + } + inline void PrefetchT1(const void* addr) + { + // Note: this uses prefetcht1 on Intel. + __builtin_prefetch(addr, 0, 2); + } + inline void PrefetchT2(const void* addr) + { + // Note: this uses prefetcht2 on Intel. + __builtin_prefetch(addr, 0, 1); + } + inline void PrefetchNta(const void* addr) + { + // Note: this uses prefetchtnta on Intel. 
+ __builtin_prefetch(addr, 0, 0); + } #elif defined(ABSL_INTERNAL_HAVE_SSE) #define ABSL_INTERNAL_HAVE_PREFETCH 1 -inline void PrefetchT0(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); -} -inline void PrefetchT1(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T1); -} -inline void PrefetchT2(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T2); -} -inline void PrefetchNta(const void* addr) { - _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); -} + inline void PrefetchT0(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); + } + inline void PrefetchT1(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T1); + } + inline void PrefetchT2(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T2); + } + inline void PrefetchNta(const void* addr) + { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); + } #else -inline void PrefetchT0(const void*) {} -inline void PrefetchT1(const void*) {} -inline void PrefetchT2(const void*) {} -inline void PrefetchNta(const void*) {} + inline void PrefetchT0(const void*) + { + } + inline void PrefetchT1(const void*) + { + } + inline void PrefetchT2(const void*) + { + } + inline void PrefetchNta(const void*) + { + } #endif -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_PREFETCH_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h index 0747c9d..1752d34 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/raw_logging.h @@ -41,27 +41,27 @@ // This will print an almost standard log line like this to stderr only: // E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file -#define ABSL_RAW_LOG(severity, ...) \ - do { \ - constexpr const char* absl_raw_logging_internal_basename = \ - ::absl::raw_logging_internal::Basename(__FILE__, \ - sizeof(__FILE__) - 1); \ - ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ - absl_raw_logging_internal_basename, \ - __LINE__, __VA_ARGS__); \ - } while (0) +#define ABSL_RAW_LOG(severity, ...) \ + do \ + { \ + constexpr const char* absl_raw_logging_internal_basename = \ + ::absl::raw_logging_internal::Basename(__FILE__, sizeof(__FILE__) - 1); \ + ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, absl_raw_logging_internal_basename, __LINE__, __VA_ARGS__); \ + } while (0) // Similar to CHECK(condition) << message, but for low-level modules: // we use only ABSL_RAW_LOG that does not allocate memory. // We do not want to provide args list here to encourage this usage: // if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); // so that the args are not computed when not needed. -#define ABSL_RAW_CHECK(condition, message) \ - do { \ - if (ABSL_PREDICT_FALSE(!(condition))) { \ - ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ - } \ - } while (0) +#define ABSL_RAW_CHECK(condition, message) \ + do \ + { \ + if (ABSL_PREDICT_FALSE(!(condition))) \ + { \ + ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ + } \ + } while (0) // ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, // except that if the richer log library is linked into the binary, we dispatch @@ -72,125 +72,126 @@ // // The API is a subset of the above: each macro only takes two arguments. 
Use // StrCat if you need to build a richer message. -#define ABSL_INTERNAL_LOG(severity, message) \ - do { \ - constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ - ::absl::raw_logging_internal::internal_log_function( \ - ABSL_RAW_LOGGING_INTERNAL_##severity, \ - absl_raw_logging_internal_filename, __LINE__, message); \ - if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ - ABSL_INTERNAL_UNREACHABLE; \ - } while (0) - -#define ABSL_INTERNAL_CHECK(condition, message) \ - do { \ - if (ABSL_PREDICT_FALSE(!(condition))) { \ - std::string death_message = "Check " #condition " failed: "; \ - death_message += std::string(message); \ - ABSL_INTERNAL_LOG(FATAL, death_message); \ - } \ - } while (0) +#define ABSL_INTERNAL_LOG(severity, message) \ + do \ + { \ + constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ + ::absl::raw_logging_internal::internal_log_function( \ + ABSL_RAW_LOGGING_INTERNAL_##severity, \ + absl_raw_logging_internal_filename, \ + __LINE__, \ + message \ + ); \ + if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ + ABSL_INTERNAL_UNREACHABLE; \ + } while (0) + +#define ABSL_INTERNAL_CHECK(condition, message) \ + do \ + { \ + if (ABSL_PREDICT_FALSE(!(condition))) \ + { \ + std::string death_message = "Check " #condition " failed: "; \ + death_message += std::string(message); \ + ABSL_INTERNAL_LOG(FATAL, death_message); \ + } \ + } while (0) #define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo #define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning #define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError #define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal #define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ - ::absl::NormalizeLogSeverity(severity) - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace raw_logging_internal { - -// Helper function to implement ABSL_RAW_LOG -// Logs format... at "severity" level, reporting it -// as called from file:line. -// This does not allocate memory or acquire locks. -void RawLog(absl::LogSeverity severity, const char* file, int line, - const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); - -// Writes the provided buffer directly to stderr, in a signal-safe, low-level -// manner. -void AsyncSignalSafeWriteToStderr(const char* s, size_t len); - -// compile-time function to get the "base" filename, that is, the part of -// a filename after the last "/" or "\" path separator. The search starts at -// the end of the string; the second parameter is the length of the string. -constexpr const char* Basename(const char* fname, int offset) { - return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' - ? fname + offset - : Basename(fname, offset - 1); -} - -// For testing only. -// Returns true if raw logging is fully supported. When it is not -// fully supported, no messages will be emitted, but a log at FATAL -// severity will cause an abort. -// -// TODO(gfalcon): Come up with a better name for this method. -bool RawLoggingFullySupported(); - -// Function type for a raw_logging customization hook for suppressing messages -// by severity, and for writing custom prefixes on non-suppressed messages. -// -// The installed hook is called for every raw log invocation. The message will -// be logged to stderr only if the hook returns true. FATAL errors will cause -// the process to abort, even if writing to stderr is suppressed. 
The hook is -// also provided with an output buffer, where it can write a custom log message -// prefix. -// -// The raw_logging system does not allocate memory or grab locks. User-provided -// hooks must avoid these operations, and must not throw exceptions. -// -// 'severity' is the severity level of the message being written. -// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro -// was located. -// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the -// hook writes a prefix, it must increment *buf and decrement *buf_size -// accordingly. -using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, - const char* file, int line, char** buf, - int* buf_size); - -// Function type for a raw_logging customization hook called to abort a process -// when a FATAL message is logged. If the provided AbortHook() returns, the -// logging system will call abort(). -// -// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro -// was located. -// The NUL-terminated logged message lives in the buffer between 'buf_start' -// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the -// buffer (as written by the LogFilterAndPrefixHook.) -// -// The lifetime of the filename and message buffers will not end while the -// process remains alive. -using AbortHook = void (*)(const char* file, int line, const char* buf_start, - const char* prefix_end, const char* buf_end); - -// Internal logging function for ABSL_INTERNAL_LOG to dispatch to. -// -// TODO(gfalcon): When string_view no longer depends on base, change this -// interface to take its message as a string_view instead. -using InternalLogFunction = void (*)(absl::LogSeverity severity, - const char* file, int line, - const std::string& message); - -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< - InternalLogFunction> - internal_log_function; - -// Registers hooks of the above types. Only a single hook of each type may be -// registered. It is an error to call these functions multiple times with -// different input arguments. -// -// These functions are safe to call at any point during initialization; they do -// not block or malloc, and are async-signal safe. -void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); -void RegisterAbortHook(AbortHook func); -void RegisterInternalLogFunction(InternalLogFunction func); - -} // namespace raw_logging_internal -ABSL_NAMESPACE_END + ::absl::NormalizeLogSeverity(severity) + +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace raw_logging_internal + { + + // Helper function to implement ABSL_RAW_LOG + // Logs format... at "severity" level, reporting it + // as called from file:line. + // This does not allocate memory or acquire locks. + void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); + + // Writes the provided buffer directly to stderr, in a signal-safe, low-level + // manner. + void AsyncSignalSafeWriteToStderr(const char* s, size_t len); + + // compile-time function to get the "base" filename, that is, the part of + // a filename after the last "/" or "\" path separator. The search starts at + // the end of the string; the second parameter is the length of the string. + constexpr const char* Basename(const char* fname, int offset) + { + return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' ? fname + offset : Basename(fname, offset - 1); + } + + // For testing only. 
+ // Returns true if raw logging is fully supported. When it is not + // fully supported, no messages will be emitted, but a log at FATAL + // severity will cause an abort. + // + // TODO(gfalcon): Come up with a better name for this method. + bool RawLoggingFullySupported(); + + // Function type for a raw_logging customization hook for suppressing messages + // by severity, and for writing custom prefixes on non-suppressed messages. + // + // The installed hook is called for every raw log invocation. The message will + // be logged to stderr only if the hook returns true. FATAL errors will cause + // the process to abort, even if writing to stderr is suppressed. The hook is + // also provided with an output buffer, where it can write a custom log message + // prefix. + // + // The raw_logging system does not allocate memory or grab locks. User-provided + // hooks must avoid these operations, and must not throw exceptions. + // + // 'severity' is the severity level of the message being written. + // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro + // was located. + // 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the + // hook writes a prefix, it must increment *buf and decrement *buf_size + // accordingly. + using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, int line, char** buf, int* buf_size); + + // Function type for a raw_logging customization hook called to abort a process + // when a FATAL message is logged. If the provided AbortHook() returns, the + // logging system will call abort(). + // + // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro + // was located. + // The NUL-terminated logged message lives in the buffer between 'buf_start' + // and 'buf_end'. 'prefix_end' points to the first non-prefix character of the + // buffer (as written by the LogFilterAndPrefixHook.) + // + // The lifetime of the filename and message buffers will not end while the + // process remains alive. + using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); + + // Internal logging function for ABSL_INTERNAL_LOG to dispatch to. + // + // TODO(gfalcon): When string_view no longer depends on base, change this + // interface to take its message as a string_view instead. + using InternalLogFunction = void (*)(absl::LogSeverity severity, const char* file, int line, const std::string& message); + + ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< + InternalLogFunction> + internal_log_function; + + // Registers hooks of the above types. Only a single hook of each type may be + // registered. It is an error to call these functions multiple times with + // different input arguments. + // + // These functions are safe to call at any point during initialization; they do + // not block or malloc, and are async-signal safe. 
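// Illustrative sketch (not part of this patch): a LogFilterAndPrefixHook that
// suppresses raw logs below WARNING and writes no custom prefix. Only one hook
// of each type may be registered process-wide; FATAL messages still abort even
// when suppressed. MyRawLogFilter and InstallRawLogFilter() are hypothetical.
#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"

inline bool MyRawLogFilter(absl::LogSeverity severity, const char* /*file*/,
                           int /*line*/, char** /*buf*/, int* /*buf_size*/) {
  // Returning false suppresses writing the message to stderr.
  return severity >= absl::LogSeverity::kWarning;
}

inline void InstallRawLogFilter() {
  absl::raw_logging_internal::RegisterLogFilterAndPrefixHook(&MyRawLogFilter);
}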
+ void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); + void RegisterAbortHook(AbortHook func); + void RegisterInternalLogFunction(InternalLogFunction func); + + } // namespace raw_logging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h index 8be5ab6..15091e9 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/scheduling_mode.h @@ -20,39 +20,42 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// Used to describe how a thread may be scheduled. Typically associated with -// the declaration of a resource supporting synchronized access. -// -// SCHEDULE_COOPERATIVE_AND_KERNEL: -// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may -// reschedule (using base::scheduling semantics); allowing other cooperative -// threads to proceed. -// -// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative") -// Specifies that no cooperative scheduling semantics may be used, even if the -// current thread is itself cooperatively scheduled. This means that -// cooperative threads will NOT allow other cooperative threads to execute in -// their place while waiting for a resource of this type. Host operating system -// semantics (e.g. a futex) may still be used. -// -// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL -// by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which -// base::scheduling (e.g. the implementation of a Scheduler) may depend. -// -// NOTE: Cooperative resources may not be nested below non-cooperative ones. -// This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL -// resource if a SCHEDULE_KERNEL_ONLY resource is already held. -enum SchedulingMode { - SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS. - SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling. -}; + // Used to describe how a thread may be scheduled. Typically associated with + // the declaration of a resource supporting synchronized access. + // + // SCHEDULE_COOPERATIVE_AND_KERNEL: + // Specifies that when waiting, a cooperative thread (e.g. a Fiber) may + // reschedule (using base::scheduling semantics); allowing other cooperative + // threads to proceed. + // + // SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative") + // Specifies that no cooperative scheduling semantics may be used, even if the + // current thread is itself cooperatively scheduled. This means that + // cooperative threads will NOT allow other cooperative threads to execute in + // their place while waiting for a resource of this type. Host operating system + // semantics (e.g. a futex) may still be used. + // + // When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL + // by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which + // base::scheduling (e.g. the implementation of a Scheduler) may depend. + // + // NOTE: Cooperative resources may not be nested below non-cooperative ones. + // This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL + // resource if a SCHEDULE_KERNEL_ONLY resource is already held. 
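// Illustrative sketch (not part of this patch): SchedulingMode is typically
// consumed by SpinLock (diffed further below). A lock that the cooperative
// scheduler itself depends on must use SCHEDULE_KERNEL_ONLY; ordinary clients
// prefer SCHEDULE_COOPERATIVE_AND_KERNEL. This assumes ABSL_CONST_INIT and
// absl::kConstInit from absl/base, which are outside this diff; the variable
// names are hypothetical.
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/spinlock.h"

// Global lock for ordinary code: cooperative waiting is allowed.
ABSL_CONST_INIT static absl::base_internal::SpinLock ordinary_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);

// Global lock used inside a scheduler implementation: kernel-only waiting.
ABSL_CONST_INIT static absl::base_internal::SpinLock scheduler_internal_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);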
+ enum SchedulingMode + { + SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS. + SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling. + }; -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h index 19ec7b5..8471b59 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/scoped_set_env.h @@ -21,25 +21,28 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -class ScopedSetEnv { - public: - ScopedSetEnv(const char* var_name, const char* new_value); - ~ScopedSetEnv(); - - private: - std::string var_name_; - std::string old_value_; - - // True if the environment variable was initially not set. - bool was_unset_; -}; - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class ScopedSetEnv + { + public: + ScopedSetEnv(const char* var_name, const char* new_value); + ~ScopedSetEnv(); + + private: + std::string var_name_; + std::string old_value_; + + // True if the environment variable was initially not set. + bool was_unset_; + }; + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h index 6d8d8dd..0cb41fc 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock.h @@ -45,212 +45,242 @@ #include "absl/base/port.h" #include "absl/base/thread_annotations.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -class ABSL_LOCKABLE SpinLock { - public: - SpinLock() : lockword_(kSpinLockCooperative) { - ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); - } - - // Constructors that allow non-cooperative spinlocks to be created for use - // inside thread schedulers. Normal clients should not use these. - explicit SpinLock(base_internal::SchedulingMode mode); - - // Constructor for global SpinLock instances. See absl/base/const_init.h. - constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) - : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} - - // For global SpinLock instances prefer trivial destructor when possible. - // Default but non-trivial destructor in some build configurations causes an - // extra static initializer. +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + class ABSL_LOCKABLE SpinLock + { + public: + SpinLock() : + lockword_(kSpinLockCooperative) + { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } + + // Constructors that allow non-cooperative spinlocks to be created for use + // inside thread schedulers. Normal clients should not use these. + explicit SpinLock(base_internal::SchedulingMode mode); + + // Constructor for global SpinLock instances. See absl/base/const_init.h. + constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) : + lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) + { + } + + // For global SpinLock instances prefer trivial destructor when possible. 
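A short usage sketch for the ScopedSetEnv helper shown above; the variable name, value, and enclosing function are illustrative assumptions:

    #include <cstdlib>

    #include "absl/base/internal/scoped_set_env.h"

    void RunWithFakeTimeZone() {
      // Overrides TZ for the current scope; the previous value, or its
      // absence, is restored when `env` is destroyed.
      absl::base_internal::ScopedSetEnv env("TZ", "UTC");
      const char* tz = std::getenv("TZ");  // "UTC" inside this scope
      (void)tz;
    }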
+ // Default but non-trivial destructor in some build configurations causes an + // extra static initializer. #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE - ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } + ~SpinLock() + { + ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); + } #else - ~SpinLock() = default; + ~SpinLock() = default; #endif - // Acquire this SpinLock. - inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { - ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); - if (!TryLockImpl()) { - SlowLock(); - } - ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); - } - - // Try to acquire this SpinLock without blocking and return true if the - // acquisition was successful. If the lock was not acquired, false is - // returned. If this SpinLock is free at the time of the call, TryLock - // will return true with high probability. - inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { - ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); - bool res = TryLockImpl(); - ABSL_TSAN_MUTEX_POST_LOCK( - this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), - 0); - return res; - } - - // Release this SpinLock, which must be held by the calling thread. - inline void Unlock() ABSL_UNLOCK_FUNCTION() { - ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); - uint32_t lock_value = lockword_.load(std::memory_order_relaxed); - lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, - std::memory_order_release); - - if ((lock_value & kSpinLockDisabledScheduling) != 0) { - base_internal::SchedulingGuard::EnableRescheduling(true); - } - if ((lock_value & kWaitTimeMask) != 0) { - // Collect contentionz profile info, and speed the wakeup of any waiter. - // The wait_cycles value indicates how long this thread spent waiting - // for the lock. - SlowUnlock(lock_value); - } - ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); - } - - // Determine if the lock is held. When the lock is held by the invoking - // thread, true will always be returned. Intended to be used as - // CHECK(lock.IsHeld()). - inline bool IsHeld() const { - return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; - } - - // Return immediately if this thread holds the SpinLock exclusively. - // Otherwise, report an error by crashing with a diagnostic. - inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { - if (!IsHeld()) { - ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); - } - } - - protected: - // These should not be exported except for testing. - - // Store number of cycles between wait_start_time and wait_end_time in a - // lock value. - static uint32_t EncodeWaitCycles(int64_t wait_start_time, - int64_t wait_end_time); - - // Extract number of wait cycles in a lock value. - static uint64_t DecodeWaitCycles(uint32_t lock_value); - - // Provide access to protected method above. Use for testing only. - friend struct SpinLockTest; - - private: - // lockword_ is used to store the following: - // - // bit[0] encodes whether a lock is being held. - // bit[1] encodes whether a lock uses cooperative scheduling. - // bit[2] encodes whether the current lock holder disabled scheduling when - // acquiring the lock. Only set when kSpinLockHeld is also set. - // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. - // This is set by the lock holder to indicate how long it waited on - // the lock before eventually acquiring it. 
The number of cycles is - // encoded as a 29-bit unsigned int, or in the case that the current - // holder did not wait but another waiter is queued, the LSB - // (kSpinLockSleeper) is set. The implementation does not explicitly - // track the number of queued waiters beyond this. It must always be - // assumed that waiters may exist if the current holder was required to - // queue. - // - // Invariant: if the lock is not held, the value is either 0 or - // kSpinLockCooperative. - static constexpr uint32_t kSpinLockHeld = 1; - static constexpr uint32_t kSpinLockCooperative = 2; - static constexpr uint32_t kSpinLockDisabledScheduling = 4; - static constexpr uint32_t kSpinLockSleeper = 8; - // Includes kSpinLockSleeper. - static constexpr uint32_t kWaitTimeMask = - ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); - - // Returns true if the provided scheduling mode is cooperative. - static constexpr bool IsCooperative( - base_internal::SchedulingMode scheduling_mode) { - return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; - } - - uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); - void SlowLock() ABSL_ATTRIBUTE_COLD; - void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; - uint32_t SpinLoop(); - - inline bool TryLockImpl() { - uint32_t lock_value = lockword_.load(std::memory_order_relaxed); - return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; - } - - std::atomic lockword_; - - SpinLock(const SpinLock&) = delete; - SpinLock& operator=(const SpinLock&) = delete; -}; - -// Corresponding locker object that arranges to acquire a spinlock for -// the duration of a C++ scope. -class ABSL_SCOPED_LOCKABLE SpinLockHolder { - public: - inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) - : lock_(l) { - l->Lock(); - } - inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); } - - SpinLockHolder(const SpinLockHolder&) = delete; - SpinLockHolder& operator=(const SpinLockHolder&) = delete; - - private: - SpinLock* lock_; -}; - -// Register a hook for profiling support. -// -// The function pointer registered here will be called whenever a spinlock is -// contended. The callback is given an opaque handle to the contended spinlock -// and the number of wait cycles. This is thread-safe, but only a single -// profiler can be registered. It is an error to call this function multiple -// times with different arguments. -void RegisterSpinLockProfiler(void (*fn)(const void* lock, - int64_t wait_cycles)); - -//------------------------------------------------------------------------------ -// Public interface ends here. -//------------------------------------------------------------------------------ - -// If (result & kSpinLockHeld) == 0, then *this was successfully locked. -// Otherwise, returns last observed value for lockword_. -inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, - uint32_t wait_cycles) { - if ((lock_value & kSpinLockHeld) != 0) { - return lock_value; - } - - uint32_t sched_disabled_bit = 0; - if ((lock_value & kSpinLockCooperative) == 0) { - // For non-cooperative locks we must make sure we mark ourselves as - // non-reschedulable before we attempt to CompareAndSwap. 
- if (base_internal::SchedulingGuard::DisableRescheduling()) { - sched_disabled_bit = kSpinLockDisabledScheduling; - } - } - - if (!lockword_.compare_exchange_strong( - lock_value, - kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, - std::memory_order_acquire, std::memory_order_relaxed)) { - base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); - } - - return lock_value; -} - -} // namespace base_internal -ABSL_NAMESPACE_END + // Acquire this SpinLock. + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() + { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + if (!TryLockImpl()) + { + SlowLock(); + } + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + } + + // Try to acquire this SpinLock without blocking and return true if the + // acquisition was successful. If the lock was not acquired, false is + // returned. If this SpinLock is free at the time of the call, TryLock + // will return true with high probability. + inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) + { + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); + bool res = TryLockImpl(); + ABSL_TSAN_MUTEX_POST_LOCK( + this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), 0 + ); + return res; + } + + // Release this SpinLock, which must be held by the calling thread. + inline void Unlock() ABSL_UNLOCK_FUNCTION() + { + ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, std::memory_order_release); + + if ((lock_value & kSpinLockDisabledScheduling) != 0) + { + base_internal::SchedulingGuard::EnableRescheduling(true); + } + if ((lock_value & kWaitTimeMask) != 0) + { + // Collect contentionz profile info, and speed the wakeup of any waiter. + // The wait_cycles value indicates how long this thread spent waiting + // for the lock. + SlowUnlock(lock_value); + } + ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); + } + + // Determine if the lock is held. When the lock is held by the invoking + // thread, true will always be returned. Intended to be used as + // CHECK(lock.IsHeld()). + inline bool IsHeld() const + { + return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; + } + + // Return immediately if this thread holds the SpinLock exclusively. + // Otherwise, report an error by crashing with a diagnostic. + inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() + { + if (!IsHeld()) + { + ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); + } + } + + protected: + // These should not be exported except for testing. + + // Store number of cycles between wait_start_time and wait_end_time in a + // lock value. + static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time); + + // Extract number of wait cycles in a lock value. + static uint64_t DecodeWaitCycles(uint32_t lock_value); + + // Provide access to protected method above. Use for testing only. + friend struct SpinLockTest; + + private: + // lockword_ is used to store the following: + // + // bit[0] encodes whether a lock is being held. + // bit[1] encodes whether a lock uses cooperative scheduling. + // bit[2] encodes whether the current lock holder disabled scheduling when + // acquiring the lock. Only set when kSpinLockHeld is also set. + // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + // This is set by the lock holder to indicate how long it waited on + // the lock before eventually acquiring it. 
The number of cycles is + // encoded as a 29-bit unsigned int, or in the case that the current + // holder did not wait but another waiter is queued, the LSB + // (kSpinLockSleeper) is set. The implementation does not explicitly + // track the number of queued waiters beyond this. It must always be + // assumed that waiters may exist if the current holder was required to + // queue. + // + // Invariant: if the lock is not held, the value is either 0 or + // kSpinLockCooperative. + static constexpr uint32_t kSpinLockHeld = 1; + static constexpr uint32_t kSpinLockCooperative = 2; + static constexpr uint32_t kSpinLockDisabledScheduling = 4; + static constexpr uint32_t kSpinLockSleeper = 8; + // Includes kSpinLockSleeper. + static constexpr uint32_t kWaitTimeMask = + ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); + + // Returns true if the provided scheduling mode is cooperative. + static constexpr bool IsCooperative( + base_internal::SchedulingMode scheduling_mode + ) + { + return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } + + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); + void SlowLock() ABSL_ATTRIBUTE_COLD; + void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; + uint32_t SpinLoop(); + + inline bool TryLockImpl() + { + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; + } + + std::atomic lockword_; + + SpinLock(const SpinLock&) = delete; + SpinLock& operator=(const SpinLock&) = delete; + }; + + // Corresponding locker object that arranges to acquire a spinlock for + // the duration of a C++ scope. + class ABSL_SCOPED_LOCKABLE SpinLockHolder + { + public: + inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) : + lock_(l) + { + l->Lock(); + } + inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() + { + lock_->Unlock(); + } + + SpinLockHolder(const SpinLockHolder&) = delete; + SpinLockHolder& operator=(const SpinLockHolder&) = delete; + + private: + SpinLock* lock_; + }; + + // Register a hook for profiling support. + // + // The function pointer registered here will be called whenever a spinlock is + // contended. The callback is given an opaque handle to the contended spinlock + // and the number of wait cycles. This is thread-safe, but only a single + // profiler can be registered. It is an error to call this function multiple + // times with different arguments. + void RegisterSpinLockProfiler(void (*fn)(const void* lock, int64_t wait_cycles)); + + //------------------------------------------------------------------------------ + // Public interface ends here. + //------------------------------------------------------------------------------ + + // If (result & kSpinLockHeld) == 0, then *this was successfully locked. + // Otherwise, returns last observed value for lockword_. + inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, uint32_t wait_cycles) + { + if ((lock_value & kSpinLockHeld) != 0) + { + return lock_value; + } + + uint32_t sched_disabled_bit = 0; + if ((lock_value & kSpinLockCooperative) == 0) + { + // For non-cooperative locks we must make sure we mark ourselves as + // non-reschedulable before we attempt to CompareAndSwap. 
+ if (base_internal::SchedulingGuard::DisableRescheduling()) + { + sched_disabled_bit = kSpinLockDisabledScheduling; + } + } + + if (!lockword_.compare_exchange_strong( + lock_value, + kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, + std::memory_order_acquire, + std::memory_order_relaxed + )) + { + base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); + } + + return lock_value; + } + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h index 9a1adcd..a36b6e1 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/spinlock_wait.h @@ -23,47 +23,47 @@ #include "absl/base/internal/scheduling_mode.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// SpinLockWait() waits until it can perform one of several transitions from -// "from" to "to". It returns when it performs a transition where done==true. -struct SpinLockWaitTransition { - uint32_t from; - uint32_t to; - bool done; -}; + // SpinLockWait() waits until it can perform one of several transitions from + // "from" to "to". It returns when it performs a transition where done==true. + struct SpinLockWaitTransition + { + uint32_t from; + uint32_t to; + bool done; + }; -// Wait until *w can transition from trans[i].from to trans[i].to for some i -// satisfying 0<=i *w, int n, - const SpinLockWaitTransition trans[], - SchedulingMode scheduling_mode); + // Wait until *w can transition from trans[i].from to trans[i].to for some i + // satisfying 0<=i* w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode); -// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all` -// is true, wake all such threads. On some systems, this may be a no-op; on -// those systems, threads calling SpinLockDelay() will always wake eventually -// even if SpinLockWake() is never called. -void SpinLockWake(std::atomic *w, bool all); + // If possible, wake some thread that has called SpinLockDelay(w, ...). If `all` + // is true, wake all such threads. On some systems, this may be a no-op; on + // those systems, threads calling SpinLockDelay() will always wake eventually + // even if SpinLockWake() is never called. + void SpinLockWake(std::atomic* w, bool all); -// Wait for an appropriate spin delay on iteration "loop" of a -// spin loop on location *w, whose previously observed value was "value". -// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, -// or may wait for a call to SpinLockWake(w). -void SpinLockDelay(std::atomic *w, uint32_t value, int loop, - base_internal::SchedulingMode scheduling_mode); + // Wait for an appropriate spin delay on iteration "loop" of a + // spin loop on location *w, whose previously observed value was "value". + // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, + // or may wait for a call to SpinLockWake(w). + void SpinLockDelay(std::atomic* w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode); -// Helper used by AbslInternalSpinLockDelay. -// Returns a suggested delay in nanoseconds for iteration number "loop". -int SpinLockSuggestedDelayNS(int loop); + // Helper used by AbslInternalSpinLockDelay. 
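A usage sketch for the SpinLock and SpinLockHolder types defined above; the guarded counter, its name, and the choice of cooperative scheduling are illustrative assumptions:

    #include "absl/base/attributes.h"
    #include "absl/base/const_init.h"
    #include "absl/base/internal/spinlock.h"
    #include "absl/base/thread_annotations.h"

    ABSL_CONST_INIT static absl::base_internal::SpinLock counter_lock(
        absl::kConstInit, absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL);
    static int counter ABSL_GUARDED_BY(counter_lock) = 0;

    void Increment() {
      // The holder acquires counter_lock for the scope and releases it on
      // every path out of the function, including early returns.
      absl::base_internal::SpinLockHolder h(&counter_lock);
      ++counter;
    }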
+ // Returns a suggested delay in nanoseconds for iteration number "loop". + int SpinLockSuggestedDelayNS(int loop); -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl // In some build configurations we pass --detect-odr-violations to the @@ -72,24 +72,26 @@ ABSL_NAMESPACE_END // --detect-odr-violations ignores symbols not mangled with C++ names. // By changing our extension points to be extern "C", we dodge this // check. -extern "C" { -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic *w, - bool all); -void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( - std::atomic *w, uint32_t value, int loop, - absl::base_internal::SchedulingMode scheduling_mode); +extern "C" +{ + void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic* w, bool all); + void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode + ); } -inline void absl::base_internal::SpinLockWake(std::atomic *w, - bool all) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all); +inline void absl::base_internal::SpinLockWake(std::atomic* w, bool all) +{ + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake) + (w, all); } inline void absl::base_internal::SpinLockDelay( - std::atomic *w, uint32_t value, int loop, - absl::base_internal::SchedulingMode scheduling_mode) { - ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) - (w, value, loop, scheduling_mode); + std::atomic* w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode +) +{ + ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) + (w, value, loop, scheduling_mode); } #endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/strerror.h b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h index 3500973..5f79457 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/strerror.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/strerror.h @@ -19,21 +19,23 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// A portable and thread-safe alternative to C89's `strerror`. -// -// The C89 specification of `strerror` is not suitable for use in a -// multi-threaded application as the returned string may be changed by calls to -// `strerror` from another thread. The many non-stdlib alternatives differ -// enough in their names, availability, and semantics to justify this wrapper -// around them. `errno` will not be modified by a call to `absl::StrError`. -std::string StrError(int errnum); + // A portable and thread-safe alternative to C89's `strerror`. + // + // The C89 specification of `strerror` is not suitable for use in a + // multi-threaded application as the returned string may be changed by calls to + // `strerror` from another thread. The many non-stdlib alternatives differ + // enough in their names, availability, and semantics to justify this wrapper + // around them. `errno` will not be modified by a call to `absl::StrError`. 
+ std::string StrError(int errnum); -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_STRERROR_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h index 119cf1f..ee27ec7 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/sysinfo.h @@ -33,17 +33,19 @@ #include "absl/base/config.h" #include "absl/base/port.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// Nominal core processor cycles per second of each processor. This is _not_ -// necessarily the frequency of the CycleClock counter (see cycleclock.h) -// Thread-safe. -double NominalCPUFrequency(); + // Nominal core processor cycles per second of each processor. This is _not_ + // necessarily the frequency of the CycleClock counter (see cycleclock.h) + // Thread-safe. + double NominalCPUFrequency(); -// Number of logical processors (hyperthreads) in system. Thread-safe. -int NumCPUs(); + // Number of logical processors (hyperthreads) in system. Thread-safe. + int NumCPUs(); // Return the thread id of the current thread, as told by the system. // No two currently-live threads implemented by the OS shall have the same ID. @@ -53,22 +55,22 @@ int NumCPUs(); // On Linux, you may send a signal to the resulting ID with kill(). However, // it is recommended for portability that you use pthread_kill() instead. #ifdef _WIN32 -// On Windows, process id and thread id are of the same type according to the -// return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned -// 32-bit type. -using pid_t = uint32_t; + // On Windows, process id and thread id are of the same type according to the + // return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned + // 32-bit type. + using pid_t = uint32_t; #endif -pid_t GetTID(); + pid_t GetTID(); -// Like GetTID(), but caches the result in thread-local storage in order -// to avoid unnecessary system calls. Note that there are some cases where -// one must call through to GetTID directly, which is why this exists as a -// separate function. For example, GetCachedTID() is not safe to call in -// an asynchronous signal-handling context nor right after a call to fork(). -pid_t GetCachedTID(); + // Like GetTID(), but caches the result in thread-local storage in order + // to avoid unnecessary system calls. Note that there are some cases where + // one must call through to GetTID directly, which is why this exists as a + // separate function. For example, GetCachedTID() is not safe to call in + // an asynchronous signal-handling context nor right after a call to fork(). 
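An illustrative combination of the StrError() wrapper with the sysinfo helpers declared nearby; the file name, message format, and function name are assumptions:

    #include <cerrno>
    #include <cstdio>
    #include <string>

    #include "absl/base/internal/strerror.h"
    #include "absl/base/internal/sysinfo.h"

    void LogOpenFailure(const char* path) {
      std::FILE* f = std::fopen(path, "r");
      if (f == nullptr) {
        // Unlike C89 strerror(), StrError() is thread-safe and leaves errno alone.
        const std::string msg = absl::base_internal::StrError(errno);
        std::fprintf(stderr, "fopen(%s) failed in thread %ld: %s\n", path,
                     static_cast<long>(absl::base_internal::GetTID()),
                     msg.c_str());
        return;
      }
      std::fclose(f);
    }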
+ pid_t GetCachedTID(); -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SYSINFO_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h index 4dab6a9..9ce05ae 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_annotations.h @@ -39,9 +39,9 @@ #define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ #if defined(__clang__) -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op #endif // GUARDED_BY() @@ -101,10 +101,10 @@ // Mutex m1_; // Mutex m2_ ACQUIRED_AFTER(m1_); #define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #define ACQUIRED_BEFORE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) // EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() // @@ -130,10 +130,10 @@ // void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } #define EXCLUSIVE_LOCKS_REQUIRED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #define SHARED_LOCKS_REQUIRED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) // LOCKS_EXCLUDED() // @@ -141,7 +141,7 @@ // cannot be held when calling this function (as Abseil's `Mutex` locks are // non-reentrant). #define LOCKS_EXCLUDED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) // LOCK_RETURNED() // @@ -149,13 +149,13 @@ // a public getter method that returns a pointer to a private mutex should // be annotated with LOCK_RETURNED. #define LOCK_RETURNED(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) // LOCKABLE // // Documents if a class/type is a lockable type (such as the `Mutex` class). #define LOCKABLE \ - THREAD_ANNOTATION_ATTRIBUTE__(lockable) + THREAD_ANNOTATION_ATTRIBUTE__(lockable) // SCOPED_LOCKABLE // @@ -165,28 +165,28 @@ // arguments; the analysis will assume that the destructor unlocks whatever the // constructor locked. #define SCOPED_LOCKABLE \ - THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) // EXCLUSIVE_LOCK_FUNCTION() // // Documents functions that acquire a lock in the body of a function, and do // not release it. #define EXCLUSIVE_LOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) // SHARED_LOCK_FUNCTION() // // Documents functions that acquire a shared (reader) lock in the body of a // function, and do not release it. #define SHARED_LOCK_FUNCTION(...) 
\ - THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) // UNLOCK_FUNCTION() // // Documents functions that expect a lock to be held on entry to the function, // and release it in the body of the function. #define UNLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) // EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() // @@ -197,20 +197,20 @@ // argument specifies the mutex that is locked on success. If unspecified, this // mutex is assumed to be `this`. #define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) // ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() // // Documents functions that dynamically check to see if a lock is held, and fail // if it is not held. #define ASSERT_EXCLUSIVE_LOCK(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) #define ASSERT_SHARED_LOCK(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) // NO_THREAD_SAFETY_ANALYSIS // @@ -218,7 +218,7 @@ // This annotation is used to mark functions that are known to be correct, but // the locking behavior is more complicated than the analyzer can handle. #define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) //------------------------------------------------------------------------------ // Tool-Supplied Annotations @@ -239,7 +239,7 @@ // that are incorrect and need to be fixed. It is used by automated tools to // avoid breaking the build when the analysis is updated. // Code owners are expected to eventually fix the routine. -#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS +#define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS // Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY // annotation that needs to be fixed, because it is producing thread safety @@ -251,20 +251,22 @@ // but the compiler cannot confirm that. #define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) - -namespace thread_safety_analysis { - -// Takes a reference to a guarded data member, and returns an unguarded -// reference. -template -inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS { - return v; -} - -template -inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { - return v; -} +namespace thread_safety_analysis +{ + + // Takes a reference to a guarded data member, and returns an unguarded + // reference. 
+ template + inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS + { + return v; + } + + template + inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS + { + return v; + } } // namespace thread_safety_analysis diff --git a/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h index 659694b..63852d8 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/thread_identity.h @@ -34,156 +34,162 @@ #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -struct SynchLocksHeld; -struct SynchWaitParams; - -namespace base_internal { - -class SpinLock; -struct ThreadIdentity; - -// Used by the implementation of absl::Mutex and absl::CondVar. -struct PerThreadSynch { - // The internal representation of absl::Mutex and absl::CondVar rely - // on the alignment of PerThreadSynch. Both store the address of the - // PerThreadSynch in the high-order bits of their internal state, - // which means the low kLowZeroBits of the address of PerThreadSynch - // must be zero. - static constexpr int kLowZeroBits = 8; - static constexpr int kAlignment = 1 << kLowZeroBits; - - // Returns the associated ThreadIdentity. - // This can be implemented as a cast because we guarantee - // PerThreadSynch is the first element of ThreadIdentity. - ThreadIdentity* thread_identity() { - return reinterpret_cast(this); - } - - PerThreadSynch *next; // Circular waiter queue; initialized to 0. - PerThreadSynch *skip; // If non-zero, all entries in Mutex queue - // up to and including "skip" have same - // condition as this, and will be woken later - bool may_skip; // if false while on mutex queue, a mutex unlocker - // is using this PerThreadSynch as a terminator. Its - // skip field must not be filled in because the loop - // might then skip over the terminator. - bool wake; // This thread is to be woken from a Mutex. - // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the - // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. - // - // The value of "x->cond_waiter" is meaningless if "x" is not on a - // Mutex waiter list. - bool cond_waiter; - bool maybe_unlocking; // Valid at head of Mutex waiter queue; - // true if UnlockSlow could be searching - // for a waiter to wake. Used for an optimization - // in Enqueue(). true is always a valid value. - // Can be reset to false when the unlocker or any - // writer releases the lock, or a reader fully - // releases the lock. It may not be set to false - // by a reader that decrements the count to - // non-zero. protected by mutex spinlock - bool suppress_fatal_errors; // If true, try to proceed even in the face - // of broken invariants. This is used within - // fatal signal handlers to improve the - // chances of debug logging information being - // output successfully. - int priority; // Priority of thread (updated every so often). - - // State values: - // kAvailable: This PerThreadSynch is available. - // kQueued: This PerThreadSynch is unavailable, it's currently queued on a - // Mutex or CondVar waistlist. - // - // Transitions from kQueued to kAvailable require a release - // barrier. This is needed as a waiter may use "state" to - // independently observe that it's no longer queued. - // - // Transitions from kAvailable to kQueued require no barrier, they - // are externally ordered by the Mutex. 
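A sketch applying the legacy (unprefixed) annotation macros from the header above to a small class; the class, mutex, and member names are illustrative assumptions:

    #include "absl/base/internal/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      void Add(int n) LOCKS_EXCLUDED(mu_) {
        absl::MutexLock l(&mu_);
        value_ += n;
      }

      int value() const LOCKS_EXCLUDED(mu_) {
        absl::MutexLock l(&mu_);
        return value_;
      }

     private:
      mutable absl::Mutex mu_;
      int value_ GUARDED_BY(mu_) = 0;
    };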
- enum State { - kAvailable, - kQueued - }; - std::atomic state; - - // The wait parameters of the current wait. waitp is null if the - // thread is not waiting. Transitions from null to non-null must - // occur before the enqueue commit point (state = kQueued in - // Enqueue() and CondVarEnqueue()). Transitions from non-null to - // null must occur after the wait is finished (state = kAvailable in - // Mutex::Block() and CondVar::WaitCommon()). This field may be - // changed only by the thread that describes this PerThreadSynch. A - // special case is Fer(), which calls Enqueue() on another thread, - // but with an identical SynchWaitParams pointer, thus leaving the - // pointer unchanged. - SynchWaitParams* waitp; - - intptr_t readers; // Number of readers in mutex. - - // When priority will next be read (cycles). - int64_t next_priority_read_cycles; - - // Locks held; used during deadlock detection. - // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). - SynchLocksHeld *all_locks; -}; - -// The instances of this class are allocated in NewThreadIdentity() with an -// alignment of PerThreadSynch::kAlignment. -struct ThreadIdentity { - // Must be the first member. The Mutex implementation requires that - // the PerThreadSynch object associated with each thread is - // PerThreadSynch::kAlignment aligned. We provide this alignment on - // ThreadIdentity itself. - PerThreadSynch per_thread_synch; - - // Private: Reserved for absl::synchronization_internal::Waiter. - struct WaiterState { - alignas(void*) char data[128]; - } waiter_state; - - // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). - std::atomic* blocked_count_ptr; - - // The following variables are mostly read/written just by the - // thread itself. The only exception is that these are read by - // a ticker thread as a hint. - std::atomic ticker; // Tick counter, incremented once per second. - std::atomic wait_start; // Ticker value when thread started waiting. - std::atomic is_idle; // Has thread become idle yet? - - ThreadIdentity* next; -}; - -// Returns the ThreadIdentity object representing the calling thread; guaranteed -// to be unique for its lifetime. The returned object will remain valid for the -// program's lifetime; although it may be re-assigned to a subsequent thread. -// If one does not exist, return nullptr instead. -// -// Does not malloc(*), and is async-signal safe. -// [*] Technically pthread_setspecific() does malloc on first use; however this -// is handled internally within tcmalloc's initialization already. -// -// New ThreadIdentity objects can be constructed and associated with a thread -// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. -ThreadIdentity* CurrentThreadIdentityIfPresent(); - -using ThreadIdentityReclaimerFunction = void (*)(void*); - -// Sets the current thread identity to the given value. 'reclaimer' is a -// pointer to the global function for cleaning up instances on thread -// destruction. -void SetCurrentThreadIdentity(ThreadIdentity* identity, - ThreadIdentityReclaimerFunction reclaimer); - -// Removes the currently associated ThreadIdentity from the running thread. -// This must be called from inside the ThreadIdentityReclaimerFunction, and only -// from that function. 
-void ClearCurrentThreadIdentity(); +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + struct SynchLocksHeld; + struct SynchWaitParams; + + namespace base_internal + { + + class SpinLock; + struct ThreadIdentity; + + // Used by the implementation of absl::Mutex and absl::CondVar. + struct PerThreadSynch + { + // The internal representation of absl::Mutex and absl::CondVar rely + // on the alignment of PerThreadSynch. Both store the address of the + // PerThreadSynch in the high-order bits of their internal state, + // which means the low kLowZeroBits of the address of PerThreadSynch + // must be zero. + static constexpr int kLowZeroBits = 8; + static constexpr int kAlignment = 1 << kLowZeroBits; + + // Returns the associated ThreadIdentity. + // This can be implemented as a cast because we guarantee + // PerThreadSynch is the first element of ThreadIdentity. + ThreadIdentity* thread_identity() + { + return reinterpret_cast(this); + } + + PerThreadSynch* next; // Circular waiter queue; initialized to 0. + PerThreadSynch* skip; // If non-zero, all entries in Mutex queue + // up to and including "skip" have same + // condition as this, and will be woken later + bool may_skip; // if false while on mutex queue, a mutex unlocker + // is using this PerThreadSynch as a terminator. Its + // skip field must not be filled in because the loop + // might then skip over the terminator. + bool wake; // This thread is to be woken from a Mutex. + // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the + // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. + // + // The value of "x->cond_waiter" is meaningless if "x" is not on a + // Mutex waiter list. + bool cond_waiter; + bool maybe_unlocking; // Valid at head of Mutex waiter queue; + // true if UnlockSlow could be searching + // for a waiter to wake. Used for an optimization + // in Enqueue(). true is always a valid value. + // Can be reset to false when the unlocker or any + // writer releases the lock, or a reader fully + // releases the lock. It may not be set to false + // by a reader that decrements the count to + // non-zero. protected by mutex spinlock + bool suppress_fatal_errors; // If true, try to proceed even in the face + // of broken invariants. This is used within + // fatal signal handlers to improve the + // chances of debug logging information being + // output successfully. + int priority; // Priority of thread (updated every so often). + + // State values: + // kAvailable: This PerThreadSynch is available. + // kQueued: This PerThreadSynch is unavailable, it's currently queued on a + // Mutex or CondVar waistlist. + // + // Transitions from kQueued to kAvailable require a release + // barrier. This is needed as a waiter may use "state" to + // independently observe that it's no longer queued. + // + // Transitions from kAvailable to kQueued require no barrier, they + // are externally ordered by the Mutex. + enum State + { + kAvailable, + kQueued + }; + std::atomic state; + + // The wait parameters of the current wait. waitp is null if the + // thread is not waiting. Transitions from null to non-null must + // occur before the enqueue commit point (state = kQueued in + // Enqueue() and CondVarEnqueue()). Transitions from non-null to + // null must occur after the wait is finished (state = kAvailable in + // Mutex::Block() and CondVar::WaitCommon()). This field may be + // changed only by the thread that describes this PerThreadSynch. 
A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams* waitp; + + intptr_t readers; // Number of readers in mutex. + + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; + + // Locks held; used during deadlock detection. + // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). + SynchLocksHeld* all_locks; + }; + + // The instances of this class are allocated in NewThreadIdentity() with an + // alignment of PerThreadSynch::kAlignment. + struct ThreadIdentity + { + // Must be the first member. The Mutex implementation requires that + // the PerThreadSynch object associated with each thread is + // PerThreadSynch::kAlignment aligned. We provide this alignment on + // ThreadIdentity itself. + PerThreadSynch per_thread_synch; + + // Private: Reserved for absl::synchronization_internal::Waiter. + struct WaiterState + { + alignas(void*) char data[128]; + } waiter_state; + + // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). + std::atomic* blocked_count_ptr; + + // The following variables are mostly read/written just by the + // thread itself. The only exception is that these are read by + // a ticker thread as a hint. + std::atomic ticker; // Tick counter, incremented once per second. + std::atomic wait_start; // Ticker value when thread started waiting. + std::atomic is_idle; // Has thread become idle yet? + + ThreadIdentity* next; + }; + + // Returns the ThreadIdentity object representing the calling thread; guaranteed + // to be unique for its lifetime. The returned object will remain valid for the + // program's lifetime; although it may be re-assigned to a subsequent thread. + // If one does not exist, return nullptr instead. + // + // Does not malloc(*), and is async-signal safe. + // [*] Technically pthread_setspecific() does malloc on first use; however this + // is handled internally within tcmalloc's initialization already. + // + // New ThreadIdentity objects can be constructed and associated with a thread + // by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. + ThreadIdentity* CurrentThreadIdentityIfPresent(); + + using ThreadIdentityReclaimerFunction = void (*)(void*); + + // Sets the current thread identity to the given value. 'reclaimer' is a + // pointer to the global function for cleaning up instances on thread + // destruction. + void SetCurrentThreadIdentity(ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer); + + // Removes the currently associated ThreadIdentity from the running thread. + // This must be called from inside the ThreadIdentityReclaimerFunction, and only + // from that function. + void ClearCurrentThreadIdentity(); // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= @@ -213,7 +219,7 @@ void ClearCurrentThreadIdentity(); #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 -#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ (__GOOGLE_GRTE_VERSION__ >= 20140228L) // Support for async-safe TLS was specifically added in GRTEv4. It's not // present in the upstream eglibc. 
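An illustrative sketch of the alignment contract documented for PerThreadSynch above: because instances are kAlignment (256-byte) aligned, the low kLowZeroBits of their address are guaranteed to be zero and can carry flag bits. This mirrors, but is not, the actual Mutex encoding; the function names are assumptions.

    #include <cstdint>

    #include "absl/base/internal/thread_identity.h"

    namespace example {

    constexpr std::uintptr_t kFlagMask =
        (std::uintptr_t{1}
         << absl::base_internal::PerThreadSynch::kLowZeroBits) - 1;  // 0xFF

    inline std::uintptr_t PackPointerAndFlags(
        absl::base_internal::PerThreadSynch* p, std::uintptr_t flags) {
      // The pointer occupies the high bits; flags must fit below kAlignment.
      return reinterpret_cast<std::uintptr_t>(p) | (flags & kFlagMask);
    }

    inline absl::base_internal::PerThreadSynch* UnpackPointer(std::uintptr_t v) {
      return reinterpret_cast<absl::base_internal::PerThreadSynch*>(v & ~kFlagMask);
    }

    }  // namespace example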
@@ -221,17 +227,17 @@ void ClearCurrentThreadIdentity(); #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS #else #define ABSL_THREAD_IDENTITY_MODE \ - ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC #endif #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #if ABSL_PER_THREAD_TLS -ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* - thread_identity_ptr; + ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* + thread_identity_ptr; #elif defined(ABSL_HAVE_THREAD_LOCAL) -ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; + ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; #else #error Thread-local storage not detected on this platform #endif @@ -248,9 +254,10 @@ ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; #endif #ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT -inline ThreadIdentity* CurrentThreadIdentityIfPresent() { - return thread_identity_ptr; -} + inline ThreadIdentity* CurrentThreadIdentityIfPresent() + { + return thread_identity_ptr; + } #endif #elif ABSL_THREAD_IDENTITY_MODE != \ @@ -258,8 +265,8 @@ inline ThreadIdentity* CurrentThreadIdentityIfPresent() { #error Unknown ABSL_THREAD_IDENTITY_MODE #endif -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h index 075f527..379d81d 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/throw_delegate.h @@ -21,55 +21,57 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// Helper functions that allow throwing exceptions consistently from anywhere. -// The main use case is for header-based libraries (eg templates), as they will -// be built by many different targets with their own compiler options. -// In particular, this will allow a safe way to throw exceptions even if the -// caller is compiled with -fno-exceptions. This is intended for implementing -// things like map<>::at(), which the standard documents as throwing an -// exception on error. -// -// Using other techniques like #if tricks could lead to ODR violations. -// -// You shouldn't use it unless you're writing code that you know will be built -// both with and without exceptions and you need to conform to an interface -// that uses exceptions. + // Helper functions that allow throwing exceptions consistently from anywhere. + // The main use case is for header-based libraries (eg templates), as they will + // be built by many different targets with their own compiler options. + // In particular, this will allow a safe way to throw exceptions even if the + // caller is compiled with -fno-exceptions. This is intended for implementing + // things like map<>::at(), which the standard documents as throwing an + // exception on error. + // + // Using other techniques like #if tricks could lead to ODR violations. 
+ // + // You shouldn't use it unless you're writing code that you know will be built + // both with and without exceptions and you need to conform to an interface + // that uses exceptions. -[[noreturn]] void ThrowStdLogicError(const std::string& what_arg); -[[noreturn]] void ThrowStdLogicError(const char* what_arg); -[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); -[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); -[[noreturn]] void ThrowStdDomainError(const std::string& what_arg); -[[noreturn]] void ThrowStdDomainError(const char* what_arg); -[[noreturn]] void ThrowStdLengthError(const std::string& what_arg); -[[noreturn]] void ThrowStdLengthError(const char* what_arg); -[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); -[[noreturn]] void ThrowStdOutOfRange(const char* what_arg); -[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); -[[noreturn]] void ThrowStdRuntimeError(const char* what_arg); -[[noreturn]] void ThrowStdRangeError(const std::string& what_arg); -[[noreturn]] void ThrowStdRangeError(const char* what_arg); -[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); -[[noreturn]] void ThrowStdOverflowError(const char* what_arg); -[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); -[[noreturn]] void ThrowStdUnderflowError(const char* what_arg); + [[noreturn]] void ThrowStdLogicError(const std::string& what_arg); + [[noreturn]] void ThrowStdLogicError(const char* what_arg); + [[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); + [[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); + [[noreturn]] void ThrowStdDomainError(const std::string& what_arg); + [[noreturn]] void ThrowStdDomainError(const char* what_arg); + [[noreturn]] void ThrowStdLengthError(const std::string& what_arg); + [[noreturn]] void ThrowStdLengthError(const char* what_arg); + [[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); + [[noreturn]] void ThrowStdOutOfRange(const char* what_arg); + [[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); + [[noreturn]] void ThrowStdRuntimeError(const char* what_arg); + [[noreturn]] void ThrowStdRangeError(const std::string& what_arg); + [[noreturn]] void ThrowStdRangeError(const char* what_arg); + [[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); + [[noreturn]] void ThrowStdOverflowError(const char* what_arg); + [[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); + [[noreturn]] void ThrowStdUnderflowError(const char* what_arg); -[[noreturn]] void ThrowStdBadFunctionCall(); -[[noreturn]] void ThrowStdBadAlloc(); + [[noreturn]] void ThrowStdBadFunctionCall(); + [[noreturn]] void ThrowStdBadAlloc(); -// ThrowStdBadArrayNewLength() cannot be consistently supported because -// std::bad_array_new_length is missing in libstdc++ until 4.9.0. -// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html -// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html -// libcxx (as of 3.2) and msvc (as of 2015) both have it. -// [[noreturn]] void ThrowStdBadArrayNewLength(); + // ThrowStdBadArrayNewLength() cannot be consistently supported because + // std::bad_array_new_length is missing in libstdc++ until 4.9.0. + // https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html + // https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html + // libcxx (as of 3.2) and msvc (as of 2015) both have it. 
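A sketch of the intended use described above: a header-only container reports errors through a delegate so it behaves sensibly whether or not the caller builds with exceptions. The container and method names are illustrative assumptions.

    #include <cstddef>

    #include "absl/base/internal/throw_delegate.h"

    template <typename T>
    class FixedSpan {
     public:
      FixedSpan(const T* data, std::size_t size) : data_(data), size_(size) {}

      const T& at(std::size_t i) const {
        if (i >= size_) {
          // Throws std::out_of_range when exceptions are available; otherwise
          // the delegate terminates the process with a diagnostic.
          absl::base_internal::ThrowStdOutOfRange("FixedSpan::at");
        }
        return data_[i];
      }

     private:
      const T* data_;
      std::size_t size_;
    };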
+ // [[noreturn]] void ThrowStdBadArrayNewLength(); -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h index 093dd9b..524e7c4 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/unaligned_access.h @@ -31,51 +31,65 @@ // The unaligned API is C++ only. The declarations use C++ features // (namespaces, inline) which are absent or incompatible in C. #if defined(__cplusplus) -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { - -inline uint16_t UnalignedLoad16(const void *p) { - uint16_t t; - memcpy(&t, p, sizeof t); - return t; -} - -inline uint32_t UnalignedLoad32(const void *p) { - uint32_t t; - memcpy(&t, p, sizeof t); - return t; -} - -inline uint64_t UnalignedLoad64(const void *p) { - uint64_t t; - memcpy(&t, p, sizeof t); - return t; -} - -inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } - -inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } - -inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } - -} // namespace base_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { + + inline uint16_t UnalignedLoad16(const void* p) + { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline uint32_t UnalignedLoad32(const void* p) + { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline uint64_t UnalignedLoad64(const void* p) + { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; + } + + inline void UnalignedStore16(void* p, uint16_t v) + { + memcpy(p, &v, sizeof v); + } + + inline void UnalignedStore32(void* p, uint32_t v) + { + memcpy(p, &v, sizeof v); + } + + inline void UnalignedStore64(void* p, uint64_t v) + { + memcpy(p, &v, sizeof v); + } + + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ - (absl::base_internal::UnalignedLoad16(_p)) + (absl::base_internal::UnalignedLoad16(_p)) #define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ - (absl::base_internal::UnalignedLoad32(_p)) + (absl::base_internal::UnalignedLoad32(_p)) #define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ - (absl::base_internal::UnalignedLoad64(_p)) + (absl::base_internal::UnalignedLoad64(_p)) #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ - (absl::base_internal::UnalignedStore16(_p, _val)) + (absl::base_internal::UnalignedStore16(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ - (absl::base_internal::UnalignedStore32(_p, _val)) + (absl::base_internal::UnalignedStore32(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ - (absl::base_internal::UnalignedStore64(_p, _val)) + (absl::base_internal::UnalignedStore64(_p, _val)) #endif // defined(__cplusplus), end of unaligned API diff --git a/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h index 2cbeae3..d7af4bc 100644 --- a/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h +++ b/CAPI/cpp/grpc/include/absl/base/internal/unscaledcycleclock.h @@ -70,62 +70,67 @@ // Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. 
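A usage sketch for the unaligned load/store helpers above; the wire-format offset and field layout are illustrative assumptions:

    #include <cstdint>

    #include "absl/base/internal/unaligned_access.h"

    // Reads a host-byte-order length field at byte offset 4, even when the
    // buffer has no particular alignment.
    inline uint32_t ReadLengthField(const unsigned char* frame) {
      return absl::base_internal::UnalignedLoad32(frame + 4);
    }

    inline void WriteLengthField(unsigned char* frame, uint32_t len) {
      absl::base_internal::UnalignedStore32(frame + 4, len);
    }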
// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 #if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) -#define ABSL_USE_UNSCALED_CYCLECLOCK \ - (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ - ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) +#define ABSL_USE_UNSCALED_CYCLECLOCK \ + (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ + ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) #endif #if ABSL_USE_UNSCALED_CYCLECLOCK // This macro can be used to test if UnscaledCycleClock::Frequency() // is NominalCPUFrequency() on a particular platform. -#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ - defined(_M_IX86) || defined(_M_X64)) +#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || defined(_M_IX86) || defined(_M_X64)) #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace time_internal { -class UnscaledCycleClockWrapperForGetCurrentTime; -} // namespace time_internal - -namespace base_internal { -class CycleClock; -class UnscaledCycleClockWrapperForInitializeFrequency; - -class UnscaledCycleClock { - private: - UnscaledCycleClock() = delete; - - // Return the value of a cycle counter that counts at a rate that is - // approximately constant. - static int64_t Now(); - - // Return the how much UnscaledCycleClock::Now() increases per second. - // This is not necessarily the core CPU clock frequency. - // It may be the nominal value report by the kernel, rather than a measured - // value. - static double Frequency(); - - // Allowed users - friend class base_internal::CycleClock; - friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; - friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; -}; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace time_internal + { + class UnscaledCycleClockWrapperForGetCurrentTime; + } // namespace time_internal + + namespace base_internal + { + class CycleClock; + class UnscaledCycleClockWrapperForInitializeFrequency; + + class UnscaledCycleClock + { + private: + UnscaledCycleClock() = delete; + + // Return the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // Return the how much UnscaledCycleClock::Now() increases per second. + // This is not necessarily the core CPU clock frequency. + // It may be the nominal value report by the kernel, rather than a measured + // value. 
+ static double Frequency(); + + // Allowed users + friend class base_internal::CycleClock; + friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; + friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; + }; #if defined(__x86_64__) -inline int64_t UnscaledCycleClock::Now() { - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -} + inline int64_t UnscaledCycleClock::Now() + { + uint64_t low, high; + __asm__ volatile("rdtsc" + : "=a"(low), "=d"(high)); + return (high << 32) | low; + } #endif -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_USE_UNSCALED_CYCLECLOCK diff --git a/CAPI/cpp/grpc/include/absl/base/log_severity.h b/CAPI/cpp/grpc/include/absl/base/log_severity.h index 8bdca38..ba503a0 100644 --- a/CAPI/cpp/grpc/include/absl/base/log_severity.h +++ b/CAPI/cpp/grpc/include/absl/base/log_severity.h @@ -21,152 +21,157 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -// absl::LogSeverity -// -// Four severity levels are defined. Logging APIs should terminate the program -// when a message is logged at severity `kFatal`; the other levels have no -// special semantics. -// -// Values other than the four defined levels (e.g. produced by `static_cast`) -// are valid, but their semantics when passed to a function, macro, or flag -// depend on the function, macro, or flag. The usual behavior is to normalize -// such values to a defined severity level, however in some cases values other -// than the defined levels are useful for comparison. -// -// Example: -// -// // Effectively disables all logging: -// SetMinLogLevel(static_cast(100)); -// -// Abseil flags may be defined with type `LogSeverity`. Dependency layering -// constraints require that the `AbslParseFlag()` overload be declared and -// defined in the flags library itself rather than here. The `AbslUnparseFlag()` -// overload is defined there as well for consistency. -// -// absl::LogSeverity Flag String Representation -// -// An `absl::LogSeverity` has a string representation used for parsing -// command-line flags based on the enumerator name (e.g. `kFatal`) or -// its unprefixed name (without the `k`) in any case-insensitive form. (E.g. -// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an -// unprefixed string representation in all caps (e.g. "FATAL") or an integer. -// -// Additionally, the parser accepts arbitrary integers (as if the type were -// `int`). -// -// Examples: -// -// --my_log_level=kInfo -// --my_log_level=INFO -// --my_log_level=info -// --my_log_level=0 -// -// Unparsing a flag produces the same result as `absl::LogSeverityName()` for -// the standard levels and a base-ten integer otherwise. -enum class LogSeverity : int { - kInfo = 0, - kWarning = 1, - kError = 2, - kFatal = 3, -}; - -// LogSeverities() -// -// Returns an iterable of all standard `absl::LogSeverity` values, ordered from -// least to most severe. -constexpr std::array LogSeverities() { - return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, - absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; -} - -// LogSeverityName() -// -// Returns the all-caps string representation (e.g. "INFO") of the specified -// severity level if it is one of the standard levels and "UNKNOWN" otherwise. 
-constexpr const char* LogSeverityName(absl::LogSeverity s) { - return s == absl::LogSeverity::kInfo - ? "INFO" - : s == absl::LogSeverity::kWarning - ? "WARNING" - : s == absl::LogSeverity::kError - ? "ERROR" - : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN"; -} - -// NormalizeLogSeverity() -// -// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` -// normalize to `kError` (**NOT** `kFatal`). -constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) { - return s < absl::LogSeverity::kInfo - ? absl::LogSeverity::kInfo - : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s; -} -constexpr absl::LogSeverity NormalizeLogSeverity(int s) { - return absl::NormalizeLogSeverity(static_cast(s)); -} - -// operator<< -// -// The exact representation of a streamed `absl::LogSeverity` is deliberately -// unspecified; do not rely on it. -std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); - -// Enums representing a lower bound for LogSeverity. APIs that only operate on -// messages of at least a certain level (for example, `SetMinLogLevel()`) use -// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is -// a level above all threshold levels and therefore no log message will -// ever meet this threshold. -enum class LogSeverityAtLeast : int { - kInfo = static_cast(absl::LogSeverity::kInfo), - kWarning = static_cast(absl::LogSeverity::kWarning), - kError = static_cast(absl::LogSeverity::kError), - kFatal = static_cast(absl::LogSeverity::kFatal), - kInfinity = 1000, -}; - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); - -// Enums representing an upper bound for LogSeverity. APIs that only operate on -// messages of at most a certain level (for example, buffer all messages at or -// below a certain level) use this type to specify that level. -// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold -// levels and therefore will exclude all log messages. -enum class LogSeverityAtMost : int { - kNegativeInfinity = -1000, - kInfo = static_cast(absl::LogSeverity::kInfo), - kWarning = static_cast(absl::LogSeverity::kWarning), - kError = static_cast(absl::LogSeverity::kError), - kFatal = static_cast(absl::LogSeverity::kFatal), -}; - -std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // absl::LogSeverity + // + // Four severity levels are defined. Logging APIs should terminate the program + // when a message is logged at severity `kFatal`; the other levels have no + // special semantics. + // + // Values other than the four defined levels (e.g. produced by `static_cast`) + // are valid, but their semantics when passed to a function, macro, or flag + // depend on the function, macro, or flag. The usual behavior is to normalize + // such values to a defined severity level, however in some cases values other + // than the defined levels are useful for comparison. + // + // Example: + // + // // Effectively disables all logging: + // SetMinLogLevel(static_cast(100)); + // + // Abseil flags may be defined with type `LogSeverity`. Dependency layering + // constraints require that the `AbslParseFlag()` overload be declared and + // defined in the flags library itself rather than here. The `AbslUnparseFlag()` + // overload is defined there as well for consistency. 
+ // + // absl::LogSeverity Flag String Representation + // + // An `absl::LogSeverity` has a string representation used for parsing + // command-line flags based on the enumerator name (e.g. `kFatal`) or + // its unprefixed name (without the `k`) in any case-insensitive form. (E.g. + // "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an + // unprefixed string representation in all caps (e.g. "FATAL") or an integer. + // + // Additionally, the parser accepts arbitrary integers (as if the type were + // `int`). + // + // Examples: + // + // --my_log_level=kInfo + // --my_log_level=INFO + // --my_log_level=info + // --my_log_level=0 + // + // Unparsing a flag produces the same result as `absl::LogSeverityName()` for + // the standard levels and a base-ten integer otherwise. + enum class LogSeverity : int + { + kInfo = 0, + kWarning = 1, + kError = 2, + kFatal = 3, + }; + + // LogSeverities() + // + // Returns an iterable of all standard `absl::LogSeverity` values, ordered from + // least to most severe. + constexpr std::array LogSeverities() + { + return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; + } + + // LogSeverityName() + // + // Returns the all-caps string representation (e.g. "INFO") of the specified + // severity level if it is one of the standard levels and "UNKNOWN" otherwise. + constexpr const char* LogSeverityName(absl::LogSeverity s) + { + return s == absl::LogSeverity::kInfo ? "INFO" : s == absl::LogSeverity::kWarning ? "WARNING" : + s == absl::LogSeverity::kError ? "ERROR" : + s == absl::LogSeverity::kFatal ? "FATAL" : + "UNKNOWN"; + } + + // NormalizeLogSeverity() + // + // Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` + // normalize to `kError` (**NOT** `kFatal`). + constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) + { + return s < absl::LogSeverity::kInfo ? absl::LogSeverity::kInfo : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : + s; + } + constexpr absl::LogSeverity NormalizeLogSeverity(int s) + { + return absl::NormalizeLogSeverity(static_cast(s)); + } + + // operator<< + // + // The exact representation of a streamed `absl::LogSeverity` is deliberately + // unspecified; do not rely on it. + std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); + + // Enums representing a lower bound for LogSeverity. APIs that only operate on + // messages of at least a certain level (for example, `SetMinLogLevel()`) use + // this type to specify that level. absl::LogSeverityAtLeast::kInfinity is + // a level above all threshold levels and therefore no log message will + // ever meet this threshold. + enum class LogSeverityAtLeast : int + { + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + kInfinity = 1000, + }; + + std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); + + // Enums representing an upper bound for LogSeverity. APIs that only operate on + // messages of at most a certain level (for example, buffer all messages at or + // below a certain level) use this type to specify that level. + // absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold + // levels and therefore will exclude all log messages. 
+ enum class LogSeverityAtMost : int + { + kNegativeInfinity = -1000, + kInfo = static_cast(absl::LogSeverity::kInfo), + kWarning = static_cast(absl::LogSeverity::kWarning), + kError = static_cast(absl::LogSeverity::kError), + kFatal = static_cast(absl::LogSeverity::kFatal), + }; + + std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); #define COMPOP(op1, op2, T) \ - constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \ - return static_cast(lhs) op1 rhs; \ - } \ - constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \ - return lhs op2 static_cast(rhs); \ - } - -// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ -// `LogSeverityAtMost` are only supported in one direction. -// Valid checks are: -// LogSeverity >= LogSeverityAtLeast -// LogSeverity < LogSeverityAtLeast -// LogSeverity <= LogSeverityAtMost -// LogSeverity > LogSeverityAtMost -COMPOP(>, <, LogSeverityAtLeast) -COMPOP(<=, >=, LogSeverityAtLeast) -COMPOP(<, >, LogSeverityAtMost) -COMPOP(>=, <=, LogSeverityAtMost) + constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) \ + { \ + return static_cast(lhs) op1 rhs; \ + } \ + constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) \ + { \ + return lhs op2 static_cast(rhs); \ + } + + // Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ + // `LogSeverityAtMost` are only supported in one direction. + // Valid checks are: + // LogSeverity >= LogSeverityAtLeast + // LogSeverity < LogSeverityAtLeast + // LogSeverity <= LogSeverityAtMost + // LogSeverity > LogSeverityAtMost + COMPOP(>, <, LogSeverityAtLeast) + COMPOP(<=, >=, LogSeverityAtLeast) + COMPOP(<, >, LogSeverityAtMost) + COMPOP(>=, <=, LogSeverityAtMost) #undef COMPOP -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_LOG_SEVERITY_H_ diff --git a/CAPI/cpp/grpc/include/absl/base/macros.h b/CAPI/cpp/grpc/include/absl/base/macros.h index 3e085a9..e9a7e7a 100644 --- a/CAPI/cpp/grpc/include/absl/base/macros.h +++ b/CAPI/cpp/grpc/include/absl/base/macros.h @@ -42,17 +42,19 @@ // can be used in defining new arrays. If you use this macro on a pointer by // mistake, you will get a compile-time error. #define ABSL_ARRAYSIZE(array) \ - (sizeof(::absl::macros_internal::ArraySizeHelper(array))) + (sizeof(::absl::macros_internal::ArraySizeHelper(array))) -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace macros_internal { -// Note: this internal template function declaration is used by ABSL_ARRAYSIZE. -// The function doesn't need a definition, as we only use its type. -template -auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; -} // namespace macros_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace macros_internal + { + // Note: this internal template function declaration is used by ABSL_ARRAYSIZE. + // The function doesn't need a definition, as we only use its type. 
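// Illustrative sketch of the log_severity.h API documented above; not part of
// the patch. The warning threshold chosen here is arbitrary — only the
// comparison direction (LogSeverity on the left, LogSeverityAtLeast on the
// right) is one of the supported forms listed for COMPOP.
#include "absl/base/log_severity.h"

inline bool ShouldLog(absl::LogSeverity s)
{
    // Out-of-range values normalize to a defined level first: anything below
    // kInfo becomes kInfo, anything above kFatal becomes kError (not kFatal).
    const absl::LogSeverity normalized = absl::NormalizeLogSeverity(s);
    // Keep messages at or above a (hypothetical) warning threshold.
    return normalized >= absl::LogSeverityAtLeast::kWarning;
}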
+ template + auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; + } // namespace macros_internal + ABSL_NAMESPACE_END } // namespace absl // ABSL_BAD_CALL_IF() @@ -75,7 +77,7 @@ ABSL_NAMESPACE_END // #endif // ABSL_BAD_CALL_IF #if ABSL_HAVE_ATTRIBUTE(enable_if) #define ABSL_BAD_CALL_IF(expr, msg) \ - __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) + __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) #endif // ABSL_ASSERT() @@ -92,25 +94,24 @@ ABSL_NAMESPACE_END // https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ #if defined(NDEBUG) #define ABSL_ASSERT(expr) \ - (false ? static_cast(expr) : static_cast(0)) + (false ? static_cast(expr) : static_cast(0)) #else -#define ABSL_ASSERT(expr) \ - (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) \ - : [] { assert(false && #expr); }()) // NOLINT +#define ABSL_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) : [] { assert(false && #expr); }()) // NOLINT #endif // `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()` // aborts the program in release mode (when NDEBUG is defined). The // implementation should abort the program as quickly as possible and ideally it // should not be possible to ignore the abort request. -#if (ABSL_HAVE_BUILTIN(__builtin_trap) && \ - ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \ +#if (ABSL_HAVE_BUILTIN(__builtin_trap) && ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \ (defined(__GNUC__) && !defined(__clang__)) #define ABSL_INTERNAL_HARDENING_ABORT() \ - do { \ - __builtin_trap(); \ - __builtin_unreachable(); \ - } while (false) + do \ + { \ + __builtin_trap(); \ + __builtin_unreachable(); \ + } while (false) #else #define ABSL_INTERNAL_HARDENING_ABORT() abort() #endif @@ -127,9 +128,8 @@ ABSL_NAMESPACE_END // See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on // hardened mode. #if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) -#define ABSL_HARDENING_ASSERT(expr) \ - (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) \ - : [] { ABSL_INTERNAL_HARDENING_ABORT(); }()) +#define ABSL_HARDENING_ASSERT(expr) \ + (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) : [] { ABSL_INTERNAL_HARDENING_ABORT(); }()) #else #define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr) #endif @@ -137,11 +137,18 @@ ABSL_NAMESPACE_END #ifdef ABSL_HAVE_EXCEPTIONS #define ABSL_INTERNAL_TRY try #define ABSL_INTERNAL_CATCH_ANY catch (...) -#define ABSL_INTERNAL_RETHROW do { throw; } while (false) +#define ABSL_INTERNAL_RETHROW \ + do \ + { \ + throw; \ + } while (false) #else // ABSL_HAVE_EXCEPTIONS #define ABSL_INTERNAL_TRY if (true) #define ABSL_INTERNAL_CATCH_ANY else if (false) -#define ABSL_INTERNAL_RETHROW do {} while (false) +#define ABSL_INTERNAL_RETHROW \ + do \ + { \ + } while (false) #endif // ABSL_HAVE_EXCEPTIONS // `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which diff --git a/CAPI/cpp/grpc/include/absl/base/optimization.h b/CAPI/cpp/grpc/include/absl/base/optimization.h index db5cc09..77592b0 100644 --- a/CAPI/cpp/grpc/include/absl/base/optimization.h +++ b/CAPI/cpp/grpc/include/absl/base/optimization.h @@ -40,7 +40,11 @@ // return result; // } #if defined(__pnacl__) -#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \ + if (volatile int x = 0) \ + { \ + (void)x; \ + } #elif defined(__clang__) // Clang will not tail call given inline volatile assembly. 
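// Illustrative sketch of ABSL_ARRAYSIZE and ABSL_ASSERT from the macros.h
// hunks above; not part of the patch. The array and its length are
// hypothetical.
#include "absl/base/macros.h"

inline int SumFirstThree(const int (&values)[5])
{
    // ABSL_ARRAYSIZE only compiles for genuine arrays, so passing a pointer
    // here by mistake is rejected at compile time; ABSL_ASSERT is usable in
    // this constexpr-friendly form in both debug and NDEBUG builds.
    ABSL_ASSERT(ABSL_ARRAYSIZE(values) >= 3);
    return values[0] + values[1] + values[2];
}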
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") @@ -52,7 +56,11 @@ // The __nop() intrinsic blocks the optimisation. #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() #else -#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() \ + if (volatile int x = 0) \ + { \ + (void)x; \ + } #endif // ABSL_CACHELINE_SIZE @@ -210,17 +218,20 @@ #elif ABSL_HAVE_BUILTIN(__builtin_assume) #define ABSL_ASSUME(cond) __builtin_assume(cond) #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) -#define ABSL_ASSUME(cond) \ - do { \ - if (!(cond)) __builtin_unreachable(); \ - } while (0) +#define ABSL_ASSUME(cond) \ + do \ + { \ + if (!(cond)) \ + __builtin_unreachable(); \ + } while (0) #elif defined(_MSC_VER) #define ABSL_ASSUME(cond) __assume(cond) #else -#define ABSL_ASSUME(cond) \ - do { \ - static_cast(false && (cond)); \ - } while (0) +#define ABSL_ASSUME(cond) \ + do \ + { \ + static_cast(false && (cond)); \ + } while (0) #endif // ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) @@ -244,7 +255,7 @@ #define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x #define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ - asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) + asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) #else #define ABSL_INTERNAL_UNIQUE_SMALL_NAME() #endif diff --git a/CAPI/cpp/grpc/include/absl/base/options.h b/CAPI/cpp/grpc/include/absl/base/options.h index 13aeda3..cc589bb 100644 --- a/CAPI/cpp/grpc/include/absl/base/options.h +++ b/CAPI/cpp/grpc/include/absl/base/options.h @@ -102,7 +102,6 @@ #define ABSL_OPTION_USE_STD_ANY 0 - // ABSL_OPTION_USE_STD_OPTIONAL // // This option controls whether absl::optional is implemented as an alias to @@ -129,7 +128,6 @@ #define ABSL_OPTION_USE_STD_OPTIONAL 0 - // ABSL_OPTION_USE_STD_STRING_VIEW // // This option controls whether absl::string_view is implemented as an alias to @@ -182,7 +180,6 @@ #define ABSL_OPTION_USE_STD_VARIANT 0 - // ABSL_OPTION_USE_INLINE_NAMESPACE // ABSL_OPTION_INLINE_NAMESPACE_NAME // diff --git a/CAPI/cpp/grpc/include/absl/base/thread_annotations.h b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h index bc8a620..fc2fc2c 100644 --- a/CAPI/cpp/grpc/include/absl/base/thread_annotations.h +++ b/CAPI/cpp/grpc/include/absl/base/thread_annotations.h @@ -140,14 +140,14 @@ // void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } #if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ - __attribute__((exclusive_locks_required(__VA_ARGS__))) + __attribute__((exclusive_locks_required(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) #endif #if ABSL_HAVE_ATTRIBUTE(shared_locks_required) #define ABSL_SHARED_LOCKS_REQUIRED(...) \ - __attribute__((shared_locks_required(__VA_ARGS__))) + __attribute__((shared_locks_required(__VA_ARGS__))) #else #define ABSL_SHARED_LOCKS_REQUIRED(...) #endif @@ -202,7 +202,7 @@ // not release it. #if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ - __attribute__((exclusive_lock_function(__VA_ARGS__))) + __attribute__((exclusive_lock_function(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) #endif @@ -213,7 +213,7 @@ // function, and do not release it. #if ABSL_HAVE_ATTRIBUTE(shared_lock_function) #define ABSL_SHARED_LOCK_FUNCTION(...) 
\ - __attribute__((shared_lock_function(__VA_ARGS__))) + __attribute__((shared_lock_function(__VA_ARGS__))) #else #define ABSL_SHARED_LOCK_FUNCTION(...) #endif @@ -238,14 +238,14 @@ // mutex is assumed to be `this`. #if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ - __attribute__((exclusive_trylock_function(__VA_ARGS__))) + __attribute__((exclusive_trylock_function(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) #define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ - __attribute__((shared_trylock_function(__VA_ARGS__))) + __attribute__((shared_trylock_function(__VA_ARGS__))) #else #define ABSL_SHARED_TRYLOCK_FUNCTION(...) #endif @@ -256,14 +256,14 @@ // if it is not held. #if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ - __attribute__((assert_exclusive_lock(__VA_ARGS__))) + __attribute__((assert_exclusive_lock(__VA_ARGS__))) #else #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) #endif #if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) #define ABSL_ASSERT_SHARED_LOCK(...) \ - __attribute__((assert_shared_lock(__VA_ARGS__))) + __attribute__((assert_shared_lock(__VA_ARGS__))) #else #define ABSL_ASSERT_SHARED_LOCK(...) #endif @@ -275,7 +275,7 @@ // the locking behavior is more complicated than the analyzer can handle. #if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) #define ABSL_NO_THREAD_SAFETY_ANALYSIS \ - __attribute__((no_thread_safety_analysis)) + __attribute__((no_thread_safety_analysis)) #else #define ABSL_NO_THREAD_SAFETY_ANALYSIS #endif @@ -311,25 +311,29 @@ // but the compiler cannot confirm that. #define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace base_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace base_internal + { -// Takes a reference to a guarded data member, and returns an unguarded -// reference. -// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. -template -inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { - return v; -} + // Takes a reference to a guarded data member, and returns an unguarded + // reference. + // Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. 
+ template + inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS + { + return v; + } -template -inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { - return v; -} + template + inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS + { + return v; + } -} // namespace base_internal -ABSL_NAMESPACE_END + } // namespace base_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_THREAD_ANNOTATIONS_H_ diff --git a/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h index 960ccd0..e4a38c3 100644 --- a/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h +++ b/CAPI/cpp/grpc/include/absl/cleanup/cleanup.h @@ -74,67 +74,73 @@ #include "absl/base/macros.h" #include "absl/cleanup/internal/cleanup.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -template -class ABSL_MUST_USE_RESULT Cleanup final { - static_assert(cleanup_internal::WasDeduced(), - "Explicit template parameters are not supported."); - - static_assert(cleanup_internal::ReturnsVoid(), - "Callbacks that return values are not supported."); - - public: - Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT - - Cleanup(Cleanup&& other) = default; - - void Cancel() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); - storage_.DestroyCallback(); - } - - void Invoke() && { - ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); - storage_.InvokeCallback(); - storage_.DestroyCallback(); - } - - ~Cleanup() { - if (storage_.IsCallbackEngaged()) { - storage_.InvokeCallback(); - storage_.DestroyCallback(); - } - } - - private: - cleanup_internal::Storage storage_; -}; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + template + class ABSL_MUST_USE_RESULT Cleanup final + { + static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); + + static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); + + public: + Cleanup(Callback callback) : + storage_(std::move(callback)) + { + } // NOLINT + + Cleanup(Cleanup&& other) = default; + + void Cancel() && + { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.DestroyCallback(); + } + + void Invoke() && + { + ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + + ~Cleanup() + { + if (storage_.IsCallbackEngaged()) + { + storage_.InvokeCallback(); + storage_.DestroyCallback(); + } + } + + private: + cleanup_internal::Storage storage_; + }; // `absl::Cleanup c = /* callback */;` // // C++17 type deduction API for creating an instance of `absl::Cleanup` #if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -template -Cleanup(Callback callback) -> Cleanup; + template + Cleanup(Callback callback) -> Cleanup; #endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -// `auto c = absl::MakeCleanup(/* callback */);` -// -// C++11 type deduction API for creating an instance of `absl::Cleanup` -template -absl::Cleanup MakeCleanup(Callback callback) { - static_assert(cleanup_internal::WasDeduced(), - "Explicit template parameters are not supported."); + // `auto c = absl::MakeCleanup(/* callback */);` + // + // C++11 type deduction API for creating an instance of `absl::Cleanup` + template + absl::Cleanup MakeCleanup(Callback callback) + { + static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); - static_assert(cleanup_internal::ReturnsVoid(), - "Callbacks that return values are not supported."); + 
static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); - return {std::move(callback)}; -} + return {std::move(callback)}; + } -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CLEANUP_CLEANUP_H_ diff --git a/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h index 2783fcb..d493035 100644 --- a/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h +++ b/CAPI/cpp/grpc/include/absl/cleanup/internal/cleanup.h @@ -24,77 +24,95 @@ #include "absl/base/thread_annotations.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace cleanup_internal { - -struct Tag {}; - -template -constexpr bool WasDeduced() { - return (std::is_same::value) && - (sizeof...(Args) == 0); -} - -template -constexpr bool ReturnsVoid() { - return (std::is_same, void>::value); -} - -template -class Storage { - public: - Storage() = delete; - - explicit Storage(Callback callback) { - // Placement-new into a character buffer is used for eager destruction when - // the cleanup is invoked or cancelled. To ensure this optimizes well, the - // behavior is implemented locally instead of using an absl::optional. - ::new (GetCallbackBuffer()) Callback(std::move(callback)); - is_callback_engaged_ = true; - } - - Storage(Storage&& other) { - ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); - - ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); - is_callback_engaged_ = true; - - other.DestroyCallback(); - } - - Storage(const Storage& other) = delete; - - Storage& operator=(Storage&& other) = delete; - - Storage& operator=(const Storage& other) = delete; - - void* GetCallbackBuffer() { return static_cast(+callback_buffer_); } - - Callback& GetCallback() { - return *reinterpret_cast(GetCallbackBuffer()); - } - - bool IsCallbackEngaged() const { return is_callback_engaged_; } - - void DestroyCallback() { - is_callback_engaged_ = false; - GetCallback().~Callback(); - } - - void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS { - std::move(GetCallback())(); - } - - private: - bool is_callback_engaged_; - alignas(Callback) char callback_buffer_[sizeof(Callback)]; -}; - -} // namespace cleanup_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace cleanup_internal + { + + struct Tag + { + }; + + template + constexpr bool WasDeduced() + { + return (std::is_same::value) && + (sizeof...(Args) == 0); + } + + template + constexpr bool ReturnsVoid() + { + return (std::is_same, void>::value); + } + + template + class Storage + { + public: + Storage() = delete; + + explicit Storage(Callback callback) + { + // Placement-new into a character buffer is used for eager destruction when + // the cleanup is invoked or cancelled. To ensure this optimizes well, the + // behavior is implemented locally instead of using an absl::optional. 
+ ::new (GetCallbackBuffer()) Callback(std::move(callback)); + is_callback_engaged_ = true; + } + + Storage(Storage&& other) + { + ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); + + ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); + is_callback_engaged_ = true; + + other.DestroyCallback(); + } + + Storage(const Storage& other) = delete; + + Storage& operator=(Storage&& other) = delete; + + Storage& operator=(const Storage& other) = delete; + + void* GetCallbackBuffer() + { + return static_cast(+callback_buffer_); + } + + Callback& GetCallback() + { + return *reinterpret_cast(GetCallbackBuffer()); + } + + bool IsCallbackEngaged() const + { + return is_callback_engaged_; + } + + void DestroyCallback() + { + is_callback_engaged_ = false; + GetCallback().~Callback(); + } + + void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS + { + std::move(GetCallback())(); + } + + private: + bool is_callback_engaged_; + alignas(Callback) char callback_buffer_[sizeof(Callback)]; + }; + + } // namespace cleanup_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_map.h b/CAPI/cpp/grpc/include/absl/container/btree_map.h index 286817f..2de7106 100644 --- a/CAPI/cpp/grpc/include/absl/container/btree_map.h +++ b/CAPI/cpp/grpc/include/absl/container/btree_map.h @@ -50,802 +50,820 @@ #ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ -#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace container_internal { - -template -struct map_params; - -} // namespace container_internal - -// absl::btree_map<> -// -// An `absl::btree_map` is an ordered associative container of -// unique keys and associated values designed to be a more efficient replacement -// for `std::map` (in most cases). -// -// Keys are sorted using an (optional) comparison function, which defaults to -// `std::less`. -// -// An `absl::btree_map` uses a default allocator of -// `std::allocator>` to allocate (and deallocate) -// nodes, and construct and destruct values within those nodes. You may -// instead specify a custom allocator `A` (which in turn requires specifying a -// custom comparator `C`) as in `absl::btree_map`. 
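// Illustrative usage sketch for absl::Cleanup / absl::MakeCleanup as declared
// in the cleanup.h hunks above; not part of the patch. The FILE-based resource
// and file path are hypothetical — any void-returning callable works.
#include <cstdio>

#include "absl/cleanup/cleanup.h"

inline bool CountLines(const char* path, int* lines)
{
    std::FILE* f = std::fopen(path, "r");
    if (f == nullptr)
        return false;
    // C++11 style: MakeCleanup deduces the callback type. With C++17 CTAD the
    // equivalent is `absl::Cleanup closer = [f] { std::fclose(f); };`.
    auto closer = absl::MakeCleanup([f] { std::fclose(f); });
    *lines = 0;
    for (int c; (c = std::fgetc(f)) != EOF;)
    {
        if (c == '\n')
            ++(*lines);
    }
    return true;  // `closer` runs on scope exit and closes the file.
}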
-// -template , - typename Alloc = std::allocator>> -class btree_map - : public container_internal::btree_map_container< - container_internal::btree>> { - using Base = typename btree_map::btree_map_container; - - public: - // Constructors and Assignment Operators - // - // A `btree_map` supports the same overload set as `std::map` - // for construction and assignment: - // - // * Default constructor - // - // absl::btree_map map1; - // - // * Initializer List constructor - // - // absl::btree_map map2 = - // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; - // - // * Copy constructor - // - // absl::btree_map map3(map2); - // - // * Copy assignment operator - // - // absl::btree_map map4; - // map4 = map3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::btree_map map5(std::move(map4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::btree_map map6; - // map6 = std::move(map5); - // - // * Range constructor - // - // std::vector> v = {{1, "a"}, {2, "b"}}; - // absl::btree_map map7(v.begin(), v.end()); - btree_map() {} - using Base::Base; - - // btree_map::begin() - // - // Returns an iterator to the beginning of the `btree_map`. - using Base::begin; - - // btree_map::cbegin() - // - // Returns a const iterator to the beginning of the `btree_map`. - using Base::cbegin; - - // btree_map::end() - // - // Returns an iterator to the end of the `btree_map`. - using Base::end; - - // btree_map::cend() - // - // Returns a const iterator to the end of the `btree_map`. - using Base::cend; - - // btree_map::empty() - // - // Returns whether or not the `btree_map` is empty. - using Base::empty; - - // btree_map::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `btree_map` under current memory constraints. This value can be thought - // of as the largest value of `std::distance(begin(), end())` for a - // `btree_map`. - using Base::max_size; - - // btree_map::size() - // - // Returns the number of elements currently within the `btree_map`. - using Base::size; - - // btree_map::clear() - // - // Removes all elements from the `btree_map`. Invalidates any references, - // pointers, or iterators referring to contained elements. - using Base::clear; - - // btree_map::erase() - // - // Erases elements within the `btree_map`. If an erase occurs, any references, - // pointers, or iterators are invalidated. - // Overloads are listed below. - // - // iterator erase(iterator position): - // iterator erase(const_iterator position): - // - // Erases the element at `position` of the `btree_map`, returning - // the iterator pointing to the element after the one that was erased - // (or end() if none exists). - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning - // the iterator pointing to the element after the interval that was erased - // (or end() if none exists). - // - // template size_type erase(const K& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // btree_map::insert() - // - // Inserts an element of the specified value into the `btree_map`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If an insertion - // occurs, any references, pointers, or iterators are invalidated. 
- // Overloads are listed below. - // - // std::pair insert(const value_type& value): - // - // Inserts a value into the `btree_map`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a bool denoting whether the insertion took place. - // - // std::pair insert(value_type&& value): - // - // Inserts a moveable value into the `btree_map`. Returns a pair - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a bool denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const value_type& value): - // iterator insert(const_iterator hint, value_type&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - using Base::insert; - - // btree_map::insert_or_assign() - // - // Inserts an element of the specified value into the `btree_map` provided - // that a value with the given key does not already exist, or replaces the - // corresponding mapped type with the forwarded `obj` argument if a key for - // that value already exists, returning an iterator pointing to the newly - // inserted element. Overloads are listed below. - // - // pair insert_or_assign(const key_type& k, M&& obj): - // pair insert_or_assign(key_type&& k, M&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `btree_map`. If the returned bool is true, insertion took place, and if - // it's false, assignment took place. - // - // iterator insert_or_assign(const_iterator hint, - // const key_type& k, M&& obj): - // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `btree_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - using Base::insert_or_assign; - - // btree_map::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_map`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. - // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. - using Base::emplace; - - // btree_map::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_map`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. 
- // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. - using Base::emplace_hint; - - // btree_map::try_emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_map`, provided that no element with the given key - // already exists. Unlike `emplace()`, if an element with the given key - // already exists, we guarantee that no element is constructed. - // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. - // - // Overloads are listed below. - // - // std::pair try_emplace(const key_type& k, Args&&... args): - // std::pair try_emplace(key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `btree_map`. - // - // iterator try_emplace(const_iterator hint, - // const key_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `btree_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - using Base::try_emplace; - - // btree_map::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // template node_type extract(const K& k): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `btree_map` - // does not contain an element with a matching key, this function returns an - // empty node handle. - // - // NOTE: when compiled in an earlier version of C++ than C++17, - // `node_type::key()` returns a const reference to the key instead of a - // mutable reference. We cannot safely return a mutable reference without - // std::launder (which is not available before C++17). - // - // NOTE: In this context, `node_type` refers to the C++17 concept of a - // move-only type that owns and provides access to the elements in associative - // containers (https://en.cppreference.com/w/cpp/container/node_handle). - // It does NOT refer to the data layout of the underlying btree. - using Base::extract; - - // btree_map::merge() - // - // Extracts elements from a given `source` btree_map into this - // `btree_map`. If the destination `btree_map` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // btree_map::swap(btree_map& other) - // - // Exchanges the contents of this `btree_map` with those of the `other` - // btree_map, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `btree_map` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - using Base::swap; - - // btree_map::at() - // - // Returns a reference to the mapped value of the element with key equivalent - // to the passed key. - using Base::at; - - // btree_map::contains() - // - // template bool contains(const K& key) const: - // - // Determines whether an element comparing equal to the given `key` exists - // within the `btree_map`, returning `true` if so or `false` otherwise. 
- // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::contains; - - // btree_map::count() - // - // template size_type count(const K& key) const: - // - // Returns the number of elements comparing equal to the given `key` within - // the `btree_map`. Note that this function will return either `1` or `0` - // since duplicate elements are not allowed within a `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::count; - - // btree_map::equal_range() - // - // Returns a half-open range [first, last), defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the `btree_map`. - using Base::equal_range; - - // btree_map::find() - // - // template iterator find(const K& key): - // template const_iterator find(const K& key) const: - // - // Finds an element with the passed `key` within the `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::find; - - // btree_map::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element with a key that is not less than `key` within the - // `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_map::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element with a key that is greater than `key` within the - // `btree_map`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::upper_bound; - - // btree_map::operator[]() - // - // Returns a reference to the value mapped to the passed key within the - // `btree_map`, performing an `insert()` if the key does not already - // exist. - // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. Otherwise iterators are not affected and references are not - // invalidated. Overloads are listed below. - // - // T& operator[](key_type&& key): - // T& operator[](const key_type& key): - // - // Inserts a value_type object constructed in-place if the element with the - // given key does not exist. - using Base::operator[]; - - // btree_map::get_allocator() - // - // Returns the allocator function associated with this `btree_map`. - using Base::get_allocator; - - // btree_map::key_comp(); - // - // Returns the key comparator associated with this `btree_map`. - using Base::key_comp; - - // btree_map::value_comp(); - // - // Returns the value comparator associated with this `btree_map`. - using Base::value_comp; -}; - -// absl::swap(absl::btree_map<>, absl::btree_map<>) -// -// Swaps the contents of two `absl::btree_map` containers. -template -void swap(btree_map &x, btree_map &y) { - return x.swap(y); -} - -// absl::erase_if(absl::btree_map<>, Pred) -// -// Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. 
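// Illustrative usage sketch for the btree_map members documented above and
// for absl::erase_if; not part of the patch. Keys and values are hypothetical.
#include <string>
#include <utility>

#include "absl/container/btree_map.h"

inline std::string LookupOrDefault()
{
    absl::btree_map<int, std::string> scores = {{1, "low"}, {2, "high"}};
    scores.insert_or_assign(3, "mid");  // inserts, or overwrites an existing key
    // erase_if removes every element matching the predicate and returns the
    // number of erased elements.
    absl::erase_if(scores, [](const std::pair<const int, std::string>& kv)
                   { return kv.first == 1; });
    auto it = scores.find(2);  // O(log n) lookup, heterogeneous if the comparator allows it
    return it != scores.end() ? it->second : std::string("none");
}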
-template -typename btree_map::size_type erase_if( - btree_map &map, Pred pred) { - return container_internal::btree_access::erase_if(map, std::move(pred)); -} - -// absl::btree_multimap -// -// An `absl::btree_multimap` is an ordered associative container of -// keys and associated values designed to be a more efficient replacement for -// `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap -// allows multiple elements with equivalent keys. -// -// Keys are sorted using an (optional) comparison function, which defaults to -// `std::less`. -// -// An `absl::btree_multimap` uses a default allocator of -// `std::allocator>` to allocate (and deallocate) -// nodes, and construct and destruct values within those nodes. You may -// instead specify a custom allocator `A` (which in turn requires specifying a -// custom comparator `C`) as in `absl::btree_multimap`. -// -template , - typename Alloc = std::allocator>> -class btree_multimap - : public container_internal::btree_multimap_container< - container_internal::btree>> { - using Base = typename btree_multimap::btree_multimap_container; - - public: - // Constructors and Assignment Operators - // - // A `btree_multimap` supports the same overload set as `std::multimap` - // for construction and assignment: - // - // * Default constructor - // - // absl::btree_multimap map1; - // - // * Initializer List constructor - // - // absl::btree_multimap map2 = - // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; - // - // * Copy constructor - // - // absl::btree_multimap map3(map2); - // - // * Copy assignment operator - // - // absl::btree_multimap map4; - // map4 = map3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::btree_multimap map5(std::move(map4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::btree_multimap map6; - // map6 = std::move(map5); - // - // * Range constructor - // - // std::vector> v = {{1, "a"}, {2, "b"}}; - // absl::btree_multimap map7(v.begin(), v.end()); - btree_multimap() {} - using Base::Base; - - // btree_multimap::begin() - // - // Returns an iterator to the beginning of the `btree_multimap`. - using Base::begin; - - // btree_multimap::cbegin() - // - // Returns a const iterator to the beginning of the `btree_multimap`. - using Base::cbegin; - - // btree_multimap::end() - // - // Returns an iterator to the end of the `btree_multimap`. - using Base::end; - - // btree_multimap::cend() - // - // Returns a const iterator to the end of the `btree_multimap`. - using Base::cend; - - // btree_multimap::empty() - // - // Returns whether or not the `btree_multimap` is empty. - using Base::empty; - - // btree_multimap::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `btree_multimap` under current memory constraints. This value can be - // thought of as the largest value of `std::distance(begin(), end())` for a - // `btree_multimap`. - using Base::max_size; - - // btree_multimap::size() - // - // Returns the number of elements currently within the `btree_multimap`. - using Base::size; - - // btree_multimap::clear() - // - // Removes all elements from the `btree_multimap`. Invalidates any references, - // pointers, or iterators referring to contained elements. - using Base::clear; - - // btree_multimap::erase() - // - // Erases elements within the `btree_multimap`. If an erase occurs, any - // references, pointers, or iterators are invalidated. - // Overloads are listed below. 
- // - // iterator erase(iterator position): - // iterator erase(const_iterator position): - // - // Erases the element at `position` of the `btree_multimap`, returning - // the iterator pointing to the element after the one that was erased - // (or end() if none exists). - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning - // the iterator pointing to the element after the interval that was erased - // (or end() if none exists). - // - // template size_type erase(const K& key): - // - // Erases the elements matching the key, if any exist, returning the - // number of elements erased. - using Base::erase; - - // btree_multimap::insert() - // - // Inserts an element of the specified value into the `btree_multimap`, - // returning an iterator pointing to the newly inserted element. - // Any references, pointers, or iterators are invalidated. Overloads are - // listed below. - // - // iterator insert(const value_type& value): - // - // Inserts a value into the `btree_multimap`, returning an iterator to the - // inserted element. - // - // iterator insert(value_type&& value): - // - // Inserts a moveable value into the `btree_multimap`, returning an iterator - // to the inserted element. - // - // iterator insert(const_iterator hint, const value_type& value): - // iterator insert(const_iterator hint, value_type&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - using Base::insert; - - // btree_multimap::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_multimap`. Any references, pointers, or iterators are - // invalidated. - using Base::emplace; - - // btree_multimap::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_multimap`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search. - // - // Any references, pointers, or iterators are invalidated. - using Base::emplace_hint; - - // btree_multimap::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // template node_type extract(const K& k): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `btree_multimap` - // does not contain an element with a matching key, this function returns an - // empty node handle. - // - // NOTE: when compiled in an earlier version of C++ than C++17, - // `node_type::key()` returns a const reference to the key instead of a - // mutable reference. We cannot safely return a mutable reference without - // std::launder (which is not available before C++17). 
- // - // NOTE: In this context, `node_type` refers to the C++17 concept of a - // move-only type that owns and provides access to the elements in associative - // containers (https://en.cppreference.com/w/cpp/container/node_handle). - // It does NOT refer to the data layout of the underlying btree. - using Base::extract; - - // btree_multimap::merge() - // - // Extracts all elements from a given `source` btree_multimap into this - // `btree_multimap`. - using Base::merge; - - // btree_multimap::swap(btree_multimap& other) - // - // Exchanges the contents of this `btree_multimap` with those of the `other` - // btree_multimap, avoiding invocation of any move, copy, or swap operations - // on individual elements. - // - // All iterators and references on the `btree_multimap` remain valid, - // excepting for the past-the-end iterator, which is invalidated. - using Base::swap; - - // btree_multimap::contains() - // - // template bool contains(const K& key) const: - // - // Determines whether an element comparing equal to the given `key` exists - // within the `btree_multimap`, returning `true` if so or `false` otherwise. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::contains; - - // btree_multimap::count() - // - // template size_type count(const K& key) const: - // - // Returns the number of elements comparing equal to the given `key` within - // the `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::count; - - // btree_multimap::equal_range() - // - // Returns a half-open range [first, last), defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `btree_multimap`. - using Base::equal_range; - - // btree_multimap::find() - // - // template iterator find(const K& key): - // template const_iterator find(const K& key) const: - // - // Finds an element with the passed `key` within the `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::find; - - // btree_multimap::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element with a key that is not less than `key` within the - // `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_multimap::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element with a key that is greater than `key` within the - // `btree_multimap`. - // - // Supports heterogeneous lookup, provided that the map has a compatible - // heterogeneous comparator. - using Base::upper_bound; - - // btree_multimap::get_allocator() - // - // Returns the allocator function associated with this `btree_multimap`. - using Base::get_allocator; - - // btree_multimap::key_comp(); - // - // Returns the key comparator associated with this `btree_multimap`. - using Base::key_comp; - - // btree_multimap::value_comp(); - // - // Returns the value comparator associated with this `btree_multimap`. - using Base::value_comp; -}; - -// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>) -// -// Swaps the contents of two `absl::btree_multimap` containers. 
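// Illustrative sketch of the btree_multimap behavior documented above:
// equivalent keys are allowed and equal_range() yields the whole run of them.
// Not part of the patch; tag names and values are hypothetical.
#include <string>
#include <vector>

#include "absl/container/btree_map.h"

inline std::vector<int> ValuesForTag(const absl::btree_multimap<std::string, int>& tags, const std::string& tag)
{
    std::vector<int> out;
    // Half-open range [first, last) of all elements whose key compares equal
    // to `tag`; keys are kept in sorted order by the comparator.
    auto range = tags.equal_range(tag);
    for (auto it = range.first; it != range.second; ++it)
    {
        out.push_back(it->second);
    }
    return out;
}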
-template -void swap(btree_multimap &x, btree_multimap &y) { - return x.swap(y); -} - -// absl::erase_if(absl::btree_multimap<>, Pred) -// -// Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. -template -typename btree_multimap::size_type erase_if( - btree_multimap &map, Pred pred) { - return container_internal::btree_access::erase_if(map, std::move(pred)); -} - -namespace container_internal { - -// A parameters structure for holding the type parameters for a btree_map. -// Compare and Alloc should be nothrow copy-constructible. -template -struct map_params : common_params> { - using super_type = typename map_params::common_params; - using mapped_type = Data; - // This type allows us to move keys when it is safe to do so. It is safe - // for maps in which value_type and mutable_value_type are layout compatible. - using slot_policy = typename super_type::slot_policy; - using slot_type = typename super_type::slot_type; - using value_type = typename super_type::value_type; - using init_type = typename super_type::init_type; - - template - static auto key(const V &value) -> decltype(value.first) { - return value.first; - } - static const Key &key(const slot_type *s) { return slot_policy::key(s); } - static const Key &key(slot_type *s) { return slot_policy::key(s); } - // For use in node handle. - static auto mutable_key(slot_type *s) - -> decltype(slot_policy::mutable_key(s)) { - return slot_policy::mutable_key(s); - } - static mapped_type &value(value_type *value) { return value->second; } -}; - -} // namespace container_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace container_internal + { + + template + struct map_params; + + } // namespace container_internal + + // absl::btree_map<> + // + // An `absl::btree_map` is an ordered associative container of + // unique keys and associated values designed to be a more efficient replacement + // for `std::map` (in most cases). + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_map` uses a default allocator of + // `std::allocator>` to allocate (and deallocate) + // nodes, and construct and destruct values within those nodes. You may + // instead specify a custom allocator `A` (which in turn requires specifying a + // custom comparator `C`) as in `absl::btree_map`. 
+ // + template, typename Alloc = std::allocator>> + class btree_map : public container_internal::btree_map_container>> + { + using Base = typename btree_map::btree_map_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_map` supports the same overload set as `std::map` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_map map1; + // + // * Initializer List constructor + // + // absl::btree_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_map map3(map2); + // + // * Copy assignment operator + // + // absl::btree_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_map map7(v.begin(), v.end()); + btree_map() + { + } + using Base::Base; + + // btree_map::begin() + // + // Returns an iterator to the beginning of the `btree_map`. + using Base::begin; + + // btree_map::cbegin() + // + // Returns a const iterator to the beginning of the `btree_map`. + using Base::cbegin; + + // btree_map::end() + // + // Returns an iterator to the end of the `btree_map`. + using Base::end; + + // btree_map::cend() + // + // Returns a const iterator to the end of the `btree_map`. + using Base::cend; + + // btree_map::empty() + // + // Returns whether or not the `btree_map` is empty. + using Base::empty; + + // btree_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_map`. + using Base::max_size; + + // btree_map::size() + // + // Returns the number of elements currently within the `btree_map`. + using Base::size; + + // btree_map::clear() + // + // Removes all elements from the `btree_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_map::erase() + // + // Erases elements within the `btree_map`. If an erase occurs, any references, + // pointers, or iterators are invalidated. + // Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_map`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // btree_map::insert() + // + // Inserts an element of the specified value into the `btree_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. 
+ // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_map::insert_or_assign() + // + // Inserts an element of the specified value into the `btree_map` provided + // that a value with the given key does not already exist, or replaces the + // corresponding mapped type with the forwarded `obj` argument if a key for + // that value already exists, returning an iterator pointing to the newly + // inserted element. Overloads are listed below. + // + // pair insert_or_assign(const key_type& k, M&& obj): + // pair insert_or_assign(key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map`. If the returned bool is true, insertion took place, and if + // it's false, assignment took place. + // + // iterator insert_or_assign(const_iterator hint, + // const key_type& k, M&& obj): + // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // btree_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. 
+ // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + // + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `btree_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::try_emplace; + + // btree_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_map` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_map::merge() + // + // Extracts elements from a given `source` btree_map into this + // `btree_map`. If the destination `btree_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // btree_map::swap(btree_map& other) + // + // Exchanges the contents of this `btree_map` with those of the `other` + // btree_map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // btree_map::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_map`, returning `true` if so or `false` otherwise. 
+ // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_map::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_map`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_map::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the `btree_map`. + using Base::equal_range; + + // btree_map::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_map::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_map::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_map`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `btree_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. Otherwise iterators are not affected and references are not + // invalidated. Overloads are listed below. + // + // T& operator[](key_type&& key): + // T& operator[](const key_type& key): + // + // Inserts a value_type object constructed in-place if the element with the + // given key does not exist. + using Base::operator[]; + + // btree_map::get_allocator() + // + // Returns the allocator function associated with this `btree_map`. + using Base::get_allocator; + + // btree_map::key_comp(); + // + // Returns the key comparator associated with this `btree_map`. + using Base::key_comp; + + // btree_map::value_comp(); + // + // Returns the value comparator associated with this `btree_map`. + using Base::value_comp; + }; + + // absl::swap(absl::btree_map<>, absl::btree_map<>) + // + // Swaps the contents of two `absl::btree_map` containers. + template + void swap(btree_map& x, btree_map& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_map<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. 
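+    // Example (an illustrative sketch added in review; the key and mapped types
+    // are arbitrary, not taken from the upstream header):
+    //
+    //   absl::btree_map<int, char> m = {{1, 'a'}, {2, 'b'}, {3, 'c'}};
+    //   // Drop the entries with odd keys; the call returns the number erased (2).
+    //   absl::erase_if(m, [](const std::pair<const int, char>& kv) {
+    //       return kv.first % 2 != 0;
+    //   });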
+ template + typename btree_map::size_type erase_if( + btree_map& map, Pred pred + ) + { + return container_internal::btree_access::erase_if(map, std::move(pred)); + } + + // absl::btree_multimap + // + // An `absl::btree_multimap` is an ordered associative container of + // keys and associated values designed to be a more efficient replacement for + // `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap + // allows multiple elements with equivalent keys. + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_multimap` uses a default allocator of + // `std::allocator>` to allocate (and deallocate) + // nodes, and construct and destruct values within those nodes. You may + // instead specify a custom allocator `A` (which in turn requires specifying a + // custom comparator `C`) as in `absl::btree_multimap`. + // + template, typename Alloc = std::allocator>> + class btree_multimap : public container_internal::btree_multimap_container>> + { + using Base = typename btree_multimap::btree_multimap_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multimap` supports the same overload set as `std::multimap` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multimap map1; + // + // * Initializer List constructor + // + // absl::btree_multimap map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::btree_multimap map3(map2); + // + // * Copy assignment operator + // + // absl::btree_multimap map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multimap map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multimap map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::btree_multimap map7(v.begin(), v.end()); + btree_multimap() + { + } + using Base::Base; + + // btree_multimap::begin() + // + // Returns an iterator to the beginning of the `btree_multimap`. + using Base::begin; + + // btree_multimap::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multimap`. + using Base::cbegin; + + // btree_multimap::end() + // + // Returns an iterator to the end of the `btree_multimap`. + using Base::end; + + // btree_multimap::cend() + // + // Returns a const iterator to the end of the `btree_multimap`. + using Base::cend; + + // btree_multimap::empty() + // + // Returns whether or not the `btree_multimap` is empty. + using Base::empty; + + // btree_multimap::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multimap` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multimap`. + using Base::max_size; + + // btree_multimap::size() + // + // Returns the number of elements currently within the `btree_multimap`. + using Base::size; + + // btree_multimap::clear() + // + // Removes all elements from the `btree_multimap`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multimap::erase() + // + // Erases elements within the `btree_multimap`. If an erase occurs, any + // references, pointers, or iterators are invalidated. + // Overloads are listed below. 
+ // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multimap`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multimap::insert() + // + // Inserts an element of the specified value into the `btree_multimap`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multimap`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multimap`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multimap::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multimap::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multimap`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multimap::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multimap` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). 
+ // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_multimap::merge() + // + // Extracts all elements from a given `source` btree_multimap into this + // `btree_multimap`. + using Base::merge; + + // btree_multimap::swap(btree_multimap& other) + // + // Exchanges the contents of this `btree_multimap` with those of the `other` + // btree_multimap, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multimap` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multimap::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multimap`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_multimap::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_multimap::equal_range() + // + // Returns a half-open range [first, last), defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multimap`. + using Base::equal_range; + + // btree_multimap::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_multimap::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element with a key that is not less than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multimap::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element with a key that is greater than `key` within the + // `btree_multimap`. + // + // Supports heterogeneous lookup, provided that the map has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multimap::get_allocator() + // + // Returns the allocator function associated with this `btree_multimap`. + using Base::get_allocator; + + // btree_multimap::key_comp(); + // + // Returns the key comparator associated with this `btree_multimap`. + using Base::key_comp; + + // btree_multimap::value_comp(); + // + // Returns the value comparator associated with this `btree_multimap`. + using Base::value_comp; + }; + + // absl::swap(absl::btree_multimap<>, absl::btree_multimap<>) + // + // Swaps the contents of two `absl::btree_multimap` containers. 
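+    // Example (an illustrative sketch, not part of the upstream header):
+    //
+    //   absl::btree_multimap<int, std::string> a = {{1, "x"}, {1, "y"}};
+    //   absl::btree_multimap<int, std::string> b = {{2, "z"}};
+    //   swap(a, b);  // found via ADL; a now holds {2, "z"}, b holds {1, "x"} and {1, "y"}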
+ template + void swap(btree_multimap& x, btree_multimap& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_multimap<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_multimap::size_type erase_if( + btree_multimap& map, Pred pred + ) + { + return container_internal::btree_access::erase_if(map, std::move(pred)); + } + + namespace container_internal + { + + // A parameters structure for holding the type parameters for a btree_map. + // Compare and Alloc should be nothrow copy-constructible. + template + struct map_params : common_params> + { + using super_type = typename map_params::common_params; + using mapped_type = Data; + // This type allows us to move keys when it is safe to do so. It is safe + // for maps in which value_type and mutable_value_type are layout compatible. + using slot_policy = typename super_type::slot_policy; + using slot_type = typename super_type::slot_type; + using value_type = typename super_type::value_type; + using init_type = typename super_type::init_type; + + template + static auto key(const V& value) -> decltype(value.first) + { + return value.first; + } + static const Key& key(const slot_type* s) + { + return slot_policy::key(s); + } + static const Key& key(slot_type* s) + { + return slot_policy::key(s); + } + // For use in node handle. + static auto mutable_key(slot_type* s) + -> decltype(slot_policy::mutable_key(s)) + { + return slot_policy::mutable_key(s); + } + static mapped_type& value(value_type* value) + { + return value->second; + } + }; + + } // namespace container_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_set.h b/CAPI/cpp/grpc/include/absl/container/btree_set.h index 695b09f..863000a 100644 --- a/CAPI/cpp/grpc/include/absl/container/btree_set.h +++ b/CAPI/cpp/grpc/include/absl/container/btree_set.h @@ -49,745 +49,768 @@ #ifndef ABSL_CONTAINER_BTREE_SET_H_ #define ABSL_CONTAINER_BTREE_SET_H_ -#include "absl/container/internal/btree.h" // IWYU pragma: export +#include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace container_internal { - -template -struct set_slot_policy; - -template -struct set_params; - -} // namespace container_internal - -// absl::btree_set<> -// -// An `absl::btree_set` is an ordered associative container of unique key -// values designed to be a more efficient replacement for `std::set` (in most -// cases). -// -// Keys are sorted using an (optional) comparison function, which defaults to -// `std::less`. -// -// An `absl::btree_set` uses a default allocator of `std::allocator` to -// allocate (and deallocate) nodes, and construct and destruct values within -// those nodes. You may instead specify a custom allocator `A` (which in turn -// requires specifying a custom comparator `C`) as in -// `absl::btree_set`. 
-// -template , - typename Alloc = std::allocator> -class btree_set - : public container_internal::btree_set_container< - container_internal::btree>> { - using Base = typename btree_set::btree_set_container; - - public: - // Constructors and Assignment Operators - // - // A `btree_set` supports the same overload set as `std::set` - // for construction and assignment: - // - // * Default constructor - // - // absl::btree_set set1; - // - // * Initializer List constructor - // - // absl::btree_set set2 = - // {{"huey"}, {"dewey"}, {"louie"},}; - // - // * Copy constructor - // - // absl::btree_set set3(set2); - // - // * Copy assignment operator - // - // absl::btree_set set4; - // set4 = set3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::btree_set set5(std::move(set4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::btree_set set6; - // set6 = std::move(set5); - // - // * Range constructor - // - // std::vector v = {"a", "b"}; - // absl::btree_set set7(v.begin(), v.end()); - btree_set() {} - using Base::Base; - - // btree_set::begin() - // - // Returns an iterator to the beginning of the `btree_set`. - using Base::begin; - - // btree_set::cbegin() - // - // Returns a const iterator to the beginning of the `btree_set`. - using Base::cbegin; - - // btree_set::end() - // - // Returns an iterator to the end of the `btree_set`. - using Base::end; - - // btree_set::cend() - // - // Returns a const iterator to the end of the `btree_set`. - using Base::cend; - - // btree_set::empty() - // - // Returns whether or not the `btree_set` is empty. - using Base::empty; - - // btree_set::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `btree_set` under current memory constraints. This value can be thought - // of as the largest value of `std::distance(begin(), end())` for a - // `btree_set`. - using Base::max_size; - - // btree_set::size() - // - // Returns the number of elements currently within the `btree_set`. - using Base::size; - - // btree_set::clear() - // - // Removes all elements from the `btree_set`. Invalidates any references, - // pointers, or iterators referring to contained elements. - using Base::clear; - - // btree_set::erase() - // - // Erases elements within the `btree_set`. Overloads are listed below. - // - // iterator erase(iterator position): - // iterator erase(const_iterator position): - // - // Erases the element at `position` of the `btree_set`, returning - // the iterator pointing to the element after the one that was erased - // (or end() if none exists). - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning - // the iterator pointing to the element after the interval that was erased - // (or end() if none exists). - // - // template size_type erase(const K& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // btree_set::insert() - // - // Inserts an element of the specified value into the `btree_set`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If an insertion - // occurs, any references, pointers, or iterators are invalidated. - // Overloads are listed below. 
- // - // std::pair insert(const value_type& value): - // - // Inserts a value into the `btree_set`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a bool denoting whether the insertion took place. - // - // std::pair insert(value_type&& value): - // - // Inserts a moveable value into the `btree_set`. Returns a pair - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a bool denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const value_type& value): - // iterator insert(const_iterator hint, value_type&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - using Base::insert; - - // btree_set::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_set`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. - using Base::emplace; - - // btree_set::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_set`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If an insertion occurs, any references, pointers, or iterators are - // invalidated. - using Base::emplace_hint; - - // btree_set::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // template node_type extract(const K& k): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `btree_set` - // does not contain an element with a matching key, this function returns an - // empty node handle. - // - // NOTE: In this context, `node_type` refers to the C++17 concept of a - // move-only type that owns and provides access to the elements in associative - // containers (https://en.cppreference.com/w/cpp/container/node_handle). - // It does NOT refer to the data layout of the underlying btree. - using Base::extract; - - // btree_set::merge() - // - // Extracts elements from a given `source` btree_set into this - // `btree_set`. 
If the destination `btree_set` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // btree_set::swap(btree_set& other) - // - // Exchanges the contents of this `btree_set` with those of the `other` - // btree_set, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `btree_set` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - using Base::swap; - - // btree_set::contains() - // - // template bool contains(const K& key) const: - // - // Determines whether an element comparing equal to the given `key` exists - // within the `btree_set`, returning `true` if so or `false` otherwise. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::contains; - - // btree_set::count() - // - // template size_type count(const K& key) const: - // - // Returns the number of elements comparing equal to the given `key` within - // the `btree_set`. Note that this function will return either `1` or `0` - // since duplicate elements are not allowed within a `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::count; - - // btree_set::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `btree_set`. - using Base::equal_range; - - // btree_set::find() - // - // template iterator find(const K& key): - // template const_iterator find(const K& key) const: - // - // Finds an element with the passed `key` within the `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::find; - - // btree_set::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element that is not less than `key` within the `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_set::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element that is greater than `key` within the `btree_set`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::upper_bound; - - // btree_set::get_allocator() - // - // Returns the allocator function associated with this `btree_set`. - using Base::get_allocator; - - // btree_set::key_comp(); - // - // Returns the key comparator associated with this `btree_set`. - using Base::key_comp; - - // btree_set::value_comp(); - // - // Returns the value comparator associated with this `btree_set`. The keys to - // sort the elements are the values themselves, therefore `value_comp` and its - // sibling member function `key_comp` are equivalent. - using Base::value_comp; -}; - -// absl::swap(absl::btree_set<>, absl::btree_set<>) -// -// Swaps the contents of two `absl::btree_set` containers. -template -void swap(btree_set &x, btree_set &y) { - return x.swap(y); -} - -// absl::erase_if(absl::btree_set<>, Pred) -// -// Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. 
-template -typename btree_set::size_type erase_if(btree_set &set, - Pred pred) { - return container_internal::btree_access::erase_if(set, std::move(pred)); -} - -// absl::btree_multiset<> -// -// An `absl::btree_multiset` is an ordered associative container of -// keys and associated values designed to be a more efficient replacement -// for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree -// multiset allows equivalent elements. -// -// Keys are sorted using an (optional) comparison function, which defaults to -// `std::less`. -// -// An `absl::btree_multiset` uses a default allocator of `std::allocator` -// to allocate (and deallocate) nodes, and construct and destruct values within -// those nodes. You may instead specify a custom allocator `A` (which in turn -// requires specifying a custom comparator `C`) as in -// `absl::btree_multiset`. -// -template , - typename Alloc = std::allocator> -class btree_multiset - : public container_internal::btree_multiset_container< - container_internal::btree>> { - using Base = typename btree_multiset::btree_multiset_container; - - public: - // Constructors and Assignment Operators - // - // A `btree_multiset` supports the same overload set as `std::set` - // for construction and assignment: - // - // * Default constructor - // - // absl::btree_multiset set1; - // - // * Initializer List constructor - // - // absl::btree_multiset set2 = - // {{"huey"}, {"dewey"}, {"louie"},}; - // - // * Copy constructor - // - // absl::btree_multiset set3(set2); - // - // * Copy assignment operator - // - // absl::btree_multiset set4; - // set4 = set3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::btree_multiset set5(std::move(set4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::btree_multiset set6; - // set6 = std::move(set5); - // - // * Range constructor - // - // std::vector v = {"a", "b"}; - // absl::btree_multiset set7(v.begin(), v.end()); - btree_multiset() {} - using Base::Base; - - // btree_multiset::begin() - // - // Returns an iterator to the beginning of the `btree_multiset`. - using Base::begin; - - // btree_multiset::cbegin() - // - // Returns a const iterator to the beginning of the `btree_multiset`. - using Base::cbegin; - - // btree_multiset::end() - // - // Returns an iterator to the end of the `btree_multiset`. - using Base::end; - - // btree_multiset::cend() - // - // Returns a const iterator to the end of the `btree_multiset`. - using Base::cend; - - // btree_multiset::empty() - // - // Returns whether or not the `btree_multiset` is empty. - using Base::empty; - - // btree_multiset::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `btree_multiset` under current memory constraints. This value can be - // thought of as the largest value of `std::distance(begin(), end())` for a - // `btree_multiset`. - using Base::max_size; - - // btree_multiset::size() - // - // Returns the number of elements currently within the `btree_multiset`. - using Base::size; - - // btree_multiset::clear() - // - // Removes all elements from the `btree_multiset`. Invalidates any references, - // pointers, or iterators referring to contained elements. - using Base::clear; - - // btree_multiset::erase() - // - // Erases elements within the `btree_multiset`. Overloads are listed below. 
- // - // iterator erase(iterator position): - // iterator erase(const_iterator position): - // - // Erases the element at `position` of the `btree_multiset`, returning - // the iterator pointing to the element after the one that was erased - // (or end() if none exists). - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning - // the iterator pointing to the element after the interval that was erased - // (or end() if none exists). - // - // template size_type erase(const K& key): - // - // Erases the elements matching the key, if any exist, returning the - // number of elements erased. - using Base::erase; - - // btree_multiset::insert() - // - // Inserts an element of the specified value into the `btree_multiset`, - // returning an iterator pointing to the newly inserted element. - // Any references, pointers, or iterators are invalidated. Overloads are - // listed below. - // - // iterator insert(const value_type& value): - // - // Inserts a value into the `btree_multiset`, returning an iterator to the - // inserted element. - // - // iterator insert(value_type&& value): - // - // Inserts a moveable value into the `btree_multiset`, returning an iterator - // to the inserted element. - // - // iterator insert(const_iterator hint, const value_type& value): - // iterator insert(const_iterator hint, value_type&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - using Base::insert; - - // btree_multiset::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_multiset`. Any references, pointers, or iterators are - // invalidated. - using Base::emplace; - - // btree_multiset::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `btree_multiset`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search. - // - // Any references, pointers, or iterators are invalidated. - using Base::emplace_hint; - - // btree_multiset::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // template node_type extract(const K& k): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `btree_multiset` - // does not contain an element with a matching key, this function returns an - // empty node handle. - // - // NOTE: In this context, `node_type` refers to the C++17 concept of a - // move-only type that owns and provides access to the elements in associative - // containers (https://en.cppreference.com/w/cpp/container/node_handle). - // It does NOT refer to the data layout of the underlying btree. 
- using Base::extract; - - // btree_multiset::merge() - // - // Extracts all elements from a given `source` btree_multiset into this - // `btree_multiset`. - using Base::merge; - - // btree_multiset::swap(btree_multiset& other) - // - // Exchanges the contents of this `btree_multiset` with those of the `other` - // btree_multiset, avoiding invocation of any move, copy, or swap operations - // on individual elements. - // - // All iterators and references on the `btree_multiset` remain valid, - // excepting for the past-the-end iterator, which is invalidated. - using Base::swap; - - // btree_multiset::contains() - // - // template bool contains(const K& key) const: - // - // Determines whether an element comparing equal to the given `key` exists - // within the `btree_multiset`, returning `true` if so or `false` otherwise. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::contains; - - // btree_multiset::count() - // - // template size_type count(const K& key) const: - // - // Returns the number of elements comparing equal to the given `key` within - // the `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::count; - - // btree_multiset::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `btree_multiset`. - using Base::equal_range; - - // btree_multiset::find() - // - // template iterator find(const K& key): - // template const_iterator find(const K& key) const: - // - // Finds an element with the passed `key` within the `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::find; - - // btree_multiset::lower_bound() - // - // template iterator lower_bound(const K& key): - // template const_iterator lower_bound(const K& key) const: - // - // Finds the first element that is not less than `key` within the - // `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::lower_bound; - - // btree_multiset::upper_bound() - // - // template iterator upper_bound(const K& key): - // template const_iterator upper_bound(const K& key) const: - // - // Finds the first element that is greater than `key` within the - // `btree_multiset`. - // - // Supports heterogeneous lookup, provided that the set has a compatible - // heterogeneous comparator. - using Base::upper_bound; - - // btree_multiset::get_allocator() - // - // Returns the allocator function associated with this `btree_multiset`. - using Base::get_allocator; - - // btree_multiset::key_comp(); - // - // Returns the key comparator associated with this `btree_multiset`. - using Base::key_comp; - - // btree_multiset::value_comp(); - // - // Returns the value comparator associated with this `btree_multiset`. The - // keys to sort the elements are the values themselves, therefore `value_comp` - // and its sibling member function `key_comp` are equivalent. - using Base::value_comp; -}; - -// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) -// -// Swaps the contents of two `absl::btree_multiset` containers. 
-template -void swap(btree_multiset &x, btree_multiset &y) { - return x.swap(y); -} - -// absl::erase_if(absl::btree_multiset<>, Pred) -// -// Erases all elements that satisfy the predicate pred from the container. -// Returns the number of erased elements. -template -typename btree_multiset::size_type erase_if( - btree_multiset & set, Pred pred) { - return container_internal::btree_access::erase_if(set, std::move(pred)); -} - -namespace container_internal { - -// This type implements the necessary functions from the -// absl::container_internal::slot_type interface for btree_(multi)set. -template -struct set_slot_policy { - using slot_type = Key; - using value_type = Key; - using mutable_value_type = Key; - - static value_type &element(slot_type *slot) { return *slot; } - static const value_type &element(const slot_type *slot) { return *slot; } - - template - static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, std::move(*other)); - } - - template - static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) { - absl::allocator_traits::construct(*alloc, slot, *other); - } - - template - static void destroy(Alloc *alloc, slot_type *slot) { - absl::allocator_traits::destroy(*alloc, slot); - } - - template - static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - construct(alloc, new_slot, old_slot); - destroy(alloc, old_slot); - } -}; - -// A parameters structure for holding the type parameters for a btree_set. -// Compare and Alloc should be nothrow copy-constructible. -template -struct set_params : common_params> { - using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; - - template - static const V &key(const V &value) { - return value; - } - static const Key &key(const slot_type *slot) { return *slot; } - static const Key &key(slot_type *slot) { return *slot; } -}; - -} // namespace container_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace container_internal + { + + template + struct set_slot_policy; + + template + struct set_params; + + } // namespace container_internal + + // absl::btree_set<> + // + // An `absl::btree_set` is an ordered associative container of unique key + // values designed to be a more efficient replacement for `std::set` (in most + // cases). + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_set` uses a default allocator of `std::allocator` to + // allocate (and deallocate) nodes, and construct and destruct values within + // those nodes. You may instead specify a custom allocator `A` (which in turn + // requires specifying a custom comparator `C`) as in + // `absl::btree_set`. 
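+    // A brief usage sketch added for this review (names are illustrative and
+    // not taken from the upstream header):
+    //
+    //   absl::btree_set<std::string> names = {"dewey", "huey"};
+    //   names.insert("louie");                         // ordered, unique keys
+    //   const bool has_huey = names.contains("huey");  // true
+    //   names.erase("dewey");                          // erase by key; returns 0 or 1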
+ // + template, typename Alloc = std::allocator> + class btree_set : public container_internal::btree_set_container>> + { + using Base = typename btree_set::btree_set_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_set` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_set set1; + // + // * Initializer List constructor + // + // absl::btree_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_set set3(set2); + // + // * Copy assignment operator + // + // absl::btree_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_set set7(v.begin(), v.end()); + btree_set() + { + } + using Base::Base; + + // btree_set::begin() + // + // Returns an iterator to the beginning of the `btree_set`. + using Base::begin; + + // btree_set::cbegin() + // + // Returns a const iterator to the beginning of the `btree_set`. + using Base::cbegin; + + // btree_set::end() + // + // Returns an iterator to the end of the `btree_set`. + using Base::end; + + // btree_set::cend() + // + // Returns a const iterator to the end of the `btree_set`. + using Base::cend; + + // btree_set::empty() + // + // Returns whether or not the `btree_set` is empty. + using Base::empty; + + // btree_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_set` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `btree_set`. + using Base::max_size; + + // btree_set::size() + // + // Returns the number of elements currently within the `btree_set`. + using Base::size; + + // btree_set::clear() + // + // Removes all elements from the `btree_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_set::erase() + // + // Erases elements within the `btree_set`. Overloads are listed below. + // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_set`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // btree_set::insert() + // + // Inserts an element of the specified value into the `btree_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If an insertion + // occurs, any references, pointers, or iterators are invalidated. + // Overloads are listed below. + // + // std::pair insert(const value_type& value): + // + // Inserts a value into the `btree_set`. 
Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(value_type&& value): + // + // Inserts a moveable value into the `btree_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If an insertion occurs, any references, pointers, or iterators are + // invalidated. + using Base::emplace_hint; + + // btree_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. + using Base::extract; + + // btree_set::merge() + // + // Extracts elements from a given `source` btree_set into this + // `btree_set`. If the destination `btree_set` already contains an + // element with an equivalent key, that element is not extracted. 
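+        // Example (an illustrative sketch, not upstream documentation):
+        //
+        //   absl::btree_set<int> src = {1, 2, 3};
+        //   absl::btree_set<int> dst = {2, 4};
+        //   dst.merge(src);  // dst == {1, 2, 3, 4}; src keeps {2}, whose key already existed in dst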
+ using Base::merge; + + // btree_set::swap(btree_set& other) + // + // Exchanges the contents of this `btree_set` with those of the `other` + // btree_set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `btree_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_set::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_set`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_set::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_set`. Note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_set`. + using Base::equal_range; + + // btree_set::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_set::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_set::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the `btree_set`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_set::get_allocator() + // + // Returns the allocator function associated with this `btree_set`. + using Base::get_allocator; + + // btree_set::key_comp(); + // + // Returns the key comparator associated with this `btree_set`. + using Base::key_comp; + + // btree_set::value_comp(); + // + // Returns the value comparator associated with this `btree_set`. The keys to + // sort the elements are the values themselves, therefore `value_comp` and its + // sibling member function `key_comp` are equivalent. + using Base::value_comp; + }; + + // absl::swap(absl::btree_set<>, absl::btree_set<>) + // + // Swaps the contents of two `absl::btree_set` containers. + template + void swap(btree_set& x, btree_set& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_set<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. 
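+    // For example (illustrative sketch):
+    //
+    //   absl::btree_set<int> s = {1, 2, 3, 4, 5};
+    //   auto erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
+    //   // erased == 2; s now contains {1, 3, 5}.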
+ template + typename btree_set::size_type erase_if(btree_set& set, Pred pred) + { + return container_internal::btree_access::erase_if(set, std::move(pred)); + } + + // absl::btree_multiset<> + // + // An `absl::btree_multiset` is an ordered associative container of + // keys and associated values designed to be a more efficient replacement + // for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree + // multiset allows equivalent elements. + // + // Keys are sorted using an (optional) comparison function, which defaults to + // `std::less`. + // + // An `absl::btree_multiset` uses a default allocator of `std::allocator` + // to allocate (and deallocate) nodes, and construct and destruct values within + // those nodes. You may instead specify a custom allocator `A` (which in turn + // requires specifying a custom comparator `C`) as in + // `absl::btree_multiset`. + // + template, typename Alloc = std::allocator> + class btree_multiset : public container_internal::btree_multiset_container>> + { + using Base = typename btree_multiset::btree_multiset_container; + + public: + // Constructors and Assignment Operators + // + // A `btree_multiset` supports the same overload set as `std::set` + // for construction and assignment: + // + // * Default constructor + // + // absl::btree_multiset set1; + // + // * Initializer List constructor + // + // absl::btree_multiset set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::btree_multiset set3(set2); + // + // * Copy assignment operator + // + // absl::btree_multiset set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::btree_multiset set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::btree_multiset set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::btree_multiset set7(v.begin(), v.end()); + btree_multiset() + { + } + using Base::Base; + + // btree_multiset::begin() + // + // Returns an iterator to the beginning of the `btree_multiset`. + using Base::begin; + + // btree_multiset::cbegin() + // + // Returns a const iterator to the beginning of the `btree_multiset`. + using Base::cbegin; + + // btree_multiset::end() + // + // Returns an iterator to the end of the `btree_multiset`. + using Base::end; + + // btree_multiset::cend() + // + // Returns a const iterator to the end of the `btree_multiset`. + using Base::cend; + + // btree_multiset::empty() + // + // Returns whether or not the `btree_multiset` is empty. + using Base::empty; + + // btree_multiset::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `btree_multiset` under current memory constraints. This value can be + // thought of as the largest value of `std::distance(begin(), end())` for a + // `btree_multiset`. + using Base::max_size; + + // btree_multiset::size() + // + // Returns the number of elements currently within the `btree_multiset`. + using Base::size; + + // btree_multiset::clear() + // + // Removes all elements from the `btree_multiset`. Invalidates any references, + // pointers, or iterators referring to contained elements. + using Base::clear; + + // btree_multiset::erase() + // + // Erases elements within the `btree_multiset`. Overloads are listed below. 
+ // + // iterator erase(iterator position): + // iterator erase(const_iterator position): + // + // Erases the element at `position` of the `btree_multiset`, returning + // the iterator pointing to the element after the one that was erased + // (or end() if none exists). + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning + // the iterator pointing to the element after the interval that was erased + // (or end() if none exists). + // + // template size_type erase(const K& key): + // + // Erases the elements matching the key, if any exist, returning the + // number of elements erased. + using Base::erase; + + // btree_multiset::insert() + // + // Inserts an element of the specified value into the `btree_multiset`, + // returning an iterator pointing to the newly inserted element. + // Any references, pointers, or iterators are invalidated. Overloads are + // listed below. + // + // iterator insert(const value_type& value): + // + // Inserts a value into the `btree_multiset`, returning an iterator to the + // inserted element. + // + // iterator insert(value_type&& value): + // + // Inserts a moveable value into the `btree_multiset`, returning an iterator + // to the inserted element. + // + // iterator insert(const_iterator hint, const value_type& value): + // iterator insert(const_iterator hint, value_type&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + using Base::insert; + + // btree_multiset::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`. Any references, pointers, or iterators are + // invalidated. + using Base::emplace; + + // btree_multiset::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `btree_multiset`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search. + // + // Any references, pointers, or iterators are invalidated. + using Base::emplace_hint; + + // btree_multiset::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // template node_type extract(const K& k): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `btree_multiset` + // does not contain an element with a matching key, this function returns an + // empty node handle. + // + // NOTE: In this context, `node_type` refers to the C++17 concept of a + // move-only type that owns and provides access to the elements in associative + // containers (https://en.cppreference.com/w/cpp/container/node_handle). + // It does NOT refer to the data layout of the underlying btree. 
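+            // A brief usage sketch (illustrative; assumes the C++17 node-handle
+            // interface, e.g. `node.empty()` and re-insertion via `insert()`):
+            //
+            //   absl::btree_multiset<int> ms = {1, 2, 2, 3};
+            //   absl::btree_multiset<int> other;
+            //   auto node = ms.extract(2);        // removes one element equal to 2
+            //   if (!node.empty()) {
+            //     other.insert(std::move(node));  // the node can be adopted elsewhere
+            //   }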
+ using Base::extract; + + // btree_multiset::merge() + // + // Extracts all elements from a given `source` btree_multiset into this + // `btree_multiset`. + using Base::merge; + + // btree_multiset::swap(btree_multiset& other) + // + // Exchanges the contents of this `btree_multiset` with those of the `other` + // btree_multiset, avoiding invocation of any move, copy, or swap operations + // on individual elements. + // + // All iterators and references on the `btree_multiset` remain valid, + // excepting for the past-the-end iterator, which is invalidated. + using Base::swap; + + // btree_multiset::contains() + // + // template bool contains(const K& key) const: + // + // Determines whether an element comparing equal to the given `key` exists + // within the `btree_multiset`, returning `true` if so or `false` otherwise. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::contains; + + // btree_multiset::count() + // + // template size_type count(const K& key) const: + // + // Returns the number of elements comparing equal to the given `key` within + // the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::count; + + // btree_multiset::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `btree_multiset`. + using Base::equal_range; + + // btree_multiset::find() + // + // template iterator find(const K& key): + // template const_iterator find(const K& key) const: + // + // Finds an element with the passed `key` within the `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::find; + + // btree_multiset::lower_bound() + // + // template iterator lower_bound(const K& key): + // template const_iterator lower_bound(const K& key) const: + // + // Finds the first element that is not less than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::lower_bound; + + // btree_multiset::upper_bound() + // + // template iterator upper_bound(const K& key): + // template const_iterator upper_bound(const K& key) const: + // + // Finds the first element that is greater than `key` within the + // `btree_multiset`. + // + // Supports heterogeneous lookup, provided that the set has a compatible + // heterogeneous comparator. + using Base::upper_bound; + + // btree_multiset::get_allocator() + // + // Returns the allocator function associated with this `btree_multiset`. + using Base::get_allocator; + + // btree_multiset::key_comp(); + // + // Returns the key comparator associated with this `btree_multiset`. + using Base::key_comp; + + // btree_multiset::value_comp(); + // + // Returns the value comparator associated with this `btree_multiset`. The + // keys to sort the elements are the values themselves, therefore `value_comp` + // and its sibling member function `key_comp` are equivalent. + using Base::value_comp; + }; + + // absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) + // + // Swaps the contents of two `absl::btree_multiset` containers. 
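+    // For example (illustrative; the free function simply forwards to
+    // `x.swap(y)` as defined below):
+    //
+    //   absl::btree_multiset<int> a = {1, 1, 2};
+    //   absl::btree_multiset<int> b = {3};
+    //   swap(a, b);  // found via ADL; afterwards a == {3}, b == {1, 1, 2}.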
+ template + void swap(btree_multiset& x, btree_multiset& y) + { + return x.swap(y); + } + + // absl::erase_if(absl::btree_multiset<>, Pred) + // + // Erases all elements that satisfy the predicate pred from the container. + // Returns the number of erased elements. + template + typename btree_multiset::size_type erase_if( + btree_multiset& set, Pred pred + ) + { + return container_internal::btree_access::erase_if(set, std::move(pred)); + } + + namespace container_internal + { + + // This type implements the necessary functions from the + // absl::container_internal::slot_type interface for btree_(multi)set. + template + struct set_slot_policy + { + using slot_type = Key; + using value_type = Key; + using mutable_value_type = Key; + + static value_type& element(slot_type* slot) + { + return *slot; + } + static const value_type& element(const slot_type* slot) + { + return *slot; + } + + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) + { + absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); + } + + template + static void construct(Alloc* alloc, slot_type* slot, slot_type* other) + { + absl::allocator_traits::construct(*alloc, slot, std::move(*other)); + } + + template + static void construct(Alloc* alloc, slot_type* slot, const slot_type* other) + { + absl::allocator_traits::construct(*alloc, slot, *other); + } + + template + static void destroy(Alloc* alloc, slot_type* slot) + { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) + { + construct(alloc, new_slot, old_slot); + destroy(alloc, old_slot); + } + }; + + // A parameters structure for holding the type parameters for a btree_set. + // Compare and Alloc should be nothrow copy-constructible. + template + struct set_params : common_params> + { + using value_type = Key; + using slot_type = typename set_params::common_params::slot_type; + + template + static const V& key(const V& value) + { + return value; + } + static const Key& key(const slot_type* slot) + { + return *slot; + } + static const Key& key(slot_type* slot) + { + return *slot; + } + }; + + } // namespace container_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/btree_test.h b/CAPI/cpp/grpc/include/absl/container/btree_test.h index 6249080..7739ea8 100644 --- a/CAPI/cpp/grpc/include/absl/container/btree_test.h +++ b/CAPI/cpp/grpc/include/absl/container/btree_test.h @@ -28,139 +28,188 @@ #include "absl/strings/cord.h" #include "absl/time/time.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// Like remove_const but propagates the removal through std::pair. -template -struct remove_pair_const { - using type = typename std::remove_const::type; -}; -template -struct remove_pair_const > { - using type = std::pair::type, - typename remove_pair_const::type>; -}; - -// Utility class to provide an accessor for a key given a value. The default -// behavior is to treat the value as a pair and return the first element. -template -struct KeyOfValue { - struct type { - const K& operator()(const V& p) const { return p.first; } - }; -}; - -// Partial specialization of KeyOfValue class for when the key and value are -// the same type such as in set<> and btree_set<>. 
-template -struct KeyOfValue { - struct type { - const K& operator()(const K& k) const { return k; } - }; -}; - -inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) { - assert(val <= maxval); - constexpr unsigned kBase = 64; // avoid integer division. - unsigned p = 15; - buf[p--] = 0; - while (maxval > 0) { - buf[p--] = ' ' + (val % kBase); - val /= kBase; - maxval /= kBase; - } - return buf + p + 1; -} - -template -struct Generator { - int maxval; - explicit Generator(int m) : maxval(m) {} - K operator()(int i) const { - assert(i <= maxval); - return K(i); - } -}; - -template <> -struct Generator { - int maxval; - explicit Generator(int m) : maxval(m) {} - absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } -}; - -template <> -struct Generator { - int maxval; - explicit Generator(int m) : maxval(m) {} - std::string operator()(int i) const { - char buf[16]; - return GenerateDigits(buf, i, maxval); - } -}; - -template <> -struct Generator { - int maxval; - explicit Generator(int m) : maxval(m) {} - Cord operator()(int i) const { - char buf[16]; - return Cord(GenerateDigits(buf, i, maxval)); - } -}; - -template -struct Generator > { - Generator::type> tgen; - Generator::type> ugen; - - explicit Generator(int m) : tgen(m), ugen(m) {} - std::pair operator()(int i) const { - return std::make_pair(tgen(i), ugen(i)); - } -}; - -// Generate n values for our tests and benchmarks. Value range is [0, maxval]. -inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) { - // NOTE: Some tests rely on generated numbers not changing between test runs. - // We use std::minstd_rand0 because it is well-defined, but don't use - // std::uniform_int_distribution because platforms use different algorithms. - std::minstd_rand0 rng(seed); - - std::vector values; - absl::flat_hash_set unique_values; - if (values.size() < n) { - for (int i = values.size(); i < n; i++) { - int value; - do { - value = static_cast(rng()) % (maxval + 1); - } while (!unique_values.insert(value).second); - - values.push_back(value); - } - } - return values; -} - -// Generates n values in the range [0, maxval]. -template -std::vector GenerateValuesWithSeed(int n, int maxval, int seed) { - const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); - Generator gen(maxval); - std::vector vec; - - vec.reserve(n); - for (int i = 0; i < n; i++) { - vec.push_back(gen(nums[i])); - } - - return vec; -} - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Like remove_const but propagates the removal through std::pair. + template + struct remove_pair_const + { + using type = typename std::remove_const::type; + }; + template + struct remove_pair_const> + { + using type = std::pair::type, typename remove_pair_const::type>; + }; + + // Utility class to provide an accessor for a key given a value. The default + // behavior is to treat the value as a pair and return the first element. + template + struct KeyOfValue + { + struct type + { + const K& operator()(const V& p) const + { + return p.first; + } + }; + }; + + // Partial specialization of KeyOfValue class for when the key and value are + // the same type such as in set<> and btree_set<>. 
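+        // Illustrative behavior (assuming the primary template takes <K, V> as in
+        // the original header, and this specialization is KeyOfValue<K, K>):
+        //
+        //   KeyOfValue<int, std::pair<const int, int>>::type{}(p)  -> p.first
+        //   KeyOfValue<int, int>::type{}(42)                       -> 42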
+ template + struct KeyOfValue + { + struct type + { + const K& operator()(const K& k) const + { + return k; + } + }; + }; + + inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) + { + assert(val <= maxval); + constexpr unsigned kBase = 64; // avoid integer division. + unsigned p = 15; + buf[p--] = 0; + while (maxval > 0) + { + buf[p--] = ' ' + (val % kBase); + val /= kBase; + maxval /= kBase; + } + return buf + p + 1; + } + + template + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + K operator()(int i) const + { + assert(i <= maxval); + return K(i); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + absl::Time operator()(int i) const + { + return absl::FromUnixMillis(i); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + std::string operator()(int i) const + { + char buf[16]; + return GenerateDigits(buf, i, maxval); + } + }; + + template<> + struct Generator + { + int maxval; + explicit Generator(int m) : + maxval(m) + { + } + Cord operator()(int i) const + { + char buf[16]; + return Cord(GenerateDigits(buf, i, maxval)); + } + }; + + template + struct Generator> + { + Generator::type> tgen; + Generator::type> ugen; + + explicit Generator(int m) : + tgen(m), + ugen(m) + { + } + std::pair operator()(int i) const + { + return std::make_pair(tgen(i), ugen(i)); + } + }; + + // Generate n values for our tests and benchmarks. Value range is [0, maxval]. + inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) + { + // NOTE: Some tests rely on generated numbers not changing between test runs. + // We use std::minstd_rand0 because it is well-defined, but don't use + // std::uniform_int_distribution because platforms use different algorithms. + std::minstd_rand0 rng(seed); + + std::vector values; + absl::flat_hash_set unique_values; + if (values.size() < n) + { + for (int i = values.size(); i < n; i++) + { + int value; + do + { + value = static_cast(rng()) % (maxval + 1); + } while (!unique_values.insert(value).second); + + values.push_back(value); + } + } + return values; + } + + // Generates n values in the range [0, maxval]. + template + std::vector GenerateValuesWithSeed(int n, int maxval, int seed) + { + const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); + Generator gen(maxval); + std::vector vec; + + vec.reserve(n); + for (int i = 0; i < n; i++) + { + vec.push_back(gen(nums[i])); + } + + return vec; + } + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/fixed_array.h b/CAPI/cpp/grpc/include/absl/container/fixed_array.h index 2aefae3..4100c92 100644 --- a/CAPI/cpp/grpc/include/absl/container/fixed_array.h +++ b/CAPI/cpp/grpc/include/absl/container/fixed_array.h @@ -50,480 +50,604 @@ #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -constexpr static auto kFixedArrayUseDefault = static_cast(-1); - -// ----------------------------------------------------------------------------- -// FixedArray -// ----------------------------------------------------------------------------- -// -// A `FixedArray` provides a run-time fixed-size array, allocating a small array -// inline for efficiency. 
-// -// Most users should not specify an `inline_elements` argument and let -// `FixedArray` automatically determine the number of elements -// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the -// `FixedArray` implementation will use inline storage for arrays with a -// length <= `inline_elements`. -// -// Note that a `FixedArray` constructed with a `size_type` argument will -// default-initialize its values by leaving trivially constructible types -// uninitialized (e.g. int, int[4], double), and others default-constructed. -// This matches the behavior of c-style arrays and `std::array`, but not -// `std::vector`. -template > -class FixedArray { - static_assert(!std::is_array::value || std::extent::value > 0, - "Arrays with unknown bounds cannot be used with FixedArray."); - - static constexpr size_t kInlineBytesDefault = 256; - - using AllocatorTraits = std::allocator_traits; - // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, - // but this seems to be mostly pedantic. - template - using EnableIfForwardIterator = absl::enable_if_t::iterator_category, - std::forward_iterator_tag>::value>; - static constexpr bool NoexceptCopyable() { - return std::is_nothrow_copy_constructible::value && - absl::allocator_is_nothrow::value; - } - static constexpr bool NoexceptMovable() { - return std::is_nothrow_move_constructible::value && - absl::allocator_is_nothrow::value; - } - static constexpr bool DefaultConstructorIsNonTrivial() { - return !absl::is_trivially_default_constructible::value; - } - - public: - using allocator_type = typename AllocatorTraits::allocator_type; - using value_type = typename AllocatorTraits::value_type; - using pointer = typename AllocatorTraits::pointer; - using const_pointer = typename AllocatorTraits::const_pointer; - using reference = value_type&; - using const_reference = const value_type&; - using size_type = typename AllocatorTraits::size_type; - using difference_type = typename AllocatorTraits::difference_type; - using iterator = pointer; - using const_iterator = const_pointer; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - - static constexpr size_type inline_elements = - (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) - : static_cast(N)); - - FixedArray( - const FixedArray& other, - const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable()) - : FixedArray(other.begin(), other.end(), a) {} - - FixedArray( - FixedArray&& other, - const allocator_type& a = allocator_type()) noexcept(NoexceptMovable()) - : FixedArray(std::make_move_iterator(other.begin()), - std::make_move_iterator(other.end()), a) {} - - // Creates an array object that can store `n` elements. - // Note that trivially constructible elements will be uninitialized. - explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) - : storage_(n, a) { - if (DefaultConstructorIsNonTrivial()) { - memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), - storage_.end()); - } - } - - // Creates an array initialized with `n` copies of `val`. - FixedArray(size_type n, const value_type& val, - const allocator_type& a = allocator_type()) - : storage_(n, a) { - memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), - storage_.end(), val); - } - - // Creates an array initialized with the size and contents of `init_list`. 
- FixedArray(std::initializer_list init_list, - const allocator_type& a = allocator_type()) - : FixedArray(init_list.begin(), init_list.end(), a) {} - - // Creates an array initialized with the elements from the input - // range. The array's size will always be `std::distance(first, last)`. - // REQUIRES: Iterator must be a forward_iterator or better. - template * = nullptr> - FixedArray(Iterator first, Iterator last, - const allocator_type& a = allocator_type()) - : storage_(std::distance(first, last), a) { - memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); - } - - ~FixedArray() noexcept { - for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { - AllocatorTraits::destroy(storage_.alloc(), cur); - } - } - - // Assignments are deleted because they break the invariant that the size of a - // `FixedArray` never changes. - void operator=(FixedArray&&) = delete; - void operator=(const FixedArray&) = delete; - - // FixedArray::size() - // - // Returns the length of the fixed array. - size_type size() const { return storage_.size(); } - - // FixedArray::max_size() - // - // Returns the largest possible value of `std::distance(begin(), end())` for a - // `FixedArray`. This is equivalent to the most possible addressable bytes - // over the number of bytes taken by T. - constexpr size_type max_size() const { - return (std::numeric_limits::max)() / sizeof(value_type); - } - - // FixedArray::empty() - // - // Returns whether or not the fixed array is empty. - bool empty() const { return size() == 0; } - - // FixedArray::memsize() - // - // Returns the memory size of the fixed array in bytes. - size_t memsize() const { return size() * sizeof(value_type); } - - // FixedArray::data() - // - // Returns a const T* pointer to elements of the `FixedArray`. This pointer - // can be used to access (but not modify) the contained elements. - const_pointer data() const { return AsValueType(storage_.begin()); } - - // Overload of FixedArray::data() to return a T* pointer to elements of the - // fixed array. This pointer can be used to access and modify the contained - // elements. - pointer data() { return AsValueType(storage_.begin()); } - - // FixedArray::operator[] - // - // Returns a reference the ith element of the fixed array. - // REQUIRES: 0 <= i < size() - reference operator[](size_type i) { - ABSL_HARDENING_ASSERT(i < size()); - return data()[i]; - } - - // Overload of FixedArray::operator()[] to return a const reference to the - // ith element of the fixed array. - // REQUIRES: 0 <= i < size() - const_reference operator[](size_type i) const { - ABSL_HARDENING_ASSERT(i < size()); - return data()[i]; - } - - // FixedArray::at - // - // Bounds-checked access. Returns a reference to the ith element of the fixed - // array, or throws std::out_of_range - reference at(size_type i) { - if (ABSL_PREDICT_FALSE(i >= size())) { - base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); - } - return data()[i]; - } - - // Overload of FixedArray::at() to return a const reference to the ith element - // of the fixed array. - const_reference at(size_type i) const { - if (ABSL_PREDICT_FALSE(i >= size())) { - base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); - } - return data()[i]; - } - - // FixedArray::front() - // - // Returns a reference to the first element of the fixed array. 
- reference front() { - ABSL_HARDENING_ASSERT(!empty()); - return data()[0]; - } - - // Overload of FixedArray::front() to return a reference to the first element - // of a fixed array of const values. - const_reference front() const { - ABSL_HARDENING_ASSERT(!empty()); - return data()[0]; - } - - // FixedArray::back() - // - // Returns a reference to the last element of the fixed array. - reference back() { - ABSL_HARDENING_ASSERT(!empty()); - return data()[size() - 1]; - } - - // Overload of FixedArray::back() to return a reference to the last element - // of a fixed array of const values. - const_reference back() const { - ABSL_HARDENING_ASSERT(!empty()); - return data()[size() - 1]; - } - - // FixedArray::begin() - // - // Returns an iterator to the beginning of the fixed array. - iterator begin() { return data(); } - - // Overload of FixedArray::begin() to return a const iterator to the - // beginning of the fixed array. - const_iterator begin() const { return data(); } - - // FixedArray::cbegin() - // - // Returns a const iterator to the beginning of the fixed array. - const_iterator cbegin() const { return begin(); } - - // FixedArray::end() - // - // Returns an iterator to the end of the fixed array. - iterator end() { return data() + size(); } - - // Overload of FixedArray::end() to return a const iterator to the end of the - // fixed array. - const_iterator end() const { return data() + size(); } - - // FixedArray::cend() - // - // Returns a const iterator to the end of the fixed array. - const_iterator cend() const { return end(); } - - // FixedArray::rbegin() - // - // Returns a reverse iterator from the end of the fixed array. - reverse_iterator rbegin() { return reverse_iterator(end()); } - - // Overload of FixedArray::rbegin() to return a const reverse iterator from - // the end of the fixed array. - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - - // FixedArray::crbegin() - // - // Returns a const reverse iterator from the end of the fixed array. - const_reverse_iterator crbegin() const { return rbegin(); } - - // FixedArray::rend() - // - // Returns a reverse iterator from the beginning of the fixed array. - reverse_iterator rend() { return reverse_iterator(begin()); } - - // Overload of FixedArray::rend() for returning a const reverse iterator - // from the beginning of the fixed array. - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - // FixedArray::crend() - // - // Returns a reverse iterator from the beginning of the fixed array. - const_reverse_iterator crend() const { return rend(); } - - // FixedArray::fill() - // - // Assigns the given `value` to all elements in the fixed array. - void fill(const value_type& val) { std::fill(begin(), end(), val); } - - // Relational operators. Equality operators are elementwise using - // `operator==`, while order operators order FixedArrays lexicographically. 
- friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) { - return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); - } - - friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) { - return !(lhs == rhs); - } - - friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) { - return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), - rhs.end()); - } - - friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) { - return rhs < lhs; - } - - friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) { - return !(rhs < lhs); - } - - friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) { - return !(lhs < rhs); - } - - template - friend H AbslHashValue(H h, const FixedArray& v) { - return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), - v.size()); - } - - private: - // StorageElement - // - // For FixedArrays with a C-style-array value_type, StorageElement is a POD - // wrapper struct called StorageElementWrapper that holds the value_type - // instance inside. This is needed for construction and destruction of the - // entire array regardless of how many dimensions it has. For all other cases, - // StorageElement is just an alias of value_type. - // - // Maintainer's Note: The simpler solution would be to simply wrap value_type - // in a struct whether it's an array or not. That causes some paranoid - // diagnostics to misfire, believing that 'data()' returns a pointer to a - // single element, rather than the packed array that it really is. - // e.g.: - // - // FixedArray buf(1); - // sprintf(buf.data(), "foo"); - // - // error: call to int __builtin___sprintf_chk(etc...) - // will always overflow destination buffer [-Werror] - // - template , - size_t InnerN = std::extent::value> - struct StorageElementWrapper { - InnerT array[InnerN]; - }; - - using StorageElement = - absl::conditional_t::value, - StorageElementWrapper, value_type>; - - static pointer AsValueType(pointer ptr) { return ptr; } - static pointer AsValueType(StorageElementWrapper* ptr) { - return std::addressof(ptr->array); - } - - static_assert(sizeof(StorageElement) == sizeof(value_type), ""); - static_assert(alignof(StorageElement) == alignof(value_type), ""); - - class NonEmptyInlinedStorage { - public: - StorageElement* data() { return reinterpret_cast(buff_); } - void AnnotateConstruct(size_type n); - void AnnotateDestruct(size_type n); +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + constexpr static auto kFixedArrayUseDefault = static_cast(-1); + + // ----------------------------------------------------------------------------- + // FixedArray + // ----------------------------------------------------------------------------- + // + // A `FixedArray` provides a run-time fixed-size array, allocating a small array + // inline for efficiency. + // + // Most users should not specify an `inline_elements` argument and let + // `FixedArray` automatically determine the number of elements + // to store inline based on `sizeof(T)`. If `inline_elements` is specified, the + // `FixedArray` implementation will use inline storage for arrays with a + // length <= `inline_elements`. + // + // Note that a `FixedArray` constructed with a `size_type` argument will + // default-initialize its values by leaving trivially constructible types + // uninitialized (e.g. int, int[4], double), and others default-constructed. + // This matches the behavior of c-style arrays and `std::array`, but not + // `std::vector`. 
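+    //
+    // Example (illustrative sketch):
+    //
+    //   // Ten ints, stored inline because they fit in the default inline budget;
+    //   // trivially constructible elements start out uninitialized.
+    //   absl::FixedArray<int> scratch(10);
+    //   scratch.fill(0);
+    //   scratch[3] = 7;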
+ template> + class FixedArray + { + static_assert(!std::is_array::value || std::extent::value > 0, "Arrays with unknown bounds cannot be used with FixedArray."); + + static constexpr size_t kInlineBytesDefault = 256; + + using AllocatorTraits = std::allocator_traits; + // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, + // but this seems to be mostly pedantic. + template + using EnableIfForwardIterator = absl::enable_if_t::iterator_category, + std::forward_iterator_tag>::value>; + static constexpr bool NoexceptCopyable() + { + return std::is_nothrow_copy_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool NoexceptMovable() + { + return std::is_nothrow_move_constructible::value && + absl::allocator_is_nothrow::value; + } + static constexpr bool DefaultConstructorIsNonTrivial() + { + return !absl::is_trivially_default_constructible::value; + } + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr size_type inline_elements = + (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) : static_cast(N)); + + FixedArray( + const FixedArray& other, + const allocator_type& a = allocator_type() + ) noexcept(NoexceptCopyable()) : + FixedArray(other.begin(), other.end(), a) + { + } + + FixedArray( + FixedArray&& other, + const allocator_type& a = allocator_type() + ) noexcept(NoexceptMovable()) : + FixedArray(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), a) + { + } + + // Creates an array object that can store `n` elements. + // Note that trivially constructible elements will be uninitialized. + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) : + storage_(n, a) + { + if (DefaultConstructorIsNonTrivial()) + { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end()); + } + } + + // Creates an array initialized with `n` copies of `val`. + FixedArray(size_type n, const value_type& val, const allocator_type& a = allocator_type()) : + storage_(n, a) + { + memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, const allocator_type& a = allocator_type()) : + FixedArray(init_list.begin(), init_list.end(), a) + { + } + + // Creates an array initialized with the elements from the input + // range. The array's size will always be `std::distance(first, last)`. + // REQUIRES: Iterator must be a forward_iterator or better. 
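+        //
+        // For example (illustrative):
+        //
+        //   std::vector<int> v = {1, 2, 3};
+        //   absl::FixedArray<int> arr(v.begin(), v.end());  // arr.size() == 3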
+ template* = nullptr> + FixedArray(Iterator first, Iterator last, const allocator_type& a = allocator_type()) : + storage_(std::distance(first, last), a) + { + memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); + } + + ~FixedArray() noexcept + { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) + { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } + + // Assignments are deleted because they break the invariant that the size of a + // `FixedArray` never changes. + void operator=(FixedArray&&) = delete; + void operator=(const FixedArray&) = delete; + + // FixedArray::size() + // + // Returns the length of the fixed array. + size_type size() const + { + return storage_.size(); + } + + // FixedArray::max_size() + // + // Returns the largest possible value of `std::distance(begin(), end())` for a + // `FixedArray`. This is equivalent to the most possible addressable bytes + // over the number of bytes taken by T. + constexpr size_type max_size() const + { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + // FixedArray::empty() + // + // Returns whether or not the fixed array is empty. + bool empty() const + { + return size() == 0; + } + + // FixedArray::memsize() + // + // Returns the memory size of the fixed array in bytes. + size_t memsize() const + { + return size() * sizeof(value_type); + } + + // FixedArray::data() + // + // Returns a const T* pointer to elements of the `FixedArray`. This pointer + // can be used to access (but not modify) the contained elements. + const_pointer data() const + { + return AsValueType(storage_.begin()); + } + + // Overload of FixedArray::data() to return a T* pointer to elements of the + // fixed array. This pointer can be used to access and modify the contained + // elements. + pointer data() + { + return AsValueType(storage_.begin()); + } + + // FixedArray::operator[] + // + // Returns a reference the ith element of the fixed array. + // REQUIRES: 0 <= i < size() + reference operator[](size_type i) + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of FixedArray::operator()[] to return a const reference to the + // ith element of the fixed array. + // REQUIRES: 0 <= i < size() + const_reference operator[](size_type i) const + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // FixedArray::at + // + // Bounds-checked access. Returns a reference to the ith element of the fixed + // array, or throws std::out_of_range + reference at(size_type i) + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // Overload of FixedArray::at() to return a const reference to the ith element + // of the fixed array. + const_reference at(size_type i) const + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); + } + return data()[i]; + } + + // FixedArray::front() + // + // Returns a reference to the first element of the fixed array. + reference front() + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of FixedArray::front() to return a reference to the first element + // of a fixed array of const values. + const_reference front() const + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // FixedArray::back() + // + // Returns a reference to the last element of the fixed array. 
+ reference back() + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of FixedArray::back() to return a reference to the last element + // of a fixed array of const values. + const_reference back() const + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // FixedArray::begin() + // + // Returns an iterator to the beginning of the fixed array. + iterator begin() + { + return data(); + } + + // Overload of FixedArray::begin() to return a const iterator to the + // beginning of the fixed array. + const_iterator begin() const + { + return data(); + } + + // FixedArray::cbegin() + // + // Returns a const iterator to the beginning of the fixed array. + const_iterator cbegin() const + { + return begin(); + } + + // FixedArray::end() + // + // Returns an iterator to the end of the fixed array. + iterator end() + { + return data() + size(); + } + + // Overload of FixedArray::end() to return a const iterator to the end of the + // fixed array. + const_iterator end() const + { + return data() + size(); + } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const + { + return end(); + } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() + { + return reverse_iterator(end()); + } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const + { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. + const_reverse_iterator crbegin() const + { + return rbegin(); + } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() + { + return reverse_iterator(begin()); + } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const + { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const + { + return rend(); + } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) + { + std::fill(begin(), end(), val); + } + + // Relational operators. Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. 
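+        //
+        // For example (illustrative):
+        //
+        //   absl::FixedArray<int> a = {1, 2, 3};
+        //   absl::FixedArray<int> b = {1, 2, 4};
+        //   assert(a != b);  // elementwise comparison
+        //   assert(a < b);   // lexicographic ordering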
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) + { + return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(lhs == rhs); + } + + friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) + { + return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) + { + return rhs < lhs; + } + + friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(rhs < lhs); + } + + friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) + { + return !(lhs < rhs); + } + + template + friend H AbslHashValue(H h, const FixedArray& v) + { + return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), v.size()); + } + + private: + // StorageElement + // + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. + // + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. + // e.g.: + // + // FixedArray buf(1); + // sprintf(buf.data(), "foo"); + // + // error: call to int __builtin___sprintf_chk(etc...) + // will always overflow destination buffer [-Werror] + // + template, size_t InnerN = std::extent::value> + struct StorageElementWrapper + { + InnerT array[InnerN]; + }; + + using StorageElement = + absl::conditional_t::value, StorageElementWrapper, value_type>; + + static pointer AsValueType(pointer ptr) + { + return ptr; + } + static pointer AsValueType(StorageElementWrapper* ptr) + { + return std::addressof(ptr->array); + } + + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + class NonEmptyInlinedStorage + { + public: + StorageElement* data() + { + return reinterpret_cast(buff_); + } + void AnnotateConstruct(size_type n); + void AnnotateDestruct(size_type n); #ifdef ABSL_HAVE_ADDRESS_SANITIZER - void* RedzoneBegin() { return &redzone_begin_; } - void* RedzoneEnd() { return &redzone_end_ + 1; } + void* RedzoneBegin() + { + return &redzone_begin_; + } + void* RedzoneEnd() + { + return &redzone_end_ + 1; + } #endif // ABSL_HAVE_ADDRESS_SANITIZER - private: - ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); - alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; - ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_); - }; - - class EmptyInlinedStorage { - public: - StorageElement* data() { return nullptr; } - void AnnotateConstruct(size_type) {} - void AnnotateDestruct(size_type) {} - }; - - using InlinedStorage = - absl::conditional_t; - - // Storage - // - // An instance of Storage manages the inline and out-of-line memory for - // instances of FixedArray. This guarantees that even when construction of - // individual elements fails in the FixedArray constructor body, the - // destructor for Storage will still be called and out-of-line memory will be - // properly deallocated. 
- // - class Storage : public InlinedStorage { - public: - Storage(size_type n, const allocator_type& a) - : size_alloc_(n, a), data_(InitializeData()) {} - - ~Storage() noexcept { - if (UsingInlinedStorage(size())) { - InlinedStorage::AnnotateDestruct(size()); - } else { - AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); - } - } - - size_type size() const { return size_alloc_.template get<0>(); } - StorageElement* begin() const { return data_; } - StorageElement* end() const { return begin() + size(); } - allocator_type& alloc() { return size_alloc_.template get<1>(); } - - private: - static bool UsingInlinedStorage(size_type n) { - return n <= inline_elements; - } - - StorageElement* InitializeData() { - if (UsingInlinedStorage(size())) { - InlinedStorage::AnnotateConstruct(size()); - return InlinedStorage::data(); - } else { - return reinterpret_cast( - AllocatorTraits::allocate(alloc(), size())); - } - } - - // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s - container_internal::CompressedTuple size_alloc_; - StorageElement* data_; - }; - - Storage storage_; -}; + private: + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; + ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_); + }; + + class EmptyInlinedStorage + { + public: + StorageElement* data() + { + return nullptr; + } + void AnnotateConstruct(size_type) + { + } + void AnnotateDestruct(size_type) + { + } + }; + + using InlinedStorage = + absl::conditional_t; + + // Storage + // + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. 
+ // + class Storage : public InlinedStorage + { + public: + Storage(size_type n, const allocator_type& a) : + size_alloc_(n, a), + data_(InitializeData()) + { + } + + ~Storage() noexcept + { + if (UsingInlinedStorage(size())) + { + InlinedStorage::AnnotateDestruct(size()); + } + else + { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } + } + + size_type size() const + { + return size_alloc_.template get<0>(); + } + StorageElement* begin() const + { + return data_; + } + StorageElement* end() const + { + return begin() + size(); + } + allocator_type& alloc() + { + return size_alloc_.template get<1>(); + } + + private: + static bool UsingInlinedStorage(size_type n) + { + return n <= inline_elements; + } + + StorageElement* InitializeData() + { + if (UsingInlinedStorage(size())) + { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); + } + else + { + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size()) + ); + } + } + + // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s + container_internal::CompressedTuple size_alloc_; + StorageElement* data_; + }; + + Storage storage_; + }; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL -template -constexpr size_t FixedArray::kInlineBytesDefault; + template + constexpr size_t FixedArray::kInlineBytesDefault; -template -constexpr typename FixedArray::size_type - FixedArray::inline_elements; + template + constexpr typename FixedArray::size_type + FixedArray::inline_elements; #endif -template -void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( - typename FixedArray::size_type n) { + template + void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( + typename FixedArray::size_type n + ) + { #ifdef ABSL_HAVE_ADDRESS_SANITIZER - if (!n) return; - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), - data() + n); - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), - RedzoneBegin()); -#endif // ABSL_HAVE_ADDRESS_SANITIZER - static_cast(n); // Mark used when not in asan mode -} + if (!n) + return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode + } -template -void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( - typename FixedArray::size_type n) { + template + void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( + typename FixedArray::size_type n + ) + { #ifdef ABSL_HAVE_ADDRESS_SANITIZER - if (!n) return; - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, - RedzoneEnd()); - ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), - data()); -#endif // ABSL_HAVE_ADDRESS_SANITIZER - static_cast(n); // Mark used when not in asan mode -} -ABSL_NAMESPACE_END + if (!n) + return; + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); + ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); +#endif // ABSL_HAVE_ADDRESS_SANITIZER + static_cast(n); // Mark used when not in asan mode + } + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FIXED_ARRAY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h index e6bdbd9..d2fbdbf 100644 --- a/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h +++ 
b/CAPI/cpp/grpc/include/absl/container/flat_hash_map.h @@ -39,575 +39,596 @@ #include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -template -struct FlatHashMapPolicy; -} // namespace container_internal - -// ----------------------------------------------------------------------------- -// absl::flat_hash_map -// ----------------------------------------------------------------------------- -// -// An `absl::flat_hash_map` is an unordered associative container which -// has been optimized for both speed and memory footprint in most common use -// cases. Its interface is similar to that of `std::unordered_map` with -// the following notable differences: -// -// * Requires keys that are CopyConstructible -// * Requires values that are MoveConstructible -// * Supports heterogeneous lookup, through `find()`, `operator[]()` and -// `insert()`, provided that the map is provided a compatible heterogeneous -// hashing function and equality operator. -// * Invalidates any references and pointers to elements within the table after -// `rehash()`. -// * Contains a `capacity()` member function indicating the number of element -// slots (open, deleted, and empty) within the hash map. -// * Returns `void` from the `erase(iterator)` overload. -// -// By default, `flat_hash_map` uses the `absl::Hash` hashing framework. -// All fundamental and Abseil types that support the `absl::Hash` framework have -// a compatible equality operator for comparing insertions into `flat_hash_map`. -// If your type is not yet supported by the `absl::Hash` framework, see -// absl/hash/hash.h for information on extending Abseil hashing to user-defined -// types. -// -// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// -// NOTE: A `flat_hash_map` stores its value types directly inside its -// implementation array to avoid memory indirection. Because a `flat_hash_map` -// is designed to move data when rehashed, map values will not retain pointer -// stability. If you require pointer stability, or if your values are large, -// consider using `absl::flat_hash_map>` instead. -// If your types are not moveable or you require pointer stability for keys, -// consider `absl::node_hash_map`. 
-// -// Example: -// -// // Create a flat hash map of three strings (that map to strings) -// absl::flat_hash_map ducks = -// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; -// -// // Insert a new element into the flat hash map -// ducks.insert({"d", "donald"}); -// -// // Force a rehash of the flat hash map -// ducks.rehash(0); -// -// // Find the element with the key "b" -// std::string search_key = "b"; -// auto result = ducks.find(search_key); -// if (result != ducks.end()) { -// std::cout << "Result: " << result->second << std::endl; -// } -template , - class Eq = absl::container_internal::hash_default_eq, - class Allocator = std::allocator>> -class flat_hash_map : public absl::container_internal::raw_hash_map< - absl::container_internal::FlatHashMapPolicy, - Hash, Eq, Allocator> { - using Base = typename flat_hash_map::raw_hash_map; - - public: - // Constructors and Assignment Operators - // - // A flat_hash_map supports the same overload set as `std::unordered_map` - // for construction and assignment: - // - // * Default constructor - // - // // No allocation for the table's elements is made. - // absl::flat_hash_map map1; - // - // * Initializer List constructor - // - // absl::flat_hash_map map2 = - // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; - // - // * Copy constructor - // - // absl::flat_hash_map map3(map2); - // - // * Copy assignment operator - // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_map map4; - // map4 = map3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::flat_hash_map map5(std::move(map4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::flat_hash_map map6; - // map6 = std::move(map5); - // - // * Range constructor - // - // std::vector> v = {{1, "a"}, {2, "b"}}; - // absl::flat_hash_map map7(v.begin(), v.end()); - flat_hash_map() {} - using Base::Base; - - // flat_hash_map::begin() - // - // Returns an iterator to the beginning of the `flat_hash_map`. - using Base::begin; - - // flat_hash_map::cbegin() - // - // Returns a const iterator to the beginning of the `flat_hash_map`. - using Base::cbegin; - - // flat_hash_map::cend() - // - // Returns a const iterator to the end of the `flat_hash_map`. - using Base::cend; - - // flat_hash_map::end() - // - // Returns an iterator to the end of the `flat_hash_map`. - using Base::end; - - // flat_hash_map::capacity() - // - // Returns the number of element slots (assigned, deleted, and empty) - // available within the `flat_hash_map`. - // - // NOTE: this member function is particular to `absl::flat_hash_map` and is - // not provided in the `std::unordered_map` API. - using Base::capacity; - - // flat_hash_map::empty() - // - // Returns whether or not the `flat_hash_map` is empty. - using Base::empty; - - // flat_hash_map::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `flat_hash_map` under current memory constraints. This value can be thought - // of the largest value of `std::distance(begin(), end())` for a - // `flat_hash_map`. - using Base::max_size; - - // flat_hash_map::size() - // - // Returns the number of elements currently within the `flat_hash_map`. - using Base::size; - - // flat_hash_map::clear() - // - // Removes all elements from the `flat_hash_map`. Invalidates any references, - // pointers, or iterators referring to contained elements. - // - // NOTE: this operation may shrink the underlying buffer. 
To avoid shrinking - // the underlying buffer call `erase(begin(), end())`. - using Base::clear; - - // flat_hash_map::erase() - // - // Erases elements within the `flat_hash_map`. Erasing does not trigger a - // rehash. Overloads are listed below. - // - // void erase(const_iterator pos): - // - // Erases the element at `position` of the `flat_hash_map`, returning - // `void`. - // - // NOTE: returning `void` in this case is different than that of STL - // containers in general and `std::unordered_map` in particular (which - // return an iterator to the element following the erased element). If that - // iterator is needed, simply post increment the iterator: - // - // map.erase(it++); - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning an - // iterator pointing to `last`. - // - // size_type erase(const key_type& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // flat_hash_map::insert() - // - // Inserts an element of the specified value into the `flat_hash_map`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If rehashing occurs - // due to the insertion, all iterators are invalidated. Overloads are listed - // below. - // - // std::pair insert(const init_type& value): - // - // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a bool denoting whether the insertion took place. - // - // std::pair insert(T&& value): - // std::pair insert(init_type&& value): - // - // Inserts a moveable value into the `flat_hash_map`. Returns a pair - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a bool denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const init_type& value): - // iterator insert(const_iterator hint, T&& value): - // iterator insert(const_iterator hint, init_type&& value); - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently, for `flat_hash_map` we guarantee the - // first match is inserted. - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently within the initializer list, for - // `flat_hash_map` we guarantee the first match is inserted. - using Base::insert; - - // flat_hash_map::insert_or_assign() - // - // Inserts an element of the specified value into the `flat_hash_map` provided - // that a value with the given key does not already exist, or replaces it with - // the element value if a key for that value already exists, returning an - // iterator pointing to the newly inserted element. 
If rehashing occurs due - // to the insertion, all existing iterators are invalidated. Overloads are - // listed below. - // - // pair insert_or_assign(const init_type& k, T&& obj): - // pair insert_or_assign(init_type&& k, T&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `flat_hash_map`. - // - // iterator insert_or_assign(const_iterator hint, - // const init_type& k, T&& obj): - // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `flat_hash_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - using Base::insert_or_assign; - - // flat_hash_map::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `flat_hash_map`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace; - - // flat_hash_map::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `flat_hash_map`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace_hint; - - // flat_hash_map::try_emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `flat_hash_map`, provided that no element with the given key - // already exists. Unlike `emplace()`, if an element with the given key - // already exists, we guarantee that no element is constructed. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - // Overloads are listed below. - // - // pair try_emplace(const key_type& k, Args&&... args): - // pair try_emplace(key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `flat_hash_map`. - // - // iterator try_emplace(const_iterator hint, - // const key_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `flat_hash_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - // - // All `try_emplace()` overloads make the same guarantees regarding rvalue - // arguments as `std::unordered_map::try_emplace()`, namely that these - // functions will not move from rvalue arguments if insertions do not happen. - using Base::try_emplace; - - // flat_hash_map::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. 
- // - // node_type extract(const_iterator position): - // - // Extracts the key,value pair of the element at the indicated position and - // returns a node handle owning that extracted data. - // - // node_type extract(const key_type& x): - // - // Extracts the key,value pair of the element with a key matching the passed - // key value and returns a node handle owning that extracted data. If the - // `flat_hash_map` does not contain an element with a matching key, this - // function returns an empty node handle. - // - // NOTE: when compiled in an earlier version of C++ than C++17, - // `node_type::key()` returns a const reference to the key instead of a - // mutable reference. We cannot safely return a mutable reference without - // std::launder (which is not available before C++17). - using Base::extract; - - // flat_hash_map::merge() - // - // Extracts elements from a given `source` flat hash map into this - // `flat_hash_map`. If the destination `flat_hash_map` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // flat_hash_map::swap(flat_hash_map& other) - // - // Exchanges the contents of this `flat_hash_map` with those of the `other` - // flat hash map, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `flat_hash_map` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - // - // `swap()` requires that the flat hash map's hashing and key equivalence - // functions be Swappable, and are exchanged using unqualified calls to - // non-member `swap()`. If the map's allocator has - // `std::allocator_traits::propagate_on_container_swap::value` - // set to `true`, the allocators are also exchanged using an unqualified call - // to non-member `swap()`; otherwise, the allocators are not swapped. - using Base::swap; - - // flat_hash_map::rehash(count) - // - // Rehashes the `flat_hash_map`, setting the number of slots to be at least - // the passed value. If the new number of slots increases the load factor more - // than the current maximum load factor - // (`count` < `size()` / `max_load_factor()`), then the new number of slots - // will be at least `size()` / `max_load_factor()`. - // - // To force a rehash, pass rehash(0). - // - // NOTE: unlike behavior in `std::unordered_map`, references are also - // invalidated upon a `rehash()`. - using Base::rehash; - - // flat_hash_map::reserve(count) - // - // Sets the number of slots in the `flat_hash_map` to the number needed to - // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. - using Base::reserve; - - // flat_hash_map::at() - // - // Returns a reference to the mapped value of the element with key equivalent - // to the passed key. - using Base::at; - - // flat_hash_map::contains() - // - // Determines whether an element with a key comparing equal to the given `key` - // exists within the `flat_hash_map`, returning `true` if so or `false` - // otherwise. - using Base::contains; - - // flat_hash_map::count(const Key& key) const - // - // Returns the number of elements with a key comparing equal to the given - // `key` within the `flat_hash_map`. note that this function will return - // either `1` or `0` since duplicate keys are not allowed within a - // `flat_hash_map`. 
- using Base::count; - - // flat_hash_map::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `flat_hash_map`. - using Base::equal_range; - - // flat_hash_map::find() - // - // Finds an element with the passed `key` within the `flat_hash_map`. - using Base::find; - - // flat_hash_map::operator[]() - // - // Returns a reference to the value mapped to the passed key within the - // `flat_hash_map`, performing an `insert()` if the key does not already - // exist. - // - // If an insertion occurs and results in a rehashing of the container, all - // iterators are invalidated. Otherwise iterators are not affected and - // references are not invalidated. Overloads are listed below. - // - // T& operator[](const Key& key): - // - // Inserts an init_type object constructed in-place if the element with the - // given key does not exist. - // - // T& operator[](Key&& key): - // - // Inserts an init_type object constructed in-place provided that an element - // with the given key does not exist. - using Base::operator[]; - - // flat_hash_map::bucket_count() - // - // Returns the number of "buckets" within the `flat_hash_map`. Note that - // because a flat hash map contains all elements within its internal storage, - // this value simply equals the current capacity of the `flat_hash_map`. - using Base::bucket_count; - - // flat_hash_map::load_factor() - // - // Returns the current load factor of the `flat_hash_map` (the average number - // of slots occupied with a value within the hash map). - using Base::load_factor; - - // flat_hash_map::max_load_factor() - // - // Manages the maximum load factor of the `flat_hash_map`. Overloads are - // listed below. - // - // float flat_hash_map::max_load_factor() - // - // Returns the current maximum load factor of the `flat_hash_map`. - // - // void flat_hash_map::max_load_factor(float ml) - // - // Sets the maximum load factor of the `flat_hash_map` to the passed value. - // - // NOTE: This overload is provided only for API compatibility with the STL; - // `flat_hash_map` will ignore any set load factor and manage its rehashing - // internally as an implementation detail. - using Base::max_load_factor; - - // flat_hash_map::get_allocator() - // - // Returns the allocator function associated with this `flat_hash_map`. - using Base::get_allocator; - - // flat_hash_map::hash_function() - // - // Returns the hashing function used to hash the keys within this - // `flat_hash_map`. - using Base::hash_function; - - // flat_hash_map::key_eq() - // - // Returns the function used for comparing keys equality. - using Base::key_eq; -}; - -// erase_if(flat_hash_map<>, Pred) -// -// Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. -template -typename flat_hash_map::size_type erase_if( - flat_hash_map& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); -} - -namespace container_internal { - -template -struct FlatHashMapPolicy { - using slot_policy = container_internal::map_slot_policy; - using slot_type = typename slot_policy::slot_type; - using key_type = K; - using mapped_type = V; - using init_type = std::pair; - - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { - slot_policy::construct(alloc, slot, std::forward(args)...); - } - - template - static void destroy(Allocator* alloc, slot_type* slot) { - slot_policy::destroy(alloc, slot); - } - - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - slot_policy::transfer(alloc, new_slot, old_slot); - } - - template - static decltype(absl::container_internal::DecomposePair( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return absl::container_internal::DecomposePair(std::forward(f), - std::forward(args)...); - } - - static size_t space_used(const slot_type*) { return 0; } - - static std::pair& element(slot_type* slot) { return slot->value; } - - static V& value(std::pair* kv) { return kv->second; } - static const V& value(const std::pair* kv) { return kv->second; } -}; - -} // namespace container_internal - -namespace container_algorithm_internal { - -// Specialization of trait in absl/algorithm/container.h -template -struct IsUnorderedContainer< - absl::flat_hash_map> : std::true_type {}; - -} // namespace container_algorithm_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct FlatHashMapPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::flat_hash_map + // ----------------------------------------------------------------------------- + // + // An `absl::flat_hash_map` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_map` with + // the following notable differences: + // + // * Requires keys that are CopyConstructible + // * Requires values that are MoveConstructible + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the map is provided a compatible heterogeneous + // hashing function and equality operator. + // * Invalidates any references and pointers to elements within the table after + // `rehash()`. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash map. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `flat_hash_map` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `flat_hash_map`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::flat_hash_map` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // NOTE: A `flat_hash_map` stores its value types directly inside its + // implementation array to avoid memory indirection. Because a `flat_hash_map` + // is designed to move data when rehashed, map values will not retain pointer + // stability. If you require pointer stability, or if your values are large, + // consider using `absl::flat_hash_map>` instead. + // If your types are not moveable or you require pointer stability for keys, + // consider `absl::node_hash_map`. 
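As a concrete illustration of the pointer-stability note above: wrapping the mapped value in `std::unique_ptr` keeps the pointee at a stable address even though the map's own slots move on rehash. A minimal sketch with a hypothetical `Widget` type:

#include <memory>
#include "absl/container/flat_hash_map.h"

struct Widget {
  int id;
};

void StableValueDemo() {
  absl::flat_hash_map<int, std::unique_ptr<Widget>> widgets;
  Widget* w =
      widgets.try_emplace(1, std::make_unique<Widget>()).first->second.get();
  widgets.reserve(1024);  // May rehash and move the slots; `w` still points at
                          // the same Widget, but references to the slots
                          // themselves would not survive this.
  (void)w;
}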
+ // + // Example: + // + // // Create a flat hash map of three strings (that map to strings) + // absl::flat_hash_map ducks = + // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; + // + // // Insert a new element into the flat hash map + // ducks.insert({"d", "donald"}); + // + // // Force a rehash of the flat hash map + // ducks.rehash(0); + // + // // Find the element with the key "b" + // std::string search_key = "b"; + // auto result = ducks.find(search_key); + // if (result != ducks.end()) { + // std::cout << "Result: " << result->second << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator>> + class flat_hash_map : public absl::container_internal::raw_hash_map, Hash, Eq, Allocator> + { + using Base = typename flat_hash_map::raw_hash_map; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_map supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_map map1; + // + // * Initializer List constructor + // + // absl::flat_hash_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_map map3(map2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::flat_hash_map map7(v.begin(), v.end()); + flat_hash_map() + { + } + using Base::Base; + + // flat_hash_map::begin() + // + // Returns an iterator to the beginning of the `flat_hash_map`. + using Base::begin; + + // flat_hash_map::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_map`. + using Base::cbegin; + + // flat_hash_map::cend() + // + // Returns a const iterator to the end of the `flat_hash_map`. + using Base::cend; + + // flat_hash_map::end() + // + // Returns an iterator to the end of the `flat_hash_map`. + using Base::end; + + // flat_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_map`. + // + // NOTE: this member function is particular to `absl::flat_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // flat_hash_map::empty() + // + // Returns whether or not the `flat_hash_map` is empty. + using Base::empty; + + // flat_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_map` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_map`. + using Base::max_size; + + // flat_hash_map::size() + // + // Returns the number of elements currently within the `flat_hash_map`. + using Base::size; + + // flat_hash_map::clear() + // + // Removes all elements from the `flat_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. 
To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_map::erase() + // + // Erases elements within the `flat_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_map`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_map` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // map.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_map::insert() + // + // Inserts an element of the specified value into the `flat_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `flat_hash_map`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `flat_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. 
If rehashing occurs due + // to the insertion, all existing iterators are invalidated. Overloads are + // listed below. + // + // pair insert_or_assign(const init_type& k, T&& obj): + // pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // flat_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // flat_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // pair try_emplace(const key_type& k, Args&&... args): + // pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `flat_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // flat_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `flat_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // flat_hash_map::merge() + // + // Extracts elements from a given `source` flat hash map into this + // `flat_hash_map`. If the destination `flat_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_map::swap(flat_hash_map& other) + // + // Exchanges the contents of this `flat_hash_map` with those of the `other` + // flat hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash map's hashing and key equivalence + // functions be Swappable, and are exchanged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_map::rehash(count) + // + // Rehashes the `flat_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_map`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_map::reserve(count) + // + // Sets the number of slots in the `flat_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // flat_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `flat_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // flat_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `flat_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `flat_hash_map`. 
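The `extract()`/node-handle workflow documented above can be sketched as a hand-off of one entry between two maps; the entry is erased from the source and re-inserted into the target without copying the key/value pair:

#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"

void MoveEntry() {
  absl::flat_hash_map<int, std::string> source = {{1, "one"}, {2, "two"}};
  absl::flat_hash_map<int, std::string> target;
  auto node = source.extract(1);     // Erases the entry from `source`.
  if (!node.empty()) {
    target.insert(std::move(node));  // Re-inserts the extracted data.
  }
}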
+ using Base::count; + + // flat_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_map`. + using Base::equal_range; + + // flat_hash_map::find() + // + // Finds an element with the passed `key` within the `flat_hash_map`. + using Base::find; + + // flat_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `flat_hash_map`, performing an `insert()` if the key does not already + // exist. + // + // If an insertion occurs and results in a rehashing of the container, all + // iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // flat_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_map`. Note that + // because a flat hash map contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_map`. + using Base::bucket_count; + + // flat_hash_map::load_factor() + // + // Returns the current load factor of the `flat_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // flat_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_map`. Overloads are + // listed below. + // + // float flat_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_map`. + // + // void flat_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_map::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_map`. + using Base::get_allocator; + + // flat_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_map`. + using Base::hash_function; + + // flat_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(flat_hash_map<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename flat_hash_map::size_type erase_if( + flat_hash_map& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct FlatHashMapPolicy + { + using slot_policy = container_internal::map_slot_policy; + using slot_type = typename slot_policy::slot_type; + using key_type = K; + using mapped_type = V; + using init_type = std::pair; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) + { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + slot_policy::destroy(alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) + { + slot_policy::transfer(alloc, new_slot, old_slot); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... args) + { + return absl::container_internal::DecomposePair(std::forward(f), std::forward(args)...); + } + + static size_t space_used(const slot_type*) + { + return 0; + } + + static std::pair& element(slot_type* slot) + { + return slot->value; + } + + static V& value(std::pair* kv) + { + return kv->second; + } + static const V& value(const std::pair* kv) + { + return kv->second; + } + }; + + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer< + absl::flat_hash_map> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h index 4938c70..15f96ad 100644 --- a/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h +++ b/CAPI/cpp/grpc/include/absl/container/flat_hash_set.h @@ -36,475 +36,492 @@ #include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export -#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export +#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -template -struct FlatHashSetPolicy; -} // namespace container_internal - -// ----------------------------------------------------------------------------- -// absl::flat_hash_set -// ----------------------------------------------------------------------------- -// -// An `absl::flat_hash_set` is an unordered associative container which has -// been optimized for both speed and memory footprint in most common use cases. -// Its interface is similar to that of `std::unordered_set` with the -// following notable differences: -// -// * Requires keys that are CopyConstructible -// * Supports heterogeneous lookup, through `find()` and `insert()`, provided -// that the set is provided a compatible heterogeneous hashing function and -// equality operator. -// * Invalidates any references and pointers to elements within the table after -// `rehash()`. -// * Contains a `capacity()` member function indicating the number of element -// slots (open, deleted, and empty) within the hash set. -// * Returns `void` from the `erase(iterator)` overload. -// -// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All -// fundamental and Abseil types that support the `absl::Hash` framework have a -// compatible equality operator for comparing insertions into `flat_hash_set`. -// If your type is not yet supported by the `absl::Hash` framework, see -// absl/hash/hash.h for information on extending Abseil hashing to user-defined -// types. -// -// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded -// libraries (e.g. 
.dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. -// -// NOTE: A `flat_hash_set` stores its keys directly inside its implementation -// array to avoid memory indirection. Because a `flat_hash_set` is designed to -// move data when rehashed, set keys will not retain pointer stability. If you -// require pointer stability, consider using -// `absl::flat_hash_set>`. If your type is not moveable and -// you require pointer stability, consider `absl::node_hash_set` instead. -// -// Example: -// -// // Create a flat hash set of three strings -// absl::flat_hash_set ducks = -// {"huey", "dewey", "louie"}; -// -// // Insert a new element into the flat hash set -// ducks.insert("donald"); -// -// // Force a rehash of the flat hash set -// ducks.rehash(0); -// -// // See if "dewey" is present -// if (ducks.contains("dewey")) { -// std::cout << "We found dewey!" << std::endl; -// } -template , - class Eq = absl::container_internal::hash_default_eq, - class Allocator = std::allocator> -class flat_hash_set - : public absl::container_internal::raw_hash_set< - absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { - using Base = typename flat_hash_set::raw_hash_set; - - public: - // Constructors and Assignment Operators - // - // A flat_hash_set supports the same overload set as `std::unordered_set` - // for construction and assignment: - // - // * Default constructor - // - // // No allocation for the table's elements is made. - // absl::flat_hash_set set1; - // - // * Initializer List constructor - // - // absl::flat_hash_set set2 = - // {{"huey"}, {"dewey"}, {"louie"},}; - // - // * Copy constructor - // - // absl::flat_hash_set set3(set2); - // - // * Copy assignment operator - // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_set set4; - // set4 = set3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::flat_hash_set set5(std::move(set4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::flat_hash_set set6; - // set6 = std::move(set5); - // - // * Range constructor - // - // std::vector v = {"a", "b"}; - // absl::flat_hash_set set7(v.begin(), v.end()); - flat_hash_set() {} - using Base::Base; - - // flat_hash_set::begin() - // - // Returns an iterator to the beginning of the `flat_hash_set`. - using Base::begin; - - // flat_hash_set::cbegin() - // - // Returns a const iterator to the beginning of the `flat_hash_set`. - using Base::cbegin; - - // flat_hash_set::cend() - // - // Returns a const iterator to the end of the `flat_hash_set`. - using Base::cend; - - // flat_hash_set::end() - // - // Returns an iterator to the end of the `flat_hash_set`. - using Base::end; - - // flat_hash_set::capacity() - // - // Returns the number of element slots (assigned, deleted, and empty) - // available within the `flat_hash_set`. - // - // NOTE: this member function is particular to `absl::flat_hash_set` and is - // not provided in the `std::unordered_set` API. - using Base::capacity; - - // flat_hash_set::empty() - // - // Returns whether or not the `flat_hash_set` is empty. - using Base::empty; - - // flat_hash_set::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `flat_hash_set` under current memory constraints. This value can be thought - // of the largest value of `std::distance(begin(), end())` for a - // `flat_hash_set`. 
- using Base::max_size; - - // flat_hash_set::size() - // - // Returns the number of elements currently within the `flat_hash_set`. - using Base::size; - - // flat_hash_set::clear() - // - // Removes all elements from the `flat_hash_set`. Invalidates any references, - // pointers, or iterators referring to contained elements. - // - // NOTE: this operation may shrink the underlying buffer. To avoid shrinking - // the underlying buffer call `erase(begin(), end())`. - using Base::clear; - - // flat_hash_set::erase() - // - // Erases elements within the `flat_hash_set`. Erasing does not trigger a - // rehash. Overloads are listed below. - // - // void erase(const_iterator pos): - // - // Erases the element at `position` of the `flat_hash_set`, returning - // `void`. - // - // NOTE: returning `void` in this case is different than that of STL - // containers in general and `std::unordered_set` in particular (which - // return an iterator to the element following the erased element). If that - // iterator is needed, simply post increment the iterator: - // - // set.erase(it++); - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning an - // iterator pointing to `last`. - // - // size_type erase(const key_type& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // flat_hash_set::insert() - // - // Inserts an element of the specified value into the `flat_hash_set`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If rehashing occurs - // due to the insertion, all iterators are invalidated. Overloads are listed - // below. - // - // std::pair insert(const T& value): - // - // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a bool denoting whether the insertion took place. - // - // std::pair insert(T&& value): - // - // Inserts a moveable value into the `flat_hash_set`. Returns a pair - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a bool denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const T& value): - // iterator insert(const_iterator hint, T&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently, for `flat_hash_set` we guarantee the - // first match is inserted. - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently within the initializer list, for - // `flat_hash_set` we guarantee the first match is inserted. 
- using Base::insert; - - // flat_hash_set::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `flat_hash_set`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace; - - // flat_hash_set::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `flat_hash_set`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace_hint; - - // flat_hash_set::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // node_type extract(const key_type& x): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `flat_hash_set` - // does not contain an element with a matching key, this function returns an - // empty node handle. - using Base::extract; - - // flat_hash_set::merge() - // - // Extracts elements from a given `source` flat hash set into this - // `flat_hash_set`. If the destination `flat_hash_set` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // flat_hash_set::swap(flat_hash_set& other) - // - // Exchanges the contents of this `flat_hash_set` with those of the `other` - // flat hash set, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `flat_hash_set` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - // - // `swap()` requires that the flat hash set's hashing and key equivalence - // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the set's allocator has - // `std::allocator_traits::propagate_on_container_swap::value` - // set to `true`, the allocators are also exchanged using an unqualified call - // to non-member `swap()`; otherwise, the allocators are not swapped. - using Base::swap; - - // flat_hash_set::rehash(count) - // - // Rehashes the `flat_hash_set`, setting the number of slots to be at least - // the passed value. If the new number of slots increases the load factor more - // than the current maximum load factor - // (`count` < `size()` / `max_load_factor()`), then the new number of slots - // will be at least `size()` / `max_load_factor()`. - // - // To force a rehash, pass rehash(0). - // - // NOTE: unlike behavior in `std::unordered_set`, references are also - // invalidated upon a `rehash()`. 
- using Base::rehash; - - // flat_hash_set::reserve(count) - // - // Sets the number of slots in the `flat_hash_set` to the number needed to - // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. - using Base::reserve; - - // flat_hash_set::contains() - // - // Determines whether an element comparing equal to the given `key` exists - // within the `flat_hash_set`, returning `true` if so or `false` otherwise. - using Base::contains; - - // flat_hash_set::count(const Key& key) const - // - // Returns the number of elements comparing equal to the given `key` within - // the `flat_hash_set`. note that this function will return either `1` or `0` - // since duplicate elements are not allowed within a `flat_hash_set`. - using Base::count; - - // flat_hash_set::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `flat_hash_set`. - using Base::equal_range; - - // flat_hash_set::find() - // - // Finds an element with the passed `key` within the `flat_hash_set`. - using Base::find; - - // flat_hash_set::bucket_count() - // - // Returns the number of "buckets" within the `flat_hash_set`. Note that - // because a flat hash set contains all elements within its internal storage, - // this value simply equals the current capacity of the `flat_hash_set`. - using Base::bucket_count; - - // flat_hash_set::load_factor() - // - // Returns the current load factor of the `flat_hash_set` (the average number - // of slots occupied with a value within the hash set). - using Base::load_factor; - - // flat_hash_set::max_load_factor() - // - // Manages the maximum load factor of the `flat_hash_set`. Overloads are - // listed below. - // - // float flat_hash_set::max_load_factor() - // - // Returns the current maximum load factor of the `flat_hash_set`. - // - // void flat_hash_set::max_load_factor(float ml) - // - // Sets the maximum load factor of the `flat_hash_set` to the passed value. - // - // NOTE: This overload is provided only for API compatibility with the STL; - // `flat_hash_set` will ignore any set load factor and manage its rehashing - // internally as an implementation detail. - using Base::max_load_factor; - - // flat_hash_set::get_allocator() - // - // Returns the allocator function associated with this `flat_hash_set`. - using Base::get_allocator; - - // flat_hash_set::hash_function() - // - // Returns the hashing function used to hash the keys within this - // `flat_hash_set`. - using Base::hash_function; - - // flat_hash_set::key_eq() - // - // Returns the function used for comparing keys equality. - using Base::key_eq; -}; - -// erase_if(flat_hash_set<>, Pred) -// -// Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. -template -typename flat_hash_set::size_type erase_if( - flat_hash_set& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); -} - -namespace container_internal { - -template -struct FlatHashSetPolicy { - using slot_type = T; - using key_type = T; - using init_type = T; - using constant_iterators = std::true_type; - - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { - absl::allocator_traits::construct(*alloc, slot, - std::forward(args)...); - } - - template - static void destroy(Allocator* alloc, slot_type* slot) { - absl::allocator_traits::destroy(*alloc, slot); - } - - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - construct(alloc, new_slot, std::move(*old_slot)); - destroy(alloc, old_slot); - } - - static T& element(slot_type* slot) { return *slot; } - - template - static decltype(absl::container_internal::DecomposeValue( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return absl::container_internal::DecomposeValue( - std::forward(f), std::forward(args)...); - } - - static size_t space_used(const T*) { return 0; } -}; -} // namespace container_internal - -namespace container_algorithm_internal { - -// Specialization of trait in absl/algorithm/container.h -template -struct IsUnorderedContainer> - : std::true_type {}; - -} // namespace container_algorithm_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct FlatHashSetPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::flat_hash_set + // ----------------------------------------------------------------------------- + // + // An `absl::flat_hash_set` is an unordered associative container which has + // been optimized for both speed and memory footprint in most common use cases. + // Its interface is similar to that of `std::unordered_set` with the + // following notable differences: + // + // * Requires keys that are CopyConstructible + // * Supports heterogeneous lookup, through `find()` and `insert()`, provided + // that the set is provided a compatible heterogeneous hashing function and + // equality operator. + // * Invalidates any references and pointers to elements within the table after + // `rehash()`. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash set. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All + // fundamental and Abseil types that support the `absl::Hash` framework have a + // compatible equality operator for comparing insertions into `flat_hash_set`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::flat_hash_set` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // NOTE: A `flat_hash_set` stores its keys directly inside its implementation + // array to avoid memory indirection. Because a `flat_hash_set` is designed to + // move data when rehashed, set keys will not retain pointer stability. If you + // require pointer stability, consider using + // `absl::flat_hash_set>`. If your type is not moveable and + // you require pointer stability, consider `absl::node_hash_set` instead. 
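The remark above about extending the `absl::Hash` framework to user-defined types can be made concrete with a small sketch (a hypothetical `Point` type; an `operator==` plus an `AbslHashValue` friend is all the default hash and equality functors need):

#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"

struct Point {
  int x;
  int y;

  friend bool operator==(const Point& a, const Point& b) {
    return a.x == b.x && a.y == b.y;
  }

  template <typename H>
  friend H AbslHashValue(H h, const Point& p) {
    return H::combine(std::move(h), p.x, p.y);
  }
};

void UsePoints() {
  absl::flat_hash_set<Point> visited;
  visited.insert(Point{1, 2});
  bool seen = visited.contains(Point{1, 2});  // true
  (void)seen;
}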
+ // + // Example: + // + // // Create a flat hash set of three strings + // absl::flat_hash_set ducks = + // {"huey", "dewey", "louie"}; + // + // // Insert a new element into the flat hash set + // ducks.insert("donald"); + // + // // Force a rehash of the flat hash set + // ducks.rehash(0); + // + // // See if "dewey" is present + // if (ducks.contains("dewey")) { + // std::cout << "We found dewey!" << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator> + class flat_hash_set : public absl::container_internal::raw_hash_set, Hash, Eq, Allocator> + { + using Base = typename flat_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A flat_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::flat_hash_set set1; + // + // * Initializer List constructor + // + // absl::flat_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"},}; + // + // * Copy constructor + // + // absl::flat_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::flat_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::flat_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::flat_hash_set set7(v.begin(), v.end()); + flat_hash_set() + { + } + using Base::Base; + + // flat_hash_set::begin() + // + // Returns an iterator to the beginning of the `flat_hash_set`. + using Base::begin; + + // flat_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `flat_hash_set`. + using Base::cbegin; + + // flat_hash_set::cend() + // + // Returns a const iterator to the end of the `flat_hash_set`. + using Base::cend; + + // flat_hash_set::end() + // + // Returns an iterator to the end of the `flat_hash_set`. + using Base::end; + + // flat_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `flat_hash_set`. + // + // NOTE: this member function is particular to `absl::flat_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // flat_hash_set::empty() + // + // Returns whether or not the `flat_hash_set` is empty. + using Base::empty; + + // flat_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `flat_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `flat_hash_set`. + using Base::max_size; + + // flat_hash_set::size() + // + // Returns the number of elements currently within the `flat_hash_set`. + using Base::size; + + // flat_hash_set::clear() + // + // Removes all elements from the `flat_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // flat_hash_set::erase() + // + // Erases elements within the `flat_hash_set`. Erasing does not trigger a + // rehash. 
Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `flat_hash_set`, returning + // `void`. + // + // NOTE: returning `void` in this case is different than that of STL + // containers in general and `std::unordered_set` in particular (which + // return an iterator to the element following the erased element). If that + // iterator is needed, simply post increment the iterator: + // + // set.erase(it++); + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // flat_hash_set::insert() + // + // Inserts an element of the specified value into the `flat_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `flat_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `flat_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `flat_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // flat_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. 
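A minimal usage sketch of the insert/erase overloads documented above (the helper function name is made up; it assumes the default hasher and equality for `std::string` keys, which accept `absl::string_view` for heterogeneous lookup):

#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"

void InsertEraseSketch() {
  absl::flat_hash_set<std::string> names = {"huey", "dewey"};

  // insert() returns a pair<iterator, bool>; the bool reports whether the
  // insertion actually took place.
  auto result = names.insert("louie");
  bool inserted = result.second;  // true: "louie" was not present before
  (void)inserted;

  // With std::string keys, lookups can take an absl::string_view without
  // constructing a temporary std::string.
  bool has_dewey = names.contains(absl::string_view("dewey"));
  (void)has_dewey;

  // erase(key) returns the number of erased elements: 0 or 1 for a set.
  size_t erased = names.erase("huey");
  (void)erased;
}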
+ using Base::emplace; + + // flat_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `flat_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // flat_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `flat_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. + using Base::extract; + + // flat_hash_set::merge() + // + // Extracts elements from a given `source` flat hash set into this + // `flat_hash_set`. If the destination `flat_hash_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // flat_hash_set::swap(flat_hash_set& other) + // + // Exchanges the contents of this `flat_hash_set` with those of the `other` + // flat hash set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `flat_hash_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the flat hash set's hashing and key equivalence + // functions be Swappable, and are exchaged using unqualified calls to + // non-member `swap()`. If the set's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // flat_hash_set::rehash(count) + // + // Rehashes the `flat_hash_set`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_set`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // flat_hash_set::reserve(count) + // + // Sets the number of slots in the `flat_hash_set` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // flat_hash_set::contains() + // + // Determines whether an element comparing equal to the given `key` exists + // within the `flat_hash_set`, returning `true` if so or `false` otherwise. 
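A small sketch of `extract()` and `merge()` as described above (illustrative only; the function name and set contents are made up):

#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"

void ExtractMergeSketch() {
  absl::flat_hash_set<std::string> src = {"huey", "dewey", "louie"};
  absl::flat_hash_set<std::string> dst = {"donald"};

  // extract() removes the element and hands back a node handle that owns it.
  auto node = src.extract("huey");
  if (!node.empty()) {
    dst.insert(std::move(node));  // re-insert the extracted node elsewhere
  }

  // merge() moves the remaining elements of `src` into `dst`; elements whose
  // keys already exist in `dst` stay behind in `src`.
  dst.merge(src);
}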
+ using Base::contains; + + // flat_hash_set::count(const Key& key) const + // + // Returns the number of elements comparing equal to the given `key` within + // the `flat_hash_set`. note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `flat_hash_set`. + using Base::count; + + // flat_hash_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `flat_hash_set`. + using Base::equal_range; + + // flat_hash_set::find() + // + // Finds an element with the passed `key` within the `flat_hash_set`. + using Base::find; + + // flat_hash_set::bucket_count() + // + // Returns the number of "buckets" within the `flat_hash_set`. Note that + // because a flat hash set contains all elements within its internal storage, + // this value simply equals the current capacity of the `flat_hash_set`. + using Base::bucket_count; + + // flat_hash_set::load_factor() + // + // Returns the current load factor of the `flat_hash_set` (the average number + // of slots occupied with a value within the hash set). + using Base::load_factor; + + // flat_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `flat_hash_set`. Overloads are + // listed below. + // + // float flat_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `flat_hash_set`. + // + // void flat_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `flat_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `flat_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // flat_hash_set::get_allocator() + // + // Returns the allocator function associated with this `flat_hash_set`. + using Base::get_allocator; + + // flat_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `flat_hash_set`. + using Base::hash_function; + + // flat_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(flat_hash_set<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename flat_hash_set::size_type erase_if( + flat_hash_set& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct FlatHashSetPolicy + { + using slot_type = T; + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) + { + absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + absl::allocator_traits::destroy(*alloc, slot); + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) + { + construct(alloc, new_slot, std::move(*old_slot)); + destroy(alloc, old_slot); + } + + static T& element(slot_type* slot) + { + return *slot; + } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... 
args) + { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)... + ); + } + + static size_t space_used(const T*) + { + return 0; + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h index bc1c4a7..cc2c39b 100644 --- a/CAPI/cpp/grpc/include/absl/container/inlined_vector.h +++ b/CAPI/cpp/grpc/include/absl/container/inlined_vector.h @@ -53,814 +53,932 @@ #include "absl/container/internal/inlined_vector.h" #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -// ----------------------------------------------------------------------------- -// InlinedVector -// ----------------------------------------------------------------------------- -// -// An `absl::InlinedVector` is designed to be a drop-in replacement for -// `std::vector` for use cases where the vector's size is sufficiently small -// that it can be inlined. If the inlined vector does grow beyond its estimated -// capacity, it will trigger an initial allocation on the heap, and will behave -// as a `std::vector`. The API of the `absl::InlinedVector` within this file is -// designed to cover the same API footprint as covered by `std::vector`. -template > -class InlinedVector { - static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); - - using Storage = inlined_vector_internal::Storage; - - template - using AllocatorTraits = inlined_vector_internal::AllocatorTraits; - template - using MoveIterator = inlined_vector_internal::MoveIterator; - template - using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; - - template - using IteratorValueAdapter = - inlined_vector_internal::IteratorValueAdapter; - template - using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; - template - using DefaultValueAdapter = - inlined_vector_internal::DefaultValueAdapter; - - template - using EnableIfAtLeastForwardIterator = absl::enable_if_t< - inlined_vector_internal::IsAtLeastForwardIterator::value, int>; - template - using DisableIfAtLeastForwardIterator = absl::enable_if_t< - !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; - - public: - using allocator_type = A; - using value_type = inlined_vector_internal::ValueType; - using pointer = inlined_vector_internal::Pointer; - using const_pointer = inlined_vector_internal::ConstPointer; - using size_type = inlined_vector_internal::SizeType; - using difference_type = inlined_vector_internal::DifferenceType; - using reference = inlined_vector_internal::Reference; - using const_reference = inlined_vector_internal::ConstReference; - using iterator = inlined_vector_internal::Iterator; - using const_iterator = inlined_vector_internal::ConstIterator; - using reverse_iterator = inlined_vector_internal::ReverseIterator; - using const_reverse_iterator = - inlined_vector_internal::ConstReverseIterator; - - // --------------------------------------------------------------------------- - // InlinedVector Constructors and Destructor - // --------------------------------------------------------------------------- - - // Creates an empty inlined vector with a value-initialized allocator. 
- InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} - - // Creates an empty inlined vector with a copy of `allocator`. - explicit InlinedVector(const allocator_type& allocator) noexcept - : storage_(allocator) {} - - // Creates an inlined vector with `n` copies of `value_type()`. - explicit InlinedVector(size_type n, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(DefaultValueAdapter(), n); - } - - // Creates an inlined vector with `n` copies of `v`. - InlinedVector(size_type n, const_reference v, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); - } - - // Creates an inlined vector with copies of the elements of `list`. - InlinedVector(std::initializer_list list, - const allocator_type& allocator = allocator_type()) - : InlinedVector(list.begin(), list.end(), allocator) {} - - // Creates an inlined vector with elements constructed from the provided - // forward iterator range [`first`, `last`). - // - // NOTE: the `enable_if` prevents ambiguous interpretation between a call to - // this constructor with two integral arguments and a call to the above - // `InlinedVector(size_type, const_reference)` constructor. - template = 0> - InlinedVector(ForwardIterator first, ForwardIterator last, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - storage_.Initialize(IteratorValueAdapter(first), - static_cast(std::distance(first, last))); - } - - // Creates an inlined vector with elements constructed from the provided input - // iterator range [`first`, `last`). - template = 0> - InlinedVector(InputIterator first, InputIterator last, - const allocator_type& allocator = allocator_type()) - : storage_(allocator) { - std::copy(first, last, std::back_inserter(*this)); - } - - // Creates an inlined vector by copying the contents of `other` using - // `other`'s allocator. - InlinedVector(const InlinedVector& other) - : InlinedVector(other, other.storage_.GetAllocator()) {} - - // Creates an inlined vector by copying the contents of `other` using the - // provided `allocator`. - InlinedVector(const InlinedVector& other, const allocator_type& allocator) - : storage_(allocator) { - if (other.empty()) { - // Empty; nothing to do. - } else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { - // Memcpy-able and do not need allocation. - storage_.MemcpyFrom(other.storage_); - } else { - storage_.InitFrom(other.storage_); - } - } - - // Creates an inlined vector by moving in the contents of `other` without - // allocating. If `other` contains allocated memory, the newly-created inlined - // vector will take ownership of that memory. However, if `other` does not - // contain allocated memory, the newly-created inlined vector will perform - // element-wise move construction of the contents of `other`. - // - // NOTE: since no allocation is performed for the inlined vector in either - // case, the `noexcept(...)` specification depends on whether moving the - // underlying objects can throw. It is assumed assumed that... - // a) move constructors should only throw due to allocation failure. - // b) if `value_type`'s move constructor allocates, it uses the same - // allocation function as the inlined vector's allocator. - // Thus, the move constructor is non-throwing if the allocator is non-throwing - // or `value_type`'s move constructor is specified as `noexcept`. 
- InlinedVector(InlinedVector&& other) noexcept( - absl::allocator_is_nothrow::value || - std::is_nothrow_move_constructible::value) - : storage_(other.storage_.GetAllocator()) { - if (IsMemcpyOk::value) { - storage_.MemcpyFrom(other.storage_); - - other.storage_.SetInlinedSize(0); - } else if (other.storage_.GetIsAllocated()) { - storage_.SetAllocation({other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()}); - storage_.SetAllocatedSize(other.storage_.GetSize()); - - other.storage_.SetInlinedSize(0); - } else { - IteratorValueAdapter> other_values( - MoveIterator(other.storage_.GetInlinedData())); - - inlined_vector_internal::ConstructElements( - storage_.GetAllocator(), storage_.GetInlinedData(), other_values, - other.storage_.GetSize()); - - storage_.SetInlinedSize(other.storage_.GetSize()); - } - } - - // Creates an inlined vector by moving in the contents of `other` with a copy - // of `allocator`. - // - // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` - // contains allocated memory, this move constructor will still allocate. Since - // allocation is performed, this constructor can only be `noexcept` if the - // specified allocator is also `noexcept`. - InlinedVector( - InlinedVector&& other, - const allocator_type& - allocator) noexcept(absl::allocator_is_nothrow::value) - : storage_(allocator) { - if (IsMemcpyOk::value) { - storage_.MemcpyFrom(other.storage_); - - other.storage_.SetInlinedSize(0); - } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && - other.storage_.GetIsAllocated()) { - storage_.SetAllocation({other.storage_.GetAllocatedData(), - other.storage_.GetAllocatedCapacity()}); - storage_.SetAllocatedSize(other.storage_.GetSize()); - - other.storage_.SetInlinedSize(0); - } else { - storage_.Initialize(IteratorValueAdapter>( - MoveIterator(other.data())), - other.size()); - } - } - - ~InlinedVector() {} - - // --------------------------------------------------------------------------- - // InlinedVector Member Accessors - // --------------------------------------------------------------------------- - - // `InlinedVector::empty()` - // - // Returns whether the inlined vector contains no elements. - bool empty() const noexcept { return !size(); } - - // `InlinedVector::size()` - // - // Returns the number of elements in the inlined vector. - size_type size() const noexcept { return storage_.GetSize(); } - - // `InlinedVector::max_size()` - // - // Returns the maximum number of elements the inlined vector can hold. - size_type max_size() const noexcept { - // One bit of the size storage is used to indicate whether the inlined - // vector contains allocated memory. As a result, the maximum size that the - // inlined vector can express is half of the max for `size_type`. - return (std::numeric_limits::max)() / 2; - } - - // `InlinedVector::capacity()` - // - // Returns the number of elements that could be stored in the inlined vector - // without requiring a reallocation. - // - // NOTE: for most inlined vectors, `capacity()` should be equal to the - // template parameter `N`. For inlined vectors which exceed this capacity, - // they will no longer be inlined and `capacity()` will equal the capactity of - // the allocated memory. - size_type capacity() const noexcept { - return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity() - : storage_.GetInlinedCapacity(); - } - - // `InlinedVector::data()` - // - // Returns a `pointer` to the elements of the inlined vector. 
This pointer - // can be used to access and modify the contained elements. - // - // NOTE: only elements within [`data()`, `data() + size()`) are valid. - pointer data() noexcept { - return storage_.GetIsAllocated() ? storage_.GetAllocatedData() - : storage_.GetInlinedData(); - } - - // Overload of `InlinedVector::data()` that returns a `const_pointer` to the - // elements of the inlined vector. This pointer can be used to access but not - // modify the contained elements. - // - // NOTE: only elements within [`data()`, `data() + size()`) are valid. - const_pointer data() const noexcept { - return storage_.GetIsAllocated() ? storage_.GetAllocatedData() - : storage_.GetInlinedData(); - } - - // `InlinedVector::operator[](...)` - // - // Returns a `reference` to the `i`th element of the inlined vector. - reference operator[](size_type i) { - ABSL_HARDENING_ASSERT(i < size()); - return data()[i]; - } - - // Overload of `InlinedVector::operator[](...)` that returns a - // `const_reference` to the `i`th element of the inlined vector. - const_reference operator[](size_type i) const { - ABSL_HARDENING_ASSERT(i < size()); - return data()[i]; - } - - // `InlinedVector::at(...)` - // - // Returns a `reference` to the `i`th element of the inlined vector. - // - // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, - // in both debug and non-debug builds, `std::out_of_range` will be thrown. - reference at(size_type i) { - if (ABSL_PREDICT_FALSE(i >= size())) { - base_internal::ThrowStdOutOfRange( - "`InlinedVector::at(size_type)` failed bounds check"); - } - return data()[i]; - } - - // Overload of `InlinedVector::at(...)` that returns a `const_reference` to - // the `i`th element of the inlined vector. - // - // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, - // in both debug and non-debug builds, `std::out_of_range` will be thrown. - const_reference at(size_type i) const { - if (ABSL_PREDICT_FALSE(i >= size())) { - base_internal::ThrowStdOutOfRange( - "`InlinedVector::at(size_type) const` failed bounds check"); - } - return data()[i]; - } - - // `InlinedVector::front()` - // - // Returns a `reference` to the first element of the inlined vector. - reference front() { - ABSL_HARDENING_ASSERT(!empty()); - return data()[0]; - } - - // Overload of `InlinedVector::front()` that returns a `const_reference` to - // the first element of the inlined vector. - const_reference front() const { - ABSL_HARDENING_ASSERT(!empty()); - return data()[0]; - } - - // `InlinedVector::back()` - // - // Returns a `reference` to the last element of the inlined vector. - reference back() { - ABSL_HARDENING_ASSERT(!empty()); - return data()[size() - 1]; - } - - // Overload of `InlinedVector::back()` that returns a `const_reference` to the - // last element of the inlined vector. - const_reference back() const { - ABSL_HARDENING_ASSERT(!empty()); - return data()[size() - 1]; - } - - // `InlinedVector::begin()` - // - // Returns an `iterator` to the beginning of the inlined vector. - iterator begin() noexcept { return data(); } - - // Overload of `InlinedVector::begin()` that returns a `const_iterator` to - // the beginning of the inlined vector. - const_iterator begin() const noexcept { return data(); } - - // `InlinedVector::end()` - // - // Returns an `iterator` to the end of the inlined vector. - iterator end() noexcept { return data() + size(); } - - // Overload of `InlinedVector::end()` that returns a `const_iterator` to the - // end of the inlined vector. 
- const_iterator end() const noexcept { return data() + size(); } - - // `InlinedVector::cbegin()` - // - // Returns a `const_iterator` to the beginning of the inlined vector. - const_iterator cbegin() const noexcept { return begin(); } - - // `InlinedVector::cend()` - // - // Returns a `const_iterator` to the end of the inlined vector. - const_iterator cend() const noexcept { return end(); } - - // `InlinedVector::rbegin()` - // - // Returns a `reverse_iterator` from the end of the inlined vector. - reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } - - // Overload of `InlinedVector::rbegin()` that returns a - // `const_reverse_iterator` from the end of the inlined vector. - const_reverse_iterator rbegin() const noexcept { - return const_reverse_iterator(end()); - } - - // `InlinedVector::rend()` - // - // Returns a `reverse_iterator` from the beginning of the inlined vector. - reverse_iterator rend() noexcept { return reverse_iterator(begin()); } - - // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` - // from the beginning of the inlined vector. - const_reverse_iterator rend() const noexcept { - return const_reverse_iterator(begin()); - } - - // `InlinedVector::crbegin()` - // - // Returns a `const_reverse_iterator` from the end of the inlined vector. - const_reverse_iterator crbegin() const noexcept { return rbegin(); } - - // `InlinedVector::crend()` - // - // Returns a `const_reverse_iterator` from the beginning of the inlined - // vector. - const_reverse_iterator crend() const noexcept { return rend(); } - - // `InlinedVector::get_allocator()` - // - // Returns a copy of the inlined vector's allocator. - allocator_type get_allocator() const { return storage_.GetAllocator(); } - - // --------------------------------------------------------------------------- - // InlinedVector Member Mutators - // --------------------------------------------------------------------------- - - // `InlinedVector::operator=(...)` - // - // Replaces the elements of the inlined vector with copies of the elements of - // `list`. - InlinedVector& operator=(std::initializer_list list) { - assign(list.begin(), list.end()); - - return *this; - } - - // Overload of `InlinedVector::operator=(...)` that replaces the elements of - // the inlined vector with copies of the elements of `other`. - InlinedVector& operator=(const InlinedVector& other) { - if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - const_pointer other_data = other.data(); - assign(other_data, other_data + other.size()); - } - - return *this; - } - - // Overload of `InlinedVector::operator=(...)` that moves the elements of - // `other` into the inlined vector. - // - // NOTE: as a result of calling this overload, `other` is left in a valid but - // unspecified state. - InlinedVector& operator=(InlinedVector&& other) { - if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { - inlined_vector_internal::DestroyAdapter::DestroyElements( - storage_.GetAllocator(), data(), size()); - storage_.DeallocateIfAllocated(); - storage_.MemcpyFrom(other.storage_); - - other.storage_.SetInlinedSize(0); - } else { - storage_.Assign(IteratorValueAdapter>( - MoveIterator(other.storage_.GetInlinedData())), - other.size()); - } - } - - return *this; - } - - // `InlinedVector::assign(...)` - // - // Replaces the contents of the inlined vector with `n` copies of `v`. 
- void assign(size_type n, const_reference v) { - storage_.Assign(CopyValueAdapter(std::addressof(v)), n); - } - - // Overload of `InlinedVector::assign(...)` that replaces the contents of the - // inlined vector with copies of the elements of `list`. - void assign(std::initializer_list list) { - assign(list.begin(), list.end()); - } - - // Overload of `InlinedVector::assign(...)` to replace the contents of the - // inlined vector with the range [`first`, `last`). - // - // NOTE: this overload is for iterators that are "forward" category or better. - template = 0> - void assign(ForwardIterator first, ForwardIterator last) { - storage_.Assign(IteratorValueAdapter(first), - static_cast(std::distance(first, last))); - } - - // Overload of `InlinedVector::assign(...)` to replace the contents of the - // inlined vector with the range [`first`, `last`). - // - // NOTE: this overload is for iterators that are "input" category. - template = 0> - void assign(InputIterator first, InputIterator last) { - size_type i = 0; - for (; i < size() && first != last; ++i, static_cast(++first)) { - data()[i] = *first; - } - - erase(data() + i, data() + size()); - std::copy(first, last, std::back_inserter(*this)); - } - - // `InlinedVector::resize(...)` - // - // Resizes the inlined vector to contain `n` elements. - // - // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` - // is larger than `size()`, new elements are value-initialized. - void resize(size_type n) { - ABSL_HARDENING_ASSERT(n <= max_size()); - storage_.Resize(DefaultValueAdapter(), n); - } - - // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to - // contain `n` elements. - // - // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` - // is larger than `size()`, new elements are copied-constructed from `v`. - void resize(size_type n, const_reference v) { - ABSL_HARDENING_ASSERT(n <= max_size()); - storage_.Resize(CopyValueAdapter(std::addressof(v)), n); - } - - // `InlinedVector::insert(...)` - // - // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly - // inserted element. - iterator insert(const_iterator pos, const_reference v) { - return emplace(pos, v); - } - - // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using - // move semantics, returning an `iterator` to the newly inserted element. - iterator insert(const_iterator pos, value_type&& v) { - return emplace(pos, std::move(v)); - } - - // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies - // of `v` starting at `pos`, returning an `iterator` pointing to the first of - // the newly inserted elements. - iterator insert(const_iterator pos, size_type n, const_reference v) { - ABSL_HARDENING_ASSERT(pos >= begin()); - ABSL_HARDENING_ASSERT(pos <= end()); - - if (ABSL_PREDICT_TRUE(n != 0)) { - value_type dealias = v; - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 - // It appears that GCC thinks that since `pos` is a const pointer and may - // point to uninitialized memory at this point, a warning should be - // issued. But `pos` is actually only used to compute an array index to - // write to. 
+namespace absl +{ + ABSL_NAMESPACE_BEGIN + // ----------------------------------------------------------------------------- + // InlinedVector + // ----------------------------------------------------------------------------- + // + // An `absl::InlinedVector` is designed to be a drop-in replacement for + // `std::vector` for use cases where the vector's size is sufficiently small + // that it can be inlined. If the inlined vector does grow beyond its estimated + // capacity, it will trigger an initial allocation on the heap, and will behave + // as a `std::vector`. The API of the `absl::InlinedVector` within this file is + // designed to cover the same API footprint as covered by `std::vector`. + template> + class InlinedVector + { + static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); + + using Storage = inlined_vector_internal::Storage; + + template + using AllocatorTraits = inlined_vector_internal::AllocatorTraits; + template + using MoveIterator = inlined_vector_internal::MoveIterator; + template + using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; + + template + using IteratorValueAdapter = + inlined_vector_internal::IteratorValueAdapter; + template + using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; + template + using DefaultValueAdapter = + inlined_vector_internal::DefaultValueAdapter; + + template + using EnableIfAtLeastForwardIterator = absl::enable_if_t< + inlined_vector_internal::IsAtLeastForwardIterator::value, + int>; + template + using DisableIfAtLeastForwardIterator = absl::enable_if_t< + !inlined_vector_internal::IsAtLeastForwardIterator::value, + int>; + + public: + using allocator_type = A; + using value_type = inlined_vector_internal::ValueType; + using pointer = inlined_vector_internal::Pointer; + using const_pointer = inlined_vector_internal::ConstPointer; + using size_type = inlined_vector_internal::SizeType; + using difference_type = inlined_vector_internal::DifferenceType; + using reference = inlined_vector_internal::Reference; + using const_reference = inlined_vector_internal::ConstReference; + using iterator = inlined_vector_internal::Iterator; + using const_iterator = inlined_vector_internal::ConstIterator; + using reverse_iterator = inlined_vector_internal::ReverseIterator; + using const_reverse_iterator = + inlined_vector_internal::ConstReverseIterator; + + // --------------------------------------------------------------------------- + // InlinedVector Constructors and Destructor + // --------------------------------------------------------------------------- + + // Creates an empty inlined vector with a value-initialized allocator. + InlinedVector() noexcept(noexcept(allocator_type())) : + storage_() + { + } + + // Creates an empty inlined vector with a copy of `allocator`. + explicit InlinedVector(const allocator_type& allocator) noexcept + : + storage_(allocator) + { + } + + // Creates an inlined vector with `n` copies of `value_type()`. + explicit InlinedVector(size_type n, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(DefaultValueAdapter(), n); + } + + // Creates an inlined vector with `n` copies of `v`. + InlinedVector(size_type n, const_reference v, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); + } + + // Creates an inlined vector with copies of the elements of `list`. 
+ InlinedVector(std::initializer_list list, const allocator_type& allocator = allocator_type()) : + InlinedVector(list.begin(), list.end(), allocator) + { + } + + // Creates an inlined vector with elements constructed from the provided + // forward iterator range [`first`, `last`). + // + // NOTE: the `enable_if` prevents ambiguous interpretation between a call to + // this constructor with two integral arguments and a call to the above + // `InlinedVector(size_type, const_reference)` constructor. + template = 0> + InlinedVector(ForwardIterator first, ForwardIterator last, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + storage_.Initialize(IteratorValueAdapter(first), static_cast(std::distance(first, last))); + } + + // Creates an inlined vector with elements constructed from the provided input + // iterator range [`first`, `last`). + template = 0> + InlinedVector(InputIterator first, InputIterator last, const allocator_type& allocator = allocator_type()) : + storage_(allocator) + { + std::copy(first, last, std::back_inserter(*this)); + } + + // Creates an inlined vector by copying the contents of `other` using + // `other`'s allocator. + InlinedVector(const InlinedVector& other) : + InlinedVector(other, other.storage_.GetAllocator()) + { + } + + // Creates an inlined vector by copying the contents of `other` using the + // provided `allocator`. + InlinedVector(const InlinedVector& other, const allocator_type& allocator) : + storage_(allocator) + { + if (other.empty()) + { + // Empty; nothing to do. + } + else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) + { + // Memcpy-able and do not need allocation. + storage_.MemcpyFrom(other.storage_); + } + else + { + storage_.InitFrom(other.storage_); + } + } + + // Creates an inlined vector by moving in the contents of `other` without + // allocating. If `other` contains allocated memory, the newly-created inlined + // vector will take ownership of that memory. However, if `other` does not + // contain allocated memory, the newly-created inlined vector will perform + // element-wise move construction of the contents of `other`. + // + // NOTE: since no allocation is performed for the inlined vector in either + // case, the `noexcept(...)` specification depends on whether moving the + // underlying objects can throw. It is assumed assumed that... + // a) move constructors should only throw due to allocation failure. + // b) if `value_type`'s move constructor allocates, it uses the same + // allocation function as the inlined vector's allocator. + // Thus, the move constructor is non-throwing if the allocator is non-throwing + // or `value_type`'s move constructor is specified as `noexcept`. 
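A brief sketch of the constructor overloads and inline-storage behavior described in the comments above (illustrative; the function name is invented, and capacities beyond `N` are implementation details):

#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"

void ConstructionSketch() {
  // Up to N (here 4) elements live directly inside the object; no heap
  // allocation is performed for them, and capacity() typically reports N.
  absl::InlinedVector<int, 4> a = {1, 2, 3};
  bool still_inline = a.capacity() == 4;
  (void)still_inline;

  // Range constructor, as in the comment above.
  std::vector<int> v = {1, 2, 3, 4, 5};
  absl::InlinedVector<int, 4> b(v.begin(), v.end());
  // b holds more than N elements, so it has spilled to the heap and behaves
  // like std::vector from here on.

  // Move construction takes ownership of any heap allocation held by `b`.
  absl::InlinedVector<int, 4> c(std::move(b));
  (void)c;
}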
+ InlinedVector(InlinedVector&& other) noexcept( + absl::allocator_is_nothrow::value || + std::is_nothrow_move_constructible::value + ) : + storage_(other.storage_.GetAllocator()) + { + if (IsMemcpyOk::value) + { + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } + else if (other.storage_.GetIsAllocated()) + { + storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedSize(other.storage_.GetSize()); + + other.storage_.SetInlinedSize(0); + } + else + { + IteratorValueAdapter> other_values( + MoveIterator(other.storage_.GetInlinedData()) + ); + + inlined_vector_internal::ConstructElements( + storage_.GetAllocator(), storage_.GetInlinedData(), other_values, other.storage_.GetSize() + ); + + storage_.SetInlinedSize(other.storage_.GetSize()); + } + } + + // Creates an inlined vector by moving in the contents of `other` with a copy + // of `allocator`. + // + // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` + // contains allocated memory, this move constructor will still allocate. Since + // allocation is performed, this constructor can only be `noexcept` if the + // specified allocator is also `noexcept`. + InlinedVector( + InlinedVector&& other, + const allocator_type& + allocator + ) noexcept(absl::allocator_is_nothrow::value) : + storage_(allocator) + { + if (IsMemcpyOk::value) + { + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } + else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && other.storage_.GetIsAllocated()) + { + storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); + storage_.SetAllocatedSize(other.storage_.GetSize()); + + other.storage_.SetInlinedSize(0); + } + else + { + storage_.Initialize(IteratorValueAdapter>(MoveIterator(other.data())), other.size()); + } + } + + ~InlinedVector() + { + } + + // --------------------------------------------------------------------------- + // InlinedVector Member Accessors + // --------------------------------------------------------------------------- + + // `InlinedVector::empty()` + // + // Returns whether the inlined vector contains no elements. + bool empty() const noexcept + { + return !size(); + } + + // `InlinedVector::size()` + // + // Returns the number of elements in the inlined vector. + size_type size() const noexcept + { + return storage_.GetSize(); + } + + // `InlinedVector::max_size()` + // + // Returns the maximum number of elements the inlined vector can hold. + size_type max_size() const noexcept + { + // One bit of the size storage is used to indicate whether the inlined + // vector contains allocated memory. As a result, the maximum size that the + // inlined vector can express is half of the max for `size_type`. + return (std::numeric_limits::max)() / 2; + } + + // `InlinedVector::capacity()` + // + // Returns the number of elements that could be stored in the inlined vector + // without requiring a reallocation. + // + // NOTE: for most inlined vectors, `capacity()` should be equal to the + // template parameter `N`. For inlined vectors which exceed this capacity, + // they will no longer be inlined and `capacity()` will equal the capactity of + // the allocated memory. + size_type capacity() const noexcept + { + return storage_.GetIsAllocated() ? 
storage_.GetAllocatedCapacity() : storage_.GetInlinedCapacity(); + } + + // `InlinedVector::data()` + // + // Returns a `pointer` to the elements of the inlined vector. This pointer + // can be used to access and modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + pointer data() noexcept + { + return storage_.GetIsAllocated() ? storage_.GetAllocatedData() : storage_.GetInlinedData(); + } + + // Overload of `InlinedVector::data()` that returns a `const_pointer` to the + // elements of the inlined vector. This pointer can be used to access but not + // modify the contained elements. + // + // NOTE: only elements within [`data()`, `data() + size()`) are valid. + const_pointer data() const noexcept + { + return storage_.GetIsAllocated() ? storage_.GetAllocatedData() : storage_.GetInlinedData(); + } + + // `InlinedVector::operator[](...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + reference operator[](size_type i) + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // Overload of `InlinedVector::operator[](...)` that returns a + // `const_reference` to the `i`th element of the inlined vector. + const_reference operator[](size_type i) const + { + ABSL_HARDENING_ASSERT(i < size()); + return data()[i]; + } + + // `InlinedVector::at(...)` + // + // Returns a `reference` to the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + reference at(size_type i) + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type)` failed bounds check" + ); + } + return data()[i]; + } + + // Overload of `InlinedVector::at(...)` that returns a `const_reference` to + // the `i`th element of the inlined vector. + // + // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, + // in both debug and non-debug builds, `std::out_of_range` will be thrown. + const_reference at(size_type i) const + { + if (ABSL_PREDICT_FALSE(i >= size())) + { + base_internal::ThrowStdOutOfRange( + "`InlinedVector::at(size_type) const` failed bounds check" + ); + } + return data()[i]; + } + + // `InlinedVector::front()` + // + // Returns a `reference` to the first element of the inlined vector. + reference front() + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // Overload of `InlinedVector::front()` that returns a `const_reference` to + // the first element of the inlined vector. + const_reference front() const + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[0]; + } + + // `InlinedVector::back()` + // + // Returns a `reference` to the last element of the inlined vector. + reference back() + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // Overload of `InlinedVector::back()` that returns a `const_reference` to the + // last element of the inlined vector. + const_reference back() const + { + ABSL_HARDENING_ASSERT(!empty()); + return data()[size() - 1]; + } + + // `InlinedVector::begin()` + // + // Returns an `iterator` to the beginning of the inlined vector. + iterator begin() noexcept + { + return data(); + } + + // Overload of `InlinedVector::begin()` that returns a `const_iterator` to + // the beginning of the inlined vector. 
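Element access, sketched under the semantics documented above: `at()` bounds-checks and throws, while `operator[]` only asserts in hardened builds (the function name is made up):

#include <stdexcept>
#include "absl/container/inlined_vector.h"

void AccessSketch() {
  absl::InlinedVector<int, 4> v = {10, 20, 30};

  int first = v.front();   // 10
  int last = v.back();     // 30
  int second = v[1];       // 20; no bounds check outside hardened builds
  (void)first; (void)last; (void)second;

  // at() always validates the index and throws std::out_of_range on failure.
  try {
    v.at(99);
  } catch (const std::out_of_range&) {
    // expected
  }

  // data() exposes the contiguous storage; only [data(), data() + size())
  // may be dereferenced.
  int* p = v.data();
  (void)p;
}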
+ const_iterator begin() const noexcept + { + return data(); + } + + // `InlinedVector::end()` + // + // Returns an `iterator` to the end of the inlined vector. + iterator end() noexcept + { + return data() + size(); + } + + // Overload of `InlinedVector::end()` that returns a `const_iterator` to the + // end of the inlined vector. + const_iterator end() const noexcept + { + return data() + size(); + } + + // `InlinedVector::cbegin()` + // + // Returns a `const_iterator` to the beginning of the inlined vector. + const_iterator cbegin() const noexcept + { + return begin(); + } + + // `InlinedVector::cend()` + // + // Returns a `const_iterator` to the end of the inlined vector. + const_iterator cend() const noexcept + { + return end(); + } + + // `InlinedVector::rbegin()` + // + // Returns a `reverse_iterator` from the end of the inlined vector. + reverse_iterator rbegin() noexcept + { + return reverse_iterator(end()); + } + + // Overload of `InlinedVector::rbegin()` that returns a + // `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator rbegin() const noexcept + { + return const_reverse_iterator(end()); + } + + // `InlinedVector::rend()` + // + // Returns a `reverse_iterator` from the beginning of the inlined vector. + reverse_iterator rend() noexcept + { + return reverse_iterator(begin()); + } + + // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` + // from the beginning of the inlined vector. + const_reverse_iterator rend() const noexcept + { + return const_reverse_iterator(begin()); + } + + // `InlinedVector::crbegin()` + // + // Returns a `const_reverse_iterator` from the end of the inlined vector. + const_reverse_iterator crbegin() const noexcept + { + return rbegin(); + } + + // `InlinedVector::crend()` + // + // Returns a `const_reverse_iterator` from the beginning of the inlined + // vector. + const_reverse_iterator crend() const noexcept + { + return rend(); + } + + // `InlinedVector::get_allocator()` + // + // Returns a copy of the inlined vector's allocator. + allocator_type get_allocator() const + { + return storage_.GetAllocator(); + } + + // --------------------------------------------------------------------------- + // InlinedVector Member Mutators + // --------------------------------------------------------------------------- + + // `InlinedVector::operator=(...)` + // + // Replaces the elements of the inlined vector with copies of the elements of + // `list`. + InlinedVector& operator=(std::initializer_list list) + { + assign(list.begin(), list.end()); + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that replaces the elements of + // the inlined vector with copies of the elements of `other`. + InlinedVector& operator=(const InlinedVector& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + const_pointer other_data = other.data(); + assign(other_data, other_data + other.size()); + } + + return *this; + } + + // Overload of `InlinedVector::operator=(...)` that moves the elements of + // `other` into the inlined vector. + // + // NOTE: as a result of calling this overload, `other` is left in a valid but + // unspecified state. 
+ InlinedVector& operator=(InlinedVector&& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) + { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size() + ); + storage_.DeallocateIfAllocated(); + storage_.MemcpyFrom(other.storage_); + + other.storage_.SetInlinedSize(0); + } + else + { + storage_.Assign(IteratorValueAdapter>(MoveIterator(other.storage_.GetInlinedData())), other.size()); + } + } + + return *this; + } + + // `InlinedVector::assign(...)` + // + // Replaces the contents of the inlined vector with `n` copies of `v`. + void assign(size_type n, const_reference v) + { + storage_.Assign(CopyValueAdapter(std::addressof(v)), n); + } + + // Overload of `InlinedVector::assign(...)` that replaces the contents of the + // inlined vector with copies of the elements of `list`. + void assign(std::initializer_list list) + { + assign(list.begin(), list.end()); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "forward" category or better. + template = 0> + void assign(ForwardIterator first, ForwardIterator last) + { + storage_.Assign(IteratorValueAdapter(first), static_cast(std::distance(first, last))); + } + + // Overload of `InlinedVector::assign(...)` to replace the contents of the + // inlined vector with the range [`first`, `last`). + // + // NOTE: this overload is for iterators that are "input" category. + template = 0> + void assign(InputIterator first, InputIterator last) + { + size_type i = 0; + for (; i < size() && first != last; ++i, static_cast(++first)) + { + data()[i] = *first; + } + + erase(data() + i, data() + size()); + std::copy(first, last, std::back_inserter(*this)); + } + + // `InlinedVector::resize(...)` + // + // Resizes the inlined vector to contain `n` elements. + // + // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are value-initialized. + void resize(size_type n) + { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(DefaultValueAdapter(), n); + } + + // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to + // contain `n` elements. + // + // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` + // is larger than `size()`, new elements are copied-constructed from `v`. + void resize(size_type n, const_reference v) + { + ABSL_HARDENING_ASSERT(n <= max_size()); + storage_.Resize(CopyValueAdapter(std::addressof(v)), n); + } + + // `InlinedVector::insert(...)` + // + // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly + // inserted element. + iterator insert(const_iterator pos, const_reference v) + { + return emplace(pos, v); + } + + // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using + // move semantics, returning an `iterator` to the newly inserted element. + iterator insert(const_iterator pos, value_type&& v) + { + return emplace(pos, std::move(v)); + } + + // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies + // of `v` starting at `pos`, returning an `iterator` pointing to the first of + // the newly inserted elements. 
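A sketch of `assign()`, `resize()`, and the copy-inserting `insert()` overload discussed above (function name invented for illustration):

#include "absl/container/inlined_vector.h"

void MutateSketch() {
  absl::InlinedVector<int, 4> v;

  v.assign(3, 7);  // v == {7, 7, 7}
  v.resize(5);     // value-initializes the two new elements: {7, 7, 7, 0, 0}
  v.resize(2);     // shrinks, destroying the excess elements: {7, 7}

  // insert(pos, n, value) returns an iterator to the first inserted element.
  auto it = v.insert(v.begin() + 1, 2, 9);  // v == {7, 9, 9, 7}
  (void)it;
}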
+ iterator insert(const_iterator pos, size_type n, const_reference v) + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(n != 0)) + { + value_type dealias = v; + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 + // It appears that GCC thinks that since `pos` is a const pointer and may + // point to uninitialized memory at this point, a warning should be + // issued. But `pos` is actually only used to compute an array index to + // write to. #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif - return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), - n); + return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), n); #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif - } else { - return const_cast(pos); + } + else + { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts copies of the + // elements of `list` starting at `pos`, returning an `iterator` pointing to + // the first of the newly inserted elements. + iterator insert(const_iterator pos, std::initializer_list list) + { + return insert(pos, list.begin(), list.end()); + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "forward" category or better. + template = 0> + iterator insert(const_iterator pos, ForwardIterator first, ForwardIterator last) + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + if (ABSL_PREDICT_TRUE(first != last)) + { + return storage_.Insert(pos, IteratorValueAdapter(first), std::distance(first, last)); + } + else + { + return const_cast(pos); + } + } + + // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, + // `last`) starting at `pos`, returning an `iterator` pointing to the first + // of the newly inserted elements. + // + // NOTE: this overload is for iterators that are "input" category. + template = 0> + iterator insert(const_iterator pos, InputIterator first, InputIterator last) + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + size_type index = std::distance(cbegin(), pos); + for (size_type i = index; first != last; ++i, static_cast(++first)) + { + insert(data() + i, *first); + } + + return iterator(data() + index); + } + + // `InlinedVector::emplace(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `pos`, returning an `iterator` pointing to the newly emplaced element. + template + iterator emplace(const_iterator pos, Args&&... args) + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos <= end()); + + value_type dealias(std::forward(args)...); + return storage_.Insert(pos, IteratorValueAdapter>(MoveIterator(std::addressof(dealias))), 1); + } + + // `InlinedVector::emplace_back(...)` + // + // Constructs and inserts an element using `args...` in the inlined vector at + // `end()`, returning a `reference` to the newly emplaced element. + template + reference emplace_back(Args&&... args) + { + return storage_.EmplaceBack(std::forward(args)...); + } + + // `InlinedVector::push_back(...)` + // + // Inserts a copy of `v` in the inlined vector at `end()`. 
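Appending and removing at the back, as documented above (a sketch; the function name is made up):

#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"

void BackOpsSketch() {
  absl::InlinedVector<std::string, 2> v;

  // emplace_back() constructs in place and returns a reference to the new element.
  std::string& s = v.emplace_back(3, 'x');  // "xxx"
  (void)s;

  std::string hello = "hello";
  v.push_back(hello);             // copy
  v.push_back(std::move(hello));  // move

  v.pop_back();  // destroys the last element and decrements size()
}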
+ void push_back(const_reference v) + { + static_cast(emplace_back(v)); + } + + // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` + // using move semantics. + void push_back(value_type&& v) + { + static_cast(emplace_back(std::move(v))); + } + + // `InlinedVector::pop_back()` + // + // Destroys the element at `back()`, reducing the size by `1`. + void pop_back() noexcept + { + ABSL_HARDENING_ASSERT(!empty()); + + AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); + storage_.SubtractSize(1); + } + + // `InlinedVector::erase(...)` + // + // Erases the element at `pos`, returning an `iterator` pointing to where the + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator pos) + { + ABSL_HARDENING_ASSERT(pos >= begin()); + ABSL_HARDENING_ASSERT(pos < end()); + + return storage_.Erase(pos, pos + 1); + } + + // Overload of `InlinedVector::erase(...)` that erases every element in the + // range [`from`, `to`), returning an `iterator` pointing to where the first + // erased element was located. + // + // NOTE: may return `end()`, which is not dereferencable. + iterator erase(const_iterator from, const_iterator to) + { + ABSL_HARDENING_ASSERT(from >= begin()); + ABSL_HARDENING_ASSERT(from <= to); + ABSL_HARDENING_ASSERT(to <= end()); + + if (ABSL_PREDICT_TRUE(from != to)) + { + return storage_.Erase(from, to); + } + else + { + return const_cast(from); + } + } + + // `InlinedVector::clear()` + // + // Destroys all elements in the inlined vector, setting the size to `0` and + // deallocating any held memory. + void clear() noexcept + { + inlined_vector_internal::DestroyAdapter::DestroyElements( + storage_.GetAllocator(), data(), size() + ); + storage_.DeallocateIfAllocated(); + + storage_.SetInlinedSize(0); + } + + // `InlinedVector::reserve(...)` + // + // Ensures that there is enough room for at least `n` elements. + void reserve(size_type n) + { + storage_.Reserve(n); + } + + // `InlinedVector::shrink_to_fit()` + // + // Attempts to reduce memory usage by moving elements to (or keeping elements + // in) the smallest available buffer sufficient for containing `size()` + // elements. + // + // If `size()` is sufficiently small, the elements will be moved into (or kept + // in) the inlined space. + void shrink_to_fit() + { + if (storage_.GetIsAllocated()) + { + storage_.ShrinkToFit(); + } + } + + // `InlinedVector::swap(...)` + // + // Swaps the contents of the inlined vector with `other`. + void swap(InlinedVector& other) + { + if (ABSL_PREDICT_TRUE(this != std::addressof(other))) + { + storage_.Swap(std::addressof(other.storage_)); + } + } + + private: + template + friend H AbslHashValue(H h, const absl::InlinedVector& a); + + Storage storage_; + }; + + // ----------------------------------------------------------------------------- + // InlinedVector Non-Member Functions + // ----------------------------------------------------------------------------- + + // `swap(...)` + // + // Swaps the contents of two inlined vectors. + template + void swap(absl::InlinedVector& a, absl::InlinedVector& b) noexcept(noexcept(a.swap(b))) + { + a.swap(b); } - } - - // Overload of `InlinedVector::insert(...)` that inserts copies of the - // elements of `list` starting at `pos`, returning an `iterator` pointing to - // the first of the newly inserted elements. 
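`reserve` and `shrink_to_fit` manage the heap allocation explicitly, with `shrink_to_fit` able to move elements back into the inline buffer once `size()` fits, and `swap` (both the member and the non-member overload above) exchanges contents. A sketch of how these interact (sizes chosen for illustration):

```cpp
#include <cassert>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> a = {1, 2, 3, 4, 5, 6};
  assert(a.size() > 4);  // more than 4 elements, so storage is heap-allocated

  a.erase(a.begin() + 2, a.end());  // keep {1, 2}
  a.shrink_to_fit();                // now small enough to live inline again

  absl::InlinedVector<int, 4> b = {9, 8, 7};
  swap(a, b);  // the non-member swap shown above
  assert(a.size() == 3 && b.size() == 2);

  b.clear();  // destroys all elements and releases any held memory
  assert(b.empty());
  return 0;
}
```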
- iterator insert(const_iterator pos, std::initializer_list list) { - return insert(pos, list.begin(), list.end()); - } - - // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, - // `last`) starting at `pos`, returning an `iterator` pointing to the first - // of the newly inserted elements. - // - // NOTE: this overload is for iterators that are "forward" category or better. - template = 0> - iterator insert(const_iterator pos, ForwardIterator first, - ForwardIterator last) { - ABSL_HARDENING_ASSERT(pos >= begin()); - ABSL_HARDENING_ASSERT(pos <= end()); - - if (ABSL_PREDICT_TRUE(first != last)) { - return storage_.Insert(pos, - IteratorValueAdapter(first), - std::distance(first, last)); - } else { - return const_cast(pos); - } - } - - // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, - // `last`) starting at `pos`, returning an `iterator` pointing to the first - // of the newly inserted elements. - // - // NOTE: this overload is for iterators that are "input" category. - template = 0> - iterator insert(const_iterator pos, InputIterator first, InputIterator last) { - ABSL_HARDENING_ASSERT(pos >= begin()); - ABSL_HARDENING_ASSERT(pos <= end()); - - size_type index = std::distance(cbegin(), pos); - for (size_type i = index; first != last; ++i, static_cast(++first)) { - insert(data() + i, *first); + + // `operator==(...)` + // + // Tests for value-equality of two inlined vectors. + template + bool operator==(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + auto a_data = a.data(); + auto b_data = b.data(); + return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size()); } - return iterator(data() + index); - } - - // `InlinedVector::emplace(...)` - // - // Constructs and inserts an element using `args...` in the inlined vector at - // `pos`, returning an `iterator` pointing to the newly emplaced element. - template - iterator emplace(const_iterator pos, Args&&... args) { - ABSL_HARDENING_ASSERT(pos >= begin()); - ABSL_HARDENING_ASSERT(pos <= end()); - - value_type dealias(std::forward(args)...); - return storage_.Insert(pos, - IteratorValueAdapter>( - MoveIterator(std::addressof(dealias))), - 1); - } - - // `InlinedVector::emplace_back(...)` - // - // Constructs and inserts an element using `args...` in the inlined vector at - // `end()`, returning a `reference` to the newly emplaced element. - template - reference emplace_back(Args&&... args) { - return storage_.EmplaceBack(std::forward(args)...); - } - - // `InlinedVector::push_back(...)` - // - // Inserts a copy of `v` in the inlined vector at `end()`. - void push_back(const_reference v) { static_cast(emplace_back(v)); } - - // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` - // using move semantics. - void push_back(value_type&& v) { - static_cast(emplace_back(std::move(v))); - } - - // `InlinedVector::pop_back()` - // - // Destroys the element at `back()`, reducing the size by `1`. - void pop_back() noexcept { - ABSL_HARDENING_ASSERT(!empty()); - - AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); - storage_.SubtractSize(1); - } - - // `InlinedVector::erase(...)` - // - // Erases the element at `pos`, returning an `iterator` pointing to where the - // erased element was located. - // - // NOTE: may return `end()`, which is not dereferencable. 
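Because `erase` returns an iterator to the position the removed element occupied (possibly `end()`, which must not be dereferenced; the single-element overload appears just below in the pre-reformat text), the usual erase-while-iterating idiom applies. A sketch, with an illustrative predicate:

```cpp
#include <cassert>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6};

  // Remove the even values, advancing only when nothing was erased.
  for (auto it = v.begin(); it != v.end();) {
    if (*it % 2 == 0) {
      // The returned iterator points at the element that followed the
      // erased one (or at end()).
      it = v.erase(it);
    } else {
      ++it;
    }
  }

  assert((v == absl::InlinedVector<int, 8>{1, 3, 5}));
  return 0;
}
```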
- iterator erase(const_iterator pos) { - ABSL_HARDENING_ASSERT(pos >= begin()); - ABSL_HARDENING_ASSERT(pos < end()); - - return storage_.Erase(pos, pos + 1); - } - - // Overload of `InlinedVector::erase(...)` that erases every element in the - // range [`from`, `to`), returning an `iterator` pointing to where the first - // erased element was located. - // - // NOTE: may return `end()`, which is not dereferencable. - iterator erase(const_iterator from, const_iterator to) { - ABSL_HARDENING_ASSERT(from >= begin()); - ABSL_HARDENING_ASSERT(from <= to); - ABSL_HARDENING_ASSERT(to <= end()); - - if (ABSL_PREDICT_TRUE(from != to)) { - return storage_.Erase(from, to); - } else { - return const_cast(from); + // `operator!=(...)` + // + // Tests for value-inequality of two inlined vectors. + template + bool operator!=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(a == b); } - } - - // `InlinedVector::clear()` - // - // Destroys all elements in the inlined vector, setting the size to `0` and - // deallocating any held memory. - void clear() noexcept { - inlined_vector_internal::DestroyAdapter::DestroyElements( - storage_.GetAllocator(), data(), size()); - storage_.DeallocateIfAllocated(); - - storage_.SetInlinedSize(0); - } - - // `InlinedVector::reserve(...)` - // - // Ensures that there is enough room for at least `n` elements. - void reserve(size_type n) { storage_.Reserve(n); } - - // `InlinedVector::shrink_to_fit()` - // - // Attempts to reduce memory usage by moving elements to (or keeping elements - // in) the smallest available buffer sufficient for containing `size()` - // elements. - // - // If `size()` is sufficiently small, the elements will be moved into (or kept - // in) the inlined space. - void shrink_to_fit() { - if (storage_.GetIsAllocated()) { - storage_.ShrinkToFit(); + + // `operator<(...)` + // + // Tests whether the value of an inlined vector is less than the value of + // another inlined vector using a lexicographical comparison algorithm. + template + bool operator<(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + auto a_data = a.data(); + auto b_data = b.data(); + return std::lexicographical_compare(a_data, a_data + a.size(), b_data, b_data + b.size()); } - } - - // `InlinedVector::swap(...)` - // - // Swaps the contents of the inlined vector with `other`. - void swap(InlinedVector& other) { - if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { - storage_.Swap(std::addressof(other.storage_)); + + // `operator>(...)` + // + // Tests whether the value of an inlined vector is greater than the value of + // another inlined vector using a lexicographical comparison algorithm. + template + bool operator>(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return b < a; } - } - private: - template - friend H AbslHashValue(H h, const absl::InlinedVector& a); + // `operator<=(...)` + // + // Tests whether the value of an inlined vector is less than or equal to the + // value of another inlined vector using a lexicographical comparison algorithm. + template + bool operator<=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(b < a); + } - Storage storage_; -}; + // `operator>=(...)` + // + // Tests whether the value of an inlined vector is greater than or equal to the + // value of another inlined vector using a lexicographical comparison algorithm. 
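The relational operators in this section compare element sequences lexicographically (`operator<` drives `>`, `<=`, and `>=`), while `operator==` requires equal sizes and equal elements. A short illustration:

```cpp
#include <cassert>

#include "absl/container/inlined_vector.h"

int main() {
  absl::InlinedVector<int, 4> a = {1, 2, 3};
  absl::InlinedVector<int, 4> b = {1, 2, 4};
  absl::InlinedVector<int, 4> c = {1, 2};

  assert(a != b);
  assert(a < b);  // lexicographical: the first difference, 3 < 4, decides
  assert(c < a);  // a proper prefix compares less
  assert(a >= c && b > a);
  return 0;
}
```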
+ template + bool operator>=(const absl::InlinedVector& a, const absl::InlinedVector& b) + { + return !(a < b); + } -// ----------------------------------------------------------------------------- -// InlinedVector Non-Member Functions -// ----------------------------------------------------------------------------- + // `AbslHashValue(...)` + // + // Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to + // call this directly. + template + H AbslHashValue(H h, const absl::InlinedVector& a) + { + auto size = a.size(); + return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); + } -// `swap(...)` -// -// Swaps the contents of two inlined vectors. -template -void swap(absl::InlinedVector& a, - absl::InlinedVector& b) noexcept(noexcept(a.swap(b))) { - a.swap(b); -} - -// `operator==(...)` -// -// Tests for value-equality of two inlined vectors. -template -bool operator==(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - auto a_data = a.data(); - auto b_data = b.data(); - return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size()); -} - -// `operator!=(...)` -// -// Tests for value-inequality of two inlined vectors. -template -bool operator!=(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - return !(a == b); -} - -// `operator<(...)` -// -// Tests whether the value of an inlined vector is less than the value of -// another inlined vector using a lexicographical comparison algorithm. -template -bool operator<(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - auto a_data = a.data(); - auto b_data = b.data(); - return std::lexicographical_compare(a_data, a_data + a.size(), b_data, - b_data + b.size()); -} - -// `operator>(...)` -// -// Tests whether the value of an inlined vector is greater than the value of -// another inlined vector using a lexicographical comparison algorithm. -template -bool operator>(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - return b < a; -} - -// `operator<=(...)` -// -// Tests whether the value of an inlined vector is less than or equal to the -// value of another inlined vector using a lexicographical comparison algorithm. -template -bool operator<=(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - return !(b < a); -} - -// `operator>=(...)` -// -// Tests whether the value of an inlined vector is greater than or equal to the -// value of another inlined vector using a lexicographical comparison algorithm. -template -bool operator>=(const absl::InlinedVector& a, - const absl::InlinedVector& b) { - return !(a < b); -} - -// `AbslHashValue(...)` -// -// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to -// call this directly. 
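`AbslHashValue` (new-format definition above, pre-reformat definition just below) hashes the contiguous elements plus the size, which makes `InlinedVector` usable with `absl::Hash` and therefore as a key in Abseil's hashed containers. The sketch below uses `absl::flat_hash_set` purely as an illustration of that:

```cpp
#include <cassert>

#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/hash/hash.h"

int main() {
  using Vec = absl::InlinedVector<int, 4>;

  Vec a = {1, 2, 3};
  Vec b = {1, 2, 3};

  // Equal vectors hash equally, as required for use as a hash key.
  assert(absl::Hash<Vec>{}(a) == absl::Hash<Vec>{}(b));

  absl::flat_hash_set<Vec> set;
  set.insert(a);
  assert(set.contains(b));
  return 0;
}
```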
-template -H AbslHashValue(H h, const absl::InlinedVector& a) { - auto size = a.size(); - return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); -} - -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INLINED_VECTOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree.h b/CAPI/cpp/grpc/include/absl/container/internal/btree.h index 01f4e74..f6fc781 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/btree.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/btree.h @@ -71,9 +71,11 @@ #include "absl/types/compare.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS #error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set @@ -85,2770 +87,3333 @@ namespace container_internal { #define ABSL_BTREE_ENABLE_GENERATIONS #endif -template -using compare_result_t = absl::result_of_t; - -// A helper class that indicates if the Compare parameter is a key-compare-to -// comparator. -template -using btree_is_key_compare_to = - std::is_convertible, absl::weak_ordering>; - -struct StringBtreeDefaultLess { - using is_transparent = void; - - StringBtreeDefaultLess() = default; - - // Compatibility constructor. - StringBtreeDefaultLess(std::less) {} // NOLINT - StringBtreeDefaultLess(std::less) {} // NOLINT - - // Allow converting to std::less for use in key_comp()/value_comp(). - explicit operator std::less() const { return {}; } - explicit operator std::less() const { return {}; } - explicit operator std::less() const { return {}; } - - absl::weak_ordering operator()(absl::string_view lhs, - absl::string_view rhs) const { - return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); - } - StringBtreeDefaultLess(std::less) {} // NOLINT - absl::weak_ordering operator()(const absl::Cord &lhs, - const absl::Cord &rhs) const { - return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); - } - absl::weak_ordering operator()(const absl::Cord &lhs, - absl::string_view rhs) const { - return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); - } - absl::weak_ordering operator()(absl::string_view lhs, - const absl::Cord &rhs) const { - return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs)); - } -}; - -struct StringBtreeDefaultGreater { - using is_transparent = void; - - StringBtreeDefaultGreater() = default; - - StringBtreeDefaultGreater(std::greater) {} // NOLINT - StringBtreeDefaultGreater(std::greater) {} // NOLINT - - // Allow converting to std::greater for use in key_comp()/value_comp(). 
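Moving into btree.h: the `StringBtreeDefaultLess`/`StringBtreeDefaultGreater` comparators in this hunk replace the common string comparators with transparent, three-way versions, so b-tree containers can compare `std::string`, `absl::string_view`, and `absl::Cord` keys against each other without conversions. In user-facing terms, this is what enables heterogeneous lookup as in the sketch below (container and keys are illustrative):

```cpp
#include <cassert>
#include <string>

#include "absl/container/btree_set.h"
#include "absl/strings/string_view.h"

int main() {
  absl::btree_set<std::string> names = {"ada", "grace", "linus"};

  // std::less<std::string> is adapted to StringBtreeDefaultLess internally,
  // which is transparent: lookups by string_view need no temporary string.
  absl::string_view key = "grace";
  assert(names.find(key) != names.end());
  assert(names.count("ada") == 1);
  return 0;
}
```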
- explicit operator std::greater() const { return {}; } - explicit operator std::greater() const { return {}; } - explicit operator std::greater() const { return {}; } - - absl::weak_ordering operator()(absl::string_view lhs, - absl::string_view rhs) const { - return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); - } - StringBtreeDefaultGreater(std::greater) {} // NOLINT - absl::weak_ordering operator()(const absl::Cord &lhs, - const absl::Cord &rhs) const { - return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); - } - absl::weak_ordering operator()(const absl::Cord &lhs, - absl::string_view rhs) const { - return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs)); - } - absl::weak_ordering operator()(absl::string_view lhs, - const absl::Cord &rhs) const { - return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); - } -}; - -// See below comments for checked_compare. -template ::value> -struct checked_compare_base : Compare { - using Compare::Compare; - explicit checked_compare_base(Compare c) : Compare(std::move(c)) {} - const Compare &comp() const { return *this; } -}; -template -struct checked_compare_base { - explicit checked_compare_base(Compare c) : compare(std::move(c)) {} - const Compare &comp() const { return compare; } - Compare compare; -}; - -// A mechanism for opting out of checked_compare for use only in btree_test.cc. -struct BtreeTestOnlyCheckedCompareOptOutBase {}; - -// A helper class to adapt the specified comparator for two use cases: -// (1) When using common Abseil string types with common comparison functors, -// convert a boolean comparison into a three-way comparison that returns an -// `absl::weak_ordering`. This helper class is specialized for -// less, greater, less, -// greater, less, and greater. -// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see -// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever -// a comparison is made, we will make assertions to verify that the comparator -// is valid. -template -struct key_compare_adapter { - // Inherit from checked_compare_base to support function pointers and also - // keep empty-base-optimization (EBO) support for classes. - // Note: we can't use CompressedTuple here because that would interfere - // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a - // CompressedTuple and nested `CompressedTuple`s don't support EBO. - // TODO(b/214288561): use CompressedTuple instead once it supports EBO for - // nested `CompressedTuple`s. - struct checked_compare : checked_compare_base { - private: - using Base = typename checked_compare::checked_compare_base; - using Base::comp; - - // If possible, returns whether `t` is equivalent to itself. We can only do - // this for `Key`s because we can't be sure that it's safe to call - // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a - // compilation failure inside the implementation of the comparison operator. - bool is_self_equivalent(const Key &k) const { - // Note: this works for both boolean and three-way comparators. - return comp()(k, k) == 0; - } - // If we can't compare `t` with itself, returns true unconditionally. - template - bool is_self_equivalent(const T &) const { - return true; - } - - public: - using Base::Base; - checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT - - // Allow converting to Compare for use in key_comp()/value_comp(). 
- explicit operator Compare() const { return comp(); } - - template >::value, - int> = 0> - bool operator()(const T &lhs, const U &rhs) const { - // NOTE: if any of these assertions fail, then the comparator does not - // establish a strict-weak-ordering (see - // https://en.cppreference.com/w/cpp/named_req/Compare). - assert(is_self_equivalent(lhs)); - assert(is_self_equivalent(rhs)); - const bool lhs_comp_rhs = comp()(lhs, rhs); - assert(!lhs_comp_rhs || !comp()(rhs, lhs)); - return lhs_comp_rhs; - } - - template < - typename T, typename U, - absl::enable_if_t, - absl::weak_ordering>::value, - int> = 0> - absl::weak_ordering operator()(const T &lhs, const U &rhs) const { - // NOTE: if any of these assertions fail, then the comparator does not - // establish a strict-weak-ordering (see - // https://en.cppreference.com/w/cpp/named_req/Compare). - assert(is_self_equivalent(lhs)); - assert(is_self_equivalent(rhs)); - const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); + template + using compare_result_t = absl::result_of_t; + + // A helper class that indicates if the Compare parameter is a key-compare-to + // comparator. + template + using btree_is_key_compare_to = + std::is_convertible, absl::weak_ordering>; + + struct StringBtreeDefaultLess + { + using is_transparent = void; + + StringBtreeDefaultLess() = default; + + // Compatibility constructor. + StringBtreeDefaultLess(std::less) + { + } // NOLINT + StringBtreeDefaultLess(std::less) + { + } // NOLINT + + // Allow converting to std::less for use in key_comp()/value_comp(). + explicit operator std::less() const + { + return {}; + } + explicit operator std::less() const + { + return {}; + } + explicit operator std::less() const + { + return {}; + } + + absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); + } + StringBtreeDefaultLess(std::less) + { + } // NOLINT + absl::weak_ordering operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs)); + } + }; + + struct StringBtreeDefaultGreater + { + using is_transparent = void; + + StringBtreeDefaultGreater() = default; + + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + + // Allow converting to std::greater for use in key_comp()/value_comp(). 
+ explicit operator std::greater() const + { + return {}; + } + explicit operator std::greater() const + { + return {}; + } + explicit operator std::greater() const + { + return {}; + } + + absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); + } + StringBtreeDefaultGreater(std::greater) + { + } // NOLINT + absl::weak_ordering operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } + absl::weak_ordering operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs)); + } + absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); + } + }; + + // See below comments for checked_compare. + template::value> + struct checked_compare_base : Compare + { + using Compare::Compare; + explicit checked_compare_base(Compare c) : + Compare(std::move(c)) + { + } + const Compare& comp() const + { + return *this; + } + }; + template + struct checked_compare_base + { + explicit checked_compare_base(Compare c) : + compare(std::move(c)) + { + } + const Compare& comp() const + { + return compare; + } + Compare compare; + }; + + // A mechanism for opting out of checked_compare for use only in btree_test.cc. + struct BtreeTestOnlyCheckedCompareOptOutBase + { + }; + + // A helper class to adapt the specified comparator for two use cases: + // (1) When using common Abseil string types with common comparison functors, + // convert a boolean comparison into a three-way comparison that returns an + // `absl::weak_ordering`. This helper class is specialized for + // less, greater, less, + // greater, less, and greater. + // (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever + // a comparison is made, we will make assertions to verify that the comparator + // is valid. + template + struct key_compare_adapter + { + // Inherit from checked_compare_base to support function pointers and also + // keep empty-base-optimization (EBO) support for classes. + // Note: we can't use CompressedTuple here because that would interfere + // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a + // CompressedTuple and nested `CompressedTuple`s don't support EBO. + // TODO(b/214288561): use CompressedTuple instead once it supports EBO for + // nested `CompressedTuple`s. + struct checked_compare : checked_compare_base + { + private: + using Base = typename checked_compare::checked_compare_base; + using Base::comp; + + // If possible, returns whether `t` is equivalent to itself. We can only do + // this for `Key`s because we can't be sure that it's safe to call + // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a + // compilation failure inside the implementation of the comparison operator. + bool is_self_equivalent(const Key& k) const + { + // Note: this works for both boolean and three-way comparators. + return comp()(k, k) == 0; + } + // If we can't compare `t` with itself, returns true unconditionally. 
+ template + bool is_self_equivalent(const T&) const + { + return true; + } + + public: + using Base::Base; + checked_compare(Compare comp) : + Base(std::move(comp)) + { + } // NOLINT + + // Allow converting to Compare for use in key_comp()/value_comp(). + explicit operator Compare() const + { + return comp(); + } + + template>::value, int> = 0> + bool operator()(const T& lhs, const U& rhs) const + { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const bool lhs_comp_rhs = comp()(lhs, rhs); + assert(!lhs_comp_rhs || !comp()(rhs, lhs)); + return lhs_comp_rhs; + } + + template< + typename T, + typename U, + absl::enable_if_t, absl::weak_ordering>::value, int> = 0> + absl::weak_ordering operator()(const T& lhs, const U& rhs) const + { + // NOTE: if any of these assertions fail, then the comparator does not + // establish a strict-weak-ordering (see + // https://en.cppreference.com/w/cpp/named_req/Compare). + assert(is_self_equivalent(lhs)); + assert(is_self_equivalent(rhs)); + const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); #ifndef NDEBUG - const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); - if (lhs_comp_rhs > 0) { - assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); - } else if (lhs_comp_rhs == 0) { - assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); - } else { - assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); - } + const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); + if (lhs_comp_rhs > 0) + { + assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); + } + else if (lhs_comp_rhs == 0) + { + assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); + } + else + { + assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); + } #endif - return lhs_comp_rhs; - } - }; - using type = absl::conditional_t< - std::is_base_of::value, - Compare, checked_compare>; -}; - -template <> -struct key_compare_adapter, std::string> { - using type = StringBtreeDefaultLess; -}; - -template <> -struct key_compare_adapter, std::string> { - using type = StringBtreeDefaultGreater; -}; - -template <> -struct key_compare_adapter, absl::string_view> { - using type = StringBtreeDefaultLess; -}; - -template <> -struct key_compare_adapter, absl::string_view> { - using type = StringBtreeDefaultGreater; -}; - -template <> -struct key_compare_adapter, absl::Cord> { - using type = StringBtreeDefaultLess; -}; - -template <> -struct key_compare_adapter, absl::Cord> { - using type = StringBtreeDefaultGreater; -}; - -// Detects an 'absl_btree_prefer_linear_node_search' member. This is -// a protocol used as an opt-in or opt-out of linear search. -// -// For example, this would be useful for key types that wrap an integer -// and define their own cheap operator<(). For example: -// -// class K { -// public: -// using absl_btree_prefer_linear_node_search = std::true_type; -// ... -// private: -// friend bool operator<(K a, K b) { return a.k_ < b.k_; } -// int k_; -// }; -// -// btree_map m; // Uses linear search -// -// If T has the preference tag, then it has a preference. -// Btree will use the tag's truth value. 
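The assertions above are what the `checked_compare` adapter verifies in debug builds: every key must be equivalent to itself, and `comp(a, b)` and `comp(b, a)` must be mutually consistent. A standalone sketch of the same invariants applied to a deliberately broken comparator (this is not the adapter itself, just the property it checks):

```cpp
#include <cassert>
#include <functional>

// A broken "less than or equal" comparator: not a strict weak ordering,
// because broken(x, x) is true.
struct BrokenLess {
  bool operator()(int a, int b) const { return a <= b; }
};

// The invariants checked_compare asserts for a boolean comparator.
template <typename Compare>
bool SatisfiesStrictWeakOrderingChecks(const Compare& comp, int a, int b) {
  const bool self_ok = !comp(a, a) && !comp(b, b);   // irreflexivity
  const bool asym_ok = !(comp(a, b) && comp(b, a));  // asymmetry
  return self_ok && asym_ok;
}

int main() {
  assert(SatisfiesStrictWeakOrderingChecks(std::less<int>(), 1, 2));
  assert(!SatisfiesStrictWeakOrderingChecks(BrokenLess{}, 1, 2));
  return 0;
}
```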
-template -struct has_linear_node_search_preference : std::false_type {}; -template -struct prefers_linear_node_search : std::false_type {}; -template -struct has_linear_node_search_preference< - T, absl::void_t> - : std::true_type {}; -template -struct prefers_linear_node_search< - T, absl::void_t> - : T::absl_btree_prefer_linear_node_search {}; - -template -constexpr bool compare_has_valid_result_type() { - using compare_result_type = compare_result_t; - return std::is_same::value || - std::is_convertible::value; -} - -template -class map_value_compare { - template - friend class btree; - - // Note: this `protected` is part of the API of std::map::value_compare. See - // https://en.cppreference.com/w/cpp/container/map/value_compare. - protected: - explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {} - - original_key_compare comp; // NOLINT - - public: - auto operator()(const value_type &lhs, const value_type &rhs) const - -> decltype(comp(lhs.first, rhs.first)) { - return comp(lhs.first, rhs.first); - } -}; - -template -struct common_params { - using original_key_compare = Compare; - - // If Compare is a common comparator for a string-like type, then we adapt it - // to use heterogeneous lookup and to be a key-compare-to comparator. - // We also adapt the comparator to diagnose invalid comparators in debug mode. - // We disable this when `Compare` is invalid in a way that will cause - // adaptation to fail (having invalid return type) so that we can give a - // better compilation failure in static_assert_validation. If we don't do - // this, then there will be cascading compilation failures that are confusing - // for users. - using key_compare = - absl::conditional_t(), - Compare, - typename key_compare_adapter::type>; - - static constexpr bool kIsKeyCompareStringAdapted = - std::is_same::value || - std::is_same::value; - static constexpr bool kIsKeyCompareTransparent = - IsTransparent::value || - kIsKeyCompareStringAdapted; - static constexpr bool kEnableGenerations = + return lhs_comp_rhs; + } + }; + using type = absl::conditional_t< + std::is_base_of::value, + Compare, + checked_compare>; + }; + + template<> + struct key_compare_adapter, std::string> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, std::string> + { + using type = StringBtreeDefaultGreater; + }; + + template<> + struct key_compare_adapter, absl::string_view> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, absl::string_view> + { + using type = StringBtreeDefaultGreater; + }; + + template<> + struct key_compare_adapter, absl::Cord> + { + using type = StringBtreeDefaultLess; + }; + + template<> + struct key_compare_adapter, absl::Cord> + { + using type = StringBtreeDefaultGreater; + }; + + // Detects an 'absl_btree_prefer_linear_node_search' member. This is + // a protocol used as an opt-in or opt-out of linear search. + // + // For example, this would be useful for key types that wrap an integer + // and define their own cheap operator<(). For example: + // + // class K { + // public: + // using absl_btree_prefer_linear_node_search = std::true_type; + // ... + // private: + // friend bool operator<(K a, K b) { return a.k_ < b.k_; } + // int k_; + // }; + // + // btree_map m; // Uses linear search + // + // If T has the preference tag, then it has a preference. + // Btree will use the tag's truth value. 
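The comment above describes the opt-in protocol; here is a compilable version of its example, a key type wrapping an `int` that asks btree nodes to use linear rather than binary search. The detection traits themselves follow below. (The tag name comes from the comment; everything else in the sketch is illustrative.)

```cpp
#include <cassert>
#include <type_traits>

#include "absl/container/btree_map.h"

class K {
 public:
  // Opt-in tag detected by has_linear_node_search_preference /
  // prefers_linear_node_search.
  using absl_btree_prefer_linear_node_search = std::true_type;

  explicit K(int k) : k_(k) {}

 private:
  friend bool operator<(K a, K b) { return a.k_ < b.k_; }
  int k_;
};

int main() {
  absl::btree_map<K, int> m;  // nodes will be searched linearly
  m.insert({K(1), 10});
  m.insert({K(2), 20});
  assert(m.find(K(2))->second == 20);
  return 0;
}
```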
+ template + struct has_linear_node_search_preference : std::false_type + { + }; + template + struct prefers_linear_node_search : std::false_type + { + }; + template + struct has_linear_node_search_preference< + T, + absl::void_t> : std::true_type + { + }; + template + struct prefers_linear_node_search< + T, + absl::void_t> : T::absl_btree_prefer_linear_node_search + { + }; + + template + constexpr bool compare_has_valid_result_type() + { + using compare_result_type = compare_result_t; + return std::is_same::value || + std::is_convertible::value; + } + + template + class map_value_compare + { + template + friend class btree; + + // Note: this `protected` is part of the API of std::map::value_compare. See + // https://en.cppreference.com/w/cpp/container/map/value_compare. + + protected: + explicit map_value_compare(original_key_compare c) : + comp(std::move(c)) + { + } + + original_key_compare comp; // NOLINT + + public: + auto operator()(const value_type& lhs, const value_type& rhs) const + -> decltype(comp(lhs.first, rhs.first)) + { + return comp(lhs.first, rhs.first); + } + }; + + template + struct common_params + { + using original_key_compare = Compare; + + // If Compare is a common comparator for a string-like type, then we adapt it + // to use heterogeneous lookup and to be a key-compare-to comparator. + // We also adapt the comparator to diagnose invalid comparators in debug mode. + // We disable this when `Compare` is invalid in a way that will cause + // adaptation to fail (having invalid return type) so that we can give a + // better compilation failure in static_assert_validation. If we don't do + // this, then there will be cascading compilation failures that are confusing + // for users. + using key_compare = + absl::conditional_t(), Compare, typename key_compare_adapter::type>; + + static constexpr bool kIsKeyCompareStringAdapted = + std::is_same::value || + std::is_same::value; + static constexpr bool kIsKeyCompareTransparent = + IsTransparent::value || + kIsKeyCompareStringAdapted; + static constexpr bool kEnableGenerations = #ifdef ABSL_BTREE_ENABLE_GENERATIONS - true; + true; #else - false; + false; #endif - // A type which indicates if we have a key-compare-to functor or a plain old - // key-compare functor. - using is_key_compare_to = btree_is_key_compare_to; - - using allocator_type = Alloc; - using key_type = Key; - using size_type = size_t; - using difference_type = ptrdiff_t; - - using slot_policy = SlotPolicy; - using slot_type = typename slot_policy::slot_type; - using value_type = typename slot_policy::value_type; - using init_type = typename slot_policy::mutable_value_type; - using pointer = value_type *; - using const_pointer = const value_type *; - using reference = value_type &; - using const_reference = const value_type &; - - using value_compare = - absl::conditional_t, - original_key_compare>; - using is_map_container = std::integral_constant; - - // For the given lookup key type, returns whether we can have multiple - // equivalent keys in the btree. If this is a multi-container, then we can. - // Otherwise, we can have multiple equivalent keys only if all of the - // following conditions are met: - // - The comparator is transparent. - // - The lookup key type is not the same as key_type. - // - The comparator is not a StringBtreeDefault{Less,Greater} comparator - // that we know has the same equivalence classes for all lookup types. 
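`can_have_multiple_equivalent_keys` (its pre-reformat definition follows just below) captures when a single lookup key may match several stored keys: in multi-containers, or when a transparent comparator is used with a lookup type other than `key_type`. A sketch of the second case, using a hypothetical transparent comparator that orders stored pairs fully but compares a bare `int` against `.first` only:

```cpp
#include <cassert>
#include <iterator>
#include <utility>

#include "absl/container/btree_set.h"

// Illustrative transparent comparator, not part of Abseil: stored pairs are
// ordered lexicographically, but an int lookup key compares against .first,
// so one int can be equivalent to several distinct stored pairs.
struct PairLess {
  using is_transparent = void;
  bool operator()(const std::pair<int, int>& a,
                  const std::pair<int, int>& b) const {
    return a < b;
  }
  bool operator()(int a, const std::pair<int, int>& b) const {
    return a < b.first;
  }
  bool operator()(const std::pair<int, int>& a, int b) const {
    return a.first < b;
  }
};

int main() {
  absl::btree_set<std::pair<int, int>, PairLess> s = {{1, 0}, {1, 7}, {2, 0}};

  // The lookup key type (int) differs from key_type and the comparator is
  // transparent, so one lookup key can match multiple stored keys.
  auto range = s.equal_range(1);
  assert(std::distance(range.first, range.second) == 2);
  return 0;
}
```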
- template - constexpr static bool can_have_multiple_equivalent_keys() { - return IsMulti || (IsTransparent::value && - !std::is_same::value && - !kIsKeyCompareStringAdapted); - } - - enum { - kTargetNodeSize = TargetNodeSize, - - // Upper bound for the available space for slots. This is largest for leaf - // nodes, which have overhead of at least a pointer + 4 bytes (for storing - // 3 field_types and an enum). - kNodeSlotSpace = - TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), - }; - - // This is an integral type large enough to hold as many slots as will fit a - // node of TargetNodeSize bytes. - using node_count_type = - absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > - (std::numeric_limits::max)()), - uint16_t, uint8_t>; // NOLINT - - // The following methods are necessary for passing this struct as PolicyTraits - // for node_handle and/or are used within btree. - static value_type &element(slot_type *slot) { - return slot_policy::element(slot); - } - static const value_type &element(const slot_type *slot) { - return slot_policy::element(slot); - } - template - static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { - slot_policy::construct(alloc, slot, std::forward(args)...); - } - static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { - slot_policy::construct(alloc, slot, other); - } - static void destroy(Alloc *alloc, slot_type *slot) { - slot_policy::destroy(alloc, slot); - } - static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { - slot_policy::transfer(alloc, new_slot, old_slot); - } -}; - -// An adapter class that converts a lower-bound compare into an upper-bound -// compare. Note: there is no need to make a version of this adapter specialized -// for key-compare-to functors because the upper-bound (the first value greater -// than the input) is never an exact match. -template -struct upper_bound_adapter { - explicit upper_bound_adapter(const Compare &c) : comp(c) {} - template - bool operator()(const K1 &a, const K2 &b) const { - // Returns true when a is not greater than b. - return !compare_internal::compare_result_as_less_than(comp(b, a)); - } - - private: - Compare comp; -}; - -enum class MatchKind : uint8_t { kEq, kNe }; - -template -struct SearchResult { - V value; - MatchKind match; - - static constexpr bool HasMatch() { return true; } - bool IsEq() const { return match == MatchKind::kEq; } -}; - -// When we don't use CompareTo, `match` is not present. -// This ensures that callers can't use it accidentally when it provides no -// useful information. -template -struct SearchResult { - SearchResult() {} - explicit SearchResult(V v) : value(v) {} - SearchResult(V v, MatchKind /*match*/) : value(v) {} - - V value; - - static constexpr bool HasMatch() { return false; } - static constexpr bool IsEq() { return false; } -}; - -// A node in the btree holding. The same node type is used for both internal -// and leaf nodes in the btree, though the nodes are allocated in such a way -// that the children array is only valid in internal nodes. 
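Stepping back to `upper_bound_adapter`, defined a little earlier in this hunk, before the `btree_node` class that follows: it turns a lower-bound style comparison into an upper-bound one by reporting "a is not greater than b" as "not less", so a lower-bound search over the adapted comparator lands on the first element strictly greater than the key. A standalone sketch of the same trick with `std::lower_bound` and a plain boolean comparator (the names here are mine, not Abseil's):

```cpp
#include <algorithm>
#include <cassert>
#include <functional>
#include <vector>

// Adapts a "less than" comparator so that a lower-bound search with the
// adapted comparator yields the classic upper bound.
template <typename Compare>
struct UpperBoundAdapter {
  Compare comp;
  template <typename A, typename B>
  bool operator()(const A& a, const B& b) const {
    return !comp(b, a);  // true when a is not greater than b
  }
};

int main() {
  const std::vector<int> v = {1, 2, 2, 2, 3};
  UpperBoundAdapter<std::less<int>> upper{std::less<int>{}};

  auto it = std::lower_bound(v.begin(), v.end(), 2, upper);
  assert(it == std::upper_bound(v.begin(), v.end(), 2));
  assert(*it == 3);
  return 0;
}
```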
-template -class btree_node { - using is_key_compare_to = typename Params::is_key_compare_to; - using field_type = typename Params::node_count_type; - using allocator_type = typename Params::allocator_type; - using slot_type = typename Params::slot_type; - using original_key_compare = typename Params::original_key_compare; - - public: - using params_type = Params; - using key_type = typename Params::key_type; - using value_type = typename Params::value_type; - using pointer = typename Params::pointer; - using const_pointer = typename Params::const_pointer; - using reference = typename Params::reference; - using const_reference = typename Params::const_reference; - using key_compare = typename Params::key_compare; - using size_type = typename Params::size_type; - using difference_type = typename Params::difference_type; - - // Btree decides whether to use linear node search as follows: - // - If the comparator expresses a preference, use that. - // - If the key expresses a preference, use that. - // - If the key is arithmetic and the comparator is std::less or - // std::greater, choose linear. - // - Otherwise, choose binary. - // TODO(ezb): Might make sense to add condition(s) based on node-size. - using use_linear_search = std::integral_constant< - bool, has_linear_node_search_preference::value - ? prefers_linear_node_search::value - : has_linear_node_search_preference::value - ? prefers_linear_node_search::value - : std::is_arithmetic::value && - (std::is_same, - original_key_compare>::value || - std::is_same, - original_key_compare>::value)>; - - // This class is organized by absl::container_internal::Layout as if it had - // the following structure: - // // A pointer to the node's parent. - // btree_node *parent; - // - // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a - // // generation integer in order to check that when iterators are - // // used, they haven't been invalidated already. Only the generation on - // // the root is used, but we have one on each node because whether a node - // // is root or not can change. - // uint32_t generation; - // - // // The position of the node in the node's parent. - // field_type position; - // // The index of the first populated value in `values`. - // // TODO(ezb): right now, `start` is always 0. Update insertion/merge - // // logic to allow for floating storage within nodes. - // field_type start; - // // The index after the last populated value in `values`. Currently, this - // // is the same as the count of values. - // field_type finish; - // // The maximum number of values the node can hold. This is an integer in - // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf - // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal - // // nodes (even though there are still kNodeSlots values in the node). - // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) - // // to free extra bits for is_root, etc. - // field_type max_count; - // - // // The array of values. The capacity is `max_count` for leaf nodes and - // // kNodeSlots for internal nodes. Only the values in - // // [start, finish) have been initialized and are valid. - // slot_type values[max_count]; - // - // // The array of child pointers. The keys in children[i] are all less - // // than key(i). The keys in children[i + 1] are all greater than key(i). - // // There are 0 children for leaf nodes and kNodeSlots + 1 children for - // // internal nodes. 
- // btree_node *children[kNodeSlots + 1]; - // - // This class is only constructed by EmptyNodeType. Normally, pointers to the - // layout above are allocated, cast to btree_node*, and de-allocated within - // the btree implementation. - ~btree_node() = default; - btree_node(btree_node const &) = delete; - btree_node &operator=(btree_node const &) = delete; - - // Public for EmptyNodeType. - constexpr static size_type Alignment() { - static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), - "Alignment of all nodes must be equal."); - return InternalLayout().Alignment(); - } - - protected: - btree_node() = default; - - private: - using layout_type = - absl::container_internal::Layout; - constexpr static size_type SizeWithNSlots(size_type n) { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ n, - /*children*/ 0) - .AllocSize(); - } - // A lower bound for the overhead of fields other than slots in a leaf node. - constexpr static size_type MinimumOverhead() { - return SizeWithNSlots(1) - sizeof(slot_type); - } - - // Compute how many values we can fit onto a leaf node taking into account - // padding. - constexpr static size_type NodeTargetSlots(const size_type begin, - const size_type end) { - return begin == end ? begin - : SizeWithNSlots((begin + end) / 2 + 1) > - params_type::kTargetNodeSize - ? NodeTargetSlots(begin, (begin + end) / 2) - : NodeTargetSlots((begin + end) / 2 + 1, end); - } - - enum { - kTargetNodeSize = params_type::kTargetNodeSize, - kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), - - // We need a minimum of 3 slots per internal node in order to perform - // splitting (1 value for the two nodes involved in the split and 1 value - // propagated to the parent as the delimiter for the split). For performance - // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy - // of 1/3 (for a node, not a b-tree). - kMinNodeSlots = 4, - - kNodeSlots = - kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, - - // The node is internal (i.e. is not a leaf node) if and only if `max_count` - // has this value. - kInternalNodeMaxCount = 0, - }; - - // Leaves can have less than kNodeSlots values. - constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ slot_count, - /*children*/ 0); - } - constexpr static layout_type InternalLayout() { - return layout_type( - /*parent*/ 1, - /*generation*/ params_type::kEnableGenerations ? 1 : 0, - /*position, start, finish, max_count*/ 4, - /*slots*/ kNodeSlots, - /*children*/ kNodeSlots + 1); - } - constexpr static size_type LeafSize(const int slot_count = kNodeSlots) { - return LeafLayout(slot_count).AllocSize(); - } - constexpr static size_type InternalSize() { - return InternalLayout().AllocSize(); - } - - // N is the index of the type in the Layout definition. - // ElementType is the Nth type in the Layout definition. - template - inline typename layout_type::template ElementType *GetField() { - // We assert that we don't read from values that aren't there. 
- assert(N < 4 || is_internal()); - return InternalLayout().template Pointer(reinterpret_cast(this)); - } - template - inline const typename layout_type::template ElementType *GetField() const { - assert(N < 4 || is_internal()); - return InternalLayout().template Pointer( - reinterpret_cast(this)); - } - void set_parent(btree_node *p) { *GetField<0>() = p; } - field_type &mutable_finish() { return GetField<2>()[2]; } - slot_type *slot(int i) { return &GetField<3>()[i]; } - slot_type *start_slot() { return slot(start()); } - slot_type *finish_slot() { return slot(finish()); } - const slot_type *slot(int i) const { return &GetField<3>()[i]; } - void set_position(field_type v) { GetField<2>()[0] = v; } - void set_start(field_type v) { GetField<2>()[1] = v; } - void set_finish(field_type v) { GetField<2>()[2] = v; } - // This method is only called by the node init methods. - void set_max_count(field_type v) { GetField<2>()[3] = v; } - - public: - // Whether this is a leaf node or not. This value doesn't change after the - // node is created. - bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; } - // Whether this is an internal node or not. This value doesn't change after - // the node is created. - bool is_internal() const { return !is_leaf(); } - - // Getter for the position of this node in its parent. - field_type position() const { return GetField<2>()[0]; } - - // Getter for the offset of the first value in the `values` array. - field_type start() const { - // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; - assert(GetField<2>()[1] == 0); - return 0; - } - - // Getter for the offset after the last value in the `values` array. - field_type finish() const { return GetField<2>()[2]; } - - // Getters for the number of values stored in this node. - field_type count() const { - assert(finish() >= start()); - return finish() - start(); - } - field_type max_count() const { - // Internal nodes have max_count==kInternalNodeMaxCount. - // Leaf nodes have max_count in [1, kNodeSlots]. - const field_type max_count = GetField<2>()[3]; - return max_count == field_type{kInternalNodeMaxCount} - ? field_type{kNodeSlots} - : max_count; - } - - // Getter for the parent of this node. - btree_node *parent() const { return *GetField<0>(); } - // Getter for whether the node is the root of the tree. The parent of the - // root of the tree is the leftmost node in the tree which is guaranteed to - // be a leaf. - bool is_root() const { return parent()->is_leaf(); } - void make_root() { - assert(parent()->is_root()); - set_generation(parent()->generation()); - set_parent(parent()->parent()); - } - - // Gets the root node's generation integer, which is the one used by the tree. - uint32_t *get_root_generation() const { - assert(params_type::kEnableGenerations); - const btree_node *curr = this; - for (; !curr->is_root(); curr = curr->parent()) continue; - return const_cast(&curr->GetField<1>()[0]); - } - - // Returns the generation for iterator validation. - uint32_t generation() const { - return params_type::kEnableGenerations ? *get_root_generation() : 0; - } - // Updates generation. Should only be called on a root node or during node - // initialization. - void set_generation(uint32_t generation) { - if (params_type::kEnableGenerations) GetField<1>()[0] = generation; - } - // Updates the generation. We do this whenever the node is mutated. 
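The generation machinery above gives each tree a counter that is bumped on mutation (`next_generation`, defined just below); iterators remember the value they saw and can assert it still matches before being used. A minimal standalone sketch of that idea (a simplification, not the btree implementation):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// A toy container whose iterators carry the generation they were created at.
class Checked {
 public:
  struct iterator {
    Checked* owner;
    std::size_t index;
    uint32_t generation;

    int& operator*() const {
      // Fails if the container was mutated after this iterator was obtained.
      assert(generation == owner->generation_ && "iterator invalidated");
      return owner->data_[index];
    }
  };

  void push_back(int v) {
    data_.push_back(v);
    ++generation_;  // every mutation advances the generation
  }
  iterator begin() { return {this, 0, generation_}; }

 private:
  std::vector<int> data_;
  uint32_t generation_ = 0;
};

int main() {
  Checked c;
  c.push_back(1);
  Checked::iterator it = c.begin();
  assert(*it == 1);  // OK: generations match

  c.push_back(2);  // mutation bumps the generation
  // Dereferencing `it` now would trip the assertion in a debug build.
  return 0;
}
```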
- void next_generation() { - if (params_type::kEnableGenerations) ++*get_root_generation(); - } - - // Getters for the key/value at position i in the node. - const key_type &key(int i) const { return params_type::key(slot(i)); } - reference value(int i) { return params_type::element(slot(i)); } - const_reference value(int i) const { return params_type::element(slot(i)); } - - // Getters/setter for the child at position i in the node. - btree_node *child(int i) const { return GetField<4>()[i]; } - btree_node *start_child() const { return child(start()); } - btree_node *&mutable_child(int i) { return GetField<4>()[i]; } - void clear_child(int i) { - absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); - } - void set_child(int i, btree_node *c) { - absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); - mutable_child(i) = c; - c->set_position(i); - } - void init_child(int i, btree_node *c) { - set_child(i, c); - c->set_parent(this); - } - - // Returns the position of the first value whose key is not less than k. - template - SearchResult lower_bound( - const K &k, const key_compare &comp) const { - return use_linear_search::value ? linear_search(k, comp) - : binary_search(k, comp); - } - // Returns the position of the first value whose key is greater than k. - template - int upper_bound(const K &k, const key_compare &comp) const { - auto upper_compare = upper_bound_adapter(comp); - return use_linear_search::value ? linear_search(k, upper_compare).value - : binary_search(k, upper_compare).value; - } - - template - SearchResult::value> - linear_search(const K &k, const Compare &comp) const { - return linear_search_impl(k, start(), finish(), comp, - btree_is_key_compare_to()); - } - - template - SearchResult::value> - binary_search(const K &k, const Compare &comp) const { - return binary_search_impl(k, start(), finish(), comp, - btree_is_key_compare_to()); - } - - // Returns the position of the first value whose key is not less than k using - // linear search performed using plain compare. - template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, - std::false_type /* IsCompareTo */) const { - while (s < e) { - if (!comp(key(s), k)) { - break; - } - ++s; - } - return SearchResult{s}; - } - - // Returns the position of the first value whose key is not less than k using - // linear search performed using compare-to. - template - SearchResult linear_search_impl( - const K &k, int s, const int e, const Compare &comp, - std::true_type /* IsCompareTo */) const { - while (s < e) { - const absl::weak_ordering c = comp(key(s), k); - if (c == 0) { - return {s, MatchKind::kEq}; - } else if (c > 0) { - break; - } - ++s; - } - return {s, MatchKind::kNe}; - } - - // Returns the position of the first value whose key is not less than k using - // binary search performed using plain compare. - template - SearchResult binary_search_impl( - const K &k, int s, int e, const Compare &comp, - std::false_type /* IsCompareTo */) const { - while (s != e) { - const int mid = (s + e) >> 1; - if (comp(key(mid), k)) { - s = mid + 1; - } else { - e = mid; - } - } - return SearchResult{s}; - } - - // Returns the position of the first value whose key is not less than k using - // binary search performed using compare-to. 
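Both search paths above answer the same question, the index of the first key not less than `k`: the linear version scans until `comp(key(s), k)` is false, and the binary version halves `[s, e)`. A standalone sketch of the two plain-compare variants over a sorted array (the compare-to versions, which also report exact matches, continue below):

```cpp
#include <cassert>
#include <functional>
#include <vector>

// First index in [s, e) whose element is not less than k, by linear scan.
template <typename T, typename Compare>
int LinearLowerBound(const std::vector<T>& keys, int s, int e, const T& k,
                     const Compare& comp) {
  while (s < e && comp(keys[s], k)) ++s;
  return s;
}

// Same answer by repeated halving of [s, e).
template <typename T, typename Compare>
int BinaryLowerBound(const std::vector<T>& keys, int s, int e, const T& k,
                     const Compare& comp) {
  while (s != e) {
    const int mid = (s + e) >> 1;
    if (comp(keys[mid], k)) {
      s = mid + 1;
    } else {
      e = mid;
    }
  }
  return s;
}

int main() {
  const std::vector<int> keys = {2, 4, 4, 8, 16};
  const std::less<int> comp;

  assert(LinearLowerBound(keys, 0, 5, 4, comp) == 1);
  assert(BinaryLowerBound(keys, 0, 5, 4, comp) == 1);
  assert(BinaryLowerBound(keys, 0, 5, 5, comp) == 3);    // no exact match
  assert(LinearLowerBound(keys, 0, 5, 100, comp) == 5);  // past the end
  return 0;
}
```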
- template - SearchResult binary_search_impl( - const K &k, int s, int e, const CompareTo &comp, - std::true_type /* IsCompareTo */) const { - if (params_type::template can_have_multiple_equivalent_keys()) { - MatchKind exact_match = MatchKind::kNe; - while (s != e) { - const int mid = (s + e) >> 1; - const absl::weak_ordering c = comp(key(mid), k); - if (c < 0) { - s = mid + 1; - } else { - e = mid; - if (c == 0) { - // Need to return the first value whose key is not less than k, - // which requires continuing the binary search if there could be - // multiple equivalent keys. - exact_match = MatchKind::kEq; - } - } - } - return {s, exact_match}; - } else { // Can't have multiple equivalent keys. - while (s != e) { - const int mid = (s + e) >> 1; - const absl::weak_ordering c = comp(key(mid), k); - if (c < 0) { - s = mid + 1; - } else if (c > 0) { - e = mid; - } else { - return {mid, MatchKind::kEq}; - } - } - return {s, MatchKind::kNe}; - } - } - - // Emplaces a value at position i, shifting all existing values and - // children at positions >= i to the right by 1. - template - void emplace_value(size_type i, allocator_type *alloc, Args &&... args); - - // Removes the values at positions [i, i + to_erase), shifting all existing - // values and children after that range to the left by to_erase. Clears all - // children between [i, i + to_erase). - void remove_values(field_type i, field_type to_erase, allocator_type *alloc); - - // Rebalances a node with its right sibling. - void rebalance_right_to_left(int to_move, btree_node *right, - allocator_type *alloc); - void rebalance_left_to_right(int to_move, btree_node *right, - allocator_type *alloc); - - // Splits a node, moving a portion of the node's values to its right sibling. - void split(int insert_position, btree_node *dest, allocator_type *alloc); - - // Merges a node with its right sibling, moving all of the values and the - // delimiting key in the parent node onto itself, and deleting the src node. - void merge(btree_node *src, allocator_type *alloc); - - // Node allocation/deletion routines. - void init_leaf(int max_count, btree_node *parent) { - set_generation(0); - set_parent(parent); - set_position(0); - set_start(0); - set_finish(0); - set_max_count(max_count); - absl::container_internal::SanitizerPoisonMemoryRegion( - start_slot(), max_count * sizeof(slot_type)); - } - void init_internal(btree_node *parent) { - init_leaf(kNodeSlots, parent); - // Set `max_count` to a sentinel value to indicate that this node is - // internal. - set_max_count(kInternalNodeMaxCount); - absl::container_internal::SanitizerPoisonMemoryRegion( - &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); - } - - static void deallocate(const size_type size, btree_node *node, - allocator_type *alloc) { - absl::container_internal::Deallocate(alloc, node, size); - } - - // Deletes a node and all of its children. - static void clear_and_delete(btree_node *node, allocator_type *alloc); - - private: - template - void value_init(const field_type i, allocator_type *alloc, Args &&... 
args) { - next_generation(); - absl::container_internal::SanitizerUnpoisonObject(slot(i)); - params_type::construct(alloc, slot(i), std::forward(args)...); - } - void value_destroy(const field_type i, allocator_type *alloc) { - next_generation(); - params_type::destroy(alloc, slot(i)); - absl::container_internal::SanitizerPoisonObject(slot(i)); - } - void value_destroy_n(const field_type i, const field_type n, - allocator_type *alloc) { - next_generation(); - for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) { - params_type::destroy(alloc, s); - absl::container_internal::SanitizerPoisonObject(s); - } - } - - static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) { - absl::container_internal::SanitizerUnpoisonObject(dest); - params_type::transfer(alloc, dest, src); - absl::container_internal::SanitizerPoisonObject(src); - } - - // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. - void transfer(const size_type dest_i, const size_type src_i, - btree_node *src_node, allocator_type *alloc) { - next_generation(); - transfer(slot(dest_i), src_node->slot(src_i), alloc); - } - - // Transfers `n` values starting at value `src_i` in `src_node` into the - // values starting at value `dest_i` in `this`. - void transfer_n(const size_type n, const size_type dest_i, - const size_type src_i, btree_node *src_node, - allocator_type *alloc) { - next_generation(); - for (slot_type *src = src_node->slot(src_i), *end = src + n, - *dest = slot(dest_i); - src != end; ++src, ++dest) { - transfer(dest, src, alloc); - } - } - - // Same as above, except that we start at the end and work our way to the - // beginning. - void transfer_n_backward(const size_type n, const size_type dest_i, - const size_type src_i, btree_node *src_node, - allocator_type *alloc) { - next_generation(); - for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, - *dest = slot(dest_i + n - 1); - src != end; --src, --dest) { - transfer(dest, src, alloc); - } - } - - template - friend class btree; - template - friend class btree_iterator; - friend class BtreeNodePeer; - friend struct btree_access; -}; - -template -class btree_iterator { - using key_type = typename Node::key_type; - using size_type = typename Node::size_type; - using params_type = typename Node::params_type; - using is_map_container = typename params_type::is_map_container; - - using node_type = Node; - using normal_node = typename std::remove_const::type; - using const_node = const Node; - using normal_pointer = typename params_type::pointer; - using normal_reference = typename params_type::reference; - using const_pointer = typename params_type::const_pointer; - using const_reference = typename params_type::const_reference; - using slot_type = typename params_type::slot_type; - - using iterator = - btree_iterator; - using const_iterator = - btree_iterator; - - public: - // These aliases are public for std::iterator_traits. - using difference_type = typename Node::difference_type; - using value_type = typename params_type::value_type; - using pointer = Pointer; - using reference = Reference; - using iterator_category = std::bidirectional_iterator_tag; - - btree_iterator() : btree_iterator(nullptr, -1) {} - explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {} - btree_iterator(Node *n, int p) : node_(n), position_(p) { + // A type which indicates if we have a key-compare-to functor or a plain old + // key-compare functor. 
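The distinction named in that comment is made purely from the comparator's result type: if invoking it on two keys yields something convertible to `absl::weak_ordering`, it is treated as a key-compare-to (three-way) functor, otherwise as a plain boolean comparator. A standalone sketch of that detection (the trait name below is mine; the real one, `btree_is_key_compare_to`, is defined earlier in this header):

```cpp
#include <functional>
#include <string>
#include <type_traits>
#include <utility>

#include "absl/strings/string_view.h"
#include "absl/types/compare.h"

// Three-way comparator: returns an absl::weak_ordering, in the same spirit
// as StringBtreeDefaultLess.
struct ThreeWayLess {
  absl::weak_ordering operator()(absl::string_view a,
                                 absl::string_view b) const {
    const int c = a.compare(b);
    return c < 0    ? absl::weak_ordering::less
           : c == 0 ? absl::weak_ordering::equivalent
                    : absl::weak_ordering::greater;
  }
};

// Detection by result type only: convertible to weak_ordering => compare-to.
template <typename Compare, typename Key>
using IsKeyCompareTo = std::is_convertible<
    decltype(std::declval<const Compare&>()(std::declval<const Key&>(),
                                            std::declval<const Key&>())),
    absl::weak_ordering>;

static_assert(IsKeyCompareTo<ThreeWayLess, std::string>::value,
              "three-way comparator detected");
static_assert(!IsKeyCompareTo<std::less<std::string>, std::string>::value,
              "boolean comparator is not compare-to");

int main() { return 0; }
```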
+ using is_key_compare_to = btree_is_key_compare_to; + + using allocator_type = Alloc; + using key_type = Key; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using slot_policy = SlotPolicy; + using slot_type = typename slot_policy::slot_type; + using value_type = typename slot_policy::value_type; + using init_type = typename slot_policy::mutable_value_type; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + + using value_compare = + absl::conditional_t, original_key_compare>; + using is_map_container = std::integral_constant; + + // For the given lookup key type, returns whether we can have multiple + // equivalent keys in the btree. If this is a multi-container, then we can. + // Otherwise, we can have multiple equivalent keys only if all of the + // following conditions are met: + // - The comparator is transparent. + // - The lookup key type is not the same as key_type. + // - The comparator is not a StringBtreeDefault{Less,Greater} comparator + // that we know has the same equivalence classes for all lookup types. + template + constexpr static bool can_have_multiple_equivalent_keys() + { + return IsMulti || (IsTransparent::value && + !std::is_same::value && + !kIsKeyCompareStringAdapted); + } + + enum + { + kTargetNodeSize = TargetNodeSize, + + // Upper bound for the available space for slots. This is largest for leaf + // nodes, which have overhead of at least a pointer + 4 bytes (for storing + // 3 field_types and an enum). + kNodeSlotSpace = + TargetNodeSize - /*minimum overhead=*/(sizeof(void*) + 4), + }; + + // This is an integral type large enough to hold as many slots as will fit a + // node of TargetNodeSize bytes. + using node_count_type = + absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT + + // The following methods are necessary for passing this struct as PolicyTraits + // for node_handle and/or are used within btree. + static value_type& element(slot_type* slot) + { + return slot_policy::element(slot); + } + static const value_type& element(const slot_type* slot) + { + return slot_policy::element(slot); + } + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) + { + slot_policy::construct(alloc, slot, std::forward(args)...); + } + static void construct(Alloc* alloc, slot_type* slot, slot_type* other) + { + slot_policy::construct(alloc, slot, other); + } + static void destroy(Alloc* alloc, slot_type* slot) + { + slot_policy::destroy(alloc, slot); + } + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) + { + slot_policy::transfer(alloc, new_slot, old_slot); + } + }; + + // An adapter class that converts a lower-bound compare into an upper-bound + // compare. Note: there is no need to make a version of this adapter specialized + // for key-compare-to functors because the upper-bound (the first value greater + // than the input) is never an exact match. + template + struct upper_bound_adapter + { + explicit upper_bound_adapter(const Compare& c) : + comp(c) + { + } + template + bool operator()(const K1& a, const K2& b) const + { + // Returns true when a is not greater than b. 
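+ // comp(b, a) answers "is b less than a?", either as a bool or as a
+ // negative weak_ordering; negating it yields "a is not greater than b",
+ // which is exactly the predicate an upper-bound search needs.
+ // Illustrative only: wrapping std::less<int>, the calls (3, 5) and (5, 5)
+ // both return true, while (7, 5) returns false.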
+ return !compare_internal::compare_result_as_less_than(comp(b, a)); + } + + private: + Compare comp; + }; + + enum class MatchKind : uint8_t + { + kEq, + kNe + }; + + template + struct SearchResult + { + V value; + MatchKind match; + + static constexpr bool HasMatch() + { + return true; + } + bool IsEq() const + { + return match == MatchKind::kEq; + } + }; + + // When we don't use CompareTo, `match` is not present. + // This ensures that callers can't use it accidentally when it provides no + // useful information. + template + struct SearchResult + { + SearchResult() + { + } + explicit SearchResult(V v) : + value(v) + { + } + SearchResult(V v, MatchKind /*match*/) : + value(v) + { + } + + V value; + + static constexpr bool HasMatch() + { + return false; + } + static constexpr bool IsEq() + { + return false; + } + }; + + // A node in the btree holding. The same node type is used for both internal + // and leaf nodes in the btree, though the nodes are allocated in such a way + // that the children array is only valid in internal nodes. + template + class btree_node + { + using is_key_compare_to = typename Params::is_key_compare_to; + using field_type = typename Params::node_count_type; + using allocator_type = typename Params::allocator_type; + using slot_type = typename Params::slot_type; + using original_key_compare = typename Params::original_key_compare; + + public: + using params_type = Params; + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using key_compare = typename Params::key_compare; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + + // Btree decides whether to use linear node search as follows: + // - If the comparator expresses a preference, use that. + // - If the key expresses a preference, use that. + // - If the key is arithmetic and the comparator is std::less or + // std::greater, choose linear. + // - Otherwise, choose binary. + // TODO(ezb): Might make sense to add condition(s) based on node-size. + using use_linear_search = std::integral_constant< + bool, + has_linear_node_search_preference::value ? prefers_linear_node_search::value : has_linear_node_search_preference::value ? prefers_linear_node_search::value : + std::is_arithmetic::value && (std::is_same, original_key_compare>::value || std::is_same, original_key_compare>::value)>; + + // This class is organized by absl::container_internal::Layout as if it had + // the following structure: + // // A pointer to the node's parent. + // btree_node *parent; + // + // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a + // // generation integer in order to check that when iterators are + // // used, they haven't been invalidated already. Only the generation on + // // the root is used, but we have one on each node because whether a node + // // is root or not can change. + // uint32_t generation; + // + // // The position of the node in the node's parent. + // field_type position; + // // The index of the first populated value in `values`. + // // TODO(ezb): right now, `start` is always 0. Update insertion/merge + // // logic to allow for floating storage within nodes. + // field_type start; + // // The index after the last populated value in `values`. 
Currently, this + // // is the same as the count of values. + // field_type finish; + // // The maximum number of values the node can hold. This is an integer in + // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf + // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal + // // nodes (even though there are still kNodeSlots values in the node). + // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) + // // to free extra bits for is_root, etc. + // field_type max_count; + // + // // The array of values. The capacity is `max_count` for leaf nodes and + // // kNodeSlots for internal nodes. Only the values in + // // [start, finish) have been initialized and are valid. + // slot_type values[max_count]; + // + // // The array of child pointers. The keys in children[i] are all less + // // than key(i). The keys in children[i + 1] are all greater than key(i). + // // There are 0 children for leaf nodes and kNodeSlots + 1 children for + // // internal nodes. + // btree_node *children[kNodeSlots + 1]; + // + // This class is only constructed by EmptyNodeType. Normally, pointers to the + // layout above are allocated, cast to btree_node*, and de-allocated within + // the btree implementation. + ~btree_node() = default; + btree_node(btree_node const&) = delete; + btree_node& operator=(btree_node const&) = delete; + + // Public for EmptyNodeType. + constexpr static size_type Alignment() + { + static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), "Alignment of all nodes must be equal."); + return InternalLayout().Alignment(); + } + + protected: + btree_node() = default; + + private: + using layout_type = + absl::container_internal::Layout; + constexpr static size_type SizeWithNSlots(size_type n) + { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ n, + /*children*/ 0 + ) + .AllocSize(); + } + // A lower bound for the overhead of fields other than slots in a leaf node. + constexpr static size_type MinimumOverhead() + { + return SizeWithNSlots(1) - sizeof(slot_type); + } + + // Compute how many values we can fit onto a leaf node taking into account + // padding. + constexpr static size_type NodeTargetSlots(const size_type begin, const size_type end) + { + return begin == end ? begin : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize ? NodeTargetSlots(begin, (begin + end) / 2) : + NodeTargetSlots((begin + end) / 2 + 1, end); + } + + enum + { + kTargetNodeSize = params_type::kTargetNodeSize, + kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), + + // We need a minimum of 3 slots per internal node in order to perform + // splitting (1 value for the two nodes involved in the split and 1 value + // propagated to the parent as the delimiter for the split). For performance + // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy + // of 1/3 (for a node, not a b-tree). + kMinNodeSlots = 4, + + kNodeSlots = + kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, + + // The node is internal (i.e. is not a leaf node) if and only if `max_count` + // has this value. + kInternalNodeMaxCount = 0, + }; + + // Leaves can have less than kNodeSlots values. + constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) + { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 
1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ slot_count, + /*children*/ 0 + ); + } + constexpr static layout_type InternalLayout() + { + return layout_type( + /*parent*/ 1, + /*generation*/ params_type::kEnableGenerations ? 1 : 0, + /*position, start, finish, max_count*/ 4, + /*slots*/ kNodeSlots, + /*children*/ kNodeSlots + 1 + ); + } + constexpr static size_type LeafSize(const int slot_count = kNodeSlots) + { + return LeafLayout(slot_count).AllocSize(); + } + constexpr static size_type InternalSize() + { + return InternalLayout().AllocSize(); + } + + // N is the index of the type in the Layout definition. + // ElementType is the Nth type in the Layout definition. + template + inline typename layout_type::template ElementType* GetField() + { + // We assert that we don't read from values that aren't there. + assert(N < 4 || is_internal()); + return InternalLayout().template Pointer(reinterpret_cast(this)); + } + template + inline const typename layout_type::template ElementType* GetField() const + { + assert(N < 4 || is_internal()); + return InternalLayout().template Pointer( + reinterpret_cast(this) + ); + } + void set_parent(btree_node* p) + { + *GetField<0>() = p; + } + field_type& mutable_finish() + { + return GetField<2>()[2]; + } + slot_type* slot(int i) + { + return &GetField<3>()[i]; + } + slot_type* start_slot() + { + return slot(start()); + } + slot_type* finish_slot() + { + return slot(finish()); + } + const slot_type* slot(int i) const + { + return &GetField<3>()[i]; + } + void set_position(field_type v) + { + GetField<2>()[0] = v; + } + void set_start(field_type v) + { + GetField<2>()[1] = v; + } + void set_finish(field_type v) + { + GetField<2>()[2] = v; + } + // This method is only called by the node init methods. + void set_max_count(field_type v) + { + GetField<2>()[3] = v; + } + + public: + // Whether this is a leaf node or not. This value doesn't change after the + // node is created. + bool is_leaf() const + { + return GetField<2>()[3] != kInternalNodeMaxCount; + } + // Whether this is an internal node or not. This value doesn't change after + // the node is created. + bool is_internal() const + { + return !is_leaf(); + } + + // Getter for the position of this node in its parent. + field_type position() const + { + return GetField<2>()[0]; + } + + // Getter for the offset of the first value in the `values` array. + field_type start() const + { + // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; + assert(GetField<2>()[1] == 0); + return 0; + } + + // Getter for the offset after the last value in the `values` array. + field_type finish() const + { + return GetField<2>()[2]; + } + + // Getters for the number of values stored in this node. + field_type count() const + { + assert(finish() >= start()); + return finish() - start(); + } + field_type max_count() const + { + // Internal nodes have max_count==kInternalNodeMaxCount. + // Leaf nodes have max_count in [1, kNodeSlots]. + const field_type max_count = GetField<2>()[3]; + return max_count == field_type{kInternalNodeMaxCount} ? field_type{kNodeSlots} : max_count; + } + + // Getter for the parent of this node. + btree_node* parent() const + { + return *GetField<0>(); + } + // Getter for whether the node is the root of the tree. The parent of the + // root of the tree is the leftmost node in the tree which is guaranteed to + // be a leaf. 
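+ // Consequently a node is the root exactly when its parent is a leaf,
+ // which is what the check below relies on.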
+ bool is_root() const + { + return parent()->is_leaf(); + } + void make_root() + { + assert(parent()->is_root()); + set_generation(parent()->generation()); + set_parent(parent()->parent()); + } + + // Gets the root node's generation integer, which is the one used by the tree. + uint32_t* get_root_generation() const + { + assert(params_type::kEnableGenerations); + const btree_node* curr = this; + for (; !curr->is_root(); curr = curr->parent()) + continue; + return const_cast(&curr->GetField<1>()[0]); + } + + // Returns the generation for iterator validation. + uint32_t generation() const + { + return params_type::kEnableGenerations ? *get_root_generation() : 0; + } + // Updates generation. Should only be called on a root node or during node + // initialization. + void set_generation(uint32_t generation) + { + if (params_type::kEnableGenerations) + GetField<1>()[0] = generation; + } + // Updates the generation. We do this whenever the node is mutated. + void next_generation() + { + if (params_type::kEnableGenerations) + ++*get_root_generation(); + } + + // Getters for the key/value at position i in the node. + const key_type& key(int i) const + { + return params_type::key(slot(i)); + } + reference value(int i) + { + return params_type::element(slot(i)); + } + const_reference value(int i) const + { + return params_type::element(slot(i)); + } + + // Getters/setter for the child at position i in the node. + btree_node* child(int i) const + { + return GetField<4>()[i]; + } + btree_node* start_child() const + { + return child(start()); + } + btree_node*& mutable_child(int i) + { + return GetField<4>()[i]; + } + void clear_child(int i) + { + absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); + } + void set_child(int i, btree_node* c) + { + absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); + mutable_child(i) = c; + c->set_position(i); + } + void init_child(int i, btree_node* c) + { + set_child(i, c); + c->set_parent(this); + } + + // Returns the position of the first value whose key is not less than k. + template + SearchResult lower_bound( + const K& k, const key_compare& comp + ) const + { + return use_linear_search::value ? linear_search(k, comp) : binary_search(k, comp); + } + // Returns the position of the first value whose key is greater than k. + template + int upper_bound(const K& k, const key_compare& comp) const + { + auto upper_compare = upper_bound_adapter(comp); + return use_linear_search::value ? linear_search(k, upper_compare).value : binary_search(k, upper_compare).value; + } + + template + SearchResult::value> + linear_search(const K& k, const Compare& comp) const + { + return linear_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); + } + + template + SearchResult::value> + binary_search(const K& k, const Compare& comp) const + { + return binary_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using plain compare. + template + SearchResult linear_search_impl( + const K& k, int s, const int e, const Compare& comp, std::false_type /* IsCompareTo */ + ) const + { + while (s < e) + { + if (!comp(key(s), k)) + { + break; + } + ++s; + } + return SearchResult{s}; + } + + // Returns the position of the first value whose key is not less than k using + // linear search performed using compare-to. 
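+ // Unlike the plain-compare overload above, this overload can also report
+ // an exact match (MatchKind::kEq), sparing the caller a follow-up
+ // equality comparison.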
+ template + SearchResult linear_search_impl( + const K& k, int s, const int e, const Compare& comp, std::true_type /* IsCompareTo */ + ) const + { + while (s < e) + { + const absl::weak_ordering c = comp(key(s), k); + if (c == 0) + { + return {s, MatchKind::kEq}; + } + else if (c > 0) + { + break; + } + ++s; + } + return {s, MatchKind::kNe}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using plain compare. + template + SearchResult binary_search_impl( + const K& k, int s, int e, const Compare& comp, std::false_type /* IsCompareTo */ + ) const + { + while (s != e) + { + const int mid = (s + e) >> 1; + if (comp(key(mid), k)) + { + s = mid + 1; + } + else + { + e = mid; + } + } + return SearchResult{s}; + } + + // Returns the position of the first value whose key is not less than k using + // binary search performed using compare-to. + template + SearchResult binary_search_impl( + const K& k, int s, int e, const CompareTo& comp, std::true_type /* IsCompareTo */ + ) const + { + if (params_type::template can_have_multiple_equivalent_keys()) + { + MatchKind exact_match = MatchKind::kNe; + while (s != e) + { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) + { + s = mid + 1; + } + else + { + e = mid; + if (c == 0) + { + // Need to return the first value whose key is not less than k, + // which requires continuing the binary search if there could be + // multiple equivalent keys. + exact_match = MatchKind::kEq; + } + } + } + return {s, exact_match}; + } + else + { // Can't have multiple equivalent keys. + while (s != e) + { + const int mid = (s + e) >> 1; + const absl::weak_ordering c = comp(key(mid), k); + if (c < 0) + { + s = mid + 1; + } + else if (c > 0) + { + e = mid; + } + else + { + return {mid, MatchKind::kEq}; + } + } + return {s, MatchKind::kNe}; + } + } + + // Emplaces a value at position i, shifting all existing values and + // children at positions >= i to the right by 1. + template + void emplace_value(size_type i, allocator_type* alloc, Args&&... args); + + // Removes the values at positions [i, i + to_erase), shifting all existing + // values and children after that range to the left by to_erase. Clears all + // children between [i, i + to_erase). + void remove_values(field_type i, field_type to_erase, allocator_type* alloc); + + // Rebalances a node with its right sibling. + void rebalance_right_to_left(int to_move, btree_node* right, allocator_type* alloc); + void rebalance_left_to_right(int to_move, btree_node* right, allocator_type* alloc); + + // Splits a node, moving a portion of the node's values to its right sibling. + void split(int insert_position, btree_node* dest, allocator_type* alloc); + + // Merges a node with its right sibling, moving all of the values and the + // delimiting key in the parent node onto itself, and deleting the src node. + void merge(btree_node* src, allocator_type* alloc); + + // Node allocation/deletion routines. + void init_leaf(int max_count, btree_node* parent) + { + set_generation(0); + set_parent(parent); + set_position(0); + set_start(0); + set_finish(0); + set_max_count(max_count); + absl::container_internal::SanitizerPoisonMemoryRegion( + start_slot(), max_count * sizeof(slot_type) + ); + } + void init_internal(btree_node* parent) + { + init_leaf(kNodeSlots, parent); + // Set `max_count` to a sentinel value to indicate that this node is + // internal. 
+ set_max_count(kInternalNodeMaxCount); + absl::container_internal::SanitizerPoisonMemoryRegion( + &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node*) + ); + } + + static void deallocate(const size_type size, btree_node* node, allocator_type* alloc) + { + absl::container_internal::Deallocate(alloc, node, size); + } + + // Deletes a node and all of its children. + static void clear_and_delete(btree_node* node, allocator_type* alloc); + + private: + template + void value_init(const field_type i, allocator_type* alloc, Args&&... args) + { + next_generation(); + absl::container_internal::SanitizerUnpoisonObject(slot(i)); + params_type::construct(alloc, slot(i), std::forward(args)...); + } + void value_destroy(const field_type i, allocator_type* alloc) + { + next_generation(); + params_type::destroy(alloc, slot(i)); + absl::container_internal::SanitizerPoisonObject(slot(i)); + } + void value_destroy_n(const field_type i, const field_type n, allocator_type* alloc) + { + next_generation(); + for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) + { + params_type::destroy(alloc, s); + absl::container_internal::SanitizerPoisonObject(s); + } + } + + static void transfer(slot_type* dest, slot_type* src, allocator_type* alloc) + { + absl::container_internal::SanitizerUnpoisonObject(dest); + params_type::transfer(alloc, dest, src); + absl::container_internal::SanitizerPoisonObject(src); + } + + // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. + void transfer(const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + transfer(slot(dest_i), src_node->slot(src_i), alloc); + } + + // Transfers `n` values starting at value `src_i` in `src_node` into the + // values starting at value `dest_i` in `this`. + void transfer_n(const size_type n, const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + for (slot_type *src = src_node->slot(src_i), *end = src + n, *dest = slot(dest_i); + src != end; + ++src, ++dest) + { + transfer(dest, src, alloc); + } + } + + // Same as above, except that we start at the end and work our way to the + // beginning. + void transfer_n_backward(const size_type n, const size_type dest_i, const size_type src_i, btree_node* src_node, allocator_type* alloc) + { + next_generation(); + for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, *dest = slot(dest_i + n - 1); + src != end; + --src, --dest) + { + transfer(dest, src, alloc); + } + } + + template + friend class btree; + template + friend class btree_iterator; + friend class BtreeNodePeer; + friend struct btree_access; + }; + + template + class btree_iterator + { + using key_type = typename Node::key_type; + using size_type = typename Node::size_type; + using params_type = typename Node::params_type; + using is_map_container = typename params_type::is_map_container; + + using node_type = Node; + using normal_node = typename std::remove_const::type; + using const_node = const Node; + using normal_pointer = typename params_type::pointer; + using normal_reference = typename params_type::reference; + using const_pointer = typename params_type::const_pointer; + using const_reference = typename params_type::const_reference; + using slot_type = typename params_type::slot_type; + + using iterator = + btree_iterator; + using const_iterator = + btree_iterator; + + public: + // These aliases are public for std::iterator_traits. 
+ using difference_type = typename Node::difference_type; + using value_type = typename params_type::value_type; + using pointer = Pointer; + using reference = Reference; + using iterator_category = std::bidirectional_iterator_tag; + + btree_iterator() : + btree_iterator(nullptr, -1) + { + } + explicit btree_iterator(Node* n) : + btree_iterator(n, n->start()) + { + } + btree_iterator(Node* n, int p) : + node_(n), + position_(p) + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - // Use `~uint32_t{}` as a sentinel value for iterator generations so it - // doesn't match the initial value for the actual generation. - generation_ = n != nullptr ? n->generation() : ~uint32_t{}; + // Use `~uint32_t{}` as a sentinel value for iterator generations so it + // doesn't match the initial value for the actual generation. + generation_ = n != nullptr ? n->generation() : ~uint32_t{}; #endif - } - - // NOTE: this SFINAE allows for implicit conversions from iterator to - // const_iterator, but it specifically avoids hiding the copy constructor so - // that the trivial one will be used when possible. - template , iterator>::value && - std::is_same::value, - int> = 0> - btree_iterator(const btree_iterator other) // NOLINT - : node_(other.node_), position_(other.position_) { + } + + // NOTE: this SFINAE allows for implicit conversions from iterator to + // const_iterator, but it specifically avoids hiding the copy constructor so + // that the trivial one will be used when possible. + template, iterator>::value && std::is_same::value, int> = 0> + btree_iterator(const btree_iterator other) // NOLINT + : + node_(other.node_), + position_(other.position_) + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - generation_ = other.generation_; + generation_ = other.generation_; #endif - } - - bool operator==(const iterator &other) const { - return node_ == other.node_ && position_ == other.position_; - } - bool operator==(const const_iterator &other) const { - return node_ == other.node_ && position_ == other.position_; - } - bool operator!=(const iterator &other) const { - return node_ != other.node_ || position_ != other.position_; - } - bool operator!=(const const_iterator &other) const { - return node_ != other.node_ || position_ != other.position_; - } - - // Accessors for the key/value the iterator is pointing at. - reference operator*() const { - ABSL_HARDENING_ASSERT(node_ != nullptr); - ABSL_HARDENING_ASSERT(node_->start() <= position_); - ABSL_HARDENING_ASSERT(node_->finish() > position_); - assert_valid_generation(); - return node_->value(position_); - } - pointer operator->() const { return &operator*(); } - - btree_iterator &operator++() { - increment(); - return *this; - } - btree_iterator &operator--() { - decrement(); - return *this; - } - btree_iterator operator++(int) { - btree_iterator tmp = *this; - ++*this; - return tmp; - } - btree_iterator operator--(int) { - btree_iterator tmp = *this; - --*this; - return tmp; - } - - private: - friend iterator; - friend const_iterator; - template - friend class btree; - template - friend class btree_container; - template - friend class btree_set_container; - template - friend class btree_map_container; - template - friend class btree_multiset_container; - template - friend class base_checker; - friend struct btree_access; - - // This SFINAE allows explicit conversions from const_iterator to - // iterator, but also avoids hiding the copy constructor. 
- // NOTE: the const_cast is safe because this constructor is only called by - // non-const methods and the container owns the nodes. - template , const_iterator>::value && - std::is_same::value, - int> = 0> - explicit btree_iterator(const btree_iterator other) - : node_(const_cast(other.node_)), - position_(other.position_) { + } + + bool operator==(const iterator& other) const + { + return node_ == other.node_ && position_ == other.position_; + } + bool operator==(const const_iterator& other) const + { + return node_ == other.node_ && position_ == other.position_; + } + bool operator!=(const iterator& other) const + { + return node_ != other.node_ || position_ != other.position_; + } + bool operator!=(const const_iterator& other) const + { + return node_ != other.node_ || position_ != other.position_; + } + + // Accessors for the key/value the iterator is pointing at. + reference operator*() const + { + ABSL_HARDENING_ASSERT(node_ != nullptr); + ABSL_HARDENING_ASSERT(node_->start() <= position_); + ABSL_HARDENING_ASSERT(node_->finish() > position_); + assert_valid_generation(); + return node_->value(position_); + } + pointer operator->() const + { + return &operator*(); + } + + btree_iterator& operator++() + { + increment(); + return *this; + } + btree_iterator& operator--() + { + decrement(); + return *this; + } + btree_iterator operator++(int) + { + btree_iterator tmp = *this; + ++*this; + return tmp; + } + btree_iterator operator--(int) + { + btree_iterator tmp = *this; + --*this; + return tmp; + } + + private: + friend iterator; + friend const_iterator; + template + friend class btree; + template + friend class btree_container; + template + friend class btree_set_container; + template + friend class btree_map_container; + template + friend class btree_multiset_container; + template + friend class base_checker; + friend struct btree_access; + + // This SFINAE allows explicit conversions from const_iterator to + // iterator, but also avoids hiding the copy constructor. + // NOTE: the const_cast is safe because this constructor is only called by + // non-const methods and the container owns the nodes. + template, const_iterator>::value && std::is_same::value, int> = 0> + explicit btree_iterator(const btree_iterator other) : + node_(const_cast(other.node_)), + position_(other.position_) + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - generation_ = other.generation_; + generation_ = other.generation_; #endif - } - - // Increment/decrement the iterator. - void increment() { - assert_valid_generation(); - if (node_->is_leaf() && ++position_ < node_->finish()) { - return; - } - increment_slow(); - } - void increment_slow(); - - void decrement() { - assert_valid_generation(); - if (node_->is_leaf() && --position_ >= node_->start()) { - return; - } - decrement_slow(); - } - void decrement_slow(); - - // Updates the generation. For use internally right before we return an - // iterator to the user. - void update_generation() { + } + + // Increment/decrement the iterator. + void increment() + { + assert_valid_generation(); + if (node_->is_leaf() && ++position_ < node_->finish()) + { + return; + } + increment_slow(); + } + void increment_slow(); + + void decrement() + { + assert_valid_generation(); + if (node_->is_leaf() && --position_ >= node_->start()) + { + return; + } + decrement_slow(); + } + void decrement_slow(); + + // Updates the generation. For use internally right before we return an + // iterator to the user. 
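+ // Mutations bump the root's generation, so refreshing the stored copy
+ // here keeps the iterator that is about to be handed back from being
+ // flagged as invalidated.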
+ void update_generation() + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (node_ != nullptr) generation_ = node_->generation(); + if (node_ != nullptr) + generation_ = node_->generation(); #endif - } - - const key_type &key() const { return node_->key(position_); } - decltype(std::declval()->slot(0)) slot() { - return node_->slot(position_); - } - - void assert_valid_generation() const { + } + + const key_type& key() const + { + return node_->key(position_); + } + decltype(std::declval()->slot(0)) slot() + { + return node_->slot(position_); + } + + void assert_valid_generation() const + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (node_ != nullptr && node_->generation() != generation_) { - ABSL_INTERNAL_LOG( - FATAL, - "Attempting to use an invalidated iterator. The corresponding b-tree " - "container has been mutated since this iterator was constructed."); - } + if (node_ != nullptr && node_->generation() != generation_) + { + ABSL_INTERNAL_LOG( + FATAL, + "Attempting to use an invalidated iterator. The corresponding b-tree " + "container has been mutated since this iterator was constructed." + ); + } #endif - } - - // The node in the tree the iterator is pointing at. - Node *node_; - // The position within the node of the tree the iterator is pointing at. - // NOTE: this is an int rather than a field_type because iterators can point - // to invalid positions (such as -1) in certain circumstances. - int position_; + } + + // The node in the tree the iterator is pointing at. + Node* node_; + // The position within the node of the tree the iterator is pointing at. + // NOTE: this is an int rather than a field_type because iterators can point + // to invalid positions (such as -1) in certain circumstances. + int position_; #ifdef ABSL_BTREE_ENABLE_GENERATIONS - // Used to check that the iterator hasn't been invalidated. - uint32_t generation_; + // Used to check that the iterator hasn't been invalidated. + uint32_t generation_; #endif -}; - -template -class btree { - using node_type = btree_node; - using is_key_compare_to = typename Params::is_key_compare_to; - using field_type = typename node_type::field_type; - - // We use a static empty node for the root/leftmost/rightmost of empty btrees - // in order to avoid branching in begin()/end(). - struct alignas(node_type::Alignment()) EmptyNodeType : node_type { - using field_type = typename node_type::field_type; - node_type *parent; + }; + + template + class btree + { + using node_type = btree_node; + using is_key_compare_to = typename Params::is_key_compare_to; + using field_type = typename node_type::field_type; + + // We use a static empty node for the root/leftmost/rightmost of empty btrees + // in order to avoid branching in begin()/end(). + struct alignas(node_type::Alignment()) EmptyNodeType : node_type + { + using field_type = typename node_type::field_type; + node_type* parent; #ifdef ABSL_BTREE_ENABLE_GENERATIONS - uint32_t generation = 0; + uint32_t generation = 0; #endif - field_type position = 0; - field_type start = 0; - field_type finish = 0; - // max_count must be != kInternalNodeMaxCount (so that this node is regarded - // as a leaf node). max_count() is never called when the tree is empty. - field_type max_count = node_type::kInternalNodeMaxCount + 1; + field_type position = 0; + field_type start = 0; + field_type finish = 0; + // max_count must be != kInternalNodeMaxCount (so that this node is regarded + // as a leaf node). max_count() is never called when the tree is empty. 
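+ // kInternalNodeMaxCount is 0, so this initializes max_count to 1, the
+ // smallest capacity a real leaf can have.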
+ field_type max_count = node_type::kInternalNodeMaxCount + 1; #ifdef _MSC_VER - // MSVC has constexpr code generations bugs here. - EmptyNodeType() : parent(this) {} + // MSVC has constexpr code generations bugs here. + EmptyNodeType() : + parent(this) + { + } #else - constexpr EmptyNodeType(node_type *p) : parent(p) {} + constexpr EmptyNodeType(node_type* p) : + parent(p) + { + } #endif - }; + }; - static node_type *EmptyNode() { + static node_type* EmptyNode() + { #ifdef _MSC_VER - static EmptyNodeType *empty_node = new EmptyNodeType; - // This assert fails on some other construction methods. - assert(empty_node->parent == empty_node); - return empty_node; + static EmptyNodeType* empty_node = new EmptyNodeType; + // This assert fails on some other construction methods. + assert(empty_node->parent == empty_node); + return empty_node; #else - static constexpr EmptyNodeType empty_node( - const_cast(&empty_node)); - return const_cast(&empty_node); + static constexpr EmptyNodeType empty_node( + const_cast(&empty_node) + ); + return const_cast(&empty_node); #endif - } - - enum : uint32_t { - kNodeSlots = node_type::kNodeSlots, - kMinNodeValues = kNodeSlots / 2, - }; - - struct node_stats { - using size_type = typename Params::size_type; - - node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {} - - node_stats &operator+=(const node_stats &other) { - leaf_nodes += other.leaf_nodes; - internal_nodes += other.internal_nodes; - return *this; - } - - size_type leaf_nodes; - size_type internal_nodes; - }; - - public: - using key_type = typename Params::key_type; - using value_type = typename Params::value_type; - using size_type = typename Params::size_type; - using difference_type = typename Params::difference_type; - using key_compare = typename Params::key_compare; - using original_key_compare = typename Params::original_key_compare; - using value_compare = typename Params::value_compare; - using allocator_type = typename Params::allocator_type; - using reference = typename Params::reference; - using const_reference = typename Params::const_reference; - using pointer = typename Params::pointer; - using const_pointer = typename Params::const_pointer; - using iterator = - typename btree_iterator::iterator; - using const_iterator = typename iterator::const_iterator; - using reverse_iterator = std::reverse_iterator; - using const_reverse_iterator = std::reverse_iterator; - using node_handle_type = node_handle; - - // Internal types made public for use by btree_container types. - using params_type = Params; - using slot_type = typename Params::slot_type; - - private: - // Copies or moves (depending on the template parameter) the values in - // other into this btree in their order in other. This btree must be empty - // before this method is called. This method is used in copy construction, - // copy assignment, and move assignment. - template - void copy_or_move_values_in_order(Btree &other); - - // Validates that various assumptions/requirements are true at compile time. 
- constexpr static bool static_assert_validation(); - - public: - btree(const key_compare &comp, const allocator_type &alloc) - : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {} - - btree(const btree &other) : btree(other, other.allocator()) {} - btree(const btree &other, const allocator_type &alloc) - : btree(other.key_comp(), alloc) { - copy_or_move_values_in_order(other); - } - btree(btree &&other) noexcept - : root_(absl::exchange(other.root_, EmptyNode())), - rightmost_(std::move(other.rightmost_)), - size_(absl::exchange(other.size_, 0)) { - other.mutable_rightmost() = EmptyNode(); - } - btree(btree &&other, const allocator_type &alloc) - : btree(other.key_comp(), alloc) { - if (alloc == other.allocator()) { - swap(other); - } else { - // Move values from `other` one at a time when allocators are different. - copy_or_move_values_in_order(other); - } - } - - ~btree() { - // Put static_asserts in destructor to avoid triggering them before the type - // is complete. - static_assert(static_assert_validation(), "This call must be elided."); - clear(); - } - - // Assign the contents of other to *this. - btree &operator=(const btree &other); - btree &operator=(btree &&other) noexcept; - - iterator begin() { return iterator(leftmost()); } - const_iterator begin() const { return const_iterator(leftmost()); } - iterator end() { return iterator(rightmost(), rightmost()->finish()); } - const_iterator end() const { - return const_iterator(rightmost(), rightmost()->finish()); - } - reverse_iterator rbegin() { return reverse_iterator(end()); } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - reverse_iterator rend() { return reverse_iterator(begin()); } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - // Finds the first element whose key is not less than `key`. - template - iterator lower_bound(const K &key) { - return internal_end(internal_lower_bound(key).value); - } - template - const_iterator lower_bound(const K &key) const { - return internal_end(internal_lower_bound(key).value); - } - - // Finds the first element whose key is not less than `key` and also returns - // whether that element is equal to `key`. - template - std::pair lower_bound_equal(const K &key) const; - - // Finds the first element whose key is greater than `key`. - template - iterator upper_bound(const K &key) { - return internal_end(internal_upper_bound(key)); - } - template - const_iterator upper_bound(const K &key) const { - return internal_end(internal_upper_bound(key)); - } - - // Finds the range of values which compare equal to key. The first member of - // the returned pair is equal to lower_bound(key). The second member of the - // pair is equal to upper_bound(key). - template - std::pair equal_range(const K &key); - template - std::pair equal_range(const K &key) const { - return const_cast(this)->equal_range(key); - } - - // Inserts a value into the btree only if it does not already exist. The - // boolean return value indicates whether insertion succeeded or failed. - // Requirement: if `key` already exists in the btree, does not consume `args`. - // Requirement: `key` is never referenced after consuming `args`. - template - std::pair insert_unique(const K &key, Args &&... args); - - // Inserts with hint. Checks to see if the value should be placed immediately - // before `position` in the tree. If so, then the insertion will take - // amortized constant time. 
If not, the insertion will take amortized - // logarithmic time as if a call to insert_unique() were made. - // Requirement: if `key` already exists in the btree, does not consume `args`. - // Requirement: `key` is never referenced after consuming `args`. - template - std::pair insert_hint_unique(iterator position, - const K &key, - Args &&... args); - - // Insert a range of values into the btree. - // Note: the first overload avoids constructing a value_type if the key - // already exists in the btree. - template ()( - params_type::key(*std::declval()), - std::declval()))> - void insert_iterator_unique(InputIterator b, InputIterator e, int); - // We need the second overload for cases in which we need to construct a - // value_type in order to compare it with the keys already in the btree. - template - void insert_iterator_unique(InputIterator b, InputIterator e, char); - - // Inserts a value into the btree. - template - iterator insert_multi(const key_type &key, ValueType &&v); - - // Inserts a value into the btree. - template - iterator insert_multi(ValueType &&v) { - return insert_multi(params_type::key(v), std::forward(v)); - } - - // Insert with hint. Check to see if the value should be placed immediately - // before position in the tree. If it does, then the insertion will take - // amortized constant time. If not, the insertion will take amortized - // logarithmic time as if a call to insert_multi(v) were made. - template - iterator insert_hint_multi(iterator position, ValueType &&v); - - // Insert a range of values into the btree. - template - void insert_iterator_multi(InputIterator b, InputIterator e); - - // Erase the specified iterator from the btree. The iterator must be valid - // (i.e. not equal to end()). Return an iterator pointing to the node after - // the one that was erased (or end() if none exists). - // Requirement: does not read the value at `*iter`. - iterator erase(iterator iter); - - // Erases range. Returns the number of keys erased and an iterator pointing - // to the element after the last erased element. - std::pair erase_range(iterator begin, iterator end); - - // Finds an element with key equivalent to `key` or returns `end()` if `key` - // is not present. - template - iterator find(const K &key) { - return internal_end(internal_find(key)); - } - template - const_iterator find(const K &key) const { - return internal_end(internal_find(key)); - } - - // Clear the btree, deleting all of the values it contains. - void clear(); - - // Swaps the contents of `this` and `other`. - void swap(btree &other); - - const key_compare &key_comp() const noexcept { - return rightmost_.template get<0>(); - } - template - bool compare_keys(const K1 &a, const K2 &b) const { - return compare_internal::compare_result_as_less_than(key_comp()(a, b)); - } - - value_compare value_comp() const { - return value_compare(original_key_compare(key_comp())); - } - - // Verifies the structure of the btree. - void verify() const; - - // Size routines. - size_type size() const { return size_; } - size_type max_size() const { return (std::numeric_limits::max)(); } - bool empty() const { return size_ == 0; } - - // The height of the btree. An empty tree will have height 0. - size_type height() const { - size_type h = 0; - if (!empty()) { - // Count the length of the chain from the leftmost node up to the - // root. We actually count from the root back around to the level below - // the root, but the calculation is the same because of the circularity - // of that traversal. 
- const node_type *n = root(); - do { - ++h; - n = n->parent(); - } while (n != root()); - } - return h; - } - - // The number of internal, leaf and total nodes used by the btree. - size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; } - size_type internal_nodes() const { - return internal_stats(root()).internal_nodes; - } - size_type nodes() const { - node_stats stats = internal_stats(root()); - return stats.leaf_nodes + stats.internal_nodes; - } - - // The total number of bytes used by the btree. - // TODO(b/169338300): update to support node_btree_*. - size_type bytes_used() const { - node_stats stats = internal_stats(root()); - if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { - return sizeof(*this) + node_type::LeafSize(root()->max_count()); - } else { - return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() + - stats.internal_nodes * node_type::InternalSize(); - } - } - - // The average number of bytes used per value stored in the btree assuming - // random insertion order. - static double average_bytes_per_value() { - // The expected number of values per node with random insertion order is the - // average of the maximum and minimum numbers of values per node. - const double expected_values_per_node = - (kNodeSlots + kMinNodeValues) / 2.0; - return node_type::LeafSize() / expected_values_per_node; - } - - // The fullness of the btree. Computed as the number of elements in the btree - // divided by the maximum number of elements a tree with the current number - // of nodes could hold. A value of 1 indicates perfect space - // utilization. Smaller values indicate space wastage. - // Returns 0 for empty trees. - double fullness() const { - if (empty()) return 0.0; - return static_cast(size()) / (nodes() * kNodeSlots); - } - // The overhead of the btree structure in bytes per node. Computed as the - // total number of bytes used by the btree minus the number of bytes used for - // storing elements divided by the number of elements. - // Returns 0 for empty trees. - double overhead() const { - if (empty()) return 0.0; - return (bytes_used() - size() * sizeof(value_type)) / - static_cast(size()); - } - - // The allocator used by the btree. - allocator_type get_allocator() const { return allocator(); } - - private: - friend struct btree_access; - - // Internal accessor routines. - node_type *root() { return root_; } - const node_type *root() const { return root_; } - node_type *&mutable_root() noexcept { return root_; } - node_type *rightmost() { return rightmost_.template get<2>(); } - const node_type *rightmost() const { return rightmost_.template get<2>(); } - node_type *&mutable_rightmost() noexcept { - return rightmost_.template get<2>(); - } - key_compare *mutable_key_comp() noexcept { - return &rightmost_.template get<0>(); - } - - // The leftmost node is stored as the parent of the root node. - node_type *leftmost() { return root()->parent(); } - const node_type *leftmost() const { return root()->parent(); } - - // Allocator routines. - allocator_type *mutable_allocator() noexcept { - return &rightmost_.template get<1>(); - } - const allocator_type &allocator() const noexcept { - return rightmost_.template get<1>(); - } - - // Allocates a correctly aligned node of at least size bytes using the - // allocator. - node_type *allocate(const size_type size) { - return reinterpret_cast( - absl::container_internal::Allocate( - mutable_allocator(), size)); - } - - // Node creation/deletion routines. 
- node_type *new_internal_node(node_type *parent) { - node_type *n = allocate(node_type::InternalSize()); - n->init_internal(parent); - return n; - } - node_type *new_leaf_node(node_type *parent) { - node_type *n = allocate(node_type::LeafSize()); - n->init_leaf(kNodeSlots, parent); - return n; - } - node_type *new_leaf_root_node(const int max_count) { - node_type *n = allocate(node_type::LeafSize(max_count)); - n->init_leaf(max_count, /*parent=*/n); - return n; - } - - // Deletion helper routines. - iterator rebalance_after_delete(iterator iter); - - // Rebalances or splits the node iter points to. - void rebalance_or_split(iterator *iter); - - // Merges the values of left, right and the delimiting key on their parent - // onto left, removing the delimiting key and deleting right. - void merge_nodes(node_type *left, node_type *right); - - // Tries to merge node with its left or right sibling, and failing that, - // rebalance with its left or right sibling. Returns true if a merge - // occurred, at which point it is no longer valid to access node. Returns - // false if no merging took place. - bool try_merge_or_rebalance(iterator *iter); - - // Tries to shrink the height of the tree by 1. - void try_shrink(); - - iterator internal_end(iterator iter) { - return iter.node_ != nullptr ? iter : end(); - } - const_iterator internal_end(const_iterator iter) const { - return iter.node_ != nullptr ? iter : end(); - } - - // Emplaces a value into the btree immediately before iter. Requires that - // key(v) <= iter.key() and (--iter).key() <= key(v). - template - iterator internal_emplace(iterator iter, Args &&... args); - - // Returns an iterator pointing to the first value >= the value "iter" is - // pointing at. Note that "iter" might be pointing to an invalid location such - // as iter.position_ == iter.node_->finish(). This routine simply moves iter - // up in the tree to a valid location. Requires: iter.node_ is non-null. - template - static IterType internal_last(IterType iter); - - // Returns an iterator pointing to the leaf position at which key would - // reside in the tree, unless there is an exact match - in which case, the - // result may not be on a leaf. When there's a three-way comparator, we can - // return whether there was an exact match. This allows the caller to avoid a - // subsequent comparison to determine if an exact match was made, which is - // important for keys with expensive comparison, such as strings. - template - SearchResult internal_locate( - const K &key) const; - - // Internal routine which implements lower_bound(). - template - SearchResult internal_lower_bound( - const K &key) const; - - // Internal routine which implements upper_bound(). - template - iterator internal_upper_bound(const K &key) const; - - // Internal routine which implements find(). - template - iterator internal_find(const K &key) const; - - // Verifies the tree structure of node. - int internal_verify(const node_type *node, const key_type *lo, - const key_type *hi) const; - - node_stats internal_stats(const node_type *node) const { - // The root can be a static empty node. - if (node == nullptr || (node == root() && empty())) { - return node_stats(0, 0); - } - if (node->is_leaf()) { - return node_stats(1, 0); - } - node_stats res(0, 1); - for (int i = node->start(); i <= node->finish(); ++i) { - res += internal_stats(node->child(i)); - } - return res; - } - - node_type *root_; - - // A pointer to the rightmost node. Note that the leftmost node is stored as - // the root's parent. 
We use compressed tuple in order to save space because - // key_compare and allocator_type are usually empty. - absl::container_internal::CompressedTuple - rightmost_; - - // Number of values. - size_type size_; -}; - -//// -// btree_node methods -template -template -inline void btree_node
<P>
::emplace_value(const size_type i, - allocator_type *alloc, - Args &&... args) { - assert(i >= start()); - assert(i <= finish()); - // Shift old values to create space for new value and then construct it in - // place. - if (i < finish()) { - transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, - alloc); - } - value_init(i, alloc, std::forward(args)...); - set_finish(finish() + 1); - - if (is_internal() && finish() > i + 1) { - for (field_type j = finish(); j > i + 1; --j) { - set_child(j, child(j - 1)); - } - clear_child(i + 1); - } -} - -template -inline void btree_node
<P>
::remove_values(const field_type i, - const field_type to_erase, - allocator_type *alloc) { - // Transfer values after the removed range into their new places. - value_destroy_n(i, to_erase, alloc); - const field_type orig_finish = finish(); - const field_type src_i = i + to_erase; - transfer_n(orig_finish - src_i, i, src_i, this, alloc); - - if (is_internal()) { - // Delete all children between begin and end. - for (int j = 0; j < to_erase; ++j) { - clear_and_delete(child(i + j + 1), alloc); - } - // Rotate children after end into new positions. - for (int j = i + to_erase + 1; j <= orig_finish; ++j) { - set_child(j - to_erase, child(j)); - clear_child(j); - } - } - set_finish(orig_finish - to_erase); -} - -template -void btree_node
<P>
::rebalance_right_to_left(const int to_move, - btree_node *right, - allocator_type *alloc) { - assert(parent() == right->parent()); - assert(position() + 1 == right->position()); - assert(right->count() >= count()); - assert(to_move >= 1); - assert(to_move <= right->count()); - - // 1) Move the delimiting value in the parent to the left node. - transfer(finish(), position(), parent(), alloc); - - // 2) Move the (to_move - 1) values from the right node to the left node. - transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); - - // 3) Move the new delimiting value to the parent from the right node. - parent()->transfer(position(), right->start() + to_move - 1, right, alloc); - - // 4) Shift the values in the right node to their correct positions. - right->transfer_n(right->count() - to_move, right->start(), - right->start() + to_move, right, alloc); - - if (is_internal()) { - // Move the child pointers from the right to the left node. - for (int i = 0; i < to_move; ++i) { - init_child(finish() + i + 1, right->child(i)); - } - for (int i = right->start(); i <= right->finish() - to_move; ++i) { - assert(i + to_move <= right->max_count()); - right->init_child(i, right->child(i + to_move)); - right->clear_child(i + to_move); - } - } - - // Fixup `finish` on the left and right nodes. - set_finish(finish() + to_move); - right->set_finish(right->finish() - to_move); -} - -template -void btree_node
<P>
::rebalance_left_to_right(const int to_move, - btree_node *right, - allocator_type *alloc) { - assert(parent() == right->parent()); - assert(position() + 1 == right->position()); - assert(count() >= right->count()); - assert(to_move >= 1); - assert(to_move <= count()); - - // Values in the right node are shifted to the right to make room for the - // new to_move values. Then, the delimiting value in the parent and the - // other (to_move - 1) values in the left node are moved into the right node. - // Lastly, a new delimiting value is moved from the left node into the - // parent, and the remaining empty left node entries are destroyed. - - // 1) Shift existing values in the right node to their correct positions. - right->transfer_n_backward(right->count(), right->start() + to_move, - right->start(), right, alloc); - - // 2) Move the delimiting value in the parent to the right node. - right->transfer(right->start() + to_move - 1, position(), parent(), alloc); - - // 3) Move the (to_move - 1) values from the left node to the right node. - right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, - alloc); - - // 4) Move the new delimiting value to the parent from the left node. - parent()->transfer(position(), finish() - to_move, this, alloc); - - if (is_internal()) { - // Move the child pointers from the left to the right node. - for (int i = right->finish(); i >= right->start(); --i) { - right->init_child(i + to_move, right->child(i)); - right->clear_child(i); - } - for (int i = 1; i <= to_move; ++i) { - right->init_child(i - 1, child(finish() - to_move + i)); - clear_child(finish() - to_move + i); - } - } - - // Fixup the counts on the left and right nodes. - set_finish(finish() - to_move); - right->set_finish(right->finish() + to_move); -} - -template -void btree_node
<P>
::split(const int insert_position, btree_node *dest, - allocator_type *alloc) { - assert(dest->count() == 0); - assert(max_count() == kNodeSlots); - - // We bias the split based on the position being inserted. If we're - // inserting at the beginning of the left node then bias the split to put - // more values on the right node. If we're inserting at the end of the - // right node then bias the split to put more values on the left node. - if (insert_position == start()) { - dest->set_finish(dest->start() + finish() - 1); - } else if (insert_position == kNodeSlots) { - dest->set_finish(dest->start()); - } else { - dest->set_finish(dest->start() + count() / 2); - } - set_finish(finish() - dest->count()); - assert(count() >= 1); - - // Move values from the left sibling to the right sibling. - dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); - - // The split key is the largest value in the left sibling. - --mutable_finish(); - parent()->emplace_value(position(), alloc, finish_slot()); - value_destroy(finish(), alloc); - parent()->init_child(position() + 1, dest); - - if (is_internal()) { - for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); - ++i, ++j) { - assert(child(j) != nullptr); - dest->init_child(i, child(j)); - clear_child(j); - } - } -} - -template -void btree_node

::merge(btree_node *src, allocator_type *alloc) { - assert(parent() == src->parent()); - assert(position() + 1 == src->position()); - - // Move the delimiting value to the left node. - value_init(finish(), alloc, parent()->slot(position())); - - // Move the values from the right to the left node. - transfer_n(src->count(), finish() + 1, src->start(), src, alloc); - - if (is_internal()) { - // Move the child pointers from the right to the left node. - for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { - init_child(j, src->child(i)); - src->clear_child(i); - } - } - - // Fixup `finish` on the src and dest nodes. - set_finish(start() + 1 + count() + src->count()); - src->set_finish(src->start()); - - // Remove the value on the parent node and delete the src node. - parent()->remove_values(position(), /*to_erase=*/1, alloc); -} - -template -void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { - if (node->is_leaf()) { - node->value_destroy_n(node->start(), node->count(), alloc); - deallocate(LeafSize(node->max_count()), node, alloc); - return; - } - if (node->count() == 0) { - deallocate(InternalSize(), node, alloc); - return; - } - - // The parent of the root of the subtree we are deleting. - btree_node *delete_root_parent = node->parent(); - - // Navigate to the leftmost leaf under node, and then delete upwards. - while (node->is_internal()) node = node->start_child(); + } + + enum : uint32_t + { + kNodeSlots = node_type::kNodeSlots, + kMinNodeValues = kNodeSlots / 2, + }; + + struct node_stats + { + using size_type = typename Params::size_type; + + node_stats(size_type l, size_type i) : + leaf_nodes(l), + internal_nodes(i) + { + } + + node_stats& operator+=(const node_stats& other) + { + leaf_nodes += other.leaf_nodes; + internal_nodes += other.internal_nodes; + return *this; + } + + size_type leaf_nodes; + size_type internal_nodes; + }; + + public: + using key_type = typename Params::key_type; + using value_type = typename Params::value_type; + using size_type = typename Params::size_type; + using difference_type = typename Params::difference_type; + using key_compare = typename Params::key_compare; + using original_key_compare = typename Params::original_key_compare; + using value_compare = typename Params::value_compare; + using allocator_type = typename Params::allocator_type; + using reference = typename Params::reference; + using const_reference = typename Params::const_reference; + using pointer = typename Params::pointer; + using const_pointer = typename Params::const_pointer; + using iterator = + typename btree_iterator::iterator; + using const_iterator = typename iterator::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using node_handle_type = node_handle; + + // Internal types made public for use by btree_container types. + using params_type = Params; + using slot_type = typename Params::slot_type; + + private: + // Copies or moves (depending on the template parameter) the values in + // other into this btree in their order in other. This btree must be empty + // before this method is called. This method is used in copy construction, + // copy assignment, and move assignment. + template + void copy_or_move_values_in_order(Btree& other); + + // Validates that various assumptions/requirements are true at compile time. + constexpr static bool static_assert_validation(); + + public: + btree(const key_compare& comp, const allocator_type& alloc) : + root_(EmptyNode()), + rightmost_(comp, alloc, EmptyNode()), + size_(0) + { + } + + btree(const btree& other) : + btree(other, other.allocator()) + { + } + btree(const btree& other, const allocator_type& alloc) : + btree(other.key_comp(), alloc) + { + copy_or_move_values_in_order(other); + } + btree(btree&& other) noexcept + : + root_(absl::exchange(other.root_, EmptyNode())), + rightmost_(std::move(other.rightmost_)), + size_(absl::exchange(other.size_, 0)) + { + other.mutable_rightmost() = EmptyNode(); + } + btree(btree&& other, const allocator_type& alloc) : + btree(other.key_comp(), alloc) + { + if (alloc == other.allocator()) + { + swap(other); + } + else + { + // Move values from `other` one at a time when allocators are different. 
+ copy_or_move_values_in_order(other); + } + } + + ~btree() + { + // Put static_asserts in destructor to avoid triggering them before the type + // is complete. + static_assert(static_assert_validation(), "This call must be elided."); + clear(); + } + + // Assign the contents of other to *this. + btree& operator=(const btree& other); + btree& operator=(btree&& other) noexcept; + + iterator begin() + { + return iterator(leftmost()); + } + const_iterator begin() const + { + return const_iterator(leftmost()); + } + iterator end() + { + return iterator(rightmost(), rightmost()->finish()); + } + const_iterator end() const + { + return const_iterator(rightmost(), rightmost()->finish()); + } + reverse_iterator rbegin() + { + return reverse_iterator(end()); + } + const_reverse_iterator rbegin() const + { + return const_reverse_iterator(end()); + } + reverse_iterator rend() + { + return reverse_iterator(begin()); + } + const_reverse_iterator rend() const + { + return const_reverse_iterator(begin()); + } + + // Finds the first element whose key is not less than `key`. + template + iterator lower_bound(const K& key) + { + return internal_end(internal_lower_bound(key).value); + } + template + const_iterator lower_bound(const K& key) const + { + return internal_end(internal_lower_bound(key).value); + } + + // Finds the first element whose key is not less than `key` and also returns + // whether that element is equal to `key`. + template + std::pair lower_bound_equal(const K& key) const; + + // Finds the first element whose key is greater than `key`. + template + iterator upper_bound(const K& key) + { + return internal_end(internal_upper_bound(key)); + } + template + const_iterator upper_bound(const K& key) const + { + return internal_end(internal_upper_bound(key)); + } + + // Finds the range of values which compare equal to key. The first member of + // the returned pair is equal to lower_bound(key). The second member of the + // pair is equal to upper_bound(key). + template + std::pair equal_range(const K& key); + template + std::pair equal_range(const K& key) const + { + return const_cast(this)->equal_range(key); + } + + // Inserts a value into the btree only if it does not already exist. The + // boolean return value indicates whether insertion succeeded or failed. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_unique(const K& key, Args&&... args); + + // Inserts with hint. Checks to see if the value should be placed immediately + // before `position` in the tree. If so, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_unique() were made. + // Requirement: if `key` already exists in the btree, does not consume `args`. + // Requirement: `key` is never referenced after consuming `args`. + template + std::pair insert_hint_unique(iterator position, const K& key, Args&&... args); + + // Insert a range of values into the btree. + // Note: the first overload avoids constructing a value_type if the key + // already exists in the btree. + template()(params_type::key(*std::declval()), std::declval()))> + void insert_iterator_unique(InputIterator b, InputIterator e, int); + // We need the second overload for cases in which we need to construct a + // value_type in order to compare it with the keys already in the btree. 
+ template + void insert_iterator_unique(InputIterator b, InputIterator e, char); + + // Inserts a value into the btree. + template + iterator insert_multi(const key_type& key, ValueType&& v); + + // Inserts a value into the btree. + template + iterator insert_multi(ValueType&& v) + { + return insert_multi(params_type::key(v), std::forward(v)); + } + + // Insert with hint. Check to see if the value should be placed immediately + // before position in the tree. If it does, then the insertion will take + // amortized constant time. If not, the insertion will take amortized + // logarithmic time as if a call to insert_multi(v) were made. + template + iterator insert_hint_multi(iterator position, ValueType&& v); + + // Insert a range of values into the btree. + template + void insert_iterator_multi(InputIterator b, InputIterator e); + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + // Requirement: does not read the value at `*iter`. + iterator erase(iterator iter); + + // Erases range. Returns the number of keys erased and an iterator pointing + // to the element after the last erased element. + std::pair erase_range(iterator begin, iterator end); + + // Finds an element with key equivalent to `key` or returns `end()` if `key` + // is not present. + template + iterator find(const K& key) + { + return internal_end(internal_find(key)); + } + template + const_iterator find(const K& key) const + { + return internal_end(internal_find(key)); + } + + // Clear the btree, deleting all of the values it contains. + void clear(); + + // Swaps the contents of `this` and `other`. + void swap(btree& other); + + const key_compare& key_comp() const noexcept + { + return rightmost_.template get<0>(); + } + template + bool compare_keys(const K1& a, const K2& b) const + { + return compare_internal::compare_result_as_less_than(key_comp()(a, b)); + } + + value_compare value_comp() const + { + return value_compare(original_key_compare(key_comp())); + } + + // Verifies the structure of the btree. + void verify() const; + + // Size routines. + size_type size() const + { + return size_; + } + size_type max_size() const + { + return (std::numeric_limits::max)(); + } + bool empty() const + { + return size_ == 0; + } + + // The height of the btree. An empty tree will have height 0. + size_type height() const + { + size_type h = 0; + if (!empty()) + { + // Count the length of the chain from the leftmost node up to the + // root. We actually count from the root back around to the level below + // the root, but the calculation is the same because of the circularity + // of that traversal. + const node_type* n = root(); + do + { + ++h; + n = n->parent(); + } while (n != root()); + } + return h; + } + + // The number of internal, leaf and total nodes used by the btree. + size_type leaf_nodes() const + { + return internal_stats(root()).leaf_nodes; + } + size_type internal_nodes() const + { + return internal_stats(root()).internal_nodes; + } + size_type nodes() const + { + node_stats stats = internal_stats(root()); + return stats.leaf_nodes + stats.internal_nodes; + } + + // The total number of bytes used by the btree. + // TODO(b/169338300): update to support node_btree_*. 
+ size_type bytes_used() const + { + node_stats stats = internal_stats(root()); + if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) + { + return sizeof(*this) + node_type::LeafSize(root()->max_count()); + } + else + { + return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() + + stats.internal_nodes * node_type::InternalSize(); + } + } + + // The average number of bytes used per value stored in the btree assuming + // random insertion order. + static double average_bytes_per_value() + { + // The expected number of values per node with random insertion order is the + // average of the maximum and minimum numbers of values per node. + const double expected_values_per_node = + (kNodeSlots + kMinNodeValues) / 2.0; + return node_type::LeafSize() / expected_values_per_node; + } + + // The fullness of the btree. Computed as the number of elements in the btree + // divided by the maximum number of elements a tree with the current number + // of nodes could hold. A value of 1 indicates perfect space + // utilization. Smaller values indicate space wastage. + // Returns 0 for empty trees. + double fullness() const + { + if (empty()) + return 0.0; + return static_cast(size()) / (nodes() * kNodeSlots); + } + // The overhead of the btree structure in bytes per node. Computed as the + // total number of bytes used by the btree minus the number of bytes used for + // storing elements divided by the number of elements. + // Returns 0 for empty trees. + double overhead() const + { + if (empty()) + return 0.0; + return (bytes_used() - size() * sizeof(value_type)) / + static_cast(size()); + } + + // The allocator used by the btree. + allocator_type get_allocator() const + { + return allocator(); + } + + private: + friend struct btree_access; + + // Internal accessor routines. + node_type* root() + { + return root_; + } + const node_type* root() const + { + return root_; + } + node_type*& mutable_root() noexcept + { + return root_; + } + node_type* rightmost() + { + return rightmost_.template get<2>(); + } + const node_type* rightmost() const + { + return rightmost_.template get<2>(); + } + node_type*& mutable_rightmost() noexcept + { + return rightmost_.template get<2>(); + } + key_compare* mutable_key_comp() noexcept + { + return &rightmost_.template get<0>(); + } + + // The leftmost node is stored as the parent of the root node. + node_type* leftmost() + { + return root()->parent(); + } + const node_type* leftmost() const + { + return root()->parent(); + } + + // Allocator routines. + allocator_type* mutable_allocator() noexcept + { + return &rightmost_.template get<1>(); + } + const allocator_type& allocator() const noexcept + { + return rightmost_.template get<1>(); + } + + // Allocates a correctly aligned node of at least size bytes using the + // allocator. + node_type* allocate(const size_type size) + { + return reinterpret_cast( + absl::container_internal::Allocate( + mutable_allocator(), size + ) + ); + } + + // Node creation/deletion routines. + node_type* new_internal_node(node_type* parent) + { + node_type* n = allocate(node_type::InternalSize()); + n->init_internal(parent); + return n; + } + node_type* new_leaf_node(node_type* parent) + { + node_type* n = allocate(node_type::LeafSize()); + n->init_leaf(kNodeSlots, parent); + return n; + } + node_type* new_leaf_root_node(const int max_count) + { + node_type* n = allocate(node_type::LeafSize(max_count)); + n->init_leaf(max_count, /*parent=*/n); + return n; + } + + // Deletion helper routines. 
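// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. All names and numbers below are invented for the example.]
// The accounting helpers above (bytes_used, average_bytes_per_value, fullness,
// overhead) are plain arithmetic over node counts; this standalone snippet
// applies the same formulas to made-up figures.
#include <cassert>

int main() {
  const double kSlots = 62.0;          // hypothetical values per node
  const double num_nodes = 100.0;      // leaf + internal nodes
  const double num_values = 4000.0;    // elements stored
  const double bytes_used = 260000.0;  // total bytes, including node overhead
  const double value_size = 48.0;      // hypothetical sizeof(value_type)

  // fullness: elements held / elements the current nodes could hold.
  const double fullness = num_values / (num_nodes * kSlots);
  // overhead: non-payload bytes amortized over the stored elements.
  const double overhead = (bytes_used - num_values * value_size) / num_values;

  assert(fullness > 0.0 && fullness <= 1.0);
  assert(overhead >= 0.0);
  return 0;
}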
+ iterator rebalance_after_delete(iterator iter); + + // Rebalances or splits the node iter points to. + void rebalance_or_split(iterator* iter); + + // Merges the values of left, right and the delimiting key on their parent + // onto left, removing the delimiting key and deleting right. + void merge_nodes(node_type* left, node_type* right); + + // Tries to merge node with its left or right sibling, and failing that, + // rebalance with its left or right sibling. Returns true if a merge + // occurred, at which point it is no longer valid to access node. Returns + // false if no merging took place. + bool try_merge_or_rebalance(iterator* iter); + + // Tries to shrink the height of the tree by 1. + void try_shrink(); + + iterator internal_end(iterator iter) + { + return iter.node_ != nullptr ? iter : end(); + } + const_iterator internal_end(const_iterator iter) const + { + return iter.node_ != nullptr ? iter : end(); + } + + // Emplaces a value into the btree immediately before iter. Requires that + // key(v) <= iter.key() and (--iter).key() <= key(v). + template + iterator internal_emplace(iterator iter, Args&&... args); + + // Returns an iterator pointing to the first value >= the value "iter" is + // pointing at. Note that "iter" might be pointing to an invalid location such + // as iter.position_ == iter.node_->finish(). This routine simply moves iter + // up in the tree to a valid location. Requires: iter.node_ is non-null. + template + static IterType internal_last(IterType iter); + + // Returns an iterator pointing to the leaf position at which key would + // reside in the tree, unless there is an exact match - in which case, the + // result may not be on a leaf. When there's a three-way comparator, we can + // return whether there was an exact match. This allows the caller to avoid a + // subsequent comparison to determine if an exact match was made, which is + // important for keys with expensive comparison, such as strings. + template + SearchResult internal_locate( + const K& key + ) const; + + // Internal routine which implements lower_bound(). + template + SearchResult internal_lower_bound( + const K& key + ) const; + + // Internal routine which implements upper_bound(). + template + iterator internal_upper_bound(const K& key) const; + + // Internal routine which implements find(). + template + iterator internal_find(const K& key) const; + + // Verifies the tree structure of node. + int internal_verify(const node_type* node, const key_type* lo, const key_type* hi) const; + + node_stats internal_stats(const node_type* node) const + { + // The root can be a static empty node. + if (node == nullptr || (node == root() && empty())) + { + return node_stats(0, 0); + } + if (node->is_leaf()) + { + return node_stats(1, 0); + } + node_stats res(0, 1); + for (int i = node->start(); i <= node->finish(); ++i) + { + res += internal_stats(node->child(i)); + } + return res; + } + + node_type* root_; + + // A pointer to the rightmost node. Note that the leftmost node is stored as + // the root's parent. We use compressed tuple in order to save space because + // key_compare and allocator_type are usually empty. + absl::container_internal::CompressedTuple + rightmost_; + + // Number of values. + size_type size_; + }; + + //// + // btree_node methods + template + template + inline void btree_node

::emplace_value(const size_type i, allocator_type* alloc, Args&&... args) + { + assert(i >= start()); + assert(i <= finish()); + // Shift old values to create space for new value and then construct it in + // place. + if (i < finish()) + { + transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, alloc); + } + value_init(i, alloc, std::forward(args)...); + set_finish(finish() + 1); + + if (is_internal() && finish() > i + 1) + { + for (field_type j = finish(); j > i + 1; --j) + { + set_child(j, child(j - 1)); + } + clear_child(i + 1); + } + } + + template + inline void btree_node
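// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. The fixed-capacity "node" below is an invented stand-in.]
// emplace_value() above makes room by shifting the tail of the node one slot
// to the right and then constructing the new value in place; the same
// shift-then-construct pattern on a plain buffer:
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <utility>

// Toy fixed-capacity "node": insert `v` at index `i`, shifting [i, count) right.
template <typename T, std::size_t N>
void emplace_at(std::array<T, N>& slots, std::size_t& count, std::size_t i, T v) {
  assert(count < N && i <= count);
  // Shift existing values to create space for the new value...
  std::move_backward(slots.begin() + i, slots.begin() + count,
                     slots.begin() + count + 1);
  // ...then place it and bump the count.
  slots[i] = std::move(v);
  ++count;
}

int main() {
  std::array<int, 8> node{1, 3, 5, 7};
  std::size_t count = 4;
  emplace_at(node, count, std::size_t{1}, 2);  // node becomes {1, 2, 3, 5, 7}
  assert(count == 5 && node[1] == 2 && node[4] == 7);
  return 0;
}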

::remove_values(const field_type i, const field_type to_erase, allocator_type* alloc) + { + // Transfer values after the removed range into their new places. + value_destroy_n(i, to_erase, alloc); + const field_type orig_finish = finish(); + const field_type src_i = i + to_erase; + transfer_n(orig_finish - src_i, i, src_i, this, alloc); + + if (is_internal()) + { + // Delete all children between begin and end. + for (int j = 0; j < to_erase; ++j) + { + clear_and_delete(child(i + j + 1), alloc); + } + // Rotate children after end into new positions. + for (int j = i + to_erase + 1; j <= orig_finish; ++j) + { + set_child(j - to_erase, child(j)); + clear_child(j); + } + } + set_finish(orig_finish - to_erase); + } + + template + void btree_node
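// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. The fixed-capacity "node" below is an invented stand-in.]
// remove_values() above erases a contiguous range of slots and then moves the
// tail of the node left over the gap (child-pointer handling omitted here):
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>

// Toy fixed-capacity "node": erase `to_erase` slots starting at `i`.
template <typename T, std::size_t N>
void remove_at(std::array<T, N>& slots, std::size_t& count,
               std::size_t i, std::size_t to_erase) {
  assert(i + to_erase <= count);
  // Transfer values after the removed range into their new places.
  std::move(slots.begin() + i + to_erase, slots.begin() + count,
            slots.begin() + i);
  count -= to_erase;
}

int main() {
  std::array<int, 8> node{1, 2, 3, 4, 5};
  std::size_t count = 5;
  remove_at(node, count, 1, 2);  // erase {2, 3}; node becomes {1, 4, 5}
  assert(count == 3 && node[0] == 1 && node[1] == 4 && node[2] == 5);
  return 0;
}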

::rebalance_right_to_left(const int to_move, btree_node* right, allocator_type* alloc) + { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(right->count() >= count()); + assert(to_move >= 1); + assert(to_move <= right->count()); + + // 1) Move the delimiting value in the parent to the left node. + transfer(finish(), position(), parent(), alloc); + + // 2) Move the (to_move - 1) values from the right node to the left node. + transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); + + // 3) Move the new delimiting value to the parent from the right node. + parent()->transfer(position(), right->start() + to_move - 1, right, alloc); + + // 4) Shift the values in the right node to their correct positions. + right->transfer_n(right->count() - to_move, right->start(), right->start() + to_move, right, alloc); + + if (is_internal()) + { + // Move the child pointers from the right to the left node. + for (int i = 0; i < to_move; ++i) + { + init_child(finish() + i + 1, right->child(i)); + } + for (int i = right->start(); i <= right->finish() - to_move; ++i) + { + assert(i + to_move <= right->max_count()); + right->init_child(i, right->child(i + to_move)); + right->clear_child(i + to_move); + } + } + + // Fixup `finish` on the left and right nodes. + set_finish(finish() + to_move); + right->set_finish(right->finish() - to_move); + } + + template + void btree_node
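// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. Plain vectors stand in for nodes; real nodes also move child
//  pointers and live inside the parent.]
// rebalance_right_to_left() above moves values from the fuller right sibling
// to the left one, routed through the delimiting key held by the parent; the
// same four steps on flat containers:
#include <cassert>
#include <vector>

void toy_rebalance_right_to_left(int to_move, std::vector<int>& left,
                                 std::vector<int>& right, int& delim) {
  assert(to_move >= 1 && to_move <= static_cast<int>(right.size()));
  // 1) The parent's delimiting value becomes the last value of the left node.
  left.push_back(delim);
  // 2) The first (to_move - 1) values of the right node follow it.
  left.insert(left.end(), right.begin(), right.begin() + (to_move - 1));
  // 3) The next value of the right node becomes the new delimiter.
  delim = right[to_move - 1];
  // 4) The remaining right-node values shift down to the front.
  right.erase(right.begin(), right.begin() + to_move);
}

int main() {
  std::vector<int> left{1, 2};
  std::vector<int> right{4, 5, 6, 7, 8};
  int delim = 3;
  toy_rebalance_right_to_left(2, left, right, delim);
  assert((left == std::vector<int>{1, 2, 3, 4}));
  assert(delim == 5);
  assert((right == std::vector<int>{6, 7, 8}));
  return 0;
}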

::rebalance_left_to_right(const int to_move, btree_node* right, allocator_type* alloc) + { + assert(parent() == right->parent()); + assert(position() + 1 == right->position()); + assert(count() >= right->count()); + assert(to_move >= 1); + assert(to_move <= count()); + + // Values in the right node are shifted to the right to make room for the + // new to_move values. Then, the delimiting value in the parent and the + // other (to_move - 1) values in the left node are moved into the right node. + // Lastly, a new delimiting value is moved from the left node into the + // parent, and the remaining empty left node entries are destroyed. + + // 1) Shift existing values in the right node to their correct positions. + right->transfer_n_backward(right->count(), right->start() + to_move, right->start(), right, alloc); + + // 2) Move the delimiting value in the parent to the right node. + right->transfer(right->start() + to_move - 1, position(), parent(), alloc); + + // 3) Move the (to_move - 1) values from the left node to the right node. + right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, alloc); + + // 4) Move the new delimiting value to the parent from the left node. + parent()->transfer(position(), finish() - to_move, this, alloc); + + if (is_internal()) + { + // Move the child pointers from the left to the right node. + for (int i = right->finish(); i >= right->start(); --i) + { + right->init_child(i + to_move, right->child(i)); + right->clear_child(i); + } + for (int i = 1; i <= to_move; ++i) + { + right->init_child(i - 1, child(finish() - to_move + i)); + clear_child(finish() - to_move + i); + } + } + + // Fixup the counts on the left and right nodes. + set_finish(finish() - to_move); + right->set_finish(right->finish() + to_move); + } + + template + void btree_node

::split(const int insert_position, btree_node* dest, allocator_type* alloc) + { + assert(dest->count() == 0); + assert(max_count() == kNodeSlots); + + // We bias the split based on the position being inserted. If we're + // inserting at the beginning of the left node then bias the split to put + // more values on the right node. If we're inserting at the end of the + // right node then bias the split to put more values on the left node. + if (insert_position == start()) + { + dest->set_finish(dest->start() + finish() - 1); + } + else if (insert_position == kNodeSlots) + { + dest->set_finish(dest->start()); + } + else + { + dest->set_finish(dest->start() + count() / 2); + } + set_finish(finish() - dest->count()); + assert(count() >= 1); + + // Move values from the left sibling to the right sibling. + dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); + + // The split key is the largest value in the left sibling. + --mutable_finish(); + parent()->emplace_value(position(), alloc, finish_slot()); + value_destroy(finish(), alloc); + parent()->init_child(position() + 1, dest); + + if (is_internal()) + { + for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); + ++i, ++j) + { + assert(child(j) != nullptr); + dest->init_child(i, child(j)); + clear_child(j); + } + } + } + + template + void btree_node
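// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. Names and the slot count are invented for the example.]
// split() above biases where a full node is cut depending on where the pending
// insertion will land: inserting at the front keeps only one value on the
// left, inserting at the back keeps everything on the left, and anything else
// splits evenly. The same decision as a small function:
#include <cassert>

// How many values move to the new right sibling (kSlots stands in for
// kNodeSlots, `count` for the number of values in the full node).
int values_moved_right(int insert_position, int count, int kSlots) {
  if (insert_position == 0) return count - 1;  // front insert: bias right
  if (insert_position == kSlots) return 0;     // back insert: bias left
  return count / 2;                            // otherwise split evenly
}

int main() {
  const int kSlots = 6;
  assert(values_moved_right(0, 6, kSlots) == 5);       // front-biased
  assert(values_moved_right(kSlots, 6, kSlots) == 0);  // back-biased
  assert(values_moved_right(3, 6, kSlots) == 3);       // even split
  return 0;
}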

::merge(btree_node* src, allocator_type* alloc) + { + assert(parent() == src->parent()); + assert(position() + 1 == src->position()); + + // Move the delimiting value to the left node. + value_init(finish(), alloc, parent()->slot(position())); + + // Move the values from the right to the left node. + transfer_n(src->count(), finish() + 1, src->start(), src, alloc); + + if (is_internal()) + { + // Move the child pointers from the right to the left node. + for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) + { + init_child(j, src->child(i)); + src->clear_child(i); + } + } + + // Fixup `finish` on the src and dest nodes. + set_finish(start() + 1 + count() + src->count()); + src->set_finish(src->start()); + + // Remove the value on the parent node and delete the src node. + parent()->remove_values(position(), /*to_erase=*/1, alloc); + } + + template + void btree_node
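// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. Vectors stand in for nodes; parent bookkeeping and child
//  pointers are omitted.]
// merge() above concatenates two siblings by first pulling the delimiting
// value down from the parent and then appending the right sibling's values:
#include <cassert>
#include <vector>

void toy_merge_siblings(std::vector<int>& left, std::vector<int>& right,
                        int delim) {
  // Move the delimiting value to the left node.
  left.push_back(delim);
  // Move the values from the right to the left node.
  left.insert(left.end(), right.begin(), right.end());
  right.clear();
}

int main() {
  std::vector<int> left{1, 2};
  std::vector<int> right{4, 5};
  toy_merge_siblings(left, right, 3);
  assert((left == std::vector<int>{1, 2, 3, 4, 5}));
  assert(right.empty());
  return 0;
}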

::clear_and_delete(btree_node* node, allocator_type* alloc) + { + if (node->is_leaf()) + { + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(LeafSize(node->max_count()), node, alloc); + return; + } + if (node->count() == 0) + { + deallocate(InternalSize(), node, alloc); + return; + } + + // The parent of the root of the subtree we are deleting. + btree_node* delete_root_parent = node->parent(); + + // Navigate to the leftmost leaf under node, and then delete upwards. + while (node->is_internal()) + node = node->start_child(); #ifdef ABSL_BTREE_ENABLE_GENERATIONS - // When generations are enabled, we delete the leftmost leaf last in case it's - // the parent of the root and we need to check whether it's a leaf before we - // can update the root's generation. - // TODO(ezb): if we change btree_node::is_root to check a bool inside the node - // instead of checking whether the parent is a leaf, we can remove this logic. - btree_node *leftmost_leaf = node; + // When generations are enabled, we delete the leftmost leaf last in case it's + // the parent of the root and we need to check whether it's a leaf before we + // can update the root's generation. + // TODO(ezb): if we change btree_node::is_root to check a bool inside the node + // instead of checking whether the parent is a leaf, we can remove this logic. + btree_node* leftmost_leaf = node; #endif - // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which - // isn't guaranteed to be a valid `field_type`. - int pos = node->position(); - btree_node *parent = node->parent(); - for (;;) { - // In each iteration of the next loop, we delete one leaf node and go right. - assert(pos <= parent->finish()); - do { - node = parent->child(pos); - if (node->is_internal()) { - // Navigate to the leftmost leaf under node. - while (node->is_internal()) node = node->start_child(); - pos = node->position(); - parent = node->parent(); - } - node->value_destroy_n(node->start(), node->count(), alloc); + // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which + // isn't guaranteed to be a valid `field_type`. + int pos = node->position(); + btree_node* parent = node->parent(); + for (;;) + { + // In each iteration of the next loop, we delete one leaf node and go right. + assert(pos <= parent->finish()); + do + { + node = parent->child(pos); + if (node->is_internal()) + { + // Navigate to the leftmost leaf under node. + while (node->is_internal()) + node = node->start_child(); + pos = node->position(); + parent = node->parent(); + } + node->value_destroy_n(node->start(), node->count(), alloc); #ifdef ABSL_BTREE_ENABLE_GENERATIONS - if (leftmost_leaf != node) + if (leftmost_leaf != node) #endif - deallocate(LeafSize(node->max_count()), node, alloc); - ++pos; - } while (pos <= parent->finish()); - - // Once we've deleted all children of parent, delete parent and go up/right. - assert(pos > parent->finish()); - do { - node = parent; - pos = node->position(); - parent = node->parent(); - node->value_destroy_n(node->start(), node->count(), alloc); - deallocate(InternalSize(), node, alloc); - if (parent == delete_root_parent) { + deallocate(LeafSize(node->max_count()), node, alloc); + ++pos; + } while (pos <= parent->finish()); + + // Once we've deleted all children of parent, delete parent and go up/right. 
+ assert(pos > parent->finish()); + do + { + node = parent; + pos = node->position(); + parent = node->parent(); + node->value_destroy_n(node->start(), node->count(), alloc); + deallocate(InternalSize(), node, alloc); + if (parent == delete_root_parent) + { #ifdef ABSL_BTREE_ENABLE_GENERATIONS - deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); + deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); #endif - return; - } - ++pos; - } while (pos > parent->finish()); - } -} - -//// -// btree_iterator methods -template -void btree_iterator::increment_slow() { - if (node_->is_leaf()) { - assert(position_ >= node_->finish()); - btree_iterator save(*this); - while (position_ == node_->finish() && !node_->is_root()) { - assert(node_->parent()->child(node_->position()) == node_); - position_ = node_->position(); - node_ = node_->parent(); - } - // TODO(ezb): assert we aren't incrementing end() instead of handling. - if (position_ == node_->finish()) { - *this = save; - } - } else { - assert(position_ < node_->finish()); - node_ = node_->child(position_ + 1); - while (node_->is_internal()) { - node_ = node_->start_child(); - } - position_ = node_->start(); - } -} - -template -void btree_iterator::decrement_slow() { - if (node_->is_leaf()) { - assert(position_ <= -1); - btree_iterator save(*this); - while (position_ < node_->start() && !node_->is_root()) { - assert(node_->parent()->child(node_->position()) == node_); - position_ = node_->position() - 1; - node_ = node_->parent(); - } - // TODO(ezb): assert we aren't decrementing begin() instead of handling. - if (position_ < node_->start()) { - *this = save; - } - } else { - assert(position_ >= node_->start()); - node_ = node_->child(position_); - while (node_->is_internal()) { - node_ = node_->child(node_->finish()); - } - position_ = node_->finish() - 1; - } -} - -//// -// btree methods -template -template -void btree

::copy_or_move_values_in_order(Btree &other) { - static_assert(std::is_same::value || - std::is_same::value, - "Btree type must be same or const."); - assert(empty()); - - // We can avoid key comparisons because we know the order of the - // values is the same order we'll store them in. - auto iter = other.begin(); - if (iter == other.end()) return; - insert_multi(iter.slot()); - ++iter; - for (; iter != other.end(); ++iter) { - // If the btree is not empty, we can just insert the new value at the end - // of the tree. - internal_emplace(end(), iter.slot()); - } -} - -template -constexpr bool btree

::static_assert_validation() { - static_assert(std::is_nothrow_copy_constructible::value, - "Key comparison must be nothrow copy constructible"); - static_assert(std::is_nothrow_copy_constructible::value, - "Allocator must be nothrow copy constructible"); - static_assert(type_traits_internal::is_trivially_copyable::value, - "iterator not trivially copyable."); - - // Note: We assert that kTargetValues, which is computed from - // Params::kTargetNodeSize, must fit the node_type::field_type. - static_assert( - kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), - "target node size too large"); - - // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. - static_assert( - compare_has_valid_result_type(), - "key comparison function must return absl::{weak,strong}_ordering or " - "bool."); - - // Test the assumption made in setting kNodeSlotSpace. - static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, - "node space assumption incorrect"); - - return true; -} - -template -template -auto btree

::lower_bound_equal(const K &key) const - -> std::pair { - const SearchResult res = - internal_lower_bound(key); - const iterator lower = iterator(internal_end(res.value)); - const bool equal = res.HasMatch() - ? res.IsEq() - : lower != end() && !compare_keys(key, lower.key()); - return {lower, equal}; -} - -template -template -auto btree

::equal_range(const K &key) -> std::pair { - const std::pair lower_and_equal = lower_bound_equal(key); - const iterator lower = lower_and_equal.first; - if (!lower_and_equal.second) { - return {lower, lower}; - } - - const iterator next = std::next(lower); - if (!params_type::template can_have_multiple_equivalent_keys()) { - // The next iterator after lower must point to a key greater than `key`. - // Note: if this assert fails, then it may indicate that the comparator does - // not meet the equivalence requirements for Compare - // (see https://en.cppreference.com/w/cpp/named_req/Compare). - assert(next == end() || compare_keys(key, next.key())); - return {lower, next}; - } - // Try once more to avoid the call to upper_bound() if there's only one - // equivalent key. This should prevent all calls to upper_bound() in cases of - // unique-containers with heterogeneous comparators in which all comparison - // operators have the same equivalence classes. - if (next == end() || compare_keys(key, next.key())) return {lower, next}; - - // In this case, we need to call upper_bound() to avoid worst case O(N) - // behavior if we were to iterate over equal keys. - return {lower, upper_bound(key)}; -} - -template -template -auto btree

::insert_unique(const K &key, Args &&... args) - -> std::pair { - if (empty()) { - mutable_root() = mutable_rightmost() = new_leaf_root_node(1); - } - - SearchResult res = internal_locate(key); - iterator iter = res.value; - - if (res.HasMatch()) { - if (res.IsEq()) { - // The key already exists in the tree, do nothing. - return {iter, false}; - } - } else { - iterator last = internal_last(iter); - if (last.node_ && !compare_keys(key, last.key())) { - // The key already exists in the tree, do nothing. - return {last, false}; - } - } - return {internal_emplace(iter, std::forward(args)...), true}; -} - -template -template -inline auto btree

::insert_hint_unique(iterator position, const K &key, - Args &&... args) - -> std::pair { - if (!empty()) { - if (position == end() || compare_keys(key, position.key())) { - if (position == begin() || compare_keys(std::prev(position).key(), key)) { - // prev.key() < key < position.key() - return {internal_emplace(position, std::forward(args)...), true}; - } - } else if (compare_keys(position.key(), key)) { - ++position; - if (position == end() || compare_keys(key, position.key())) { - // {original `position`}.key() < key < {current `position`}.key() - return {internal_emplace(position, std::forward(args)...), true}; - } - } else { - // position.key() == key - return {position, false}; - } - } - return insert_unique(key, std::forward(args)...); -} - -template -template -void btree

::insert_iterator_unique(InputIterator b, InputIterator e, int) { - for (; b != e; ++b) { - insert_hint_unique(end(), params_type::key(*b), *b); - } -} - -template -template -void btree

::insert_iterator_unique(InputIterator b, InputIterator e, char) { - for (; b != e; ++b) { - // Use a node handle to manage a temp slot. - auto node_handle = - CommonAccess::Construct(get_allocator(), *b); - slot_type *slot = CommonAccess::GetSlot(node_handle); - insert_hint_unique(end(), params_type::key(slot), slot); - } -} - -template -template -auto btree

::insert_multi(const key_type &key, ValueType &&v) -> iterator { - if (empty()) { - mutable_root() = mutable_rightmost() = new_leaf_root_node(1); - } - - iterator iter = internal_upper_bound(key); - if (iter.node_ == nullptr) { - iter = end(); - } - return internal_emplace(iter, std::forward(v)); -} - -template -template -auto btree

::insert_hint_multi(iterator position, ValueType &&v) -> iterator { - if (!empty()) { - const key_type &key = params_type::key(v); - if (position == end() || !compare_keys(position.key(), key)) { - if (position == begin() || - !compare_keys(key, std::prev(position).key())) { - // prev.key() <= key <= position.key() - return internal_emplace(position, std::forward(v)); - } - } else { - ++position; - if (position == end() || !compare_keys(position.key(), key)) { - // {original `position`}.key() < key < {current `position`}.key() - return internal_emplace(position, std::forward(v)); - } - } - } - return insert_multi(std::forward(v)); -} - -template -template -void btree

::insert_iterator_multi(InputIterator b, InputIterator e) { - for (; b != e; ++b) { - insert_hint_multi(end(), *b); - } -} - -template -auto btree

::operator=(const btree &other) -> btree & { - if (this != &other) { - clear(); - - *mutable_key_comp() = other.key_comp(); - if (absl::allocator_traits< - allocator_type>::propagate_on_container_copy_assignment::value) { - *mutable_allocator() = other.allocator(); - } - - copy_or_move_values_in_order(other); - } - return *this; -} - -template -auto btree

::operator=(btree &&other) noexcept -> btree & { - if (this != &other) { - clear(); - - using std::swap; - if (absl::allocator_traits< - allocator_type>::propagate_on_container_copy_assignment::value) { - swap(root_, other.root_); - // Note: `rightmost_` also contains the allocator and the key comparator. - swap(rightmost_, other.rightmost_); - swap(size_, other.size_); - } else { - if (allocator() == other.allocator()) { - swap(mutable_root(), other.mutable_root()); - swap(*mutable_key_comp(), *other.mutable_key_comp()); - swap(mutable_rightmost(), other.mutable_rightmost()); - swap(size_, other.size_); - } else { - // We aren't allowed to propagate the allocator and the allocator is - // different so we can't take over its memory. We must move each element - // individually. We need both `other` and `this` to have `other`s key - // comparator while moving the values so we can't swap the key - // comparators. - *mutable_key_comp() = other.key_comp(); - copy_or_move_values_in_order(other); - } - } - } - return *this; -} - -template -auto btree

::erase(iterator iter) -> iterator { - iter.node_->value_destroy(iter.position_, mutable_allocator()); - iter.update_generation(); - - const bool internal_delete = iter.node_->is_internal(); - if (internal_delete) { - // Deletion of a value on an internal node. First, transfer the largest - // value from our left child here, then erase/rebalance from that position. - // We can get to the largest value from our left child by decrementing iter. - iterator internal_iter(iter); - --iter; - assert(iter.node_->is_leaf()); - internal_iter.node_->transfer(internal_iter.position_, iter.position_, - iter.node_, mutable_allocator()); - } else { - // Shift values after erased position in leaf. In the internal case, we - // don't need to do this because the leaf position is the end of the node. - const field_type transfer_from = iter.position_ + 1; - const field_type num_to_transfer = iter.node_->finish() - transfer_from; - iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from, - iter.node_, mutable_allocator()); - } - // Update node finish and container size. - iter.node_->set_finish(iter.node_->finish() - 1); - --size_; - - // We want to return the next value after the one we just erased. If we - // erased from an internal node (internal_delete == true), then the next - // value is ++(++iter). If we erased from a leaf node (internal_delete == - // false) then the next value is ++iter. Note that ++iter may point to an - // internal node and the value in the internal node may move to a leaf node - // (iter.node_) when rebalancing is performed at the leaf level. - - iterator res = rebalance_after_delete(iter); - - // If we erased from an internal node, advance the iterator. - if (internal_delete) { - ++res; - } - return res; -} - -template -auto btree

::rebalance_after_delete(iterator iter) -> iterator { - // Merge/rebalance as we walk back up the tree. - iterator res(iter); - bool first_iteration = true; - for (;;) { - if (iter.node_ == root()) { - try_shrink(); - if (empty()) { - return end(); - } - break; - } - if (iter.node_->count() >= kMinNodeValues) { - break; - } - bool merged = try_merge_or_rebalance(&iter); - // On the first iteration, we should update `res` with `iter` because `res` - // may have been invalidated. - if (first_iteration) { - res = iter; - first_iteration = false; - } - if (!merged) { - break; - } - iter.position_ = iter.node_->position(); - iter.node_ = iter.node_->parent(); - } - res.update_generation(); - - // Adjust our return value. If we're pointing at the end of a node, advance - // the iterator. - if (res.position_ == res.node_->finish()) { - res.position_ = res.node_->finish() - 1; - ++res; - } - - return res; -} - -template -auto btree

::erase_range(iterator begin, iterator end) - -> std::pair { - difference_type count = std::distance(begin, end); - assert(count >= 0); - - if (count == 0) { - return {0, begin}; - } - - if (static_cast(count) == size_) { - clear(); - return {count, this->end()}; - } - - if (begin.node_ == end.node_) { - assert(end.position_ > begin.position_); - begin.node_->remove_values(begin.position_, end.position_ - begin.position_, - mutable_allocator()); - size_ -= count; - return {count, rebalance_after_delete(begin)}; - } - - const size_type target_size = size_ - count; - while (size_ > target_size) { - if (begin.node_->is_leaf()) { - const size_type remaining_to_erase = size_ - target_size; - const size_type remaining_in_node = - begin.node_->finish() - begin.position_; - const size_type to_erase = - (std::min)(remaining_to_erase, remaining_in_node); - begin.node_->remove_values(begin.position_, to_erase, - mutable_allocator()); - size_ -= to_erase; - begin = rebalance_after_delete(begin); - } else { - begin = erase(begin); - } - } - begin.update_generation(); - return {count, begin}; -} - -template -void btree

::clear() { - if (!empty()) { - node_type::clear_and_delete(root(), mutable_allocator()); - } - mutable_root() = mutable_rightmost() = EmptyNode(); - size_ = 0; -} - -template -void btree

::swap(btree &other) { - using std::swap; - if (absl::allocator_traits< - allocator_type>::propagate_on_container_swap::value) { - // Note: `rightmost_` also contains the allocator and the key comparator. - swap(rightmost_, other.rightmost_); - } else { - // It's undefined behavior if the allocators are unequal here. - assert(allocator() == other.allocator()); - swap(mutable_rightmost(), other.mutable_rightmost()); - swap(*mutable_key_comp(), *other.mutable_key_comp()); - } - swap(mutable_root(), other.mutable_root()); - swap(size_, other.size_); -} - -template -void btree

::verify() const { - assert(root() != nullptr); - assert(leftmost() != nullptr); - assert(rightmost() != nullptr); - assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); - assert(leftmost() == (++const_iterator(root(), -1)).node_); - assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); - assert(leftmost()->is_leaf()); - assert(rightmost()->is_leaf()); -} - -template -void btree

::rebalance_or_split(iterator *iter) { - node_type *&node = iter->node_; - int &insert_position = iter->position_; - assert(node->count() == node->max_count()); - assert(kNodeSlots == node->max_count()); - - // First try to make room on the node by rebalancing. - node_type *parent = node->parent(); - if (node != root()) { - if (node->position() > parent->start()) { - // Try rebalancing with our left sibling. - node_type *left = parent->child(node->position() - 1); - assert(left->max_count() == kNodeSlots); - if (left->count() < kNodeSlots) { - // We bias rebalancing based on the position being inserted. If we're - // inserting at the end of the right node then we bias rebalancing to - // fill up the left node. - int to_move = (kNodeSlots - left->count()) / - (1 + (insert_position < static_cast(kNodeSlots))); - to_move = (std::max)(1, to_move); - - if (insert_position - to_move >= node->start() || - left->count() + to_move < static_cast(kNodeSlots)) { - left->rebalance_right_to_left(to_move, node, mutable_allocator()); - - assert(node->max_count() - node->count() == to_move); - insert_position = insert_position - to_move; - if (insert_position < node->start()) { - insert_position = insert_position + left->count() + 1; - node = left; - } - - assert(node->count() < node->max_count()); - return; + return; + } + ++pos; + } while (pos > parent->finish()); + } } - } - } - - if (node->position() < parent->finish()) { - // Try rebalancing with our right sibling. - node_type *right = parent->child(node->position() + 1); - assert(right->max_count() == kNodeSlots); - if (right->count() < kNodeSlots) { - // We bias rebalancing based on the position being inserted. If we're - // inserting at the beginning of the left node then we bias rebalancing - // to fill up the right node. - int to_move = (static_cast(kNodeSlots) - right->count()) / - (1 + (insert_position > node->start())); - to_move = (std::max)(1, to_move); - - if (insert_position <= node->finish() - to_move || - right->count() + to_move < static_cast(kNodeSlots)) { - node->rebalance_left_to_right(to_move, right, mutable_allocator()); - - if (insert_position > node->finish()) { - insert_position = insert_position - node->count() - 1; - node = right; - } - - assert(node->count() < node->max_count()); - return; + + //// + // btree_iterator methods + template + void btree_iterator::increment_slow() + { + if (node_->is_leaf()) + { + assert(position_ >= node_->finish()); + btree_iterator save(*this); + while (position_ == node_->finish() && !node_->is_root()) + { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position(); + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't incrementing end() instead of handling. + if (position_ == node_->finish()) + { + *this = save; + } + } + else + { + assert(position_ < node_->finish()); + node_ = node_->child(position_ + 1); + while (node_->is_internal()) + { + node_ = node_->start_child(); + } + position_ = node_->start(); + } + } + + template + void btree_iterator::decrement_slow() + { + if (node_->is_leaf()) + { + assert(position_ <= -1); + btree_iterator save(*this); + while (position_ < node_->start() && !node_->is_root()) + { + assert(node_->parent()->child(node_->position()) == node_); + position_ = node_->position() - 1; + node_ = node_->parent(); + } + // TODO(ezb): assert we aren't decrementing begin() instead of handling. 
+ if (position_ < node_->start()) + { + *this = save; + } + } + else + { + assert(position_ >= node_->start()); + node_ = node_->child(position_); + while (node_->is_internal()) + { + node_ = node_->child(node_->finish()); + } + position_ = node_->finish() - 1; + } } - } - } - - // Rebalancing failed, make sure there is room on the parent node for a new - // value. - assert(parent->max_count() == kNodeSlots); - if (parent->count() == kNodeSlots) { - iterator parent_iter(node->parent(), node->position()); - rebalance_or_split(&parent_iter); - } - } else { - // Rebalancing not possible because this is the root node. - // Create a new root node and set the current root node as the child of the - // new root. - parent = new_internal_node(parent); - parent->set_generation(root()->generation()); - parent->init_child(parent->start(), root()); - mutable_root() = parent; - // If the former root was a leaf node, then it's now the rightmost node. - assert(parent->start_child()->is_internal() || - parent->start_child() == rightmost()); - } - - // Split the node. - node_type *split_node; - if (node->is_leaf()) { - split_node = new_leaf_node(parent); - node->split(insert_position, split_node, mutable_allocator()); - if (rightmost() == node) mutable_rightmost() = split_node; - } else { - split_node = new_internal_node(parent); - node->split(insert_position, split_node, mutable_allocator()); - } - - if (insert_position > node->finish()) { - insert_position = insert_position - node->count() - 1; - node = split_node; - } -} - -template -void btree
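// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. This is an analogy with a flat list of leaves, not the actual
//  parent-pointer walk used by btree_iterator.]
// increment_slow()/decrement_slow() above handle the case where the iterator
// has run off the end (or front) of its current node and must move elsewhere;
// the same "slow path" idea with an (index, position) pair over leaves:
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyIter {
  std::size_t leaf;
  std::size_t position;
};

void toy_increment(const std::vector<std::vector<int>>& leaves, ToyIter& it) {
  ++it.position;
  // Slow path: we ran off the end of the current leaf; hop forward.
  while (it.leaf < leaves.size() && it.position >= leaves[it.leaf].size()) {
    ++it.leaf;
    it.position = 0;
  }
}

int main() {
  const std::vector<std::vector<int>> leaves{{1, 2}, {3}, {4, 5}};
  ToyIter it{0, 1};       // points at the value 2
  toy_increment(leaves, it);  // hops into the second leaf
  assert(it.leaf == 1 && leaves[it.leaf][it.position] == 3);
  return 0;
}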

::merge_nodes(node_type *left, node_type *right) { - left->merge(right, mutable_allocator()); - if (rightmost() == right) mutable_rightmost() = left; -} - -template -bool btree

::try_merge_or_rebalance(iterator *iter) { - node_type *parent = iter->node_->parent(); - if (iter->node_->position() > parent->start()) { - // Try merging with our left sibling. - node_type *left = parent->child(iter->node_->position() - 1); - assert(left->max_count() == kNodeSlots); - if (1U + left->count() + iter->node_->count() <= kNodeSlots) { - iter->position_ += 1 + left->count(); - merge_nodes(left, iter->node_); - iter->node_ = left; - return true; - } - } - if (iter->node_->position() < parent->finish()) { - // Try merging with our right sibling. - node_type *right = parent->child(iter->node_->position() + 1); - assert(right->max_count() == kNodeSlots); - if (1U + iter->node_->count() + right->count() <= kNodeSlots) { - merge_nodes(iter->node_, right); - return true; - } - // Try rebalancing with our right sibling. We don't perform rebalancing if - // we deleted the first element from iter->node_ and the node is not - // empty. This is a small optimization for the common pattern of deleting - // from the front of the tree. - if (right->count() > kMinNodeValues && - (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { - int to_move = (right->count() - iter->node_->count()) / 2; - to_move = (std::min)(to_move, right->count() - 1); - iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); - return false; - } - } - if (iter->node_->position() > parent->start()) { - // Try rebalancing with our left sibling. We don't perform rebalancing if - // we deleted the last element from iter->node_ and the node is not - // empty. This is a small optimization for the common pattern of deleting - // from the back of the tree. - node_type *left = parent->child(iter->node_->position() - 1); - if (left->count() > kMinNodeValues && - (iter->node_->count() == 0 || - iter->position_ < iter->node_->finish())) { - int to_move = (left->count() - iter->node_->count()) / 2; - to_move = (std::min)(to_move, left->count() - 1); - left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); - iter->position_ += to_move; - return false; - } - } - return false; -} - -template -void btree
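// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. The thresholds and the Action enum are invented for the
//  example; kSlots/kMin stand in for kNodeSlots/kMinNodeValues.]
// try_merge_or_rebalance() above prefers merging an underfull node with a
// sibling when the combined values plus the delimiting key fit in one node,
// and only rebalances otherwise. The core decision, given only the counts:
#include <cassert>

enum class Action { kMergeLeft, kMergeRight, kRebalance, kNothing };

Action decide(int node, int left, int right, int kSlots, int kMin) {
  // Merging needs one extra slot for the delimiting key taken from the parent.
  if (left >= 0 && 1 + left + node <= kSlots) return Action::kMergeLeft;
  if (right >= 0 && 1 + node + right <= kSlots) return Action::kMergeRight;
  // Otherwise borrow from whichever sibling has values to spare.
  if (right > kMin || left > kMin) return Action::kRebalance;
  return Action::kNothing;
}

int main() {
  const int kSlots = 6, kMin = 3;
  assert(decide(1, 2, 6, kSlots, kMin) == Action::kMergeLeft);
  assert(decide(2, -1, 6, kSlots, kMin) == Action::kRebalance);
  assert(decide(2, 6, 6, kSlots, kMin) == Action::kRebalance);
  return 0;
}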

::try_shrink() { - node_type *orig_root = root(); - if (orig_root->count() > 0) { - return; - } - // Deleted the last item on the root node, shrink the height of the tree. - if (orig_root->is_leaf()) { - assert(size() == 0); - mutable_root() = mutable_rightmost() = EmptyNode(); - } else { - node_type *child = orig_root->start_child(); - child->make_root(); - mutable_root() = child; - } - node_type::clear_and_delete(orig_root, mutable_allocator()); -} - -template -template -inline IterType btree

::internal_last(IterType iter) { - assert(iter.node_ != nullptr); - while (iter.position_ == iter.node_->finish()) { - iter.position_ = iter.node_->position(); - iter.node_ = iter.node_->parent(); - if (iter.node_->is_leaf()) { - iter.node_ = nullptr; - break; - } - } - iter.update_generation(); - return iter; -} - -template -template -inline auto btree

::internal_emplace(iterator iter, Args &&... args) - -> iterator { - if (iter.node_->is_internal()) { - // We can't insert on an internal node. Instead, we'll insert after the - // previous value which is guaranteed to be on a leaf node. - --iter; - ++iter.position_; - } - const field_type max_count = iter.node_->max_count(); - allocator_type *alloc = mutable_allocator(); - if (iter.node_->count() == max_count) { - // Make room in the leaf for the new item. - if (max_count < kNodeSlots) { - // Insertion into the root where the root is smaller than the full node - // size. Simply grow the size of the root node. - assert(iter.node_ == root()); - iter.node_ = - new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); - // Transfer the values from the old root to the new root. - node_type *old_root = root(); - node_type *new_root = iter.node_; - new_root->transfer_n(old_root->count(), new_root->start(), - old_root->start(), old_root, alloc); - new_root->set_finish(old_root->finish()); - old_root->set_finish(old_root->start()); - new_root->set_generation(old_root->generation()); - node_type::clear_and_delete(old_root, alloc); - mutable_root() = mutable_rightmost() = new_root; - } else { - rebalance_or_split(&iter); - } - } - iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); - ++size_; - iter.update_generation(); - return iter; -} - -template -template -inline auto btree

::internal_locate(const K &key) const - -> SearchResult { - iterator iter(const_cast(root())); - for (;;) { - SearchResult res = - iter.node_->lower_bound(key, key_comp()); - iter.position_ = res.value; - if (res.IsEq()) { - return {iter, MatchKind::kEq}; - } - // Note: in the non-key-compare-to case, we don't need to walk all the way - // down the tree if the keys are equal, but determining equality would - // require doing an extra comparison on each node on the way down, and we - // will need to go all the way to the leaf node in the expected case. - if (iter.node_->is_leaf()) { - break; - } - iter.node_ = iter.node_->child(iter.position_); - } - // Note: in the non-key-compare-to case, the key may actually be equivalent - // here (and the MatchKind::kNe is ignored). - return {iter, MatchKind::kNe}; -} - -template -template -auto btree

::internal_lower_bound(const K &key) const - -> SearchResult { - if (!params_type::template can_have_multiple_equivalent_keys()) { - SearchResult ret = internal_locate(key); - ret.value = internal_last(ret.value); - return ret; - } - iterator iter(const_cast(root())); - SearchResult res; - bool seen_eq = false; - for (;;) { - res = iter.node_->lower_bound(key, key_comp()); - iter.position_ = res.value; - if (iter.node_->is_leaf()) { - break; - } - seen_eq = seen_eq || res.IsEq(); - iter.node_ = iter.node_->child(iter.position_); - } - if (res.IsEq()) return {iter, MatchKind::kEq}; - return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; -} - -template -template -auto btree

::internal_upper_bound(const K &key) const -> iterator { - iterator iter(const_cast(root())); - for (;;) { - iter.position_ = iter.node_->upper_bound(key, key_comp()); - if (iter.node_->is_leaf()) { - break; - } - iter.node_ = iter.node_->child(iter.position_); - } - return internal_last(iter); -} - -template -template -auto btree

::internal_find(const K &key) const -> iterator { - SearchResult res = internal_locate(key); - if (res.HasMatch()) { - if (res.IsEq()) { - return res.value; - } - } else { - const iterator iter = internal_last(res.value); - if (iter.node_ != nullptr && !compare_keys(key, iter.key())) { - return iter; - } - } - return {nullptr, 0}; -} - -template -int btree

::internal_verify(const node_type *node, const key_type *lo, - const key_type *hi) const { - assert(node->count() > 0); - assert(node->count() <= node->max_count()); - if (lo) { - assert(!compare_keys(node->key(node->start()), *lo)); - } - if (hi) { - assert(!compare_keys(*hi, node->key(node->finish() - 1))); - } - for (int i = node->start() + 1; i < node->finish(); ++i) { - assert(!compare_keys(node->key(i), node->key(i - 1))); - } - int count = node->count(); - if (node->is_internal()) { - for (int i = node->start(); i <= node->finish(); ++i) { - assert(node->child(i) != nullptr); - assert(node->child(i)->parent() == node); - assert(node->child(i)->position() == i); - count += internal_verify(node->child(i), - i == node->start() ? lo : &node->key(i - 1), - i == node->finish() ? hi : &node->key(i)); - } - } - return count; -} - -struct btree_access { - template - static auto erase_if(BtreeContainer &container, Pred pred) - -> typename BtreeContainer::size_type { - const auto initial_size = container.size(); - auto &tree = container.tree_; - auto *alloc = tree.mutable_allocator(); - for (auto it = container.begin(); it != container.end();) { - if (!pred(*it)) { - ++it; - continue; - } - auto *node = it.node_; - if (node->is_internal()) { - // Handle internal nodes normally. - it = container.erase(it); - continue; - } - // If this is a leaf node, then we do all the erases from this node - // at once before doing rebalancing. - - // The current position to transfer slots to. - int to_pos = it.position_; - node->value_destroy(it.position_, alloc); - while (++it.position_ < node->finish()) { - it.update_generation(); - if (pred(*it)) { - node->value_destroy(it.position_, alloc); - } else { - node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + + //// + // btree methods + template + template + void btree

::copy_or_move_values_in_order(Btree& other) + { + static_assert(std::is_same::value || std::is_same::value, "Btree type must be same or const."); + assert(empty()); + + // We can avoid key comparisons because we know the order of the + // values is the same order we'll store them in. + auto iter = other.begin(); + if (iter == other.end()) + return; + insert_multi(iter.slot()); + ++iter; + for (; iter != other.end(); ++iter) + { + // If the btree is not empty, we can just insert the new value at the end + // of the tree. + internal_emplace(end(), iter.slot()); + } + } + + template + constexpr bool btree
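// [Editorial sketch -- illustrative only; not part of the Abseil header or of
//  this patch. std::set is used purely as a standard-library analogue.]
// copy_or_move_values_in_order() above exploits the fact that the source tree
// is already sorted: after the first element, every value is emplaced directly
// at end(), so no key comparisons are needed. The std::set analogue is an
// insert hinted at end():
#include <cassert>
#include <set>
#include <vector>

int main() {
  const std::vector<int> sorted_input{1, 2, 3, 5, 8, 13};

  std::set<int> out;
  for (int v : sorted_input) {
    // Hinted insert: amortized O(1) for sorted input, no search from the root.
    out.insert(out.end(), v);
  }
  assert(out.size() == sorted_input.size());
  assert(*out.begin() == 1 && *out.rbegin() == 13);
  return 0;
}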

::static_assert_validation() + { + static_assert(std::is_nothrow_copy_constructible::value, "Key comparison must be nothrow copy constructible"); + static_assert(std::is_nothrow_copy_constructible::value, "Allocator must be nothrow copy constructible"); + static_assert(type_traits_internal::is_trivially_copyable::value, "iterator not trivially copyable."); + + // Note: We assert that kTargetValues, which is computed from + // Params::kTargetNodeSize, must fit the node_type::field_type. + static_assert( + kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), + "target node size too large" + ); + + // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. + static_assert( + compare_has_valid_result_type(), + "key comparison function must return absl::{weak,strong}_ordering or " + "bool." + ); + + // Test the assumption made in setting kNodeSlotSpace. + static_assert(node_type::MinimumOverhead() >= sizeof(void*) + 4, "node space assumption incorrect"); + + return true; } - } - const int num_deleted = node->finish() - to_pos; - tree.size_ -= num_deleted; - node->set_finish(to_pos); - it.position_ = to_pos; - it = tree.rebalance_after_delete(it); - } - return initial_size - container.size(); - } -}; + + template + template + auto btree

::lower_bound_equal(const K& key) const + -> std::pair + { + const SearchResult res = + internal_lower_bound(key); + const iterator lower = iterator(internal_end(res.value)); + const bool equal = res.HasMatch() ? res.IsEq() : lower != end() && !compare_keys(key, lower.key()); + return {lower, equal}; + } + + template + template + auto btree

::equal_range(const K& key) -> std::pair + { + const std::pair lower_and_equal = lower_bound_equal(key); + const iterator lower = lower_and_equal.first; + if (!lower_and_equal.second) + { + return {lower, lower}; + } + + const iterator next = std::next(lower); + if (!params_type::template can_have_multiple_equivalent_keys()) + { + // The next iterator after lower must point to a key greater than `key`. + // Note: if this assert fails, then it may indicate that the comparator does + // not meet the equivalence requirements for Compare + // (see https://en.cppreference.com/w/cpp/named_req/Compare). + assert(next == end() || compare_keys(key, next.key())); + return {lower, next}; + } + // Try once more to avoid the call to upper_bound() if there's only one + // equivalent key. This should prevent all calls to upper_bound() in cases of + // unique-containers with heterogeneous comparators in which all comparison + // operators have the same equivalence classes. + if (next == end() || compare_keys(key, next.key())) + return {lower, next}; + + // In this case, we need to call upper_bound() to avoid worst case O(N) + // behavior if we were to iterate over equal keys. + return {lower, upper_bound(key)}; + } + + template + template + auto btree
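// Illustrative sketch (assumes only the public Abseil headers named below;
// `equal_range_demo` is a placeholder name): equal_range() yields a range of
// at most one element for unique containers and the full run of equivalent
// keys for multi containers, matching the fast paths implemented above.
#include <cassert>
#include <iterator>
#include <string>
#include "absl/container/btree_map.h"

void equal_range_demo() {
  absl::btree_map<int, std::string> m = {{1, "a"}, {2, "b"}};
  auto r = m.equal_range(1);
  assert(std::distance(r.first, r.second) == 1);  // unique container: 0 or 1 elements

  absl::btree_multimap<int, std::string> mm = {{1, "a"}, {1, "b"}, {2, "c"}};
  auto mr = mm.equal_range(1);
  assert(std::distance(mr.first, mr.second) == 2);  // all keys equivalent to 1
}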

::insert_unique(const K& key, Args&&... args) + -> std::pair + { + if (empty()) + { + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + } + + SearchResult res = internal_locate(key); + iterator iter = res.value; + + if (res.HasMatch()) + { + if (res.IsEq()) + { + // The key already exists in the tree, do nothing. + return {iter, false}; + } + } + else + { + iterator last = internal_last(iter); + if (last.node_ && !compare_keys(key, last.key())) + { + // The key already exists in the tree, do nothing. + return {last, false}; + } + } + return {internal_emplace(iter, std::forward(args)...), true}; + } + + template + template + inline auto btree
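// Illustrative sketch (public-API view of insert_unique(); `insert_unique_demo`
// is a placeholder name): insert() on a unique container reports via the bool
// whether the key was newly inserted, and never overwrites an existing value.
#include <cassert>
#include <string>
#include "absl/container/btree_map.h"

void insert_unique_demo() {
  absl::btree_map<std::string, int> m;
  auto inserted = m.insert({"apple", 1});
  assert(inserted.second);              // key was absent, value stored
  auto duplicate = m.insert({"apple", 99});
  assert(!duplicate.second);            // key already present, value untouched
  assert(duplicate.first->second == 1);
}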

::insert_hint_unique(iterator position, const K& key, Args&&... args) + -> std::pair + { + if (!empty()) + { + if (position == end() || compare_keys(key, position.key())) + { + if (position == begin() || compare_keys(std::prev(position).key(), key)) + { + // prev.key() < key < position.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } + else if (compare_keys(position.key(), key)) + { + ++position; + if (position == end() || compare_keys(key, position.key())) + { + // {original `position`}.key() < key < {current `position`}.key() + return {internal_emplace(position, std::forward(args)...), true}; + } + } + else + { + // position.key() == key + return {position, false}; + } + } + return insert_unique(key, std::forward(args)...); + } + + template + template + void btree
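// Illustrative sketch (placeholder name `hinted_insert_demo`): the hinted
// insert() overload reaches insert_hint_unique() above. A correct hint -- here
// end(), while appending keys in increasing order -- lets the tree skip the
// search descent; an incorrect hint simply falls back to a plain insert.
#include "absl/container/btree_set.h"

void hinted_insert_demo() {
  absl::btree_set<int> s;
  for (int i = 0; i < 1000; ++i) {
    s.insert(s.end(), i);  // each key is the new maximum, so end() is a valid hint
  }
}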

::insert_iterator_unique(InputIterator b, InputIterator e, int) + { + for (; b != e; ++b) + { + insert_hint_unique(end(), params_type::key(*b), *b); + } + } + + template + template + void btree

::insert_iterator_unique(InputIterator b, InputIterator e, char) + { + for (; b != e; ++b) + { + // Use a node handle to manage a temp slot. + auto node_handle = + CommonAccess::Construct(get_allocator(), *b); + slot_type* slot = CommonAccess::GetSlot(node_handle); + insert_hint_unique(end(), params_type::key(slot), slot); + } + } + + template + template + auto btree

::insert_multi(const key_type& key, ValueType&& v) -> iterator + { + if (empty()) + { + mutable_root() = mutable_rightmost() = new_leaf_root_node(1); + } + + iterator iter = internal_upper_bound(key); + if (iter.node_ == nullptr) + { + iter = end(); + } + return internal_emplace(iter, std::forward(v)); + } + + template + template + auto btree
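// Illustrative sketch (placeholder name `insert_multi_demo`): because
// insert_multi() inserts at the upper bound of the key, equivalent keys keep
// their insertion order in multi containers.
#include <cassert>
#include <iterator>
#include <string>
#include "absl/container/btree_map.h"

void insert_multi_demo() {
  absl::btree_multimap<int, std::string> mm;
  mm.insert({1, "first"});
  mm.insert({1, "second"});
  auto it = mm.lower_bound(1);                // first element with key 1
  assert(it->second == "first");
  assert(std::next(it)->second == "second");  // later insertion comes after
}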

::insert_hint_multi(iterator position, ValueType&& v) -> iterator + { + if (!empty()) + { + const key_type& key = params_type::key(v); + if (position == end() || !compare_keys(position.key(), key)) + { + if (position == begin() || + !compare_keys(key, std::prev(position).key())) + { + // prev.key() <= key <= position.key() + return internal_emplace(position, std::forward(v)); + } + } + else + { + ++position; + if (position == end() || !compare_keys(position.key(), key)) + { + // {original `position`}.key() < key < {current `position`}.key() + return internal_emplace(position, std::forward(v)); + } + } + } + return insert_multi(std::forward(v)); + } + + template + template + void btree

::insert_iterator_multi(InputIterator b, InputIterator e) + { + for (; b != e; ++b) + { + insert_hint_multi(end(), *b); + } + } + + template + auto btree

::operator=(const btree& other) -> btree& + { + if (this != &other) + { + clear(); + + *mutable_key_comp() = other.key_comp(); + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) + { + *mutable_allocator() = other.allocator(); + } + + copy_or_move_values_in_order(other); + } + return *this; + } + + template + auto btree

::operator=(btree&& other) noexcept -> btree& + { + if (this != &other) + { + clear(); + + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_copy_assignment::value) + { + swap(root_, other.root_); + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); + swap(size_, other.size_); + } + else + { + if (allocator() == other.allocator()) + { + swap(mutable_root(), other.mutable_root()); + swap(*mutable_key_comp(), *other.mutable_key_comp()); + swap(mutable_rightmost(), other.mutable_rightmost()); + swap(size_, other.size_); + } + else + { + // We aren't allowed to propagate the allocator and the allocator is + // different so we can't take over its memory. We must move each element + // individually. We need both `other` and `this` to have `other`s key + // comparator while moving the values so we can't swap the key + // comparators. + *mutable_key_comp() = other.key_comp(); + copy_or_move_values_in_order(other); + } + } + } + return *this; + } + + template + auto btree
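// Illustrative sketch (placeholder name `assignment_demo`): copy assignment
// deep-copies the comparator and values, while move assignment swaps the
// internal pointers when the allocators are interchangeable, leaving the
// source valid (in practice empty) but unspecified.
#include <cassert>
#include <utility>
#include "absl/container/btree_set.h"

void assignment_demo() {
  absl::btree_set<int> src = {1, 2, 3};
  absl::btree_set<int> copy;
  copy = src;              // deep copy, src unchanged
  assert(copy.size() == 3 && src.size() == 3);

  absl::btree_set<int> moved;
  moved = std::move(src);  // takes over src's nodes when the allocators compare equal
  assert(moved.size() == 3);
}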

::erase(iterator iter) -> iterator + { + iter.node_->value_destroy(iter.position_, mutable_allocator()); + iter.update_generation(); + + const bool internal_delete = iter.node_->is_internal(); + if (internal_delete) + { + // Deletion of a value on an internal node. First, transfer the largest + // value from our left child here, then erase/rebalance from that position. + // We can get to the largest value from our left child by decrementing iter. + iterator internal_iter(iter); + --iter; + assert(iter.node_->is_leaf()); + internal_iter.node_->transfer(internal_iter.position_, iter.position_, iter.node_, mutable_allocator()); + } + else + { + // Shift values after erased position in leaf. In the internal case, we + // don't need to do this because the leaf position is the end of the node. + const field_type transfer_from = iter.position_ + 1; + const field_type num_to_transfer = iter.node_->finish() - transfer_from; + iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from, iter.node_, mutable_allocator()); + } + // Update node finish and container size. + iter.node_->set_finish(iter.node_->finish() - 1); + --size_; + + // We want to return the next value after the one we just erased. If we + // erased from an internal node (internal_delete == true), then the next + // value is ++(++iter). If we erased from a leaf node (internal_delete == + // false) then the next value is ++iter. Note that ++iter may point to an + // internal node and the value in the internal node may move to a leaf node + // (iter.node_) when rebalancing is performed at the leaf level. + + iterator res = rebalance_after_delete(iter); + + // If we erased from an internal node, advance the iterator. + if (internal_delete) + { + ++res; + } + return res; + } + + template + auto btree
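// Illustrative sketch (placeholder name `erase_iterator_demo`): erase(iterator)
// returns the iterator following the erased element, so the usual
// erase-while-iterating loop from the standard ordered containers works here too.
#include "absl/container/btree_map.h"

void erase_iterator_demo() {
  absl::btree_map<int, int> m = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  for (auto it = m.begin(); it != m.end();) {
    if (it->first % 2 == 0) {
      it = m.erase(it);  // next valid position after the erased element
    } else {
      ++it;
    }
  }
}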

::rebalance_after_delete(iterator iter) -> iterator + { + // Merge/rebalance as we walk back up the tree. + iterator res(iter); + bool first_iteration = true; + for (;;) + { + if (iter.node_ == root()) + { + try_shrink(); + if (empty()) + { + return end(); + } + break; + } + if (iter.node_->count() >= kMinNodeValues) + { + break; + } + bool merged = try_merge_or_rebalance(&iter); + // On the first iteration, we should update `res` with `iter` because `res` + // may have been invalidated. + if (first_iteration) + { + res = iter; + first_iteration = false; + } + if (!merged) + { + break; + } + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + } + res.update_generation(); + + // Adjust our return value. If we're pointing at the end of a node, advance + // the iterator. + if (res.position_ == res.node_->finish()) + { + res.position_ = res.node_->finish() - 1; + ++res; + } + + return res; + } + + template + auto btree

::erase_range(iterator begin, iterator end) + -> std::pair + { + difference_type count = std::distance(begin, end); + assert(count >= 0); + + if (count == 0) + { + return {0, begin}; + } + + if (static_cast(count) == size_) + { + clear(); + return {count, this->end()}; + } + + if (begin.node_ == end.node_) + { + assert(end.position_ > begin.position_); + begin.node_->remove_values(begin.position_, end.position_ - begin.position_, mutable_allocator()); + size_ -= count; + return {count, rebalance_after_delete(begin)}; + } + + const size_type target_size = size_ - count; + while (size_ > target_size) + { + if (begin.node_->is_leaf()) + { + const size_type remaining_to_erase = size_ - target_size; + const size_type remaining_in_node = + begin.node_->finish() - begin.position_; + const size_type to_erase = + (std::min)(remaining_to_erase, remaining_in_node); + begin.node_->remove_values(begin.position_, to_erase, mutable_allocator()); + size_ -= to_erase; + begin = rebalance_after_delete(begin); + } + else + { + begin = erase(begin); + } + } + begin.update_generation(); + return {count, begin}; + } + + template + void btree
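// Illustrative sketch (placeholder name `erase_range_demo`): the public
// erase(key) and erase(first, last) overloads are thin wrappers over
// erase_range(); erase(key) reports how many elements were removed.
#include <cassert>
#include "absl/container/btree_map.h"

void erase_range_demo() {
  absl::btree_multimap<int, int> mm = {{1, 1}, {2, 2}, {2, 3}, {3, 4}};
  assert(mm.erase(2) == 2);                        // removes both entries with key 2
  mm.erase(mm.lower_bound(1), mm.upper_bound(3));  // clears the remaining [1, 3] range
  assert(mm.empty());
}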

::clear() + { + if (!empty()) + { + node_type::clear_and_delete(root(), mutable_allocator()); + } + mutable_root() = mutable_rightmost() = EmptyNode(); + size_ = 0; + } + + template + void btree

::swap(btree& other) + { + using std::swap; + if (absl::allocator_traits< + allocator_type>::propagate_on_container_swap::value) + { + // Note: `rightmost_` also contains the allocator and the key comparator. + swap(rightmost_, other.rightmost_); + } + else + { + // It's undefined behavior if the allocators are unequal here. + assert(allocator() == other.allocator()); + swap(mutable_rightmost(), other.mutable_rightmost()); + swap(*mutable_key_comp(), *other.mutable_key_comp()); + } + swap(mutable_root(), other.mutable_root()); + swap(size_, other.size_); + } + + template + void btree

::verify() const + { + assert(root() != nullptr); + assert(leftmost() != nullptr); + assert(rightmost() != nullptr); + assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); + assert(leftmost() == (++const_iterator(root(), -1)).node_); + assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); + assert(leftmost()->is_leaf()); + assert(rightmost()->is_leaf()); + } + + template + void btree

::rebalance_or_split(iterator* iter) + { + node_type*& node = iter->node_; + int& insert_position = iter->position_; + assert(node->count() == node->max_count()); + assert(kNodeSlots == node->max_count()); + + // First try to make room on the node by rebalancing. + node_type* parent = node->parent(); + if (node != root()) + { + if (node->position() > parent->start()) + { + // Try rebalancing with our left sibling. + node_type* left = parent->child(node->position() - 1); + assert(left->max_count() == kNodeSlots); + if (left->count() < kNodeSlots) + { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the end of the right node then we bias rebalancing to + // fill up the left node. + int to_move = (kNodeSlots - left->count()) / + (1 + (insert_position < static_cast(kNodeSlots))); + to_move = (std::max)(1, to_move); + + if (insert_position - to_move >= node->start() || + left->count() + to_move < static_cast(kNodeSlots)) + { + left->rebalance_right_to_left(to_move, node, mutable_allocator()); + + assert(node->max_count() - node->count() == to_move); + insert_position = insert_position - to_move; + if (insert_position < node->start()) + { + insert_position = insert_position + left->count() + 1; + node = left; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + if (node->position() < parent->finish()) + { + // Try rebalancing with our right sibling. + node_type* right = parent->child(node->position() + 1); + assert(right->max_count() == kNodeSlots); + if (right->count() < kNodeSlots) + { + // We bias rebalancing based on the position being inserted. If we're + // inserting at the beginning of the left node then we bias rebalancing + // to fill up the right node. + int to_move = (static_cast(kNodeSlots) - right->count()) / + (1 + (insert_position > node->start())); + to_move = (std::max)(1, to_move); + + if (insert_position <= node->finish() - to_move || + right->count() + to_move < static_cast(kNodeSlots)) + { + node->rebalance_left_to_right(to_move, right, mutable_allocator()); + + if (insert_position > node->finish()) + { + insert_position = insert_position - node->count() - 1; + node = right; + } + + assert(node->count() < node->max_count()); + return; + } + } + } + + // Rebalancing failed, make sure there is room on the parent node for a new + // value. + assert(parent->max_count() == kNodeSlots); + if (parent->count() == kNodeSlots) + { + iterator parent_iter(node->parent(), node->position()); + rebalance_or_split(&parent_iter); + } + } + else + { + // Rebalancing not possible because this is the root node. + // Create a new root node and set the current root node as the child of the + // new root. + parent = new_internal_node(parent); + parent->set_generation(root()->generation()); + parent->init_child(parent->start(), root()); + mutable_root() = parent; + // If the former root was a leaf node, then it's now the rightmost node. + assert(parent->start_child()->is_internal() || parent->start_child() == rightmost()); + } + + // Split the node. 
+ node_type* split_node; + if (node->is_leaf()) + { + split_node = new_leaf_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + if (rightmost() == node) + mutable_rightmost() = split_node; + } + else + { + split_node = new_internal_node(parent); + node->split(insert_position, split_node, mutable_allocator()); + } + + if (insert_position > node->finish()) + { + insert_position = insert_position - node->count() - 1; + node = split_node; + } + } + + template + void btree

::merge_nodes(node_type* left, node_type* right)
+    {
+        left->merge(right, mutable_allocator());
+        if (rightmost() == right)
+            mutable_rightmost() = left;
+    }
+
+    template<typename P>
+    bool btree<P>

::try_merge_or_rebalance(iterator* iter) + { + node_type* parent = iter->node_->parent(); + if (iter->node_->position() > parent->start()) + { + // Try merging with our left sibling. + node_type* left = parent->child(iter->node_->position() - 1); + assert(left->max_count() == kNodeSlots); + if (1U + left->count() + iter->node_->count() <= kNodeSlots) + { + iter->position_ += 1 + left->count(); + merge_nodes(left, iter->node_); + iter->node_ = left; + return true; + } + } + if (iter->node_->position() < parent->finish()) + { + // Try merging with our right sibling. + node_type* right = parent->child(iter->node_->position() + 1); + assert(right->max_count() == kNodeSlots); + if (1U + iter->node_->count() + right->count() <= kNodeSlots) + { + merge_nodes(iter->node_, right); + return true; + } + // Try rebalancing with our right sibling. We don't perform rebalancing if + // we deleted the first element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the front of the tree. + if (right->count() > kMinNodeValues && + (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) + { + int to_move = (right->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, right->count() - 1); + iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); + return false; + } + } + if (iter->node_->position() > parent->start()) + { + // Try rebalancing with our left sibling. We don't perform rebalancing if + // we deleted the last element from iter->node_ and the node is not + // empty. This is a small optimization for the common pattern of deleting + // from the back of the tree. + node_type* left = parent->child(iter->node_->position() - 1); + if (left->count() > kMinNodeValues && + (iter->node_->count() == 0 || + iter->position_ < iter->node_->finish())) + { + int to_move = (left->count() - iter->node_->count()) / 2; + to_move = (std::min)(to_move, left->count() - 1); + left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); + iter->position_ += to_move; + return false; + } + } + return false; + } + + template + void btree

::try_shrink() + { + node_type* orig_root = root(); + if (orig_root->count() > 0) + { + return; + } + // Deleted the last item on the root node, shrink the height of the tree. + if (orig_root->is_leaf()) + { + assert(size() == 0); + mutable_root() = mutable_rightmost() = EmptyNode(); + } + else + { + node_type* child = orig_root->start_child(); + child->make_root(); + mutable_root() = child; + } + node_type::clear_and_delete(orig_root, mutable_allocator()); + } + + template + template + inline IterType btree

::internal_last(IterType iter) + { + assert(iter.node_ != nullptr); + while (iter.position_ == iter.node_->finish()) + { + iter.position_ = iter.node_->position(); + iter.node_ = iter.node_->parent(); + if (iter.node_->is_leaf()) + { + iter.node_ = nullptr; + break; + } + } + iter.update_generation(); + return iter; + } + + template + template + inline auto btree

::internal_emplace(iterator iter, Args&&... args) + -> iterator + { + if (iter.node_->is_internal()) + { + // We can't insert on an internal node. Instead, we'll insert after the + // previous value which is guaranteed to be on a leaf node. + --iter; + ++iter.position_; + } + const field_type max_count = iter.node_->max_count(); + allocator_type* alloc = mutable_allocator(); + if (iter.node_->count() == max_count) + { + // Make room in the leaf for the new item. + if (max_count < kNodeSlots) + { + // Insertion into the root where the root is smaller than the full node + // size. Simply grow the size of the root node. + assert(iter.node_ == root()); + iter.node_ = + new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); + // Transfer the values from the old root to the new root. + node_type* old_root = root(); + node_type* new_root = iter.node_; + new_root->transfer_n(old_root->count(), new_root->start(), old_root->start(), old_root, alloc); + new_root->set_finish(old_root->finish()); + old_root->set_finish(old_root->start()); + new_root->set_generation(old_root->generation()); + node_type::clear_and_delete(old_root, alloc); + mutable_root() = mutable_rightmost() = new_root; + } + else + { + rebalance_or_split(&iter); + } + } + iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); + ++size_; + iter.update_generation(); + return iter; + } + + template + template + inline auto btree

::internal_locate(const K& key) const + -> SearchResult + { + iterator iter(const_cast(root())); + for (;;) + { + SearchResult res = + iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (res.IsEq()) + { + return {iter, MatchKind::kEq}; + } + // Note: in the non-key-compare-to case, we don't need to walk all the way + // down the tree if the keys are equal, but determining equality would + // require doing an extra comparison on each node on the way down, and we + // will need to go all the way to the leaf node in the expected case. + if (iter.node_->is_leaf()) + { + break; + } + iter.node_ = iter.node_->child(iter.position_); + } + // Note: in the non-key-compare-to case, the key may actually be equivalent + // here (and the MatchKind::kNe is ignored). + return {iter, MatchKind::kNe}; + } + + template + template + auto btree

::internal_lower_bound(const K& key) const + -> SearchResult + { + if (!params_type::template can_have_multiple_equivalent_keys()) + { + SearchResult ret = internal_locate(key); + ret.value = internal_last(ret.value); + return ret; + } + iterator iter(const_cast(root())); + SearchResult res; + bool seen_eq = false; + for (;;) + { + res = iter.node_->lower_bound(key, key_comp()); + iter.position_ = res.value; + if (iter.node_->is_leaf()) + { + break; + } + seen_eq = seen_eq || res.IsEq(); + iter.node_ = iter.node_->child(iter.position_); + } + if (res.IsEq()) + return {iter, MatchKind::kEq}; + return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; + } + + template + template + auto btree

::internal_upper_bound(const K& key) const -> iterator + { + iterator iter(const_cast(root())); + for (;;) + { + iter.position_ = iter.node_->upper_bound(key, key_comp()); + if (iter.node_->is_leaf()) + { + break; + } + iter.node_ = iter.node_->child(iter.position_); + } + return internal_last(iter); + } + + template + template + auto btree

::internal_find(const K& key) const -> iterator + { + SearchResult res = internal_locate(key); + if (res.HasMatch()) + { + if (res.IsEq()) + { + return res.value; + } + } + else + { + const iterator iter = internal_last(res.value); + if (iter.node_ != nullptr && !compare_keys(key, iter.key())) + { + return iter; + } + } + return {nullptr, 0}; + } + + template + int btree

::internal_verify(const node_type* node, const key_type* lo, const key_type* hi) const + { + assert(node->count() > 0); + assert(node->count() <= node->max_count()); + if (lo) + { + assert(!compare_keys(node->key(node->start()), *lo)); + } + if (hi) + { + assert(!compare_keys(*hi, node->key(node->finish() - 1))); + } + for (int i = node->start() + 1; i < node->finish(); ++i) + { + assert(!compare_keys(node->key(i), node->key(i - 1))); + } + int count = node->count(); + if (node->is_internal()) + { + for (int i = node->start(); i <= node->finish(); ++i) + { + assert(node->child(i) != nullptr); + assert(node->child(i)->parent() == node); + assert(node->child(i)->position() == i); + count += internal_verify(node->child(i), i == node->start() ? lo : &node->key(i - 1), i == node->finish() ? hi : &node->key(i)); + } + } + return count; + } + + struct btree_access + { + template + static auto erase_if(BtreeContainer& container, Pred pred) + -> typename BtreeContainer::size_type + { + const auto initial_size = container.size(); + auto& tree = container.tree_; + auto* alloc = tree.mutable_allocator(); + for (auto it = container.begin(); it != container.end();) + { + if (!pred(*it)) + { + ++it; + continue; + } + auto* node = it.node_; + if (node->is_internal()) + { + // Handle internal nodes normally. + it = container.erase(it); + continue; + } + // If this is a leaf node, then we do all the erases from this node + // at once before doing rebalancing. + + // The current position to transfer slots to. + int to_pos = it.position_; + node->value_destroy(it.position_, alloc); + while (++it.position_ < node->finish()) + { + it.update_generation(); + if (pred(*it)) + { + node->value_destroy(it.position_, alloc); + } + else + { + node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); + } + } + const int num_deleted = node->finish() - to_pos; + tree.size_ -= num_deleted; + node->set_finish(to_pos); + it.position_ = to_pos; + it = tree.rebalance_after_delete(it); + } + return initial_size - container.size(); + } + }; #undef ABSL_BTREE_ENABLE_GENERATIONS -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h index fc2f740..679b1ae 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/btree_container.h @@ -27,673 +27,837 @@ #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// A common base class for btree_set, btree_map, btree_multiset, and -// btree_multimap. -template -class btree_container { - using params_type = typename Tree::params_type; - - protected: - // Alias used for heterogeneous lookup functions. - // `key_arg` evaluates to `K` when the functors are transparent and to - // `key_type` otherwise. It permits template argument deduction on `K` for the - // transparent case. 
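// Illustrative sketch (placeholder name `erase_if_demo`): btree_access::erase_if
// above backs the public absl::erase_if() overloads for the btree containers,
// batching per-leaf erasures before rebalancing. In this version the function
// returns the number of erased elements.
#include <cassert>
#include "absl/container/btree_set.h"

void erase_if_demo() {
  absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
  const auto erased = absl::erase_if(s, [](int v) { return v % 2 == 0; });
  assert(erased == 3);
  assert(s.size() == 3);
}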
- template - using key_arg = - typename KeyArg::template type< - K, typename Tree::key_type>; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using difference_type = typename Tree::difference_type; - using key_compare = typename Tree::original_key_compare; - using value_compare = typename Tree::value_compare; - using allocator_type = typename Tree::allocator_type; - using reference = typename Tree::reference; - using const_reference = typename Tree::const_reference; - using pointer = typename Tree::pointer; - using const_pointer = typename Tree::const_pointer; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using reverse_iterator = typename Tree::reverse_iterator; - using const_reverse_iterator = typename Tree::const_reverse_iterator; - using node_type = typename Tree::node_handle_type; - - // Constructors/assignments. - btree_container() : tree_(key_compare(), allocator_type()) {} - explicit btree_container(const key_compare &comp, - const allocator_type &alloc = allocator_type()) - : tree_(comp, alloc) {} - explicit btree_container(const allocator_type &alloc) - : tree_(key_compare(), alloc) {} - - btree_container(const btree_container &other) - : btree_container(other, absl::allocator_traits:: - select_on_container_copy_construction( - other.get_allocator())) {} - btree_container(const btree_container &other, const allocator_type &alloc) - : tree_(other.tree_, alloc) {} - - btree_container(btree_container &&other) noexcept( - std::is_nothrow_move_constructible::value) = default; - btree_container(btree_container &&other, const allocator_type &alloc) - : tree_(std::move(other.tree_), alloc) {} - - btree_container &operator=(const btree_container &other) = default; - btree_container &operator=(btree_container &&other) noexcept( - std::is_nothrow_move_assignable::value) = default; - - // Iterator routines. - iterator begin() { return tree_.begin(); } - const_iterator begin() const { return tree_.begin(); } - const_iterator cbegin() const { return tree_.begin(); } - iterator end() { return tree_.end(); } - const_iterator end() const { return tree_.end(); } - const_iterator cend() const { return tree_.end(); } - reverse_iterator rbegin() { return tree_.rbegin(); } - const_reverse_iterator rbegin() const { return tree_.rbegin(); } - const_reverse_iterator crbegin() const { return tree_.rbegin(); } - reverse_iterator rend() { return tree_.rend(); } - const_reverse_iterator rend() const { return tree_.rend(); } - const_reverse_iterator crend() const { return tree_.rend(); } - - // Lookup routines. 
- template - size_type count(const key_arg &key) const { - auto equal_range = this->equal_range(key); - return std::distance(equal_range.first, equal_range.second); - } - template - iterator find(const key_arg &key) { - return tree_.find(key); - } - template - const_iterator find(const key_arg &key) const { - return tree_.find(key); - } - template - bool contains(const key_arg &key) const { - return find(key) != end(); - } - template - iterator lower_bound(const key_arg &key) { - return tree_.lower_bound(key); - } - template - const_iterator lower_bound(const key_arg &key) const { - return tree_.lower_bound(key); - } - template - iterator upper_bound(const key_arg &key) { - return tree_.upper_bound(key); - } - template - const_iterator upper_bound(const key_arg &key) const { - return tree_.upper_bound(key); - } - template - std::pair equal_range(const key_arg &key) { - return tree_.equal_range(key); - } - template - std::pair equal_range( - const key_arg &key) const { - return tree_.equal_range(key); - } - - // Deletion routines. Note that there is also a deletion routine that is - // specific to btree_set_container/btree_multiset_container. - - // Erase the specified iterator from the btree. The iterator must be valid - // (i.e. not equal to end()). Return an iterator pointing to the node after - // the one that was erased (or end() if none exists). - iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } - iterator erase(iterator iter) { return tree_.erase(iter); } - iterator erase(const_iterator first, const_iterator last) { - return tree_.erase_range(iterator(first), iterator(last)).second; - } - template - size_type erase(const key_arg &key) { - auto equal_range = this->equal_range(key); - return tree_.erase_range(equal_range.first, equal_range.second).first; - } - - // Extract routines. - node_type extract(iterator position) { - // Use Construct instead of Transfer because the rebalancing code will - // destroy the slot later. - auto node = - CommonAccess::Construct(get_allocator(), position.slot()); - erase(position); - return node; - } - node_type extract(const_iterator position) { - return extract(iterator(position)); - } - - // Utility routines. - ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } - void swap(btree_container &other) { tree_.swap(other.tree_); } - void verify() const { tree_.verify(); } - - // Size routines. - size_type size() const { return tree_.size(); } - size_type max_size() const { return tree_.max_size(); } - bool empty() const { return tree_.empty(); } - - friend bool operator==(const btree_container &x, const btree_container &y) { - if (x.size() != y.size()) return false; - return std::equal(x.begin(), x.end(), y.begin()); - } - - friend bool operator!=(const btree_container &x, const btree_container &y) { - return !(x == y); - } - - friend bool operator<(const btree_container &x, const btree_container &y) { - return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); - } - - friend bool operator>(const btree_container &x, const btree_container &y) { - return y < x; - } - - friend bool operator<=(const btree_container &x, const btree_container &y) { - return !(y < x); - } - - friend bool operator>=(const btree_container &x, const btree_container &y) { - return !(x < y); - } - - // The allocator used by the btree. - allocator_type get_allocator() const { return tree_.get_allocator(); } - - // The key comparator used by the btree. 
- key_compare key_comp() const { return key_compare(tree_.key_comp()); } - value_compare value_comp() const { return tree_.value_comp(); } - - // Support absl::Hash. - template - friend State AbslHashValue(State h, const btree_container &b) { - for (const auto &v : b) { - h = State::combine(std::move(h), v); - } - return State::combine(std::move(h), b.size()); - } - - protected: - friend struct btree_access; - Tree tree_; -}; - -// A common base class for btree_set and btree_map. -template -class btree_set_container : public btree_container { - using super_type = btree_container; - using params_type = typename Tree::params_type; - using init_type = typename params_type::init_type; - using is_key_compare_to = typename params_type::is_key_compare_to; - friend class BtreeNodePeer; - - protected: - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using key_compare = typename Tree::original_key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using node_type = typename super_type::node_type; - using insert_return_type = InsertReturnType; - - // Inherit constructors. - using super_type::super_type; - btree_set_container() {} - - // Range constructors. - template - btree_set_container(InputIterator b, InputIterator e, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : super_type(comp, alloc) { - insert(b, e); - } - template - btree_set_container(InputIterator b, InputIterator e, - const allocator_type &alloc) - : btree_set_container(b, e, key_compare(), alloc) {} - - // Initializer list constructors. - btree_set_container(std::initializer_list init, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : btree_set_container(init.begin(), init.end(), comp, alloc) {} - btree_set_container(std::initializer_list init, - const allocator_type &alloc) - : btree_set_container(init.begin(), init.end(), alloc) {} - - // Insertion routines. - std::pair insert(const value_type &v) { - return this->tree_.insert_unique(params_type::key(v), v); - } - std::pair insert(value_type &&v) { - return this->tree_.insert_unique(params_type::key(v), std::move(v)); - } - template - std::pair emplace(Args &&... args) { - // Use a node handle to manage a temp slot. - auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - auto *slot = CommonAccess::GetSlot(node); - return this->tree_.insert_unique(params_type::key(slot), slot); - } - iterator insert(const_iterator hint, const value_type &v) { - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(v), v) - .first; - } - iterator insert(const_iterator hint, value_type &&v) { - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) - .first; - } - template - iterator emplace_hint(const_iterator hint, Args &&... args) { - // Use a node handle to manage a temp slot. 
- auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - auto *slot = CommonAccess::GetSlot(node); - return this->tree_ - .insert_hint_unique(iterator(hint), params_type::key(slot), slot) - .first; - } - template - void insert(InputIterator b, InputIterator e) { - this->tree_.insert_iterator_unique(b, e, 0); - } - void insert(std::initializer_list init) { - this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); - } - insert_return_type insert(node_type &&node) { - if (!node) return {this->end(), false, node_type()}; - std::pair res = - this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - if (res.second) { - CommonAccess::Destroy(&node); - return {res.first, true, node_type()}; - } else { - return {res.first, false, std::move(node)}; - } - } - iterator insert(const_iterator hint, node_type &&node) { - if (!node) return this->end(); - std::pair res = this->tree_.insert_hint_unique( - iterator(hint), params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - if (res.second) CommonAccess::Destroy(&node); - return res.first; - } - - // Node extraction routines. - template - node_type extract(const key_arg &key) { - const std::pair lower_and_equal = - this->tree_.lower_bound_equal(key); - return lower_and_equal.second ? extract(lower_and_equal.first) - : node_type(); - } - using super_type::extract; - - // Merge routines. - // Moves elements from `src` into `this`. If the element already exists in - // `this`, it is left unmodified in `src`. - template < - typename T, - typename absl::enable_if_t< - absl::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &src) { // NOLINT - for (auto src_it = src.begin(); src_it != src.end();) { - if (insert(std::move(params_type::element(src_it.slot()))).second) { - src_it = src.erase(src_it); - } else { - ++src_it; - } - } - } - - template < - typename T, - typename absl::enable_if_t< - absl::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &&src) { - merge(src); - } -}; - -// Base class for btree_map. -template -class btree_map_container : public btree_set_container { - using super_type = btree_set_container; - using params_type = typename Tree::params_type; - friend class BtreeNodePeer; - - private: - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using mapped_type = typename params_type::mapped_type; - using value_type = typename Tree::value_type; - using key_compare = typename Tree::original_key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - - // Inherit constructors. - using super_type::super_type; - btree_map_container() {} - - // Insertion routines. - // Note: the nullptr template arguments and extra `const M&` overloads allow - // for supporting bitfield arguments. 
- template - std::pair insert_or_assign(const key_arg &k, - const M &obj) { - return insert_or_assign_impl(k, obj); - } - template - std::pair insert_or_assign(key_arg &&k, const M &obj) { - return insert_or_assign_impl(std::forward(k), obj); - } - template - std::pair insert_or_assign(const key_arg &k, M &&obj) { - return insert_or_assign_impl(k, std::forward(obj)); - } - template - std::pair insert_or_assign(key_arg &&k, M &&obj) { - return insert_or_assign_impl(std::forward(k), std::forward(obj)); - } - template - iterator insert_or_assign(const_iterator hint, const key_arg &k, - const M &obj) { - return insert_or_assign_hint_impl(hint, k, obj); - } - template - iterator insert_or_assign(const_iterator hint, key_arg &&k, const M &obj) { - return insert_or_assign_hint_impl(hint, std::forward(k), obj); - } - template - iterator insert_or_assign(const_iterator hint, const key_arg &k, M &&obj) { - return insert_or_assign_hint_impl(hint, k, std::forward(obj)); - } - template - iterator insert_or_assign(const_iterator hint, key_arg &&k, M &&obj) { - return insert_or_assign_hint_impl(hint, std::forward(k), - std::forward(obj)); - } - - template ::value, int> = 0> - std::pair try_emplace(const key_arg &k, Args &&... args) { - return try_emplace_impl(k, std::forward(args)...); - } - template ::value, int> = 0> - std::pair try_emplace(key_arg &&k, Args &&... args) { - return try_emplace_impl(std::forward(k), std::forward(args)...); - } - template - iterator try_emplace(const_iterator hint, const key_arg &k, - Args &&... args) { - return try_emplace_hint_impl(hint, k, std::forward(args)...); - } - template - iterator try_emplace(const_iterator hint, key_arg &&k, Args &&... args) { - return try_emplace_hint_impl(hint, std::forward(k), - std::forward(args)...); - } - - template - mapped_type &operator[](const key_arg &k) { - return try_emplace(k).first->second; - } - template - mapped_type &operator[](key_arg &&k) { - return try_emplace(std::forward(k)).first->second; - } - - template - mapped_type &at(const key_arg &key) { - auto it = this->find(key); - if (it == this->end()) - base_internal::ThrowStdOutOfRange("absl::btree_map::at"); - return it->second; - } - template - const mapped_type &at(const key_arg &key) const { - auto it = this->find(key); - if (it == this->end()) - base_internal::ThrowStdOutOfRange("absl::btree_map::at"); - return it->second; - } - - private: - // Note: when we call `std::forward(obj)` twice, it's safe because - // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when - // `ret.second` is false. - template - std::pair insert_or_assign_impl(K &&k, M &&obj) { - const std::pair ret = - this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret; - } - template - iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { - const std::pair ret = this->tree_.insert_hint_unique( - iterator(hint), k, std::forward(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); - return ret.first; - } - - template - std::pair try_emplace_impl(K &&k, Args &&... args) { - return this->tree_.insert_unique( - k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - } - template - iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... 
args) { - return this->tree_ - .insert_hint_unique(iterator(hint), k, std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)) - .first; - } -}; - -// A common base class for btree_multiset and btree_multimap. -template -class btree_multiset_container : public btree_container { - using super_type = btree_container; - using params_type = typename Tree::params_type; - using init_type = typename params_type::init_type; - using is_key_compare_to = typename params_type::is_key_compare_to; - friend class BtreeNodePeer; - - template - using key_arg = typename super_type::template key_arg; - - public: - using key_type = typename Tree::key_type; - using value_type = typename Tree::value_type; - using size_type = typename Tree::size_type; - using key_compare = typename Tree::original_key_compare; - using allocator_type = typename Tree::allocator_type; - using iterator = typename Tree::iterator; - using const_iterator = typename Tree::const_iterator; - using node_type = typename super_type::node_type; - - // Inherit constructors. - using super_type::super_type; - btree_multiset_container() {} - - // Range constructors. - template - btree_multiset_container(InputIterator b, InputIterator e, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : super_type(comp, alloc) { - insert(b, e); - } - template - btree_multiset_container(InputIterator b, InputIterator e, - const allocator_type &alloc) - : btree_multiset_container(b, e, key_compare(), alloc) {} - - // Initializer list constructors. - btree_multiset_container(std::initializer_list init, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} - btree_multiset_container(std::initializer_list init, - const allocator_type &alloc) - : btree_multiset_container(init.begin(), init.end(), alloc) {} - - // Insertion routines. - iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } - iterator insert(value_type &&v) { - return this->tree_.insert_multi(std::move(v)); - } - iterator insert(const_iterator hint, const value_type &v) { - return this->tree_.insert_hint_multi(iterator(hint), v); - } - iterator insert(const_iterator hint, value_type &&v) { - return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); - } - template - void insert(InputIterator b, InputIterator e) { - this->tree_.insert_iterator_multi(b, e); - } - void insert(std::initializer_list init) { - this->tree_.insert_iterator_multi(init.begin(), init.end()); - } - template - iterator emplace(Args &&... args) { - // Use a node handle to manage a temp slot. - auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - return this->tree_.insert_multi(CommonAccess::GetSlot(node)); - } - template - iterator emplace_hint(const_iterator hint, Args &&... args) { - // Use a node handle to manage a temp slot. 
- auto node = CommonAccess::Construct(this->get_allocator(), - std::forward(args)...); - return this->tree_.insert_hint_multi(iterator(hint), - CommonAccess::GetSlot(node)); - } - iterator insert(node_type &&node) { - if (!node) return this->end(); - iterator res = - this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), - CommonAccess::GetSlot(node)); - CommonAccess::Destroy(&node); - return res; - } - iterator insert(const_iterator hint, node_type &&node) { - if (!node) return this->end(); - iterator res = this->tree_.insert_hint_multi( - iterator(hint), - std::move(params_type::element(CommonAccess::GetSlot(node)))); - CommonAccess::Destroy(&node); - return res; - } - - // Node extraction routines. - template - node_type extract(const key_arg &key) { - const std::pair lower_and_equal = - this->tree_.lower_bound_equal(key); - return lower_and_equal.second ? extract(lower_and_equal.first) - : node_type(); - } - using super_type::extract; - - // Merge routines. - // Moves all elements from `src` into `this`. - template < - typename T, - typename absl::enable_if_t< - absl::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &src) { // NOLINT - for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { - insert(std::move(params_type::element(src_it.slot()))); - } - src.clear(); - } - - template < - typename T, - typename absl::enable_if_t< - absl::conjunction< - std::is_same, - std::is_same, - std::is_same>::value, - int> = 0> - void merge(btree_container &&src) { - merge(src); - } -}; - -// A base class for btree_multimap. -template -class btree_multimap_container : public btree_multiset_container { - using super_type = btree_multiset_container; - using params_type = typename Tree::params_type; - friend class BtreeNodePeer; - - public: - using mapped_type = typename params_type::mapped_type; - - // Inherit constructors. - using super_type::super_type; - btree_multimap_container() {} -}; - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // A common base class for btree_set, btree_map, btree_multiset, and + // btree_multimap. + template + class btree_container + { + using params_type = typename Tree::params_type; + + protected: + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. 
+ template + using key_arg = + typename KeyArg::template type< + K, + typename Tree::key_type>; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using difference_type = typename Tree::difference_type; + using key_compare = typename Tree::original_key_compare; + using value_compare = typename Tree::value_compare; + using allocator_type = typename Tree::allocator_type; + using reference = typename Tree::reference; + using const_reference = typename Tree::const_reference; + using pointer = typename Tree::pointer; + using const_pointer = typename Tree::const_pointer; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using reverse_iterator = typename Tree::reverse_iterator; + using const_reverse_iterator = typename Tree::const_reverse_iterator; + using node_type = typename Tree::node_handle_type; + + // Constructors/assignments. + btree_container() : + tree_(key_compare(), allocator_type()) + { + } + explicit btree_container(const key_compare& comp, const allocator_type& alloc = allocator_type()) : + tree_(comp, alloc) + { + } + explicit btree_container(const allocator_type& alloc) : + tree_(key_compare(), alloc) + { + } + + btree_container(const btree_container& other) : + btree_container(other, absl::allocator_traits::select_on_container_copy_construction(other.get_allocator())) + { + } + btree_container(const btree_container& other, const allocator_type& alloc) : + tree_(other.tree_, alloc) + { + } + + btree_container(btree_container&& other) noexcept( + std::is_nothrow_move_constructible::value + ) = default; + btree_container(btree_container&& other, const allocator_type& alloc) : + tree_(std::move(other.tree_), alloc) + { + } + + btree_container& operator=(const btree_container& other) = default; + btree_container& operator=(btree_container&& other) noexcept( + std::is_nothrow_move_assignable::value + ) = default; + + // Iterator routines. + iterator begin() + { + return tree_.begin(); + } + const_iterator begin() const + { + return tree_.begin(); + } + const_iterator cbegin() const + { + return tree_.begin(); + } + iterator end() + { + return tree_.end(); + } + const_iterator end() const + { + return tree_.end(); + } + const_iterator cend() const + { + return tree_.end(); + } + reverse_iterator rbegin() + { + return tree_.rbegin(); + } + const_reverse_iterator rbegin() const + { + return tree_.rbegin(); + } + const_reverse_iterator crbegin() const + { + return tree_.rbegin(); + } + reverse_iterator rend() + { + return tree_.rend(); + } + const_reverse_iterator rend() const + { + return tree_.rend(); + } + const_reverse_iterator crend() const + { + return tree_.rend(); + } + + // Lookup routines. 
+ template + size_type count(const key_arg& key) const + { + auto equal_range = this->equal_range(key); + return std::distance(equal_range.first, equal_range.second); + } + template + iterator find(const key_arg& key) + { + return tree_.find(key); + } + template + const_iterator find(const key_arg& key) const + { + return tree_.find(key); + } + template + bool contains(const key_arg& key) const + { + return find(key) != end(); + } + template + iterator lower_bound(const key_arg& key) + { + return tree_.lower_bound(key); + } + template + const_iterator lower_bound(const key_arg& key) const + { + return tree_.lower_bound(key); + } + template + iterator upper_bound(const key_arg& key) + { + return tree_.upper_bound(key); + } + template + const_iterator upper_bound(const key_arg& key) const + { + return tree_.upper_bound(key); + } + template + std::pair equal_range(const key_arg& key) + { + return tree_.equal_range(key); + } + template + std::pair equal_range( + const key_arg& key + ) const + { + return tree_.equal_range(key); + } + + // Deletion routines. Note that there is also a deletion routine that is + // specific to btree_set_container/btree_multiset_container. + + // Erase the specified iterator from the btree. The iterator must be valid + // (i.e. not equal to end()). Return an iterator pointing to the node after + // the one that was erased (or end() if none exists). + iterator erase(const_iterator iter) + { + return tree_.erase(iterator(iter)); + } + iterator erase(iterator iter) + { + return tree_.erase(iter); + } + iterator erase(const_iterator first, const_iterator last) + { + return tree_.erase_range(iterator(first), iterator(last)).second; + } + template + size_type erase(const key_arg& key) + { + auto equal_range = this->equal_range(key); + return tree_.erase_range(equal_range.first, equal_range.second).first; + } + + // Extract routines. + node_type extract(iterator position) + { + // Use Construct instead of Transfer because the rebalancing code will + // destroy the slot later. + auto node = + CommonAccess::Construct(get_allocator(), position.slot()); + erase(position); + return node; + } + node_type extract(const_iterator position) + { + return extract(iterator(position)); + } + + // Utility routines. + ABSL_ATTRIBUTE_REINITIALIZES void clear() + { + tree_.clear(); + } + void swap(btree_container& other) + { + tree_.swap(other.tree_); + } + void verify() const + { + tree_.verify(); + } + + // Size routines. + size_type size() const + { + return tree_.size(); + } + size_type max_size() const + { + return tree_.max_size(); + } + bool empty() const + { + return tree_.empty(); + } + + friend bool operator==(const btree_container& x, const btree_container& y) + { + if (x.size() != y.size()) + return false; + return std::equal(x.begin(), x.end(), y.begin()); + } + + friend bool operator!=(const btree_container& x, const btree_container& y) + { + return !(x == y); + } + + friend bool operator<(const btree_container& x, const btree_container& y) + { + return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); + } + + friend bool operator>(const btree_container& x, const btree_container& y) + { + return y < x; + } + + friend bool operator<=(const btree_container& x, const btree_container& y) + { + return !(y < x); + } + + friend bool operator>=(const btree_container& x, const btree_container& y) + { + return !(x < y); + } + + // The allocator used by the btree. 
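// Illustrative sketch (placeholder name `heterogeneous_lookup_demo`): the
// key_arg alias above enables heterogeneous lookup whenever the comparator is
// transparent (e.g. std::less<>), so lookups can avoid materializing a
// temporary key_type.
#include <cassert>
#include <functional>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/strings/string_view.h"

void heterogeneous_lookup_demo() {
  absl::btree_map<std::string, int, std::less<>> m = {{"alpha", 1}, {"beta", 2}};
  absl::string_view key = "alpha";  // no std::string is constructed for the lookups below
  assert(m.contains(key));
  assert(m.count(key) == 1);
  assert(m.find(key)->second == 1);
}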
+ allocator_type get_allocator() const + { + return tree_.get_allocator(); + } + + // The key comparator used by the btree. + key_compare key_comp() const + { + return key_compare(tree_.key_comp()); + } + value_compare value_comp() const + { + return tree_.value_comp(); + } + + // Support absl::Hash. + template + friend State AbslHashValue(State h, const btree_container& b) + { + for (const auto& v : b) + { + h = State::combine(std::move(h), v); + } + return State::combine(std::move(h), b.size()); + } + + protected: + friend struct btree_access; + Tree tree_; + }; + + // A common base class for btree_set and btree_map. + template + class btree_set_container : public btree_container + { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + protected: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + using insert_return_type = InsertReturnType; + + // Inherit constructors. + using super_type::super_type; + btree_set_container() + { + } + + // Range constructors. + template + btree_set_container(InputIterator b, InputIterator e, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + super_type(comp, alloc) + { + insert(b, e); + } + template + btree_set_container(InputIterator b, InputIterator e, const allocator_type& alloc) : + btree_set_container(b, e, key_compare(), alloc) + { + } + + // Initializer list constructors. + btree_set_container(std::initializer_list init, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + btree_set_container(init.begin(), init.end(), comp, alloc) + { + } + btree_set_container(std::initializer_list init, const allocator_type& alloc) : + btree_set_container(init.begin(), init.end(), alloc) + { + } + + // Insertion routines. + std::pair insert(const value_type& v) + { + return this->tree_.insert_unique(params_type::key(v), v); + } + std::pair insert(value_type&& v) + { + return this->tree_.insert_unique(params_type::key(v), std::move(v)); + } + template + std::pair emplace(Args&&... args) + { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + auto* slot = CommonAccess::GetSlot(node); + return this->tree_.insert_unique(params_type::key(slot), slot); + } + iterator insert(const_iterator hint, const value_type& v) + { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), v) + .first; + } + iterator insert(const_iterator hint, value_type&& v) + { + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) + .first; + } + template + iterator emplace_hint(const_iterator hint, Args&&... args) + { + // Use a node handle to manage a temp slot. 
+ auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + auto* slot = CommonAccess::GetSlot(node); + return this->tree_ + .insert_hint_unique(iterator(hint), params_type::key(slot), slot) + .first; + } + template + void insert(InputIterator b, InputIterator e) + { + this->tree_.insert_iterator_unique(b, e, 0); + } + void insert(std::initializer_list init) + { + this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); + } + insert_return_type insert(node_type&& node) + { + if (!node) + return {this->end(), false, node_type()}; + std::pair res = + this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); + if (res.second) + { + CommonAccess::Destroy(&node); + return {res.first, true, node_type()}; + } + else + { + return {res.first, false, std::move(node)}; + } + } + iterator insert(const_iterator hint, node_type&& node) + { + if (!node) + return this->end(); + std::pair res = this->tree_.insert_hint_unique( + iterator(hint), params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node) + ); + if (res.second) + CommonAccess::Destroy(&node); + return res.first; + } + + // Node extraction routines. + template + node_type extract(const key_arg& key) + { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves elements from `src` into `this`. If the element already exists in + // `this`, it is left unmodified in `src`. + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container& src) + { // NOLINT + for (auto src_it = src.begin(); src_it != src.end();) + { + if (insert(std::move(params_type::element(src_it.slot()))).second) + { + src_it = src.erase(src_it); + } + else + { + ++src_it; + } + } + } + + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container&& src) + { + merge(src); + } + }; + + // Base class for btree_map. + template + class btree_map_container : public btree_set_container + { + using super_type = btree_set_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + private: + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using mapped_type = typename params_type::mapped_type; + using value_type = typename Tree::value_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + + // Inherit constructors. + using super_type::super_type; + btree_map_container() + { + } + + // Insertion routines. + // Note: the nullptr template arguments and extra `const M&` overloads allow + // for supporting bitfield arguments. 
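// Illustrative usage of the map insertion API declared below, shown through the
// public absl::btree_map wrapper (a hypothetical caller, not part of this header
// or of this patch):
//
//   absl::btree_map<std::string, int> m;
//   m.insert_or_assign("a", 1);   // inserts {"a", 1}
//   m.insert_or_assign("a", 2);   // key exists, so assigns 2 to m["a"]
//   m.try_emplace("b", 3);        // constructs {"b", 3} only if "b" is absent
//   m.try_emplace("b", 4);        // "b" already present; no effect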
+ template + std::pair insert_or_assign(const key_arg& k, const M& obj) + { + return insert_or_assign_impl(k, obj); + } + template + std::pair insert_or_assign(key_arg&& k, const M& obj) + { + return insert_or_assign_impl(std::forward(k), obj); + } + template + std::pair insert_or_assign(const key_arg& k, M&& obj) + { + return insert_or_assign_impl(k, std::forward(obj)); + } + template + std::pair insert_or_assign(key_arg&& k, M&& obj) + { + return insert_or_assign_impl(std::forward(k), std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg& k, const M& obj) + { + return insert_or_assign_hint_impl(hint, k, obj); + } + template + iterator insert_or_assign(const_iterator hint, key_arg&& k, const M& obj) + { + return insert_or_assign_hint_impl(hint, std::forward(k), obj); + } + template + iterator insert_or_assign(const_iterator hint, const key_arg& k, M&& obj) + { + return insert_or_assign_hint_impl(hint, k, std::forward(obj)); + } + template + iterator insert_or_assign(const_iterator hint, key_arg&& k, M&& obj) + { + return insert_or_assign_hint_impl(hint, std::forward(k), std::forward(obj)); + } + + template::value, int> = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) + { + return try_emplace_impl(k, std::forward(args)...); + } + template::value, int> = 0> + std::pair try_emplace(key_arg&& k, Args&&... args) + { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, const key_arg& k, Args&&... args) + { + return try_emplace_hint_impl(hint, k, std::forward(args)...); + } + template + iterator try_emplace(const_iterator hint, key_arg&& k, Args&&... args) + { + return try_emplace_hint_impl(hint, std::forward(k), std::forward(args)...); + } + + template + mapped_type& operator[](const key_arg& k) + { + return try_emplace(k).first->second; + } + template + mapped_type& operator[](key_arg&& k) + { + return try_emplace(std::forward(k)).first->second; + } + + template + mapped_type& at(const key_arg& key) + { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + template + const mapped_type& at(const key_arg& key) const + { + auto it = this->find(key); + if (it == this->end()) + base_internal::ThrowStdOutOfRange("absl::btree_map::at"); + return it->second; + } + + private: + // Note: when we call `std::forward(obj)` twice, it's safe because + // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when + // `ret.second` is false. + template + std::pair insert_or_assign_impl(K&& k, M&& obj) + { + const std::pair ret = + this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); + if (!ret.second) + ret.first->second = std::forward(obj); + return ret; + } + template + iterator insert_or_assign_hint_impl(const_iterator hint, K&& k, M&& obj) + { + const std::pair ret = this->tree_.insert_hint_unique( + iterator(hint), k, std::forward(k), std::forward(obj) + ); + if (!ret.second) + ret.first->second = std::forward(obj); + return ret.first; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) + { + return this->tree_.insert_unique( + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...) + ); + } + template + iterator try_emplace_hint_impl(const_iterator hint, K&& k, Args&&... 
args) + { + return this->tree_ + .insert_hint_unique(iterator(hint), k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)) + .first; + } + }; + + // A common base class for btree_multiset and btree_multimap. + template + class btree_multiset_container : public btree_container + { + using super_type = btree_container; + using params_type = typename Tree::params_type; + using init_type = typename params_type::init_type; + using is_key_compare_to = typename params_type::is_key_compare_to; + friend class BtreeNodePeer; + + template + using key_arg = typename super_type::template key_arg; + + public: + using key_type = typename Tree::key_type; + using value_type = typename Tree::value_type; + using size_type = typename Tree::size_type; + using key_compare = typename Tree::original_key_compare; + using allocator_type = typename Tree::allocator_type; + using iterator = typename Tree::iterator; + using const_iterator = typename Tree::const_iterator; + using node_type = typename super_type::node_type; + + // Inherit constructors. + using super_type::super_type; + btree_multiset_container() + { + } + + // Range constructors. + template + btree_multiset_container(InputIterator b, InputIterator e, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + super_type(comp, alloc) + { + insert(b, e); + } + template + btree_multiset_container(InputIterator b, InputIterator e, const allocator_type& alloc) : + btree_multiset_container(b, e, key_compare(), alloc) + { + } + + // Initializer list constructors. + btree_multiset_container(std::initializer_list init, const key_compare& comp = key_compare(), const allocator_type& alloc = allocator_type()) : + btree_multiset_container(init.begin(), init.end(), comp, alloc) + { + } + btree_multiset_container(std::initializer_list init, const allocator_type& alloc) : + btree_multiset_container(init.begin(), init.end(), alloc) + { + } + + // Insertion routines. + iterator insert(const value_type& v) + { + return this->tree_.insert_multi(v); + } + iterator insert(value_type&& v) + { + return this->tree_.insert_multi(std::move(v)); + } + iterator insert(const_iterator hint, const value_type& v) + { + return this->tree_.insert_hint_multi(iterator(hint), v); + } + iterator insert(const_iterator hint, value_type&& v) + { + return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); + } + template + void insert(InputIterator b, InputIterator e) + { + this->tree_.insert_iterator_multi(b, e); + } + void insert(std::initializer_list init) + { + this->tree_.insert_iterator_multi(init.begin(), init.end()); + } + template + iterator emplace(Args&&... args) + { + // Use a node handle to manage a temp slot. + auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + return this->tree_.insert_multi(CommonAccess::GetSlot(node)); + } + template + iterator emplace_hint(const_iterator hint, Args&&... args) + { + // Use a node handle to manage a temp slot. 
+ auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); + return this->tree_.insert_hint_multi(iterator(hint), CommonAccess::GetSlot(node)); + } + iterator insert(node_type&& node) + { + if (!node) + return this->end(); + iterator res = + this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); + CommonAccess::Destroy(&node); + return res; + } + iterator insert(const_iterator hint, node_type&& node) + { + if (!node) + return this->end(); + iterator res = this->tree_.insert_hint_multi( + iterator(hint), + std::move(params_type::element(CommonAccess::GetSlot(node))) + ); + CommonAccess::Destroy(&node); + return res; + } + + // Node extraction routines. + template + node_type extract(const key_arg& key) + { + const std::pair lower_and_equal = + this->tree_.lower_bound_equal(key); + return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); + } + using super_type::extract; + + // Merge routines. + // Moves all elements from `src` into `this`. + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container& src) + { // NOLINT + for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) + { + insert(std::move(params_type::element(src_it.slot()))); + } + src.clear(); + } + + template< + typename T, + typename absl::enable_if_t< + absl::conjunction< + std::is_same, + std::is_same, + std::is_same>::value, + int> = 0> + void merge(btree_container&& src) + { + merge(src); + } + }; + + // A base class for btree_multimap. + template + class btree_multimap_container : public btree_multiset_container + { + using super_type = btree_multiset_container; + using params_type = typename Tree::params_type; + friend class BtreeNodePeer; + + public: + using mapped_type = typename params_type::mapped_type; + + // Inherit constructors. + using super_type::super_type; + btree_multimap_container() + { + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/common.h b/CAPI/cpp/grpc/include/absl/container/internal/common.h index 416d9aa..5a1f424 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/common.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/common.h @@ -21,187 +21,238 @@ #include "absl/meta/type_traits.h" #include "absl/types/optional.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -struct IsTransparent : std::false_type {}; -template -struct IsTransparent> - : std::true_type {}; - -template -struct KeyArg { - // Transparent. Forward `K`. - template - using type = K; -}; - -template <> -struct KeyArg { - // Not transparent. Always use `key_type`. - template - using type = key_type; -}; - -// The node_handle concept from C++17. -// We specialize node_handle for sets and maps. node_handle_base holds the -// common API of both. 
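// Illustrative node-handle round trip through a public container (hypothetical
// caller code, not part of this header or of this patch):
//
//   absl::btree_map<int, std::string> m = {{1, "one"}, {2, "two"}};
//   auto node = m.extract(1);        // the node handle now owns {1, "one"}
//   node.key() = 3;                  // mutable key access (C++17 and later)
//   m.insert(std::move(node));       // reinserted without copying the element
//   // m now holds the keys {2, 3}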
-template -class node_handle_base { - protected: - using slot_type = typename PolicyTraits::slot_type; - - public: - using allocator_type = Alloc; - - constexpr node_handle_base() = default; - node_handle_base(node_handle_base&& other) noexcept { - *this = std::move(other); - } - ~node_handle_base() { destroy(); } - node_handle_base& operator=(node_handle_base&& other) noexcept { - destroy(); - if (!other.empty()) { - alloc_ = other.alloc_; - PolicyTraits::transfer(alloc(), slot(), other.slot()); - other.reset(); - } - return *this; - } - - bool empty() const noexcept { return !alloc_; } - explicit operator bool() const noexcept { return !empty(); } - allocator_type get_allocator() const { return *alloc_; } - - protected: - friend struct CommonAccess; - - struct transfer_tag_t {}; - node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) - : alloc_(a) { - PolicyTraits::transfer(alloc(), slot(), s); - } - - struct construct_tag_t {}; - template - node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) - : alloc_(a) { - PolicyTraits::construct(alloc(), slot(), std::forward(args)...); - } - - void destroy() { - if (!empty()) { - PolicyTraits::destroy(alloc(), slot()); - reset(); - } - } - - void reset() { - assert(alloc_.has_value()); - alloc_ = absl::nullopt; - } - - slot_type* slot() const { - assert(!empty()); - return reinterpret_cast(std::addressof(slot_space_)); - } - allocator_type* alloc() { return std::addressof(*alloc_); } - - private: - absl::optional alloc_ = {}; - alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; -}; - -// For sets. -template -class node_handle : public node_handle_base { - using Base = node_handle_base; - - public: - using value_type = typename PolicyTraits::value_type; - - constexpr node_handle() {} - - value_type& value() const { return PolicyTraits::element(this->slot()); } - - private: - friend struct CommonAccess; - - using Base::Base; -}; - -// For maps. -template -class node_handle> - : public node_handle_base { - using Base = node_handle_base; - using slot_type = typename PolicyTraits::slot_type; - - public: - using key_type = typename Policy::key_type; - using mapped_type = typename Policy::mapped_type; - - constexpr node_handle() {} - - // When C++17 is available, we can use std::launder to provide mutable - // access to the key. Otherwise, we provide const access. - auto key() const - -> decltype(PolicyTraits::mutable_key(std::declval())) { - return PolicyTraits::mutable_key(this->slot()); - } - - mapped_type& mapped() const { - return PolicyTraits::value(&PolicyTraits::element(this->slot())); - } - - private: - friend struct CommonAccess; - - using Base::Base; -}; - -// Provide access to non-public node-handle functions. -struct CommonAccess { - template - static auto GetSlot(const Node& node) -> decltype(node.slot()) { - return node.slot(); - } - - template - static void Destroy(Node* node) { - node->destroy(); - } - - template - static void Reset(Node* node) { - node->reset(); - } - - template - static T Transfer(Args&&... args) { - return T(typename T::transfer_tag_t{}, std::forward(args)...); - } - - template - static T Construct(Args&&... args) { - return T(typename T::construct_tag_t{}, std::forward(args)...); - } -}; - -// Implement the insert_return_type<> concept of C++17. 
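// Illustrative use of the insert_return_type defined below, via a public set
// container (hypothetical caller code, not part of this header or of this patch):
//
//   absl::btree_set<int> s = {1, 2};
//   auto node = s.extract(1);
//   auto res = s.insert(std::move(node));
//   // res.inserted == true, res.position points at the reinserted element 1,
//   // and res.node is an empty node handle.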
-template -struct InsertReturnType { - Iterator position; - bool inserted; - NodeType node; -}; - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + struct IsTransparent : std::false_type + { + }; + template + struct IsTransparent> : std::true_type + { + }; + + template + struct KeyArg + { + // Transparent. Forward `K`. + template + using type = K; + }; + + template<> + struct KeyArg + { + // Not transparent. Always use `key_type`. + template + using type = key_type; + }; + + // The node_handle concept from C++17. + // We specialize node_handle for sets and maps. node_handle_base holds the + // common API of both. + template + class node_handle_base + { + protected: + using slot_type = typename PolicyTraits::slot_type; + + public: + using allocator_type = Alloc; + + constexpr node_handle_base() = default; + node_handle_base(node_handle_base&& other) noexcept + { + *this = std::move(other); + } + ~node_handle_base() + { + destroy(); + } + node_handle_base& operator=(node_handle_base&& other) noexcept + { + destroy(); + if (!other.empty()) + { + alloc_ = other.alloc_; + PolicyTraits::transfer(alloc(), slot(), other.slot()); + other.reset(); + } + return *this; + } + + bool empty() const noexcept + { + return !alloc_; + } + explicit operator bool() const noexcept + { + return !empty(); + } + allocator_type get_allocator() const + { + return *alloc_; + } + + protected: + friend struct CommonAccess; + + struct transfer_tag_t + { + }; + node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) : + alloc_(a) + { + PolicyTraits::transfer(alloc(), slot(), s); + } + + struct construct_tag_t + { + }; + template + node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args) : + alloc_(a) + { + PolicyTraits::construct(alloc(), slot(), std::forward(args)...); + } + + void destroy() + { + if (!empty()) + { + PolicyTraits::destroy(alloc(), slot()); + reset(); + } + } + + void reset() + { + assert(alloc_.has_value()); + alloc_ = absl::nullopt; + } + + slot_type* slot() const + { + assert(!empty()); + return reinterpret_cast(std::addressof(slot_space_)); + } + allocator_type* alloc() + { + return std::addressof(*alloc_); + } + + private: + absl::optional alloc_ = {}; + alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; + }; + + // For sets. + template + class node_handle : public node_handle_base + { + using Base = node_handle_base; + + public: + using value_type = typename PolicyTraits::value_type; + + constexpr node_handle() + { + } + + value_type& value() const + { + return PolicyTraits::element(this->slot()); + } + + private: + friend struct CommonAccess; + + using Base::Base; + }; + + // For maps. + template + class node_handle> : public node_handle_base + { + using Base = node_handle_base; + using slot_type = typename PolicyTraits::slot_type; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + + constexpr node_handle() + { + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key. Otherwise, we provide const access. 
+ auto key() const + -> decltype(PolicyTraits::mutable_key(std::declval())) + { + return PolicyTraits::mutable_key(this->slot()); + } + + mapped_type& mapped() const + { + return PolicyTraits::value(&PolicyTraits::element(this->slot())); + } + + private: + friend struct CommonAccess; + + using Base::Base; + }; + + // Provide access to non-public node-handle functions. + struct CommonAccess + { + template + static auto GetSlot(const Node& node) -> decltype(node.slot()) + { + return node.slot(); + } + + template + static void Destroy(Node* node) + { + node->destroy(); + } + + template + static void Reset(Node* node) + { + node->reset(); + } + + template + static T Transfer(Args&&... args) + { + return T(typename T::transfer_tag_t{}, std::forward(args)...); + } + + template + static T Construct(Args&&... args) + { + return T(typename T::construct_tag_t{}, std::forward(args)...); + } + }; + + // Implement the insert_return_type<> concept of C++17. + template + struct InsertReturnType + { + Iterator position; + bool inserted; + NodeType node; + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h index 5ebe164..022e663 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/compressed_tuple.h @@ -47,242 +47,291 @@ #define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class CompressedTuple; - -namespace internal_compressed_tuple { - -template -struct Elem; -template -struct Elem, I> - : std::tuple_element> {}; -template -using ElemT = typename Elem::type; - -// Use the __is_final intrinsic if available. Where it's not available, classes -// declared with the 'final' specifier cannot be used as CompressedTuple -// elements. -// TODO(sbenza): Replace this with std::is_final in C++14. -template -constexpr bool IsFinal() { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class CompressedTuple; + + namespace internal_compressed_tuple + { + + template + struct Elem; + template + struct Elem, I> : std::tuple_element> + { + }; + template + using ElemT = typename Elem::type; + + // Use the __is_final intrinsic if available. Where it's not available, classes + // declared with the 'final' specifier cannot be used as CompressedTuple + // elements. + // TODO(sbenza): Replace this with std::is_final in C++14. + template + constexpr bool IsFinal() + { #if defined(__clang__) || defined(__GNUC__) - return __is_final(T); + return __is_final(T); #else - return false; + return false; #endif -} - -// We can't use EBCO on other CompressedTuples because that would mean that we -// derive from multiple Storage<> instantiations with the same I parameter, -// and potentially from multiple identical Storage<> instantiations. So anytime -// we use type inheritance rather than encapsulation, we mark -// CompressedTupleImpl, to make this easy to detect. -struct uses_inheritance {}; - -template -constexpr bool ShouldUseBase() { - return std::is_class::value && std::is_empty::value && !IsFinal() && - !std::is_base_of::value; -} - -// The storage class provides two specializations: -// - For empty classes, it stores T as a base class. -// - For everything else, it stores T as a member. 
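// Consequence of the two Storage specializations described above (illustrative
// only, not part of this header or of this patch; the exact size assumes a
// typical ABI):
//
//   struct Empty {};
//   using absl::container_internal::CompressedTuple;
//   static_assert(sizeof(CompressedTuple<int, Empty>) == sizeof(int),
//                 "an empty, non-final element contributes no storage");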
-template instantiations with the same I parameter, + // and potentially from multiple identical Storage<> instantiations. So anytime + // we use type inheritance rather than encapsulation, we mark + // CompressedTupleImpl, to make this easy to detect. + struct uses_inheritance + { + }; + + template + constexpr bool ShouldUseBase() + { + return std::is_class::value && std::is_empty::value && !IsFinal() && + !std::is_base_of::value; + } + + // The storage class provides two specializations: + // - For empty classes, it stores T as a base class. + // - For everything else, it stores T as a member. + template::type>()> + bool UseBase = ShouldUseBase::type>()> #else - bool UseBase = ShouldUseBase()> + bool UseBase = ShouldUseBase()> #endif -struct Storage { - T value; - constexpr Storage() = default; - template - explicit constexpr Storage(absl::in_place_t, V&& v) - : value(absl::forward(v)) {} - constexpr const T& get() const& { return value; } - T& get() & { return value; } - constexpr const T&& get() const&& { return absl::move(*this).value; } - T&& get() && { return std::move(*this).value; } -}; - -template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { - constexpr Storage() = default; - - template - explicit constexpr Storage(absl::in_place_t, V&& v) - : T(absl::forward(v)) {} - - constexpr const T& get() const& { return *this; } - T& get() & { return *this; } - constexpr const T&& get() const&& { return absl::move(*this); } - T&& get() && { return std::move(*this); } -}; - -template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; - -template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< - CompressedTuple, absl::index_sequence, ShouldAnyUseBase> - // We use the dummy identity function through std::integral_constant to - // convince MSVC of accepting and expanding I in that context. Without it - // you would get: - // error C3548: 'I': parameter pack cannot be used in this context - : uses_inheritance, - Storage::value>... { - constexpr CompressedTupleImpl() = default; - template - explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, absl::forward(args))... {} - friend CompressedTuple; -}; - -template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< - CompressedTuple, absl::index_sequence, false> - // We use the dummy identity function as above... - : Storage::value, false>... { - constexpr CompressedTupleImpl() = default; - template - explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, absl::forward(args))... {} - friend CompressedTuple; -}; - -std::false_type Or(std::initializer_list); -std::true_type Or(std::initializer_list); - -// MSVC requires this to be done separately rather than within the declaration -// of CompressedTuple below. 
-template -constexpr bool ShouldAnyUseBase() { - return decltype( - Or({std::integral_constant()>()...})){}; -} - -template -using TupleElementMoveConstructible = - typename std::conditional::value, - std::is_convertible, - std::is_constructible>::type; - -template -struct TupleMoveConstructible : std::false_type {}; - -template -struct TupleMoveConstructible, Vs...> - : std::integral_constant< - bool, absl::conjunction< - TupleElementMoveConstructible...>::value> {}; - -template -struct compressed_tuple_size; - -template -struct compressed_tuple_size> - : public std::integral_constant {}; - -template -struct TupleItemsMoveConstructible - : std::integral_constant< - bool, TupleMoveConstructible::value == - sizeof...(Vs), - T, Vs...>::value> {}; - -} // namespace internal_compressed_tuple - -// Helper class to perform the Empty Base Class Optimization. -// Ts can contain classes and non-classes, empty or not. For the ones that -// are empty classes, we perform the CompressedTuple. If all types in Ts are -// empty classes, then CompressedTuple is itself an empty class. (This -// does not apply when one or more of those empty classes is itself an empty -// CompressedTuple.) -// -// To access the members, use member .get() function. -// -// Eg: -// absl::container_internal::CompressedTuple value(7, t1, t2, -// t3); -// assert(value.get<0>() == 7); -// T1& t1 = value.get<1>(); -// const T2& t2 = value.get<2>(); -// ... -// -// https://en.cppreference.com/w/cpp/language/ebo -template -class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple - : private internal_compressed_tuple::CompressedTupleImpl< - CompressedTuple, absl::index_sequence_for, - internal_compressed_tuple::ShouldAnyUseBase()> { - private: - template - using ElemT = internal_compressed_tuple::ElemT; - - template - using StorageT = internal_compressed_tuple::Storage, I>; - - public: - // There seems to be a bug in MSVC dealing in which using '=default' here will - // cause the compiler to ignore the body of other constructors. The work- - // around is to explicitly implement the default constructor. + struct Storage + { + T value; + constexpr Storage() = default; + template + explicit constexpr Storage(absl::in_place_t, V&& v) : + value(absl::forward(v)) + { + } + constexpr const T& get() const& + { + return value; + } + T& get() & + { + return value; + } + constexpr const T&& get() const&& + { + return absl::move(*this).value; + } + T&& get() && + { + return std::move(*this).value; + } + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T + { + constexpr Storage() = default; + + template + explicit constexpr Storage(absl::in_place_t, V&& v) : + T(absl::forward(v)) + { + } + + constexpr const T& get() const& + { + return *this; + } + T& get() & + { + return *this; + } + constexpr const T&& get() const&& + { + return absl::move(*this); + } + T&& get() && + { + return std::move(*this); + } + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, + absl::index_sequence, + ShouldAnyUseBase> + // We use the dummy identity function through std::integral_constant to + // convince MSVC of accepting and expanding I in that context. Without it + // you would get: + // error C3548: 'I': parameter pack cannot be used in this context + : uses_inheritance, Storage::value>... 
+ { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : + Storage(absl::in_place, absl::forward(args))... + { + } + friend CompressedTuple; + }; + + template + struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< + CompressedTuple, + absl::index_sequence, + false> + // We use the dummy identity function as above... + : Storage::value, false>... + { + constexpr CompressedTupleImpl() = default; + template + explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : + Storage(absl::in_place, absl::forward(args))... + { + } + friend CompressedTuple; + }; + + std::false_type Or(std::initializer_list); + std::true_type Or(std::initializer_list); + + // MSVC requires this to be done separately rather than within the declaration + // of CompressedTuple below. + template + constexpr bool ShouldAnyUseBase() + { + return decltype(Or({std::integral_constant()>()...})){}; + } + + template + using TupleElementMoveConstructible = + typename std::conditional::value, std::is_convertible, std::is_constructible>::type; + + template + struct TupleMoveConstructible : std::false_type + { + }; + + template + struct TupleMoveConstructible, Vs...> : std::integral_constant...>::value> + { + }; + + template + struct compressed_tuple_size; + + template + struct compressed_tuple_size> : public std::integral_constant + { + }; + + template + struct TupleItemsMoveConstructible : std::integral_constant::value == sizeof...(Vs), T, Vs...>::value> + { + }; + + } // namespace internal_compressed_tuple + + // Helper class to perform the Empty Base Class Optimization. + // Ts can contain classes and non-classes, empty or not. For the ones that + // are empty classes, we perform the CompressedTuple. If all types in Ts are + // empty classes, then CompressedTuple is itself an empty class. (This + // does not apply when one or more of those empty classes is itself an empty + // CompressedTuple.) + // + // To access the members, use member .get() function. + // + // Eg: + // absl::container_internal::CompressedTuple value(7, t1, t2, + // t3); + // assert(value.get<0>() == 7); + // T1& t1 = value.get<1>(); + // const T2& t2 = value.get<2>(); + // ... + // + // https://en.cppreference.com/w/cpp/language/ebo + template + class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple : private internal_compressed_tuple::CompressedTupleImpl, absl::index_sequence_for, internal_compressed_tuple::ShouldAnyUseBase()> + { + private: + template + using ElemT = internal_compressed_tuple::ElemT; + + template + using StorageT = internal_compressed_tuple::Storage, I>; + + public: + // There seems to be a bug in MSVC dealing in which using '=default' here will + // cause the compiler to ignore the body of other constructors. The work- + // around is to explicitly implement the default constructor. #if defined(_MSC_VER) - constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {} + constexpr CompressedTuple() : + CompressedTuple::CompressedTupleImpl() + { + } #else - constexpr CompressedTuple() = default; + constexpr CompressedTuple() = default; #endif - explicit constexpr CompressedTuple(const Ts&... base) - : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {} - - template )>>, - internal_compressed_tuple::TupleItemsMoveConstructible< - CompressedTuple, First, Vs...>>::value, - bool> = true> - explicit constexpr CompressedTuple(First&& first, Vs&&... 
base) - : CompressedTuple::CompressedTupleImpl(absl::in_place, - absl::forward(first), - absl::forward(base)...) {} - - template - ElemT& get() & { - return StorageT::get(); - } - - template - constexpr const ElemT& get() const& { - return StorageT::get(); - } - - template - ElemT&& get() && { - return std::move(*this).StorageT::get(); - } - - template - constexpr const ElemT&& get() const&& { - return absl::move(*this).StorageT::get(); - } -}; - -// Explicit specialization for a zero-element tuple -// (needed to avoid ambiguous overloads for the default constructor). -template <> -class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; - -} // namespace container_internal -ABSL_NAMESPACE_END + explicit constexpr CompressedTuple(const Ts&... base) : + CompressedTuple::CompressedTupleImpl(absl::in_place, base...) + { + } + + template)>>, + internal_compressed_tuple::TupleItemsMoveConstructible, First, Vs...>>::value, + bool> = true> + explicit constexpr CompressedTuple(First&& first, Vs&&... base) : + CompressedTuple::CompressedTupleImpl(absl::in_place, absl::forward(first), absl::forward(base)...) + { + } + + template + ElemT& get() & + { + return StorageT::get(); + } + + template + constexpr const ElemT& get() const& + { + return StorageT::get(); + } + + template + ElemT&& get() && + { + return std::move(*this).StorageT::get(); + } + + template + constexpr const ElemT&& get() const&& + { + return absl::move(*this).StorageT::get(); + } + }; + + // Explicit specialization for a zero-element tuple + // (needed to avoid ambiguous overloads for the default constructor). + template<> + class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> + { + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC diff --git a/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h index 00e9f6d..fa795ac 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/container_memory.h @@ -36,407 +36,464 @@ #include #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -struct alignas(Alignment) AlignedType {}; - -// Allocates at least n bytes aligned to the specified alignment. -// Alignment must be a power of 2. It must be positive. -// -// Note that many allocators don't honor alignment requirements above certain -// threshold (usually either alignof(std::max_align_t) or alignof(void*)). -// Allocate() doesn't apply alignment corrections. If the underlying allocator -// returns insufficiently alignment pointer, that's what you are going to get. -template -void* Allocate(Alloc* alloc, size_t n) { - static_assert(Alignment > 0, ""); - assert(n && "n must be positive"); - using M = AlignedType; - using A = typename absl::allocator_traits::template rebind_alloc; - using AT = typename absl::allocator_traits::template rebind_traits; - // On macOS, "mem_alloc" is a #define with one argument defined in - // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it - // with the "foo(bar)" syntax. - A my_mem_alloc(*alloc); - void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); - assert(reinterpret_cast(p) % Alignment == 0 && - "allocator does not respect alignment"); - return p; -} - -// The pointer must have been previously obtained by calling -// Allocate(alloc, n). 
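// Illustrative round trip with the aligned Allocate()/Deallocate() helpers
// declared in this header (hypothetical caller code, not part of this patch):
//
//   std::allocator<char> a;
//   void* p = absl::container_internal::Allocate<alignof(std::max_align_t)>(&a, 64);
//   // ... use the 64 bytes at p ...
//   absl::container_internal::Deallocate<alignof(std::max_align_t)>(&a, p, 64);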
-template -void Deallocate(Alloc* alloc, void* p, size_t n) { - static_assert(Alignment > 0, ""); - assert(n && "n must be positive"); - using M = AlignedType; - using A = typename absl::allocator_traits::template rebind_alloc; - using AT = typename absl::allocator_traits::template rebind_traits; - // On macOS, "mem_alloc" is a #define with one argument defined in - // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it - // with the "foo(bar)" syntax. - A my_mem_alloc(*alloc); - AT::deallocate(my_mem_alloc, static_cast(p), - (n + sizeof(M) - 1) / sizeof(M)); -} - -namespace memory_internal { - -// Constructs T into uninitialized storage pointed by `ptr` using the args -// specified in the tuple. -template -void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, - absl::index_sequence) { - absl::allocator_traits::construct( - *alloc, ptr, std::get(std::forward(t))...); -} - -template -struct WithConstructedImplF { - template - decltype(std::declval()(std::declval())) operator()( - Args&&... args) const { - return std::forward(f)(T(std::forward(args)...)); - } - F&& f; -}; - -template -decltype(std::declval()(std::declval())) WithConstructedImpl( - Tuple&& t, absl::index_sequence, F&& f) { - return WithConstructedImplF{std::forward(f)}( - std::get(std::forward(t))...); -} - -template -auto TupleRefImpl(T&& t, absl::index_sequence) - -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { - return std::forward_as_tuple(std::get(std::forward(t))...); -} - -// Returns a tuple of references to the elements of the input tuple. T must be a -// tuple. -template -auto TupleRef(T&& t) -> decltype( - TupleRefImpl(std::forward(t), - absl::make_index_sequence< - std::tuple_size::type>::value>())) { - return TupleRefImpl( - std::forward(t), - absl::make_index_sequence< - std::tuple_size::type>::value>()); -} - -template -decltype(std::declval()(std::declval(), std::piecewise_construct, - std::declval>(), std::declval())) -DecomposePairImpl(F&& f, std::pair, V> p) { - const auto& key = std::get<0>(p.first); - return std::forward(f)(key, std::piecewise_construct, std::move(p.first), - std::move(p.second)); -} - -} // namespace memory_internal - -// Constructs T into uninitialized storage pointed by `ptr` using the args -// specified in the tuple. -template -void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { - memory_internal::ConstructFromTupleImpl( - alloc, ptr, std::forward(t), - absl::make_index_sequence< - std::tuple_size::type>::value>()); -} - -// Constructs T using the args specified in the tuple and calls F with the -// constructed value. -template -decltype(std::declval()(std::declval())) WithConstructed( - Tuple&& t, F&& f) { - return memory_internal::WithConstructedImpl( - std::forward(t), - absl::make_index_sequence< - std::tuple_size::type>::value>(), - std::forward(f)); -} - -// Given arguments of an std::pair's consructor, PairArgs() returns a pair of -// tuples with references to the passed arguments. The tuples contain -// constructor arguments for the first and the second elements of the pair. -// -// The following two snippets are equivalent. -// -// 1. std::pair p(args...); -// -// 2. 
auto a = PairArgs(args...); -// std::pair p(std::piecewise_construct, -// std::move(a.first), std::move(a.second)); -inline std::pair, std::tuple<>> PairArgs() { return {}; } -template -std::pair, std::tuple> PairArgs(F&& f, S&& s) { - return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), - std::forward_as_tuple(std::forward(s))}; -} -template -std::pair, std::tuple> PairArgs( - const std::pair& p) { - return PairArgs(p.first, p.second); -} -template -std::pair, std::tuple> PairArgs(std::pair&& p) { - return PairArgs(std::forward(p.first), std::forward(p.second)); -} -template -auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) - -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), - memory_internal::TupleRef(std::forward(s)))) { - return std::make_pair(memory_internal::TupleRef(std::forward(f)), - memory_internal::TupleRef(std::forward(s))); -} - -// A helper function for implementing apply() in map policies. -template -auto DecomposePair(F&& f, Args&&... args) - -> decltype(memory_internal::DecomposePairImpl( - std::forward(f), PairArgs(std::forward(args)...))) { - return memory_internal::DecomposePairImpl( - std::forward(f), PairArgs(std::forward(args)...)); -} - -// A helper function for implementing apply() in set policies. -template -decltype(std::declval()(std::declval(), std::declval())) -DecomposeValue(F&& f, Arg&& arg) { - const auto& key = arg; - return std::forward(f)(key, std::forward(arg)); -} - -// Helper functions for asan and msan. -inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + struct alignas(Alignment) AlignedType + { + }; + + // Allocates at least n bytes aligned to the specified alignment. + // Alignment must be a power of 2. It must be positive. + // + // Note that many allocators don't honor alignment requirements above certain + // threshold (usually either alignof(std::max_align_t) or alignof(void*)). + // Allocate() doesn't apply alignment corrections. If the underlying allocator + // returns insufficiently alignment pointer, that's what you are going to get. + template + void* Allocate(Alloc* alloc, size_t n) + { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. + A my_mem_alloc(*alloc); + void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); + assert(reinterpret_cast(p) % Alignment == 0 && "allocator does not respect alignment"); + return p; + } + + // The pointer must have been previously obtained by calling + // Allocate(alloc, n). + template + void Deallocate(Alloc* alloc, void* p, size_t n) + { + static_assert(Alignment > 0, ""); + assert(n && "n must be positive"); + using M = AlignedType; + using A = typename absl::allocator_traits::template rebind_alloc; + using AT = typename absl::allocator_traits::template rebind_traits; + // On macOS, "mem_alloc" is a #define with one argument defined in + // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it + // with the "foo(bar)" syntax. 
+ A my_mem_alloc(*alloc); + AT::deallocate(my_mem_alloc, static_cast(p), (n + sizeof(M) - 1) / sizeof(M)); + } + + namespace memory_internal + { + + // Constructs T into uninitialized storage pointed by `ptr` using the args + // specified in the tuple. + template + void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, absl::index_sequence) + { + absl::allocator_traits::construct( + *alloc, ptr, std::get(std::forward(t))... + ); + } + + template + struct WithConstructedImplF + { + template + decltype(std::declval()(std::declval())) operator()( + Args&&... args + ) const + { + return std::forward(f)(T(std::forward(args)...)); + } + F&& f; + }; + + template + decltype(std::declval()(std::declval())) WithConstructedImpl( + Tuple&& t, absl::index_sequence, F&& f + ) + { + return WithConstructedImplF{std::forward(f)}( + std::get(std::forward(t))... + ); + } + + template + auto TupleRefImpl(T&& t, absl::index_sequence) + -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) + { + return std::forward_as_tuple(std::get(std::forward(t))...); + } + + // Returns a tuple of references to the elements of the input tuple. T must be a + // tuple. + template + auto TupleRef(T&& t) -> decltype(TupleRefImpl(std::forward(t), absl::make_index_sequence::type>::value>())) + { + return TupleRefImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>() + ); + } + + template + decltype(std::declval()(std::declval(), std::piecewise_construct, std::declval>(), std::declval())) + DecomposePairImpl(F&& f, std::pair, V> p) + { + const auto& key = std::get<0>(p.first); + return std::forward(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second)); + } + + } // namespace memory_internal + + // Constructs T into uninitialized storage pointed by `ptr` using the args + // specified in the tuple. + template + void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) + { + memory_internal::ConstructFromTupleImpl( + alloc, ptr, std::forward(t), absl::make_index_sequence::type>::value>() + ); + } + + // Constructs T using the args specified in the tuple and calls F with the + // constructed value. + template + decltype(std::declval()(std::declval())) WithConstructed( + Tuple&& t, F&& f + ) + { + return memory_internal::WithConstructedImpl( + std::forward(t), + absl::make_index_sequence< + std::tuple_size::type>::value>(), + std::forward(f) + ); + } + + // Given arguments of an std::pair's consructor, PairArgs() returns a pair of + // tuples with references to the passed arguments. The tuples contain + // constructor arguments for the first and the second elements of the pair. + // + // The following two snippets are equivalent. + // + // 1. std::pair p(args...); + // + // 2. 
auto a = PairArgs(args...); + // std::pair p(std::piecewise_construct, + // std::move(a.first), std::move(a.second)); + inline std::pair, std::tuple<>> PairArgs() + { + return {}; + } + template + std::pair, std::tuple> PairArgs(F&& f, S&& s) + { + return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), std::forward_as_tuple(std::forward(s))}; + } + template + std::pair, std::tuple> PairArgs( + const std::pair& p + ) + { + return PairArgs(p.first, p.second); + } + template + std::pair, std::tuple> PairArgs(std::pair&& p) + { + return PairArgs(std::forward(p.first), std::forward(p.second)); + } + template + auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) + -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s)))) + { + return std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s))); + } + + // A helper function for implementing apply() in map policies. + template + auto DecomposePair(F&& f, Args&&... args) + -> decltype(memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...) + )) + { + return memory_internal::DecomposePairImpl( + std::forward(f), PairArgs(std::forward(args)...) + ); + } + + // A helper function for implementing apply() in set policies. + template + decltype(std::declval()(std::declval(), std::declval())) + DecomposeValue(F&& f, Arg&& arg) + { + const auto& key = arg; + return std::forward(f)(key, std::forward(arg)); + } + + // Helper functions for asan and msan. + inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) + { #ifdef ABSL_HAVE_ADDRESS_SANITIZER - ASAN_POISON_MEMORY_REGION(m, s); + ASAN_POISON_MEMORY_REGION(m, s); #endif #ifdef ABSL_HAVE_MEMORY_SANITIZER - __msan_poison(m, s); + __msan_poison(m, s); #endif - (void)m; - (void)s; -} + (void)m; + (void)s; + } -inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { + inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) + { #ifdef ABSL_HAVE_ADDRESS_SANITIZER - ASAN_UNPOISON_MEMORY_REGION(m, s); + ASAN_UNPOISON_MEMORY_REGION(m, s); #endif #ifdef ABSL_HAVE_MEMORY_SANITIZER - __msan_unpoison(m, s); + __msan_unpoison(m, s); #endif - (void)m; - (void)s; -} - -template -inline void SanitizerPoisonObject(const T* object) { - SanitizerPoisonMemoryRegion(object, sizeof(T)); -} - -template -inline void SanitizerUnpoisonObject(const T* object) { - SanitizerUnpoisonMemoryRegion(object, sizeof(T)); -} - -namespace memory_internal { - -// If Pair is a standard-layout type, OffsetOf::kFirst and -// OffsetOf::kSecond are equivalent to offsetof(Pair, first) and -// offsetof(Pair, second) respectively. Otherwise they are -1. -// -// The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout -// type, which is non-portable. -template -struct OffsetOf { - static constexpr size_t kFirst = static_cast(-1); - static constexpr size_t kSecond = static_cast(-1); -}; - -template -struct OffsetOf::type> { - static constexpr size_t kFirst = offsetof(Pair, first); - static constexpr size_t kSecond = offsetof(Pair, second); -}; - -template -struct IsLayoutCompatible { - private: - struct Pair { - K first; - V second; - }; - - // Is P layout-compatible with Pair? - template - static constexpr bool LayoutCompatible() { - return std::is_standard_layout

<P>() && sizeof(P) == sizeof(Pair) &&
-           alignof(P) == alignof(Pair) &&
-           memory_internal::OffsetOf<P>::kFirst ==
-               memory_internal::OffsetOf<Pair>::kFirst &&
-           memory_internal::OffsetOf<P>
::kSecond == - memory_internal::OffsetOf::kSecond; - } - - public: - // Whether pair and pair are layout-compatible. If they are, - // then it is safe to store them in a union and read from either. - static constexpr bool value = std::is_standard_layout() && - std::is_standard_layout() && - memory_internal::OffsetOf::kFirst == 0 && - LayoutCompatible>() && - LayoutCompatible>(); -}; - -} // namespace memory_internal - -// The internal storage type for key-value containers like flat_hash_map. -// -// It is convenient for the value_type of a flat_hash_map to be -// pair; the "const K" prevents accidental modification of the key -// when dealing with the reference returned from find() and similar methods. -// However, this creates other problems; we want to be able to emplace(K, V) -// efficiently with move operations, and similarly be able to move a -// pair in insert(). -// -// The solution is this union, which aliases the const and non-const versions -// of the pair. This also allows flat_hash_map to work, even though -// that has the same efficiency issues with move in emplace() and insert() - -// but people do it anyway. -// -// If kMutableKeys is false, only the value member can be accessed. -// -// If kMutableKeys is true, key can be accessed through all slots while value -// and mutable_value must be accessed only via INITIALIZED slots. Slots are -// created and destroyed via mutable_value so that the key can be moved later. -// -// Accessing one of the union fields while the other is active is safe as -// long as they are layout-compatible, which is guaranteed by the definition of -// kMutableKeys. For C++11, the relevant section of the standard is -// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) -template -union map_slot_type { - map_slot_type() {} - ~map_slot_type() = delete; - using value_type = std::pair; - using mutable_value_type = - std::pair, absl::remove_const_t>; - - value_type value; - mutable_value_type mutable_value; - absl::remove_const_t key; -}; - -template -struct map_slot_policy { - using slot_type = map_slot_type; - using value_type = std::pair; - using mutable_value_type = std::pair; - - private: - static void emplace(slot_type* slot) { - // The construction of union doesn't do anything at runtime but it allows us - // to access its members without violating aliasing rules. - new (slot) slot_type; - } - // If pair and pair are layout-compatible, we can accept one - // or the other via slot_type. We are also free to access the key via - // slot_type::key in this case. - using kMutableKeys = memory_internal::IsLayoutCompatible; - - public: - static value_type& element(slot_type* slot) { return slot->value; } - static const value_type& element(const slot_type* slot) { - return slot->value; - } - - // When C++17 is available, we can use std::launder to provide mutable - // access to the key for use in node handle. + (void)m; + (void)s; + } + + template + inline void SanitizerPoisonObject(const T* object) + { + SanitizerPoisonMemoryRegion(object, sizeof(T)); + } + + template + inline void SanitizerUnpoisonObject(const T* object) + { + SanitizerUnpoisonMemoryRegion(object, sizeof(T)); + } + + namespace memory_internal + { + + // If Pair is a standard-layout type, OffsetOf::kFirst and + // OffsetOf::kSecond are equivalent to offsetof(Pair, first) and + // offsetof(Pair, second) respectively. Otherwise they are -1. + // + // The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout + // type, which is non-portable. 
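// Background for the comment above (illustrative only, not part of this header
// or of this patch): offsetof is only guaranteed for standard-layout types, e.g.
//
//   struct Plain { int first; long second; };     // standard layout: offsetof is fine
//   struct Virt { virtual ~Virt(); int first; };  // not standard layout;
//                                                 // offsetof(Virt, first) is only
//                                                 // conditionally-supported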
+ template + struct OffsetOf + { + static constexpr size_t kFirst = static_cast(-1); + static constexpr size_t kSecond = static_cast(-1); + }; + + template + struct OffsetOf::type> + { + static constexpr size_t kFirst = offsetof(Pair, first); + static constexpr size_t kSecond = offsetof(Pair, second); + }; + + template + struct IsLayoutCompatible + { + private: + struct Pair + { + K first; + V second; + }; + + // Is P layout-compatible with Pair? + template + static constexpr bool LayoutCompatible() + { + return std::is_standard_layout

<P>() && sizeof(P) == sizeof(Pair) &&
+                       alignof(P) == alignof(Pair) &&
+                       memory_internal::OffsetOf<P>::kFirst ==
+                           memory_internal::OffsetOf<Pair>::kFirst &&
+                       memory_internal::OffsetOf<P>
::kSecond == + memory_internal::OffsetOf::kSecond; + } + + public: + // Whether pair and pair are layout-compatible. If they are, + // then it is safe to store them in a union and read from either. + static constexpr bool value = std::is_standard_layout() && + std::is_standard_layout() && + memory_internal::OffsetOf::kFirst == 0 && + LayoutCompatible>() && + LayoutCompatible>(); + }; + + } // namespace memory_internal + + // The internal storage type for key-value containers like flat_hash_map. + // + // It is convenient for the value_type of a flat_hash_map to be + // pair; the "const K" prevents accidental modification of the key + // when dealing with the reference returned from find() and similar methods. + // However, this creates other problems; we want to be able to emplace(K, V) + // efficiently with move operations, and similarly be able to move a + // pair in insert(). + // + // The solution is this union, which aliases the const and non-const versions + // of the pair. This also allows flat_hash_map to work, even though + // that has the same efficiency issues with move in emplace() and insert() - + // but people do it anyway. + // + // If kMutableKeys is false, only the value member can be accessed. + // + // If kMutableKeys is true, key can be accessed through all slots while value + // and mutable_value must be accessed only via INITIALIZED slots. Slots are + // created and destroyed via mutable_value so that the key can be moved later. + // + // Accessing one of the union fields while the other is active is safe as + // long as they are layout-compatible, which is guaranteed by the definition of + // kMutableKeys. For C++11, the relevant section of the standard is + // https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) + template + union map_slot_type + { + map_slot_type() + { + } + ~map_slot_type() = delete; + using value_type = std::pair; + using mutable_value_type = + std::pair, absl::remove_const_t>; + + value_type value; + mutable_value_type mutable_value; + absl::remove_const_t key; + }; + + template + struct map_slot_policy + { + using slot_type = map_slot_type; + using value_type = std::pair; + using mutable_value_type = std::pair; + + private: + static void emplace(slot_type* slot) + { + // The construction of union doesn't do anything at runtime but it allows us + // to access its members without violating aliasing rules. + new (slot) slot_type; + } + // If pair and pair are layout-compatible, we can accept one + // or the other via slot_type. We are also free to access the key via + // slot_type::key in this case. + using kMutableKeys = memory_internal::IsLayoutCompatible; + + public: + static value_type& element(slot_type* slot) + { + return slot->value; + } + static const value_type& element(const slot_type* slot) + { + return slot->value; + } + + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 - static K& mutable_key(slot_type* slot) { - // Still check for kMutableKeys so that we can avoid calling std::launder - // unless necessary because it can interfere with optimizations. - return kMutableKeys::value ? slot->key - : *std::launder(const_cast( - std::addressof(slot->value.first))); - } + static K& mutable_key(slot_type* slot) + { + // Still check for kMutableKeys so that we can avoid calling std::launder + // unless necessary because it can interfere with optimizations. + return kMutableKeys::value ? 
slot->key : *std::launder(const_cast(std::addressof(slot->value.first))); + } #else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) - static const K& mutable_key(slot_type* slot) { return key(slot); } + static const K& mutable_key(slot_type* slot) + { + return key(slot); + } #endif - static const K& key(const slot_type* slot) { - return kMutableKeys::value ? slot->key : slot->value.first; - } - - template - static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { - emplace(slot); - if (kMutableKeys::value) { - absl::allocator_traits::construct(*alloc, &slot->mutable_value, - std::forward(args)...); - } else { - absl::allocator_traits::construct(*alloc, &slot->value, - std::forward(args)...); - } - } - - // Construct this slot by moving from another slot. - template - static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { - emplace(slot); - if (kMutableKeys::value) { - absl::allocator_traits::construct( - *alloc, &slot->mutable_value, std::move(other->mutable_value)); - } else { - absl::allocator_traits::construct(*alloc, &slot->value, - std::move(other->value)); - } - } - - // Construct this slot by copying from another slot. - template - static void construct(Allocator* alloc, slot_type* slot, - const slot_type* other) { - emplace(slot); - absl::allocator_traits::construct(*alloc, &slot->value, - other->value); - } - - template - static void destroy(Allocator* alloc, slot_type* slot) { - if (kMutableKeys::value) { - absl::allocator_traits::destroy(*alloc, &slot->mutable_value); - } else { - absl::allocator_traits::destroy(*alloc, &slot->value); - } - } - - template - static void transfer(Allocator* alloc, slot_type* new_slot, - slot_type* old_slot) { - emplace(new_slot); - if (kMutableKeys::value) { - absl::allocator_traits::construct( - *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); - } else { - absl::allocator_traits::construct(*alloc, &new_slot->value, - std::move(old_slot->value)); - } - destroy(alloc, old_slot); - } -}; - -} // namespace container_internal -ABSL_NAMESPACE_END + static const K& key(const slot_type* slot) + { + return kMutableKeys::value ? slot->key : slot->value.first; + } + + template + static void construct(Allocator* alloc, slot_type* slot, Args&&... args) + { + emplace(slot); + if (kMutableKeys::value) + { + absl::allocator_traits::construct(*alloc, &slot->mutable_value, std::forward(args)...); + } + else + { + absl::allocator_traits::construct(*alloc, &slot->value, std::forward(args)...); + } + } + + // Construct this slot by moving from another slot. + template + static void construct(Allocator* alloc, slot_type* slot, slot_type* other) + { + emplace(slot); + if (kMutableKeys::value) + { + absl::allocator_traits::construct( + *alloc, &slot->mutable_value, std::move(other->mutable_value) + ); + } + else + { + absl::allocator_traits::construct(*alloc, &slot->value, std::move(other->value)); + } + } + + // Construct this slot by copying from another slot. 
+ template + static void construct(Allocator* alloc, slot_type* slot, const slot_type* other) + { + emplace(slot); + absl::allocator_traits::construct(*alloc, &slot->value, other->value); + } + + template + static void destroy(Allocator* alloc, slot_type* slot) + { + if (kMutableKeys::value) + { + absl::allocator_traits::destroy(*alloc, &slot->mutable_value); + } + else + { + absl::allocator_traits::destroy(*alloc, &slot->value); + } + } + + template + static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) + { + emplace(new_slot); + if (kMutableKeys::value) + { + absl::allocator_traits::construct( + *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value) + ); + } + else + { + absl::allocator_traits::construct(*alloc, &new_slot->value, std::move(old_slot->value)); + } + destroy(alloc, old_slot); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h index 66068a5..01c5dd3 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/counting_allocator.h @@ -20,103 +20,125 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// This is a stateful allocator, but the state lives outside of the -// allocator (in whatever test is using the allocator). This is odd -// but helps in tests where the allocator is propagated into nested -// containers - that chain of allocators uses the same state and is -// thus easier to query for aggregate allocation information. -template -class CountingAllocator { - public: - using Allocator = std::allocator; - using AllocatorTraits = std::allocator_traits; - using value_type = typename AllocatorTraits::value_type; - using pointer = typename AllocatorTraits::pointer; - using const_pointer = typename AllocatorTraits::const_pointer; - using size_type = typename AllocatorTraits::size_type; - using difference_type = typename AllocatorTraits::difference_type; - - CountingAllocator() = default; - explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {} - CountingAllocator(int64_t* bytes_used, int64_t* instance_count) - : bytes_used_(bytes_used), instance_count_(instance_count) {} - - template - CountingAllocator(const CountingAllocator& x) - : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {} - - pointer allocate( - size_type n, - typename AllocatorTraits::const_void_pointer hint = nullptr) { - Allocator allocator; - pointer ptr = AllocatorTraits::allocate(allocator, n, hint); - if (bytes_used_ != nullptr) { - *bytes_used_ += n * sizeof(T); - } - return ptr; - } - - void deallocate(pointer p, size_type n) { - Allocator allocator; - AllocatorTraits::deallocate(allocator, p, n); - if (bytes_used_ != nullptr) { - *bytes_used_ -= n * sizeof(T); - } - } - - template - void construct(U* p, Args&&... args) { - Allocator allocator; - AllocatorTraits::construct(allocator, p, std::forward(args)...); - if (instance_count_ != nullptr) { - *instance_count_ += 1; - } - } - - template - void destroy(U* p) { - Allocator allocator; - // Ignore GCC warning bug. 
+namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // This is a stateful allocator, but the state lives outside of the + // allocator (in whatever test is using the allocator). This is odd + // but helps in tests where the allocator is propagated into nested + // containers - that chain of allocators uses the same state and is + // thus easier to query for aggregate allocation information. + template + class CountingAllocator + { + public: + using Allocator = std::allocator; + using AllocatorTraits = std::allocator_traits; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + + CountingAllocator() = default; + explicit CountingAllocator(int64_t* bytes_used) : + bytes_used_(bytes_used) + { + } + CountingAllocator(int64_t* bytes_used, int64_t* instance_count) : + bytes_used_(bytes_used), + instance_count_(instance_count) + { + } + + template + CountingAllocator(const CountingAllocator& x) : + bytes_used_(x.bytes_used_), + instance_count_(x.instance_count_) + { + } + + pointer allocate( + size_type n, + typename AllocatorTraits::const_void_pointer hint = nullptr + ) + { + Allocator allocator; + pointer ptr = AllocatorTraits::allocate(allocator, n, hint); + if (bytes_used_ != nullptr) + { + *bytes_used_ += n * sizeof(T); + } + return ptr; + } + + void deallocate(pointer p, size_type n) + { + Allocator allocator; + AllocatorTraits::deallocate(allocator, p, n); + if (bytes_used_ != nullptr) + { + *bytes_used_ -= n * sizeof(T); + } + } + + template + void construct(U* p, Args&&... args) + { + Allocator allocator; + AllocatorTraits::construct(allocator, p, std::forward(args)...); + if (instance_count_ != nullptr) + { + *instance_count_ += 1; + } + } + + template + void destroy(U* p) + { + Allocator allocator; + // Ignore GCC warning bug. 
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wuse-after-free" #endif - AllocatorTraits::destroy(allocator, p); + AllocatorTraits::destroy(allocator, p); #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic pop #endif - if (instance_count_ != nullptr) { - *instance_count_ -= 1; - } - } - - template - class rebind { - public: - using other = CountingAllocator; - }; - - friend bool operator==(const CountingAllocator& a, - const CountingAllocator& b) { - return a.bytes_used_ == b.bytes_used_ && - a.instance_count_ == b.instance_count_; - } - - friend bool operator!=(const CountingAllocator& a, - const CountingAllocator& b) { - return !(a == b); - } - - int64_t* bytes_used_ = nullptr; - int64_t* instance_count_ = nullptr; -}; - -} // namespace container_internal -ABSL_NAMESPACE_END + if (instance_count_ != nullptr) + { + *instance_count_ -= 1; + } + } + + template + class rebind + { + public: + using other = CountingAllocator; + }; + + friend bool operator==(const CountingAllocator& a, const CountingAllocator& b) + { + return a.bytes_used_ == b.bytes_used_ && + a.instance_count_ == b.instance_count_; + } + + friend bool operator!=(const CountingAllocator& a, const CountingAllocator& b) + { + return !(a == b); + } + + int64_t* bytes_used_ = nullptr; + int64_t* instance_count_ = nullptr; + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h index 250e662..a664482 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_function_defaults.h @@ -56,108 +56,140 @@ #include "absl/strings/cord.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// The hash of an object of type T is computed by using absl::Hash. -template -struct HashEq { - using Hash = absl::Hash; - using Eq = std::equal_to; -}; - -struct StringHash { - using is_transparent = void; - - size_t operator()(absl::string_view v) const { - return absl::Hash{}(v); - } - size_t operator()(const absl::Cord& v) const { - return absl::Hash{}(v); - } -}; - -struct StringEq { - using is_transparent = void; - bool operator()(absl::string_view lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } - bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { - return lhs == rhs; - } - bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { - return lhs == rhs; - } -}; - -// Supports heterogeneous lookup for string-like elements. -struct StringHashEq { - using Hash = StringHash; - using Eq = StringEq; -}; - -template <> -struct HashEq : StringHashEq {}; -template <> -struct HashEq : StringHashEq {}; -template <> -struct HashEq : StringHashEq {}; - -// Supports heterogeneous lookup for pointers and smart pointers. 
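A rough usage sketch of the test-only counting allocator above. Because the counters live outside the allocator, allocator copies that containers propagate into nested structures keep reporting into the same two integers:

#include <cstdint>
#include <vector>
#include "absl/container/internal/counting_allocator.h"

int main() {
  int64_t bytes = 0;
  int64_t instances = 0;
  using Alloc = absl::container_internal::CountingAllocator<int>;
  Alloc alloc(&bytes, &instances);
  {
    std::vector<int, Alloc> v(alloc);
    v.resize(100);  // allocate() adds n * sizeof(int) to `bytes`,
                    // construct() bumps `instances` once per element
  }
  // deallocate()/destroy() subtract on the way out, so both counters should
  // be back to zero once the vector is destroyed.
  return (bytes == 0 && instances == 0) ? 0 : 1;
}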
-template -struct HashEq { - struct Hash { - using is_transparent = void; - template - size_t operator()(const U& ptr) const { - return absl::Hash{}(HashEq::ToPtr(ptr)); - } - }; - struct Eq { - using is_transparent = void; - template - bool operator()(const A& a, const B& b) const { - return HashEq::ToPtr(a) == HashEq::ToPtr(b); - } - }; - - private: - static const T* ToPtr(const T* ptr) { return ptr; } - template - static const T* ToPtr(const std::unique_ptr& ptr) { - return ptr.get(); - } - template - static const T* ToPtr(const std::shared_ptr& ptr) { - return ptr.get(); - } -}; - -template -struct HashEq> : HashEq {}; -template -struct HashEq> : HashEq {}; - -// This header's visibility is restricted. If you need to access the default -// hasher please use the container's ::hasher alias instead. -// -// Example: typename Hash = typename absl::flat_hash_map::hasher -template -using hash_default_hash = typename container_internal::HashEq::Hash; - -// This header's visibility is restricted. If you need to access the default -// key equal please use the container's ::key_equal alias instead. -// -// Example: typename Eq = typename absl::flat_hash_map::key_equal -template -using hash_default_eq = typename container_internal::HashEq::Eq; - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // The hash of an object of type T is computed by using absl::Hash. + template + struct HashEq + { + using Hash = absl::Hash; + using Eq = std::equal_to; + }; + + struct StringHash + { + using is_transparent = void; + + size_t operator()(absl::string_view v) const + { + return absl::Hash{}(v); + } + size_t operator()(const absl::Cord& v) const + { + return absl::Hash{}(v); + } + }; + + struct StringEq + { + using is_transparent = void; + bool operator()(absl::string_view lhs, absl::string_view rhs) const + { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const + { + return lhs == rhs; + } + bool operator()(const absl::Cord& lhs, absl::string_view rhs) const + { + return lhs == rhs; + } + bool operator()(absl::string_view lhs, const absl::Cord& rhs) const + { + return lhs == rhs; + } + }; + + // Supports heterogeneous lookup for string-like elements. + struct StringHashEq + { + using Hash = StringHash; + using Eq = StringEq; + }; + + template<> + struct HashEq : StringHashEq + { + }; + template<> + struct HashEq : StringHashEq + { + }; + template<> + struct HashEq : StringHashEq + { + }; + + // Supports heterogeneous lookup for pointers and smart pointers. + template + struct HashEq + { + struct Hash + { + using is_transparent = void; + template + size_t operator()(const U& ptr) const + { + return absl::Hash{}(HashEq::ToPtr(ptr)); + } + }; + struct Eq + { + using is_transparent = void; + template + bool operator()(const A& a, const B& b) const + { + return HashEq::ToPtr(a) == HashEq::ToPtr(b); + } + }; + + private: + static const T* ToPtr(const T* ptr) + { + return ptr; + } + template + static const T* ToPtr(const std::unique_ptr& ptr) + { + return ptr.get(); + } + template + static const T* ToPtr(const std::shared_ptr& ptr) + { + return ptr.get(); + } + }; + + template + struct HashEq> : HashEq + { + }; + template + struct HashEq> : HashEq + { + }; + + // This header's visibility is restricted. If you need to access the default + // hasher please use the container's ::hasher alias instead. 
+ // + // Example: typename Hash = typename absl::flat_hash_map::hasher + template + using hash_default_hash = typename container_internal::HashEq::Hash; + + // This header's visibility is restricted. If you need to access the default + // key equal please use the container's ::key_equal alias instead. + // + // Example: typename Eq = typename absl::flat_hash_map::key_equal + template + using hash_default_eq = typename container_internal::HashEq::Eq; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h index f1f555a..a565748 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_generator_testing.h @@ -34,149 +34,176 @@ #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -namespace hash_internal { -namespace generator_internal { - -template -struct IsMap : std::false_type {}; - -template -struct IsMap> : std::true_type {}; - -} // namespace generator_internal - -std::mt19937_64* GetSharedRng(); - -enum Enum { - kEnumEmpty, - kEnumDeleted, -}; - -enum class EnumClass : uint64_t { - kEmpty, - kDeleted, -}; - -inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { - return o << static_cast(ec); -} - -template -struct Generator; - -template -struct Generator::value>::type> { - T operator()() const { - std::uniform_int_distribution dist; - return dist(*GetSharedRng()); - } -}; - -template <> -struct Generator { - Enum operator()() const { - std::uniform_int_distribution::type> - dist; - while (true) { - auto variate = dist(*GetSharedRng()); - if (variate != kEnumEmpty && variate != kEnumDeleted) - return static_cast(variate); - } - } -}; - -template <> -struct Generator { - EnumClass operator()() const { - std::uniform_int_distribution< - typename std::underlying_type::type> - dist; - while (true) { - EnumClass variate = static_cast(dist(*GetSharedRng())); - if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) - return static_cast(variate); - } - } -}; - -template <> -struct Generator { - std::string operator()() const; -}; - -template <> -struct Generator { - absl::string_view operator()() const; -}; - -template <> -struct Generator { - NonStandardLayout operator()() const { - return NonStandardLayout(Generator()()); - } -}; - -template -struct Generator> { - std::pair operator()() const { - return std::pair(Generator::type>()(), - Generator::type>()()); - } -}; - -template -struct Generator> { - std::tuple operator()() const { - return std::tuple(Generator::type>()()...); - } -}; - -template -struct Generator> { - std::unique_ptr operator()() const { - return absl::make_unique(Generator()()); - } -}; - -template -struct Generator().key()), - decltype(std::declval().value())>> - : Generator().key())>::type, - typename std::decay().value())>::type>> {}; - -template -using GeneratedType = decltype( - std::declval::value, - typename Container::value_type, - typename Container::key_type>::type>&>()()); - -// Naive wrapper that performs a linear search of previous values. -// Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
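A short usage sketch of what the transparent StringHash/StringEq defaults enable: a std::string-keyed Abseil table can be probed with an absl::string_view (or absl::Cord) without materializing a temporary std::string key.

#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"

int main() {
  absl::flat_hash_map<std::string, int> table;
  table["hello"] = 1;
  absl::string_view needle = "hello";
  // hash_default_hash<std::string> / hash_default_eq<std::string> are
  // transparent, so find() accepts the string_view directly.
  auto it = table.find(needle);
  return (it != table.end() && it->second == 1) ? 0 : 1;
}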
-template -struct UniqueGenerator { - Generator gen; - std::vector values; - - T operator()() { - assert(values.size() < kMaxValues); - for (;;) { - T value = gen(); - if (std::find(values.begin(), values.end(), value) == values.end()) { - values.push_back(value); - return value; - } - } - } -}; - -} // namespace hash_internal -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hash_internal + { + namespace generator_internal + { + + template + struct IsMap : std::false_type + { + }; + + template + struct IsMap> : std::true_type + { + }; + + } // namespace generator_internal + + std::mt19937_64* GetSharedRng(); + + enum Enum + { + kEnumEmpty, + kEnumDeleted, + }; + + enum class EnumClass : uint64_t + { + kEmpty, + kDeleted, + }; + + inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) + { + return o << static_cast(ec); + } + + template + struct Generator; + + template + struct Generator::value>::type> + { + T operator()() const + { + std::uniform_int_distribution dist; + return dist(*GetSharedRng()); + } + }; + + template<> + struct Generator + { + Enum operator()() const + { + std::uniform_int_distribution::type> + dist; + while (true) + { + auto variate = dist(*GetSharedRng()); + if (variate != kEnumEmpty && variate != kEnumDeleted) + return static_cast(variate); + } + } + }; + + template<> + struct Generator + { + EnumClass operator()() const + { + std::uniform_int_distribution< + typename std::underlying_type::type> + dist; + while (true) + { + EnumClass variate = static_cast(dist(*GetSharedRng())); + if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) + return static_cast(variate); + } + } + }; + + template<> + struct Generator + { + std::string operator()() const; + }; + + template<> + struct Generator + { + absl::string_view operator()() const; + }; + + template<> + struct Generator + { + NonStandardLayout operator()() const + { + return NonStandardLayout(Generator()()); + } + }; + + template + struct Generator> + { + std::pair operator()() const + { + return std::pair(Generator::type>()(), Generator::type>()()); + } + }; + + template + struct Generator> + { + std::tuple operator()() const + { + return std::tuple(Generator::type>()()...); + } + }; + + template + struct Generator> + { + std::unique_ptr operator()() const + { + return absl::make_unique(Generator()()); + } + }; + + template + struct Generator().key()), decltype(std::declval().value())>> : Generator().key())>::type, typename std::decay().value())>::type>> + { + }; + + template + using GeneratedType = decltype(std::declval::value, typename Container::value_type, typename Container::key_type>::type>&>()()); + + // Naive wrapper that performs a linear search of previous values. + // Beware this is O(SQR), which is reasonable for smaller kMaxValues. 
+ template + struct UniqueGenerator + { + Generator gen; + std::vector values; + + T operator()() + { + assert(values.size() < kMaxValues); + for (;;) + { + T value = gen(); + if (std::find(values.begin(), values.end(), value) == values.end()) + { + values.push_back(value); + return value; + } + } + } + }; + + } // namespace hash_internal + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h index 01c40d2..5577ab7 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_testing.h @@ -29,141 +29,197 @@ #include "absl/hash/hash.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -namespace hash_testing_internal { - -template -struct WithId { - WithId() : id_(next_id()) {} - WithId(const WithId& that) : id_(that.id_) {} - WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } - WithId& operator=(const WithId& that) { - id_ = that.id_; - return *this; - } - WithId& operator=(WithId&& that) { - id_ = that.id_; - that.id_ = 0; - return *this; - } - - size_t id() const { return id_; } - - friend bool operator==(const WithId& a, const WithId& b) { - return a.id_ == b.id_; - } - friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } - - protected: - explicit WithId(size_t id) : id_(id) {} - - private: - size_t id_; - - template - static size_t next_id() { - // 0 is reserved for moved from state. - static size_t gId = 1; - return gId++; - } -}; - -} // namespace hash_testing_internal - -struct NonStandardLayout { - NonStandardLayout() {} - explicit NonStandardLayout(std::string s) : value(std::move(s)) {} - virtual ~NonStandardLayout() {} - - friend bool operator==(const NonStandardLayout& a, - const NonStandardLayout& b) { - return a.value == b.value; - } - friend bool operator!=(const NonStandardLayout& a, - const NonStandardLayout& b) { - return a.value != b.value; - } - - template - friend H AbslHashValue(H h, const NonStandardLayout& v) { - return H::combine(std::move(h), v.value); - } - - std::string value; -}; - -struct StatefulTestingHash - : absl::container_internal::hash_testing_internal::WithId< - StatefulTestingHash> { - template - size_t operator()(const T& t) const { - return absl::Hash{}(t); - } -}; - -struct StatefulTestingEqual - : absl::container_internal::hash_testing_internal::WithId< - StatefulTestingEqual> { - template - bool operator()(const T& t, const U& u) const { - return t == u; - } -}; - -// It is expected that Alloc() == Alloc() for all allocators so we cannot use -// WithId base. We need to explicitly assign ids. -template -struct Alloc : std::allocator { - using propagate_on_container_swap = std::true_type; - - // Using old paradigm for this to ensure compatibility. 
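A standalone toy of the retry-on-duplicate idea the comment describes (the `ToyUniqueGenerator` name and fixed seed are illustrative, not the absl type): each new value is checked against all previous ones with a linear scan, which is quadratic overall but fine for small kMaxValues.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

template <class T, std::size_t kMaxValues>
struct ToyUniqueGenerator {
  std::mt19937_64 rng{42};  // fixed seed for reproducibility
  std::vector<T> values;

  T operator()() {
    assert(values.size() < kMaxValues);
    std::uniform_int_distribution<T> dist;
    for (;;) {
      T value = dist(rng);
      // Linear search over all previously returned values; retry on a hit.
      if (std::find(values.begin(), values.end(), value) == values.end()) {
        values.push_back(value);
        return value;
      }
    }
  }
};

int main() {
  ToyUniqueGenerator<std::uint64_t, 8> gen;
  for (int i = 0; i < 8; ++i) gen();  // 8 distinct pseudo-random values
  return gen.values.size() == 8 ? 0 : 1;
}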
- explicit Alloc(size_t id = 0) : id_(id) {} - - Alloc(const Alloc&) = default; - Alloc& operator=(const Alloc&) = default; - - template - Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} - - template - struct rebind { - using other = Alloc; - }; - - size_t id() const { return id_; } - - friend bool operator==(const Alloc& a, const Alloc& b) { - return a.id_ == b.id_; - } - friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } - - private: - size_t id_ = (std::numeric_limits::max)(); -}; - -template -auto items(const Map& m) -> std::vector< - std::pair> { - using std::get; - std::vector> res; - res.reserve(m.size()); - for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); - return res; -} - -template -auto keys(const Set& s) - -> std::vector::type> { - std::vector::type> res; - res.reserve(s.size()); - for (const auto& v : s) res.emplace_back(v); - return res; -} - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hash_testing_internal + { + + template + struct WithId + { + WithId() : + id_(next_id()) + { + } + WithId(const WithId& that) : + id_(that.id_) + { + } + WithId(WithId&& that) : + id_(that.id_) + { + that.id_ = 0; + } + WithId& operator=(const WithId& that) + { + id_ = that.id_; + return *this; + } + WithId& operator=(WithId&& that) + { + id_ = that.id_; + that.id_ = 0; + return *this; + } + + size_t id() const + { + return id_; + } + + friend bool operator==(const WithId& a, const WithId& b) + { + return a.id_ == b.id_; + } + friend bool operator!=(const WithId& a, const WithId& b) + { + return !(a == b); + } + + protected: + explicit WithId(size_t id) : + id_(id) + { + } + + private: + size_t id_; + + template + static size_t next_id() + { + // 0 is reserved for moved from state. + static size_t gId = 1; + return gId++; + } + }; + + } // namespace hash_testing_internal + + struct NonStandardLayout + { + NonStandardLayout() + { + } + explicit NonStandardLayout(std::string s) : + value(std::move(s)) + { + } + virtual ~NonStandardLayout() + { + } + + friend bool operator==(const NonStandardLayout& a, const NonStandardLayout& b) + { + return a.value == b.value; + } + friend bool operator!=(const NonStandardLayout& a, const NonStandardLayout& b) + { + return a.value != b.value; + } + + template + friend H AbslHashValue(H h, const NonStandardLayout& v) + { + return H::combine(std::move(h), v.value); + } + + std::string value; + }; + + struct StatefulTestingHash : absl::container_internal::hash_testing_internal::WithId + { + template + size_t operator()(const T& t) const + { + return absl::Hash{}(t); + } + }; + + struct StatefulTestingEqual : absl::container_internal::hash_testing_internal::WithId + { + template + bool operator()(const T& t, const U& u) const + { + return t == u; + } + }; + + // It is expected that Alloc() == Alloc() for all allocators so we cannot use + // WithId base. We need to explicitly assign ids. + template + struct Alloc : std::allocator + { + using propagate_on_container_swap = std::true_type; + + // Using old paradigm for this to ensure compatibility. 
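A small sketch of how the explicitly assigned ids are meant to be used, assuming this internal, test-only header is reachable: equality is defined purely by id, which is how tests observe whether an allocator propagated.

#include "absl/container/internal/hash_policy_testing.h"

int main() {
  using absl::container_internal::Alloc;
  Alloc<int> a(7);
  Alloc<double> b(a);  // the rebinding copy keeps the explicitly assigned id
  // a == Alloc<int>(7) because operator== compares ids, not state.
  return (a.id() == 7 && b.id() == 7 && a == Alloc<int>(7)) ? 0 : 1;
}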
+ explicit Alloc(size_t id = 0) : + id_(id) + { + } + + Alloc(const Alloc&) = default; + Alloc& operator=(const Alloc&) = default; + + template + Alloc(const Alloc& that) : + std::allocator(that), + id_(that.id()) + { + } + + template + struct rebind + { + using other = Alloc; + }; + + size_t id() const + { + return id_; + } + + friend bool operator==(const Alloc& a, const Alloc& b) + { + return a.id_ == b.id_; + } + friend bool operator!=(const Alloc& a, const Alloc& b) + { + return !(a == b); + } + + private: + size_t id_ = (std::numeric_limits::max)(); + }; + + template + auto items(const Map& m) -> std::vector< + std::pair> + { + using std::get; + std::vector> res; + res.reserve(m.size()); + for (const auto& v : m) + res.emplace_back(get<0>(v), get<1>(v)); + return res; + } + + template + auto keys(const Set& s) + -> std::vector::type> + { + std::vector::type> res; + res.reserve(s.size()); + for (const auto& v : s) + res.emplace_back(v); + return res; + } + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl // ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions @@ -174,8 +230,8 @@ ABSL_NAMESPACE_END // From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) // "the unordered associative containers in and // meet the allocator-aware container requirements;" -#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ -( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) +#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \ + (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9)) #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 #else #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h index 46c97b1..11e9ea3 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hash_policy_traits.h @@ -23,186 +23,204 @@ #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// Defines how slots are initialized/destroyed/moved. -template -struct hash_policy_traits { - // The type of the keys stored in the hashtable. - using key_type = typename Policy::key_type; - - private: - struct ReturnKey { - // When C++17 is available, we can use std::launder to provide mutable - // access to the key for use in node handle. +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Defines how slots are initialized/destroyed/moved. + template + struct hash_policy_traits + { + // The type of the keys stored in the hashtable. + using key_type = typename Policy::key_type; + + private: + struct ReturnKey + { + // When C++17 is available, we can use std::launder to provide mutable + // access to the key for use in node handle. #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 - template ::value, int> = 0> - static key_type& Impl(Key&& k, int) { - return *std::launder( - const_cast(std::addressof(std::forward(k)))); - } + template::value, int> = 0> + static key_type& Impl(Key&& k, int) + { + return *std::launder( + const_cast(std::addressof(std::forward(k))) + ); + } #endif - template - static Key Impl(Key&& k, char) { - return std::forward(k); - } - - // When Key=T&, we forward the lvalue reference. - // When Key=T, we return by value to avoid a dangling reference. - // eg, for string_hash_map. - template - auto operator()(Key&& k, const Args&...) 
const - -> decltype(Impl(std::forward(k), 0)) { - return Impl(std::forward(k), 0); - } - }; - - template - struct ConstantIteratorsImpl : std::false_type {}; - - template - struct ConstantIteratorsImpl> - : P::constant_iterators {}; - - public: - // The actual object stored in the hash table. - using slot_type = typename Policy::slot_type; - - // The argument type for insertions into the hashtable. This is different - // from value_type for increased performance. See initializer_list constructor - // and insert() member functions for more details. - using init_type = typename Policy::init_type; - - using reference = decltype(Policy::element(std::declval())); - using pointer = typename std::remove_reference::type*; - using value_type = typename std::remove_reference::type; - - // Policies can set this variable to tell raw_hash_set that all iterators - // should be constant, even `iterator`. This is useful for set-like - // containers. - // Defaults to false if not provided by the policy. - using constant_iterators = ConstantIteratorsImpl<>; - - // PRECONDITION: `slot` is UNINITIALIZED - // POSTCONDITION: `slot` is INITIALIZED - template - static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { - Policy::construct(alloc, slot, std::forward(args)...); - } - - // PRECONDITION: `slot` is INITIALIZED - // POSTCONDITION: `slot` is UNINITIALIZED - template - static void destroy(Alloc* alloc, slot_type* slot) { - Policy::destroy(alloc, slot); - } - - // Transfers the `old_slot` to `new_slot`. Any memory allocated by the - // allocator inside `old_slot` to `new_slot` can be transferred. - // - // OPTIONAL: defaults to: - // - // clone(new_slot, std::move(*old_slot)); - // destroy(old_slot); - // - // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED - // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is - // UNINITIALIZED - template - static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { - transfer_impl(alloc, new_slot, old_slot, 0); - } - - // PRECONDITION: `slot` is INITIALIZED - // POSTCONDITION: `slot` is INITIALIZED - template - static auto element(slot_type* slot) -> decltype(P::element(slot)) { - return P::element(slot); - } - - // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. - // - // If `slot` is nullptr, returns the constant amount of memory owned by any - // full slot or -1 if slots own variable amounts of memory. - // - // PRECONDITION: `slot` is INITIALIZED or nullptr - template - static size_t space_used(const slot_type* slot) { - return P::space_used(slot); - } - - // Provides generalized access to the key for elements, both for elements in - // the table and for elements that have not yet been inserted (or even - // constructed). We would like an API that allows us to say: `key(args...)` - // but we cannot do that for all cases, so we use this more general API that - // can be used for many things, including the following: - // - // - Given an element in a table, get its key. - // - Given an element initializer, get its key. - // - Given `emplace()` arguments, get the element key. - // - // Implementations of this must adhere to a very strict technical - // specification around aliasing and consuming arguments: - // - // Let `value_type` be the result type of `element()` without ref- and - // cv-qualifiers. The first argument is a functor, the rest are constructor - // arguments for `value_type`. 
Returns `std::forward(f)(k, xs...)`, where - // `k` is the element key, and `xs...` are the new constructor arguments for - // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias - // `ts...`. The key won't be touched once `xs...` are used to construct an - // element; `ts...` won't be touched at all, which allows `apply()` to consume - // any rvalues among them. - // - // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not - // trigger a hard compile error unless it originates from `f`. In other words, - // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not - // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. - // - // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, - // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. - template - static auto apply(F&& f, Ts&&... ts) - -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { - return P::apply(std::forward(f), std::forward(ts)...); - } - - // Returns the "key" portion of the slot. - // Used for node handle manipulation. - template - static auto mutable_key(slot_type* slot) - -> decltype(P::apply(ReturnKey(), element(slot))) { - return P::apply(ReturnKey(), element(slot)); - } - - // Returns the "value" (as opposed to the "key") portion of the element. Used - // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. - template - static auto value(T* elem) -> decltype(P::value(elem)) { - return P::value(elem); - } - - private: - // Use auto -> decltype as an enabler. - template - static auto transfer_impl(Alloc* alloc, slot_type* new_slot, - slot_type* old_slot, int) - -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { - P::transfer(alloc, new_slot, old_slot); - } - template - static void transfer_impl(Alloc* alloc, slot_type* new_slot, - slot_type* old_slot, char) { - construct(alloc, new_slot, std::move(element(old_slot))); - destroy(alloc, old_slot); - } -}; - -} // namespace container_internal -ABSL_NAMESPACE_END + template + static Key Impl(Key&& k, char) + { + return std::forward(k); + } + + // When Key=T&, we forward the lvalue reference. + // When Key=T, we return by value to avoid a dangling reference. + // eg, for string_hash_map. + template + auto operator()(Key&& k, const Args&...) const + -> decltype(Impl(std::forward(k), 0)) + { + return Impl(std::forward(k), 0); + } + }; + + template + struct ConstantIteratorsImpl : std::false_type + { + }; + + template + struct ConstantIteratorsImpl> : P::constant_iterators + { + }; + + public: + // The actual object stored in the hash table. + using slot_type = typename Policy::slot_type; + + // The argument type for insertions into the hashtable. This is different + // from value_type for increased performance. See initializer_list constructor + // and insert() member functions for more details. + using init_type = typename Policy::init_type; + + using reference = decltype(Policy::element(std::declval())); + using pointer = typename std::remove_reference::type*; + using value_type = typename std::remove_reference::type; + + // Policies can set this variable to tell raw_hash_set that all iterators + // should be constant, even `iterator`. This is useful for set-like + // containers. + // Defaults to false if not provided by the policy. 
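To make the Policy interface that hash_policy_traits forwards to concrete, here is a hypothetical minimal set-like policy, exercised directly rather than through raw_hash_set. `ToyIntSetPolicy` is illustrative only and not one of the real Abseil policies:

#include <memory>
#include <type_traits>
#include <utility>
#include "absl/container/internal/hash_policy_traits.h"

struct ToyIntSetPolicy {
  using key_type = int;
  using slot_type = int;
  using init_type = int;
  using constant_iterators = std::true_type;  // set-like: never mutate via iterator

  template <class Alloc, class... Args>
  static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
    std::allocator_traits<Alloc>::construct(*alloc, slot,
                                            std::forward<Args>(args)...);
  }
  template <class Alloc>
  static void destroy(Alloc* alloc, slot_type* slot) {
    std::allocator_traits<Alloc>::destroy(*alloc, slot);
  }
  static int& element(slot_type* slot) { return *slot; }
  // For a set the key *is* the value, so apply() hands the same int to the
  // functor both as the key and as the constructor argument.
  template <class F, class... Args>
  static auto apply(F&& f, int v, Args&&...)
      -> decltype(std::forward<F>(f)(v, v)) {
    return std::forward<F>(f)(v, v);
  }
};

int main() {
  using Traits = absl::container_internal::hash_policy_traits<ToyIntSetPolicy>;
  std::allocator<int> alloc;
  int slot = 0;
  Traits::construct(&alloc, &slot, 42);
  int key = Traits::apply([](int k, int) { return k; }, Traits::element(&slot));
  Traits::destroy(&alloc, &slot);
  return key == 42 ? 0 : 1;
}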
+ using constant_iterators = ConstantIteratorsImpl<>; + + // PRECONDITION: `slot` is UNINITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) + { + Policy::construct(alloc, slot, std::forward(args)...); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is UNINITIALIZED + template + static void destroy(Alloc* alloc, slot_type* slot) + { + Policy::destroy(alloc, slot); + } + + // Transfers the `old_slot` to `new_slot`. Any memory allocated by the + // allocator inside `old_slot` to `new_slot` can be transferred. + // + // OPTIONAL: defaults to: + // + // clone(new_slot, std::move(*old_slot)); + // destroy(old_slot); + // + // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED + // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is + // UNINITIALIZED + template + static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) + { + transfer_impl(alloc, new_slot, old_slot, 0); + } + + // PRECONDITION: `slot` is INITIALIZED + // POSTCONDITION: `slot` is INITIALIZED + template + static auto element(slot_type* slot) -> decltype(P::element(slot)) + { + return P::element(slot); + } + + // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. + // + // If `slot` is nullptr, returns the constant amount of memory owned by any + // full slot or -1 if slots own variable amounts of memory. + // + // PRECONDITION: `slot` is INITIALIZED or nullptr + template + static size_t space_used(const slot_type* slot) + { + return P::space_used(slot); + } + + // Provides generalized access to the key for elements, both for elements in + // the table and for elements that have not yet been inserted (or even + // constructed). We would like an API that allows us to say: `key(args...)` + // but we cannot do that for all cases, so we use this more general API that + // can be used for many things, including the following: + // + // - Given an element in a table, get its key. + // - Given an element initializer, get its key. + // - Given `emplace()` arguments, get the element key. + // + // Implementations of this must adhere to a very strict technical + // specification around aliasing and consuming arguments: + // + // Let `value_type` be the result type of `element()` without ref- and + // cv-qualifiers. The first argument is a functor, the rest are constructor + // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where + // `k` is the element key, and `xs...` are the new constructor arguments for + // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias + // `ts...`. The key won't be touched once `xs...` are used to construct an + // element; `ts...` won't be touched at all, which allows `apply()` to consume + // any rvalues among them. + // + // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not + // trigger a hard compile error unless it originates from `f`. In other words, + // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not + // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. + // + // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, + // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. + template + static auto apply(F&& f, Ts&&... ts) + -> decltype(P::apply(std::forward(f), std::forward(ts)...)) + { + return P::apply(std::forward(f), std::forward(ts)...); + } + + // Returns the "key" portion of the slot. 
+ // Used for node handle manipulation. + template + static auto mutable_key(slot_type* slot) + -> decltype(P::apply(ReturnKey(), element(slot))) + { + return P::apply(ReturnKey(), element(slot)); + } + + // Returns the "value" (as opposed to the "key") portion of the element. Used + // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. + template + static auto value(T* elem) -> decltype(P::value(elem)) + { + return P::value(elem); + } + + private: + // Use auto -> decltype as an enabler. + template + static auto transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, int) + -> decltype((void)P::transfer(alloc, new_slot, old_slot)) + { + P::transfer(alloc, new_slot, old_slot); + } + template + static void transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, char) + { + construct(alloc, new_slot, std::move(element(old_slot))); + destroy(alloc, old_slot); + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h index 19d5212..6da7252 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug.h @@ -37,74 +37,86 @@ #include "absl/container/internal/hashtable_debug_hooks.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -// Returns the number of probes required to lookup `key`. Returns 0 for a -// search with no collisions. Higher values mean more hash collisions occurred; -// however, the exact meaning of this number varies according to the container -// type. -template -size_t GetHashtableDebugNumProbes( - const C& c, const typename C::key_type& key) { - return absl::container_internal::hashtable_debug_internal:: - HashtableDebugAccess::GetNumProbes(c, key); -} + // Returns the number of probes required to lookup `key`. Returns 0 for a + // search with no collisions. Higher values mean more hash collisions occurred; + // however, the exact meaning of this number varies according to the container + // type. + template + size_t GetHashtableDebugNumProbes( + const C& c, const typename C::key_type& key + ) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::GetNumProbes(c, key); + } -// Gets a histogram of the number of probes for each elements in the container. -// The sum of all the values in the vector is equal to container.size(). -template -std::vector GetHashtableDebugNumProbesHistogram(const C& container) { - std::vector v; - for (auto it = container.begin(); it != container.end(); ++it) { - size_t num_probes = GetHashtableDebugNumProbes( - container, - absl::container_internal::hashtable_debug_internal::GetKey(*it, 0)); - v.resize((std::max)(v.size(), num_probes + 1)); - v[num_probes]++; - } - return v; -} + // Gets a histogram of the number of probes for each elements in the container. + // The sum of all the values in the vector is equal to container.size(). 
+ template + std::vector GetHashtableDebugNumProbesHistogram(const C& container) + { + std::vector v; + for (auto it = container.begin(); it != container.end(); ++it) + { + size_t num_probes = GetHashtableDebugNumProbes( + container, + absl::container_internal::hashtable_debug_internal::GetKey(*it, 0) + ); + v.resize((std::max)(v.size(), num_probes + 1)); + v[num_probes]++; + } + return v; + } -struct HashtableDebugProbeSummary { - size_t total_elements; - size_t total_num_probes; - double mean; -}; + struct HashtableDebugProbeSummary + { + size_t total_elements; + size_t total_num_probes; + double mean; + }; -// Gets a summary of the probe count distribution for the elements in the -// container. -template -HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) { - auto probes = GetHashtableDebugNumProbesHistogram(container); - HashtableDebugProbeSummary summary = {}; - for (size_t i = 0; i < probes.size(); ++i) { - summary.total_elements += probes[i]; - summary.total_num_probes += probes[i] * i; - } - summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; - return summary; -} + // Gets a summary of the probe count distribution for the elements in the + // container. + template + HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) + { + auto probes = GetHashtableDebugNumProbesHistogram(container); + HashtableDebugProbeSummary summary = {}; + for (size_t i = 0; i < probes.size(); ++i) + { + summary.total_elements += probes[i]; + summary.total_num_probes += probes[i] * i; + } + summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; + return summary; + } -// Returns the number of bytes requested from the allocator by the container -// and not freed. -template -size_t AllocatedByteSize(const C& c) { - return absl::container_internal::hashtable_debug_internal:: - HashtableDebugAccess::AllocatedByteSize(c); -} + // Returns the number of bytes requested from the allocator by the container + // and not freed. + template + size_t AllocatedByteSize(const C& c) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::AllocatedByteSize(c); + } -// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` -// and `c.size()` is equal to `num_elements`. -template -size_t LowerBoundAllocatedByteSize(size_t num_elements) { - return absl::container_internal::hashtable_debug_internal:: - HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); -} + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` + // and `c.size()` is equal to `num_elements`. 
+ template + size_t LowerBoundAllocatedByteSize(size_t num_elements) + { + return absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); + } -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h index 3e9ea59..4f0d211 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtable_debug_hooks.h @@ -25,61 +25,71 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -namespace hashtable_debug_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + namespace hashtable_debug_internal + { -// If it is a map, call get<0>(). -using std::get; -template -auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { - return get<0>(pair); -} + // If it is a map, call get<0>(). + using std::get; + template + auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) + { + return get<0>(pair); + } -// If it is not a map, return the value directly. -template -const typename T::key_type& GetKey(const typename T::key_type& key, char) { - return key; -} + // If it is not a map, return the value directly. + template + const typename T::key_type& GetKey(const typename T::key_type& key, char) + { + return key; + } -// Containers should specialize this to provide debug information for that -// container. -template -struct HashtableDebugAccess { - // Returns the number of probes required to find `key` in `c`. The "number of - // probes" is a concept that can vary by container. Implementations should - // return 0 when `key` was found in the minimum number of operations and - // should increment the result for each non-trivial operation required to find - // `key`. - // - // The default implementation uses the bucket api from the standard and thus - // works for `std::unordered_*` containers. - static size_t GetNumProbes(const Container& c, - const typename Container::key_type& key) { - if (!c.bucket_count()) return {}; - size_t num_probes = 0; - size_t bucket = c.bucket(key); - for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { - if (it == e) return num_probes; - if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; - } - } + // Containers should specialize this to provide debug information for that + // container. + template + struct HashtableDebugAccess + { + // Returns the number of probes required to find `key` in `c`. The "number of + // probes" is a concept that can vary by container. Implementations should + // return 0 when `key` was found in the minimum number of operations and + // should increment the result for each non-trivial operation required to find + // `key`. + // + // The default implementation uses the bucket api from the standard and thus + // works for `std::unordered_*` containers. 
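A usage sketch of the probe-count helpers: as the comment just above notes, the default HashtableDebugAccess uses the standard bucket API, so std::unordered_* containers work without any specialization.

#include <cstddef>
#include <cstdio>
#include <string>
#include <unordered_set>
#include "absl/container/internal/hashtable_debug.h"

int main() {
  std::unordered_set<std::string> s = {"a", "b", "c", "d", "e"};
  // Histogram index = number of probes, value = number of elements that
  // needed that many probes; the values sum to s.size().
  auto histogram =
      absl::container_internal::GetHashtableDebugNumProbesHistogram(s);
  auto summary = absl::container_internal::GetHashtableDebugProbeSummary(s);
  for (std::size_t i = 0; i < histogram.size(); ++i) {
    std::printf("%zu probes: %zu elements\n", i, histogram[i]);
  }
  std::printf("mean probes: %f\n", summary.mean);
  return 0;
}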
+ static size_t GetNumProbes(const Container& c, const typename Container::key_type& key) + { + if (!c.bucket_count()) + return {}; + size_t num_probes = 0; + size_t bucket = c.bucket(key); + for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) + { + if (it == e) + return num_probes; + if (c.key_eq()(key, GetKey(*it, 0))) + return num_probes; + } + } - // Returns the number of bytes requested from the allocator by the container - // and not freed. - // - // static size_t AllocatedByteSize(const Container& c); + // Returns the number of bytes requested from the allocator by the container + // and not freed. + // + // static size_t AllocatedByteSize(const Container& c); - // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type - // `Container` and `c.size()` is equal to `num_elements`. - // - // static size_t LowerBoundAllocatedByteSize(size_t num_elements); -}; + // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type + // `Container` and `c.size()` is equal to `num_elements`. + // + // static size_t LowerBoundAllocatedByteSize(size_t num_elements); + }; -} // namespace hashtable_debug_internal -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace hashtable_debug_internal + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h index d4016d8..40c1507 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/hashtablez_sampler.h @@ -51,249 +51,303 @@ #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// Stores information about a sampled hashtable. All mutations to this *must* -// be made through `Record*` functions below. All reads from this *must* only -// occur in the callback to `HashtablezSampler::Iterate`. -struct HashtablezInfo : public profiling_internal::Sample { - // Constructs the object but does not fill in any fields. - HashtablezInfo(); - ~HashtablezInfo(); - HashtablezInfo(const HashtablezInfo&) = delete; - HashtablezInfo& operator=(const HashtablezInfo&) = delete; - - // Puts the object into a clean state, fills in the logically `const` members, - // blocking for any readers that are currently sampling the object. - void PrepareForSampling(int64_t stride, size_t inline_element_size_value) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); - - // These fields are mutated by the various Record* APIs and need to be - // thread-safe. - std::atomic capacity; - std::atomic size; - std::atomic num_erases; - std::atomic num_rehashes; - std::atomic max_probe_length; - std::atomic total_probe_length; - std::atomic hashes_bitwise_or; - std::atomic hashes_bitwise_and; - std::atomic hashes_bitwise_xor; - std::atomic max_reserve; - - // All of the fields below are set by `PrepareForSampling`, they must not be - // mutated in `Record*` functions. They are logically `const` in that sense. - // These are guarded by init_mu, but that is not externalized to clients, - // which can read them only during `SampleRecorder::Iterate` which will hold - // the lock. - static constexpr int kMaxStackDepth = 64; - absl::Time create_time; - int32_t depth; - void* stack[kMaxStackDepth]; - size_t inline_element_size; // How big is the slot? 
-}; - -inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // Stores information about a sampled hashtable. All mutations to this *must* + // be made through `Record*` functions below. All reads from this *must* only + // occur in the callback to `HashtablezSampler::Iterate`. + struct HashtablezInfo : public profiling_internal::Sample + { + // Constructs the object but does not fill in any fields. + HashtablezInfo(); + ~HashtablezInfo(); + HashtablezInfo(const HashtablezInfo&) = delete; + HashtablezInfo& operator=(const HashtablezInfo&) = delete; + + // Puts the object into a clean state, fills in the logically `const` members, + // blocking for any readers that are currently sampling the object. + void PrepareForSampling(int64_t stride, size_t inline_element_size_value) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); + + // These fields are mutated by the various Record* APIs and need to be + // thread-safe. + std::atomic capacity; + std::atomic size; + std::atomic num_erases; + std::atomic num_rehashes; + std::atomic max_probe_length; + std::atomic total_probe_length; + std::atomic hashes_bitwise_or; + std::atomic hashes_bitwise_and; + std::atomic hashes_bitwise_xor; + std::atomic max_reserve; + + // All of the fields below are set by `PrepareForSampling`, they must not be + // mutated in `Record*` functions. They are logically `const` in that sense. + // These are guarded by init_mu, but that is not externalized to clients, + // which can read them only during `SampleRecorder::Iterate` which will hold + // the lock. + static constexpr int kMaxStackDepth = 64; + absl::Time create_time; + int32_t depth; + void* stack[kMaxStackDepth]; + size_t inline_element_size; // How big is the slot? + }; + + inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) + { #ifdef ABSL_INTERNAL_HAVE_SSE2 - total_probe_length /= 16; + total_probe_length /= 16; #else - total_probe_length /= 8; + total_probe_length /= 8; #endif - info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); - info->num_erases.store(0, std::memory_order_relaxed); - // There is only one concurrent writer, so `load` then `store` is sufficient - // instead of using `fetch_add`. - info->num_rehashes.store( - 1 + info->num_rehashes.load(std::memory_order_relaxed), - std::memory_order_relaxed); -} - -inline void RecordReservationSlow(HashtablezInfo* info, - size_t target_capacity) { - info->max_reserve.store( - (std::max)(info->max_reserve.load(std::memory_order_relaxed), - target_capacity), - std::memory_order_relaxed); -} - -inline void RecordClearedReservationSlow(HashtablezInfo* info) { - info->max_reserve.store(0, std::memory_order_relaxed); -} - -inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, - size_t capacity) { - info->size.store(size, std::memory_order_relaxed); - info->capacity.store(capacity, std::memory_order_relaxed); - if (size == 0) { - // This is a clear, reset the total/num_erases too. - info->total_probe_length.store(0, std::memory_order_relaxed); - info->num_erases.store(0, std::memory_order_relaxed); - } -} - -void RecordInsertSlow(HashtablezInfo* info, size_t hash, - size_t distance_from_desired); - -inline void RecordEraseSlow(HashtablezInfo* info) { - info->size.fetch_sub(1, std::memory_order_relaxed); - // There is only one concurrent writer, so `load` then `store` is sufficient - // instead of using `fetch_add`. 
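The load-then-store idiom in these Record* functions assumes exactly one mutating thread. A tiny standalone sketch of the same pattern, with relaxed ordering because readers only need an approximate count:

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> num_erases{0};

// Called only from the single writer thread: a relaxed load followed by a
// relaxed store is enough and avoids a full atomic read-modify-write.
void RecordOneErase() {
  num_erases.store(1 + num_erases.load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
}

// Readers (e.g. a sampler's Iterate callback) may observe a slightly stale
// value, which is acceptable for profiling counters.
std::uint64_t CurrentErases() {
  return num_erases.load(std::memory_order_relaxed);
}

int main() {
  RecordOneErase();
  return CurrentErases() == 1 ? 0 : 1;
}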
- info->num_erases.store( - 1 + info->num_erases.load(std::memory_order_relaxed), - std::memory_order_relaxed); -} - -struct SamplingState { - int64_t next_sample; - // When we make a sampling decision, we record that distance so we can weight - // each sample. - int64_t sample_stride; -}; - -HashtablezInfo* SampleSlow(SamplingState& next_sample, - size_t inline_element_size); -void UnsampleSlow(HashtablezInfo* info); + info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_rehashes.store( + 1 + info->num_rehashes.load(std::memory_order_relaxed), + std::memory_order_relaxed + ); + } + + inline void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) + { + info->max_reserve.store( + (std::max)(info->max_reserve.load(std::memory_order_relaxed), target_capacity), + std::memory_order_relaxed + ); + } + + inline void RecordClearedReservationSlow(HashtablezInfo* info) + { + info->max_reserve.store(0, std::memory_order_relaxed); + } + + inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity) + { + info->size.store(size, std::memory_order_relaxed); + info->capacity.store(capacity, std::memory_order_relaxed); + if (size == 0) + { + // This is a clear, reset the total/num_erases too. + info->total_probe_length.store(0, std::memory_order_relaxed); + info->num_erases.store(0, std::memory_order_relaxed); + } + } + + void RecordInsertSlow(HashtablezInfo* info, size_t hash, size_t distance_from_desired); + + inline void RecordEraseSlow(HashtablezInfo* info) + { + info->size.fetch_sub(1, std::memory_order_relaxed); + // There is only one concurrent writer, so `load` then `store` is sufficient + // instead of using `fetch_add`. + info->num_erases.store( + 1 + info->num_erases.load(std::memory_order_relaxed), + std::memory_order_relaxed + ); + } + + struct SamplingState + { + int64_t next_sample; + // When we make a sampling decision, we record that distance so we can weight + // each sample. 
+ int64_t sample_stride; + }; + + HashtablezInfo* SampleSlow(SamplingState& next_sample, size_t inline_element_size); + void UnsampleSlow(HashtablezInfo* info); #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -class HashtablezInfoHandle { - public: - explicit HashtablezInfoHandle() : info_(nullptr) {} - explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {} - ~HashtablezInfoHandle() { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - UnsampleSlow(info_); - } - - HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; - HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; - - HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept - : info_(absl::exchange(o.info_, nullptr)) {} - HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept { - if (ABSL_PREDICT_FALSE(info_ != nullptr)) { - UnsampleSlow(info_); - } - info_ = absl::exchange(o.info_, nullptr); - return *this; - } - - inline void RecordStorageChanged(size_t size, size_t capacity) { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordStorageChangedSlow(info_, size, capacity); - } - - inline void RecordRehash(size_t total_probe_length) { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordRehashSlow(info_, total_probe_length); - } - - inline void RecordReservation(size_t target_capacity) { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordReservationSlow(info_, target_capacity); - } - - inline void RecordClearedReservation() { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordClearedReservationSlow(info_); - } - - inline void RecordInsert(size_t hash, size_t distance_from_desired) { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordInsertSlow(info_, hash, distance_from_desired); - } - - inline void RecordErase() { - if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; - RecordEraseSlow(info_); - } - - friend inline void swap(HashtablezInfoHandle& lhs, - HashtablezInfoHandle& rhs) { - std::swap(lhs.info_, rhs.info_); - } - - private: - friend class HashtablezInfoHandlePeer; - HashtablezInfo* info_; -}; + class HashtablezInfoHandle + { + public: + explicit HashtablezInfoHandle() : + info_(nullptr) + { + } + explicit HashtablezInfoHandle(HashtablezInfo* info) : + info_(info) + { + } + ~HashtablezInfoHandle() + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + UnsampleSlow(info_); + } + + HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; + HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; + + HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept + : + info_(absl::exchange(o.info_, nullptr)) + { + } + HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept + { + if (ABSL_PREDICT_FALSE(info_ != nullptr)) + { + UnsampleSlow(info_); + } + info_ = absl::exchange(o.info_, nullptr); + return *this; + } + + inline void RecordStorageChanged(size_t size, size_t capacity) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordStorageChangedSlow(info_, size, capacity); + } + + inline void RecordRehash(size_t total_probe_length) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordRehashSlow(info_, total_probe_length); + } + + inline void RecordReservation(size_t target_capacity) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordReservationSlow(info_, target_capacity); + } + + inline void RecordClearedReservation() + { + if 
(ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordClearedReservationSlow(info_); + } + + inline void RecordInsert(size_t hash, size_t distance_from_desired) + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordInsertSlow(info_, hash, distance_from_desired); + } + + inline void RecordErase() + { + if (ABSL_PREDICT_TRUE(info_ == nullptr)) + return; + RecordEraseSlow(info_); + } + + friend inline void swap(HashtablezInfoHandle& lhs, HashtablezInfoHandle& rhs) + { + std::swap(lhs.info_, rhs.info_); + } + + private: + friend class HashtablezInfoHandlePeer; + HashtablezInfo* info_; + }; #else -// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can -// be removed by the linker, in order to reduce the binary size. -class HashtablezInfoHandle { - public: - explicit HashtablezInfoHandle() = default; - explicit HashtablezInfoHandle(std::nullptr_t) {} - - inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} - inline void RecordRehash(size_t /*total_probe_length*/) {} - inline void RecordReservation(size_t /*target_capacity*/) {} - inline void RecordClearedReservation() {} - inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} - inline void RecordErase() {} - - friend inline void swap(HashtablezInfoHandle& /*lhs*/, - HashtablezInfoHandle& /*rhs*/) {} -}; + // Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can + // be removed by the linker, in order to reduce the binary size. + class HashtablezInfoHandle + { + public: + explicit HashtablezInfoHandle() = default; + explicit HashtablezInfoHandle(std::nullptr_t) + { + } + + inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) + { + } + inline void RecordRehash(size_t /*total_probe_length*/) + { + } + inline void RecordReservation(size_t /*target_capacity*/) + { + } + inline void RecordClearedReservation() + { + } + inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) + { + } + inline void RecordErase() + { + } + + friend inline void swap(HashtablezInfoHandle& /*lhs*/, HashtablezInfoHandle& /*rhs*/) + { + } + }; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; + extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) -// Returns an RAII sampling handle that manages registration and unregistation -// with the global sampler. -inline HashtablezInfoHandle Sample( - size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { + // Returns an RAII sampling handle that manages registration and unregistation + // with the global sampler. 
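A toy sketch of how a container could drive the handle returned by `Sample()` below; `ToyTable` is purely illustrative and uses only members declared in this header (assuming it is included as `absl/container/internal/hashtablez_sampler.h`):

    #include <cstddef>

    #include "absl/container/internal/hashtablez_sampler.h"

    // Illustrative only: forwards container events to a sampling handle. When the
    // table is not sampled, the handle holds nullptr and each Record* call is a no-op.
    class ToyTable {
     public:
      ToyTable()
          : sample_(absl::container_internal::Sample(/*inline_element_size=*/sizeof(int))) {}

      void Insert(size_t hash) {
        // distance_from_desired would normally come from the probe sequence.
        sample_.RecordInsert(hash, /*distance_from_desired=*/0);
      }

      void Erase() { sample_.RecordErase(); }

     private:
      absl::container_internal::HashtablezInfoHandle sample_;
    };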
+ inline HashtablezInfoHandle Sample( + size_t inline_element_size ABSL_ATTRIBUTE_UNUSED + ) + { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) - if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { - return HashtablezInfoHandle(nullptr); - } - return HashtablezInfoHandle( - SampleSlow(global_next_sample, inline_element_size)); + if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) + { + return HashtablezInfoHandle(nullptr); + } + return HashtablezInfoHandle( + SampleSlow(global_next_sample, inline_element_size) + ); #else - return HashtablezInfoHandle(nullptr); + return HashtablezInfoHandle(nullptr); #endif // !ABSL_PER_THREAD_TLS -} + } -using HashtablezSampler = - ::absl::profiling_internal::SampleRecorder; + using HashtablezSampler = + ::absl::profiling_internal::SampleRecorder; -// Returns a global Sampler. -HashtablezSampler& GlobalHashtablezSampler(); + // Returns a global Sampler. + HashtablezSampler& GlobalHashtablezSampler(); -using HashtablezConfigListener = void (*)(); -void SetHashtablezConfigListener(HashtablezConfigListener l); + using HashtablezConfigListener = void (*)(); + void SetHashtablezConfigListener(HashtablezConfigListener l); -// Enables or disables sampling for Swiss tables. -bool IsHashtablezEnabled(); -void SetHashtablezEnabled(bool enabled); -void SetHashtablezEnabledInternal(bool enabled); + // Enables or disables sampling for Swiss tables. + bool IsHashtablezEnabled(); + void SetHashtablezEnabled(bool enabled); + void SetHashtablezEnabledInternal(bool enabled); -// Sets the rate at which Swiss tables will be sampled. -int32_t GetHashtablezSampleParameter(); -void SetHashtablezSampleParameter(int32_t rate); -void SetHashtablezSampleParameterInternal(int32_t rate); + // Sets the rate at which Swiss tables will be sampled. + int32_t GetHashtablezSampleParameter(); + void SetHashtablezSampleParameter(int32_t rate); + void SetHashtablezSampleParameterInternal(int32_t rate); -// Sets a soft max for the number of samples that will be kept. -int32_t GetHashtablezMaxSamples(); -void SetHashtablezMaxSamples(int32_t max); -void SetHashtablezMaxSamplesInternal(int32_t max); + // Sets a soft max for the number of samples that will be kept. + int32_t GetHashtablezMaxSamples(); + void SetHashtablezMaxSamples(int32_t max); + void SetHashtablezMaxSamplesInternal(int32_t max); -// Configuration override. -// This allows process-wide sampling without depending on order of -// initialization of static storage duration objects. -// The definition of this constant is weak, which allows us to inject a -// different value for it at link time. -extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); + // Configuration override. + // This allows process-wide sampling without depending on order of + // initialization of static storage duration objects. + // The definition of this constant is weak, which allows us to inject a + // different value for it at link time. 
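A rough sketch of the link-time override described in the comment above, assuming a toolchain with weak-symbol support (the default definition is paraphrased, and the real symbol name is wrapped in `ABSL_INTERNAL_C_SYMBOL`):

    // The library provides a weak default that leaves forced sampling off, roughly:
    //   extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() { return false; }
    //
    // A profiling build can force process-wide sampling by linking in a strong
    // definition, which the linker prefers over the weak one:
    extern "C" bool AbslContainerInternalSampleEverything() { return true; }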
+ extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h index 54c92a0..a44b9ca 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/inlined_vector.h @@ -32,9 +32,11 @@ #include "absl/meta/type_traits.h" #include "absl/types/span.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace inlined_vector_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace inlined_vector_internal + { // GCC does not deal very well with the below code #if !defined(__clang__) && defined(__GNUC__) @@ -42,912 +44,1068 @@ namespace inlined_vector_internal { #pragma GCC diagnostic ignored "-Warray-bounds" #endif -template -using AllocatorTraits = std::allocator_traits; -template -using ValueType = typename AllocatorTraits::value_type; -template -using SizeType = typename AllocatorTraits::size_type; -template -using Pointer = typename AllocatorTraits::pointer; -template -using ConstPointer = typename AllocatorTraits::const_pointer; -template -using SizeType = typename AllocatorTraits::size_type; -template -using DifferenceType = typename AllocatorTraits::difference_type; -template -using Reference = ValueType&; -template -using ConstReference = const ValueType&; -template -using Iterator = Pointer; -template -using ConstIterator = ConstPointer; -template -using ReverseIterator = typename std::reverse_iterator>; -template -using ConstReverseIterator = typename std::reverse_iterator>; -template -using MoveIterator = typename std::move_iterator>; - -template -using IsAtLeastForwardIterator = std::is_convertible< - typename std::iterator_traits::iterator_category, - std::forward_iterator_tag>; - -template -using IsMemcpyOk = - absl::conjunction>>, - absl::is_trivially_copy_constructible>, - absl::is_trivially_copy_assignable>, - absl::is_trivially_destructible>>; - -template -struct TypeIdentity { - using type = T; -}; - -// Used for function arguments in template functions to prevent ADL by forcing -// callers to explicitly specify the template parameter. 
-template -using NoTypeDeduction = typename TypeIdentity::type; - -template >::value> -struct DestroyAdapter; - -template -struct DestroyAdapter { - static void DestroyElements(A& allocator, Pointer destroy_first, - SizeType destroy_size) { - for (SizeType i = destroy_size; i != 0;) { - --i; - AllocatorTraits::destroy(allocator, destroy_first + i); - } - } -}; - -template -struct DestroyAdapter { - static void DestroyElements(A& allocator, Pointer destroy_first, - SizeType destroy_size) { - static_cast(allocator); - static_cast(destroy_first); - static_cast(destroy_size); - } -}; - -template -struct Allocation { - Pointer data; - SizeType capacity; -}; - -template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> -struct MallocAdapter { - static Allocation Allocate(A& allocator, SizeType requested_capacity) { - return {AllocatorTraits::allocate(allocator, requested_capacity), - requested_capacity}; - } - - static void Deallocate(A& allocator, Pointer pointer, - SizeType capacity) { - AllocatorTraits::deallocate(allocator, pointer, capacity); - } -}; - -template -void ConstructElements(NoTypeDeduction& allocator, - Pointer construct_first, ValueAdapter& values, - SizeType construct_size) { - for (SizeType i = 0; i < construct_size; ++i) { - ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } - ABSL_INTERNAL_CATCH_ANY { - DestroyAdapter::DestroyElements(allocator, construct_first, i); - ABSL_INTERNAL_RETHROW; - } - } -} - -template -void AssignElements(Pointer assign_first, ValueAdapter& values, - SizeType assign_size) { - for (SizeType i = 0; i < assign_size; ++i) { - values.AssignNext(assign_first + i); - } -} - -template -struct StorageView { - Pointer data; - SizeType size; - SizeType capacity; -}; - -template -class IteratorValueAdapter { - public: - explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} - - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at, *it_); - ++it_; - } - - void AssignNext(Pointer assign_at) { - *assign_at = *it_; - ++it_; - } - - private: - Iterator it_; -}; - -template -class CopyValueAdapter { - public: - explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} - - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at, *ptr_); - } - - void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } - - private: - ConstPointer ptr_; -}; - -template -class DefaultValueAdapter { - public: - explicit DefaultValueAdapter() {} - - void ConstructNext(A& allocator, Pointer construct_at) { - AllocatorTraits::construct(allocator, construct_at); - } - - void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } -}; - -template -class AllocationTransaction { - public: - explicit AllocationTransaction(A& allocator) - : allocator_data_(allocator, nullptr), capacity_(0) {} - - ~AllocationTransaction() { - if (DidAllocate()) { - MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); - } - } - - AllocationTransaction(const AllocationTransaction&) = delete; - void operator=(const AllocationTransaction&) = delete; - - A& GetAllocator() { return allocator_data_.template get<0>(); } - Pointer& GetData() { return allocator_data_.template get<1>(); } - SizeType& GetCapacity() { return capacity_; } - - bool DidAllocate() { return GetData() != nullptr; } - - Pointer Allocate(SizeType requested_capacity) { - Allocation result = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - GetData() = result.data; - GetCapacity() = 
result.capacity; - return result.data; - } - - ABSL_MUST_USE_RESULT Allocation Release() && { - Allocation result = {GetData(), GetCapacity()}; - Reset(); - return result; - } - - private: - void Reset() { - GetData() = nullptr; - GetCapacity() = 0; - } - - container_internal::CompressedTuple> allocator_data_; - SizeType capacity_; -}; - -template -class ConstructionTransaction { - public: - explicit ConstructionTransaction(A& allocator) - : allocator_data_(allocator, nullptr), size_(0) {} - - ~ConstructionTransaction() { - if (DidConstruct()) { - DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); - } - } - - ConstructionTransaction(const ConstructionTransaction&) = delete; - void operator=(const ConstructionTransaction&) = delete; - - A& GetAllocator() { return allocator_data_.template get<0>(); } - Pointer& GetData() { return allocator_data_.template get<1>(); } - SizeType& GetSize() { return size_; } - - bool DidConstruct() { return GetData() != nullptr; } - template - void Construct(Pointer data, ValueAdapter& values, SizeType size) { - ConstructElements(GetAllocator(), data, values, size); - GetData() = data; - GetSize() = size; - } - void Commit() && { - GetData() = nullptr; - GetSize() = 0; - } - - private: - container_internal::CompressedTuple> allocator_data_; - SizeType size_; -}; - -template -class Storage { - public: - static SizeType NextCapacity(SizeType current_capacity) { - return current_capacity * 2; - } - - static SizeType ComputeCapacity(SizeType current_capacity, - SizeType requested_capacity) { - return (std::max)(NextCapacity(current_capacity), requested_capacity); - } - - // --------------------------------------------------------------------------- - // Storage Constructors and Destructor - // --------------------------------------------------------------------------- - - Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} - - explicit Storage(const A& allocator) - : metadata_(allocator, /* size and is_allocated */ 0u) {} - - ~Storage() { - if (GetSizeAndIsAllocated() == 0) { - // Empty and not allocated; nothing to do. - } else if (IsMemcpyOk::value) { - // No destructors need to be run; just deallocate if necessary. - DeallocateIfAllocated(); - } else { - DestroyContents(); - } - } - - // --------------------------------------------------------------------------- - // Storage Member Accessors - // --------------------------------------------------------------------------- - - SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } - - const SizeType& GetSizeAndIsAllocated() const { - return metadata_.template get<1>(); - } - - SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } - - bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } - - Pointer GetAllocatedData() { return data_.allocated.allocated_data; } - - ConstPointer GetAllocatedData() const { - return data_.allocated.allocated_data; - } - - Pointer GetInlinedData() { - return reinterpret_cast>( - std::addressof(data_.inlined.inlined_data[0])); - } - - ConstPointer GetInlinedData() const { - return reinterpret_cast>( - std::addressof(data_.inlined.inlined_data[0])); - } - - SizeType GetAllocatedCapacity() const { - return data_.allocated.allocated_capacity; - } - - SizeType GetInlinedCapacity() const { return static_cast>(N); } - - StorageView MakeStorageView() { - return GetIsAllocated() ? 
StorageView{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()} - : StorageView{GetInlinedData(), GetSize(), - GetInlinedCapacity()}; - } - - A& GetAllocator() { return metadata_.template get<0>(); } - - const A& GetAllocator() const { return metadata_.template get<0>(); } - - // --------------------------------------------------------------------------- - // Storage Member Mutators - // --------------------------------------------------------------------------- - - ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); - - template - void Initialize(ValueAdapter values, SizeType new_size); - - template - void Assign(ValueAdapter values, SizeType new_size); - - template - void Resize(ValueAdapter values, SizeType new_size); - - template - Iterator Insert(ConstIterator pos, ValueAdapter values, - SizeType insert_count); - - template - Reference EmplaceBack(Args&&... args); - - Iterator Erase(ConstIterator from, ConstIterator to); - - void Reserve(SizeType requested_capacity); - - void ShrinkToFit(); - - void Swap(Storage* other_storage_ptr); - - void SetIsAllocated() { - GetSizeAndIsAllocated() |= static_cast>(1); - } - - void UnsetIsAllocated() { - GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); - } - - void SetSize(SizeType size) { - GetSizeAndIsAllocated() = - (size << 1) | static_cast>(GetIsAllocated()); - } - - void SetAllocatedSize(SizeType size) { - GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); - } - - void SetInlinedSize(SizeType size) { - GetSizeAndIsAllocated() = size << static_cast>(1); - } - - void AddSize(SizeType count) { - GetSizeAndIsAllocated() += count << static_cast>(1); - } - - void SubtractSize(SizeType count) { - ABSL_HARDENING_ASSERT(count <= GetSize()); - - GetSizeAndIsAllocated() -= count << static_cast>(1); - } - - void SetAllocation(Allocation allocation) { - data_.allocated.allocated_data = allocation.data; - data_.allocated.allocated_capacity = allocation.capacity; - } - - void MemcpyFrom(const Storage& other_storage) { - ABSL_HARDENING_ASSERT(IsMemcpyOk::value || - other_storage.GetIsAllocated()); - - GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); - data_ = other_storage.data_; - } - - void DeallocateIfAllocated() { - if (GetIsAllocated()) { - MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), - GetAllocatedCapacity()); - } - } - - private: - ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); - - using Metadata = container_internal::CompressedTuple>; - - struct Allocated { - Pointer allocated_data; - SizeType allocated_capacity; - }; - - struct Inlined { - alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; - }; - - union Data { - Allocated allocated; - Inlined inlined; - }; - - template - ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); - - Metadata metadata_; - Data data_; -}; - -template -void Storage::DestroyContents() { - Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); - DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); - DeallocateIfAllocated(); -} - -template -void Storage::InitFrom(const Storage& other) { - const SizeType n = other.GetSize(); - ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. - ConstPointer src; - Pointer dst; - if (!other.GetIsAllocated()) { - dst = GetInlinedData(); - src = other.GetInlinedData(); - } else { - // Because this is only called from the `InlinedVector` constructors, it's - // safe to take on the allocation with size `0`. 
If `ConstructElements(...)` - // throws, deallocation will be automatically handled by `~Storage()`. - SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); - Allocation allocation = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - SetAllocation(allocation); - dst = allocation.data; - src = other.GetAllocatedData(); - } - if (IsMemcpyOk::value) { - std::memcpy(reinterpret_cast(dst), - reinterpret_cast(src), n * sizeof(ValueType)); - } else { - auto values = IteratorValueAdapter>(src); - ConstructElements(GetAllocator(), dst, values, n); - } - GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); -} - -template -template -auto Storage::Initialize(ValueAdapter values, SizeType new_size) - -> void { - // Only callable from constructors! - ABSL_HARDENING_ASSERT(!GetIsAllocated()); - ABSL_HARDENING_ASSERT(GetSize() == 0); - - Pointer construct_data; - if (new_size > GetInlinedCapacity()) { - // Because this is only called from the `InlinedVector` constructors, it's - // safe to take on the allocation with size `0`. If `ConstructElements(...)` - // throws, deallocation will be automatically handled by `~Storage()`. - SizeType requested_capacity = - ComputeCapacity(GetInlinedCapacity(), new_size); - Allocation allocation = - MallocAdapter::Allocate(GetAllocator(), requested_capacity); - construct_data = allocation.data; - SetAllocation(allocation); - SetIsAllocated(); - } else { - construct_data = GetInlinedData(); - } - - ConstructElements(GetAllocator(), construct_data, values, new_size); - - // Since the initial size was guaranteed to be `0` and the allocated bit is - // already correct for either case, *adding* `new_size` gives us the correct - // result faster than setting it directly. - AddSize(new_size); -} - -template -template -auto Storage::Assign(ValueAdapter values, SizeType new_size) - -> void { - StorageView storage_view = MakeStorageView(); - - AllocationTransaction allocation_tx(GetAllocator()); - - absl::Span> assign_loop; - absl::Span> construct_loop; - absl::Span> destroy_loop; - - if (new_size > storage_view.capacity) { - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; - destroy_loop = {storage_view.data, storage_view.size}; - } else if (new_size > storage_view.size) { - assign_loop = {storage_view.data, storage_view.size}; - construct_loop = {storage_view.data + storage_view.size, - new_size - storage_view.size}; - } else { - assign_loop = {storage_view.data, new_size}; - destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; - } - - AssignElements(assign_loop.data(), values, assign_loop.size()); - - ConstructElements(GetAllocator(), construct_loop.data(), values, - construct_loop.size()); - - DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), - destroy_loop.size()); - - if (allocation_tx.DidAllocate()) { - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - SetIsAllocated(); - } - - SetSize(new_size); -} - -template -template -auto Storage::Resize(ValueAdapter values, SizeType new_size) - -> void { - StorageView storage_view = MakeStorageView(); - Pointer const base = storage_view.data; - const SizeType size = storage_view.size; - A& alloc = GetAllocator(); - if (new_size <= size) { - // Destroy extra old elements. 
- DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); - } else if (new_size <= storage_view.capacity) { - // Construct new elements in place. - ConstructElements(alloc, base + size, values, new_size - size); - } else { - // Steps: - // a. Allocate new backing store. - // b. Construct new elements in new backing store. - // c. Move existing elements from old backing store to new backing store. - // d. Destroy all elements in old backing store. - // Use transactional wrappers for the first two steps so we can roll - // back if necessary due to exceptions. - AllocationTransaction allocation_tx(alloc); - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(requested_capacity); - - ConstructionTransaction construction_tx(alloc); - construction_tx.Construct(new_data + size, values, new_size - size); - - IteratorValueAdapter> move_values( - (MoveIterator(base))); - ConstructElements(alloc, new_data, move_values, size); - - DestroyAdapter::DestroyElements(alloc, base, size); - std::move(construction_tx).Commit(); - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - SetIsAllocated(); - } - SetSize(new_size); -} - -template -template -auto Storage::Insert(ConstIterator pos, ValueAdapter values, - SizeType insert_count) -> Iterator { - StorageView storage_view = MakeStorageView(); - - SizeType insert_index = - std::distance(ConstIterator(storage_view.data), pos); - SizeType insert_end_index = insert_index + insert_count; - SizeType new_size = storage_view.size + insert_count; - - if (new_size > storage_view.capacity) { - AllocationTransaction allocation_tx(GetAllocator()); - ConstructionTransaction construction_tx(GetAllocator()); - ConstructionTransaction move_construction_tx(GetAllocator()); - - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); - - SizeType requested_capacity = - ComputeCapacity(storage_view.capacity, new_size); - Pointer new_data = allocation_tx.Allocate(requested_capacity); - - construction_tx.Construct(new_data + insert_index, values, insert_count); - - move_construction_tx.Construct(new_data, move_values, insert_index); - - ConstructElements(GetAllocator(), new_data + insert_end_index, - move_values, storage_view.size - insert_index); - - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); - - std::move(construction_tx).Commit(); - std::move(move_construction_tx).Commit(); - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - - SetAllocatedSize(new_size); - return Iterator(new_data + insert_index); - } else { - SizeType move_construction_destination_index = - (std::max)(insert_end_index, storage_view.size); - - ConstructionTransaction move_construction_tx(GetAllocator()); - - IteratorValueAdapter> move_construction_values( - MoveIterator(storage_view.data + - (move_construction_destination_index - insert_count))); - absl::Span> move_construction = { - storage_view.data + move_construction_destination_index, - new_size - move_construction_destination_index}; - - Pointer move_assignment_values = storage_view.data + insert_index; - absl::Span> move_assignment = { - storage_view.data + insert_end_index, - move_construction_destination_index - insert_end_index}; - - absl::Span> insert_assignment = {move_assignment_values, - move_construction.size()}; - - absl::Span> insert_construction = { - insert_assignment.data() + insert_assignment.size(), - insert_count - 
insert_assignment.size()}; - - move_construction_tx.Construct(move_construction.data(), - move_construction_values, - move_construction.size()); - - for (Pointer - destination = move_assignment.data() + move_assignment.size(), - last_destination = move_assignment.data(), - source = move_assignment_values + move_assignment.size(); - ;) { - --destination; - --source; - if (destination < last_destination) break; - *destination = std::move(*source); - } - - AssignElements(insert_assignment.data(), values, - insert_assignment.size()); - - ConstructElements(GetAllocator(), insert_construction.data(), values, - insert_construction.size()); - - std::move(move_construction_tx).Commit(); - - AddSize(insert_count); - return Iterator(storage_view.data + insert_index); - } -} - -template -template -auto Storage::EmplaceBack(Args&&... args) -> Reference { - StorageView storage_view = MakeStorageView(); - const SizeType n = storage_view.size; - if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { - // Fast path; new element fits. - Pointer last_ptr = storage_view.data + n; - AllocatorTraits::construct(GetAllocator(), last_ptr, - std::forward(args)...); - AddSize(1); - return *last_ptr; - } - // TODO(b/173712035): Annotate with musttail attribute to prevent regression. - return EmplaceBackSlow(std::forward(args)...); -} - -template -template -auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { - StorageView storage_view = MakeStorageView(); - AllocationTransaction allocation_tx(GetAllocator()); - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); - SizeType requested_capacity = NextCapacity(storage_view.capacity); - Pointer construct_data = allocation_tx.Allocate(requested_capacity); - Pointer last_ptr = construct_data + storage_view.size; - - // Construct new element. - AllocatorTraits::construct(GetAllocator(), last_ptr, - std::forward(args)...); - // Move elements from old backing store to new backing store. - ABSL_INTERNAL_TRY { - ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, - storage_view.size); - } - ABSL_INTERNAL_CATCH_ANY { - AllocatorTraits::destroy(GetAllocator(), last_ptr); - ABSL_INTERNAL_RETHROW; - } - // Destroy elements in old backing store. 
- DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); - - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - SetIsAllocated(); - AddSize(1); - return *last_ptr; -} - -template -auto Storage::Erase(ConstIterator from, ConstIterator to) - -> Iterator { - StorageView storage_view = MakeStorageView(); - - SizeType erase_size = std::distance(from, to); - SizeType erase_index = - std::distance(ConstIterator(storage_view.data), from); - SizeType erase_end_index = erase_index + erase_size; - - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data + erase_end_index)); - - AssignElements(storage_view.data + erase_index, move_values, - storage_view.size - erase_end_index); - - DestroyAdapter::DestroyElements( - GetAllocator(), storage_view.data + (storage_view.size - erase_size), - erase_size); - - SubtractSize(erase_size); - return Iterator(storage_view.data + erase_index); -} - -template -auto Storage::Reserve(SizeType requested_capacity) -> void { - StorageView storage_view = MakeStorageView(); - - if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; - - AllocationTransaction allocation_tx(GetAllocator()); - - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); - - SizeType new_requested_capacity = - ComputeCapacity(storage_view.capacity, requested_capacity); - Pointer new_data = allocation_tx.Allocate(new_requested_capacity); - - ConstructElements(GetAllocator(), new_data, move_values, - storage_view.size); - - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); - - DeallocateIfAllocated(); - SetAllocation(std::move(allocation_tx).Release()); - SetIsAllocated(); -} - -template -auto Storage::ShrinkToFit() -> void { - // May only be called on allocated instances! - ABSL_HARDENING_ASSERT(GetIsAllocated()); - - StorageView storage_view{GetAllocatedData(), GetSize(), - GetAllocatedCapacity()}; - - if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; - - AllocationTransaction allocation_tx(GetAllocator()); - - IteratorValueAdapter> move_values( - MoveIterator(storage_view.data)); - - Pointer construct_data; - if (storage_view.size > GetInlinedCapacity()) { - SizeType requested_capacity = storage_view.size; - construct_data = allocation_tx.Allocate(requested_capacity); - if (allocation_tx.GetCapacity() >= storage_view.capacity) { - // Already using the smallest available heap allocation. 
- return; - } - } else { - construct_data = GetInlinedData(); - } - - ABSL_INTERNAL_TRY { - ConstructElements(GetAllocator(), construct_data, move_values, - storage_view.size); - } - ABSL_INTERNAL_CATCH_ANY { - SetAllocation({storage_view.data, storage_view.capacity}); - ABSL_INTERNAL_RETHROW; - } - - DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, - storage_view.size); - - MallocAdapter::Deallocate(GetAllocator(), storage_view.data, - storage_view.capacity); - - if (allocation_tx.DidAllocate()) { - SetAllocation(std::move(allocation_tx).Release()); - } else { - UnsetIsAllocated(); - } -} - -template -auto Storage::Swap(Storage* other_storage_ptr) -> void { - using std::swap; - ABSL_HARDENING_ASSERT(this != other_storage_ptr); - - if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { - swap(data_.allocated, other_storage_ptr->data_.allocated); - } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { - Storage* small_ptr = this; - Storage* large_ptr = other_storage_ptr; - if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); - - for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { - swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); - } - - IteratorValueAdapter> move_values( - MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); - - ConstructElements(large_ptr->GetAllocator(), - small_ptr->GetInlinedData() + small_ptr->GetSize(), - move_values, - large_ptr->GetSize() - small_ptr->GetSize()); - - DestroyAdapter::DestroyElements( - large_ptr->GetAllocator(), - large_ptr->GetInlinedData() + small_ptr->GetSize(), - large_ptr->GetSize() - small_ptr->GetSize()); - } else { - Storage* allocated_ptr = this; - Storage* inlined_ptr = other_storage_ptr; - if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); - - StorageView allocated_storage_view{ - allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), - allocated_ptr->GetAllocatedCapacity()}; - - IteratorValueAdapter> move_values( - MoveIterator(inlined_ptr->GetInlinedData())); - - ABSL_INTERNAL_TRY { - ConstructElements(inlined_ptr->GetAllocator(), - allocated_ptr->GetInlinedData(), move_values, - inlined_ptr->GetSize()); - } - ABSL_INTERNAL_CATCH_ANY { - allocated_ptr->SetAllocation(Allocation{ - allocated_storage_view.data, allocated_storage_view.capacity}); - ABSL_INTERNAL_RETHROW; - } - - DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), - inlined_ptr->GetInlinedData(), - inlined_ptr->GetSize()); - - inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, - allocated_storage_view.capacity}); - } - - swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); - swap(GetAllocator(), other_storage_ptr->GetAllocator()); -} + template + using AllocatorTraits = std::allocator_traits; + template + using ValueType = typename AllocatorTraits::value_type; + template + using SizeType = typename AllocatorTraits::size_type; + template + using Pointer = typename AllocatorTraits::pointer; + template + using ConstPointer = typename AllocatorTraits::const_pointer; + template + using SizeType = typename AllocatorTraits::size_type; + template + using DifferenceType = typename AllocatorTraits::difference_type; + template + using Reference = ValueType&; + template + using ConstReference = const ValueType&; + template + using Iterator = Pointer; + template + using ConstIterator = ConstPointer; + template + using ReverseIterator = typename std::reverse_iterator>; + template + using ConstReverseIterator = 
typename std::reverse_iterator>; + template + using MoveIterator = typename std::move_iterator>; + + template + using IsAtLeastForwardIterator = std::is_convertible< + typename std::iterator_traits::iterator_category, + std::forward_iterator_tag>; + + template + using IsMemcpyOk = + absl::conjunction>>, absl::is_trivially_copy_constructible>, absl::is_trivially_copy_assignable>, absl::is_trivially_destructible>>; + + template + struct TypeIdentity + { + using type = T; + }; + + // Used for function arguments in template functions to prevent ADL by forcing + // callers to explicitly specify the template parameter. + template + using NoTypeDeduction = typename TypeIdentity::type; + + template>::value> + struct DestroyAdapter; + + template + struct DestroyAdapter + { + static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) + { + for (SizeType i = destroy_size; i != 0;) + { + --i; + AllocatorTraits::destroy(allocator, destroy_first + i); + } + } + }; + + template + struct DestroyAdapter + { + static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) + { + static_cast(allocator); + static_cast(destroy_first); + static_cast(destroy_size); + } + }; + + template + struct Allocation + { + Pointer data; + SizeType capacity; + }; + + template) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> + struct MallocAdapter + { + static Allocation Allocate(A& allocator, SizeType requested_capacity) + { + return {AllocatorTraits::allocate(allocator, requested_capacity), requested_capacity}; + } + + static void Deallocate(A& allocator, Pointer pointer, SizeType capacity) + { + AllocatorTraits::deallocate(allocator, pointer, capacity); + } + }; + + template + void ConstructElements(NoTypeDeduction& allocator, Pointer construct_first, ValueAdapter& values, SizeType construct_size) + { + for (SizeType i = 0; i < construct_size; ++i) + { + ABSL_INTERNAL_TRY + { + values.ConstructNext(allocator, construct_first + i); + } + ABSL_INTERNAL_CATCH_ANY + { + DestroyAdapter::DestroyElements(allocator, construct_first, i); + ABSL_INTERNAL_RETHROW; + } + } + } + + template + void AssignElements(Pointer assign_first, ValueAdapter& values, SizeType assign_size) + { + for (SizeType i = 0; i < assign_size; ++i) + { + values.AssignNext(assign_first + i); + } + } + + template + struct StorageView + { + Pointer data; + SizeType size; + SizeType capacity; + }; + + template + class IteratorValueAdapter + { + public: + explicit IteratorValueAdapter(const Iterator& it) : + it_(it) + { + } + + void ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at, *it_); + ++it_; + } + + void AssignNext(Pointer assign_at) + { + *assign_at = *it_; + ++it_; + } + + private: + Iterator it_; + }; + + template + class CopyValueAdapter + { + public: + explicit CopyValueAdapter(ConstPointer p) : + ptr_(p) + { + } + + void ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at, *ptr_); + } + + void AssignNext(Pointer assign_at) + { + *assign_at = *ptr_; + } + + private: + ConstPointer ptr_; + }; + + template + class DefaultValueAdapter + { + public: + explicit DefaultValueAdapter() + { + } + + void ConstructNext(A& allocator, Pointer construct_at) + { + AllocatorTraits::construct(allocator, construct_at); + } + + void AssignNext(Pointer assign_at) + { + *assign_at = ValueType(); + } + }; + + template + class AllocationTransaction + { + public: + explicit AllocationTransaction(A& allocator) : + 
allocator_data_(allocator, nullptr), + capacity_(0) + { + } + + ~AllocationTransaction() + { + if (DidAllocate()) + { + MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); + } + } + + AllocationTransaction(const AllocationTransaction&) = delete; + void operator=(const AllocationTransaction&) = delete; + + A& GetAllocator() + { + return allocator_data_.template get<0>(); + } + Pointer& GetData() + { + return allocator_data_.template get<1>(); + } + SizeType& GetCapacity() + { + return capacity_; + } + + bool DidAllocate() + { + return GetData() != nullptr; + } + + Pointer Allocate(SizeType requested_capacity) + { + Allocation result = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + GetData() = result.data; + GetCapacity() = result.capacity; + return result.data; + } + + ABSL_MUST_USE_RESULT Allocation Release() && + { + Allocation result = {GetData(), GetCapacity()}; + Reset(); + return result; + } + + private: + void Reset() + { + GetData() = nullptr; + GetCapacity() = 0; + } + + container_internal::CompressedTuple> allocator_data_; + SizeType capacity_; + }; + + template + class ConstructionTransaction + { + public: + explicit ConstructionTransaction(A& allocator) : + allocator_data_(allocator, nullptr), + size_(0) + { + } + + ~ConstructionTransaction() + { + if (DidConstruct()) + { + DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); + } + } + + ConstructionTransaction(const ConstructionTransaction&) = delete; + void operator=(const ConstructionTransaction&) = delete; + + A& GetAllocator() + { + return allocator_data_.template get<0>(); + } + Pointer& GetData() + { + return allocator_data_.template get<1>(); + } + SizeType& GetSize() + { + return size_; + } + + bool DidConstruct() + { + return GetData() != nullptr; + } + template + void Construct(Pointer data, ValueAdapter& values, SizeType size) + { + ConstructElements(GetAllocator(), data, values, size); + GetData() = data; + GetSize() = size; + } + void Commit() && + { + GetData() = nullptr; + GetSize() = 0; + } + + private: + container_internal::CompressedTuple> allocator_data_; + SizeType size_; + }; + + template + class Storage + { + public: + static SizeType NextCapacity(SizeType current_capacity) + { + return current_capacity * 2; + } + + static SizeType ComputeCapacity(SizeType current_capacity, SizeType requested_capacity) + { + return (std::max)(NextCapacity(current_capacity), requested_capacity); + } + + // --------------------------------------------------------------------------- + // Storage Constructors and Destructor + // --------------------------------------------------------------------------- + + Storage() : + metadata_(A(), /* size and is_allocated */ 0u) + { + } + + explicit Storage(const A& allocator) : + metadata_(allocator, /* size and is_allocated */ 0u) + { + } + + ~Storage() + { + if (GetSizeAndIsAllocated() == 0) + { + // Empty and not allocated; nothing to do. + } + else if (IsMemcpyOk::value) + { + // No destructors need to be run; just deallocate if necessary. 
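The `IsMemcpyOk` condition above reduces to the standard triviality traits; a simplified stand-in (not the alias defined in this header) shows which element types take the memcpy and skip-destructor paths:

    #include <string>

    #include "absl/meta/type_traits.h"

    // Simplified stand-in: trivially copyable and trivially destructible element
    // types can be byte-copied and need no per-element destruction.
    template <typename T>
    using MemcpyOkSketch = absl::conjunction<
        absl::is_trivially_copy_constructible<T>,
        absl::is_trivially_copy_assignable<T>,
        absl::is_trivially_destructible<T>>;

    static_assert(MemcpyOkSketch<int>::value,
                  "trivial element types can be copied with std::memcpy");
    static_assert(!MemcpyOkSketch<std::string>::value,
                  "non-trivial element types are constructed and destroyed one by one");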
+ DeallocateIfAllocated(); + } + else + { + DestroyContents(); + } + } + + // --------------------------------------------------------------------------- + // Storage Member Accessors + // --------------------------------------------------------------------------- + + SizeType& GetSizeAndIsAllocated() + { + return metadata_.template get<1>(); + } + + const SizeType& GetSizeAndIsAllocated() const + { + return metadata_.template get<1>(); + } + + SizeType GetSize() const + { + return GetSizeAndIsAllocated() >> 1; + } + + bool GetIsAllocated() const + { + return GetSizeAndIsAllocated() & 1; + } + + Pointer GetAllocatedData() + { + return data_.allocated.allocated_data; + } + + ConstPointer GetAllocatedData() const + { + return data_.allocated.allocated_data; + } + + Pointer GetInlinedData() + { + return reinterpret_cast>( + std::addressof(data_.inlined.inlined_data[0]) + ); + } + + ConstPointer GetInlinedData() const + { + return reinterpret_cast>( + std::addressof(data_.inlined.inlined_data[0]) + ); + } + + SizeType GetAllocatedCapacity() const + { + return data_.allocated.allocated_capacity; + } + + SizeType GetInlinedCapacity() const + { + return static_cast>(N); + } + + StorageView MakeStorageView() + { + return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(), GetAllocatedCapacity()} : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; + } + + A& GetAllocator() + { + return metadata_.template get<0>(); + } + + const A& GetAllocator() const + { + return metadata_.template get<0>(); + } + + // --------------------------------------------------------------------------- + // Storage Member Mutators + // --------------------------------------------------------------------------- + + ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); + + template + void Initialize(ValueAdapter values, SizeType new_size); + + template + void Assign(ValueAdapter values, SizeType new_size); + + template + void Resize(ValueAdapter values, SizeType new_size); + + template + Iterator Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count); + + template + Reference EmplaceBack(Args&&... 
args); + + Iterator Erase(ConstIterator from, ConstIterator to); + + void Reserve(SizeType requested_capacity); + + void ShrinkToFit(); + + void Swap(Storage* other_storage_ptr); + + void SetIsAllocated() + { + GetSizeAndIsAllocated() |= static_cast>(1); + } + + void UnsetIsAllocated() + { + GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); + } + + void SetSize(SizeType size) + { + GetSizeAndIsAllocated() = + (size << 1) | static_cast>(GetIsAllocated()); + } + + void SetAllocatedSize(SizeType size) + { + GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); + } + + void SetInlinedSize(SizeType size) + { + GetSizeAndIsAllocated() = size << static_cast>(1); + } + + void AddSize(SizeType count) + { + GetSizeAndIsAllocated() += count << static_cast>(1); + } + + void SubtractSize(SizeType count) + { + ABSL_HARDENING_ASSERT(count <= GetSize()); + + GetSizeAndIsAllocated() -= count << static_cast>(1); + } + + void SetAllocation(Allocation allocation) + { + data_.allocated.allocated_data = allocation.data; + data_.allocated.allocated_capacity = allocation.capacity; + } + + void MemcpyFrom(const Storage& other_storage) + { + ABSL_HARDENING_ASSERT(IsMemcpyOk::value || other_storage.GetIsAllocated()); + + GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); + data_ = other_storage.data_; + } + + void DeallocateIfAllocated() + { + if (GetIsAllocated()) + { + MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), GetAllocatedCapacity()); + } + } + + private: + ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); + + using Metadata = container_internal::CompressedTuple>; + + struct Allocated + { + Pointer allocated_data; + SizeType allocated_capacity; + }; + + struct Inlined + { + alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; + }; + + union Data + { + Allocated allocated; + Inlined inlined; + }; + + template + ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); + + Metadata metadata_; + Data data_; + }; + + template + void Storage::DestroyContents() + { + Pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData(); + DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); + DeallocateIfAllocated(); + } + + template + void Storage::InitFrom(const Storage& other) + { + const SizeType n = other.GetSize(); + ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. + ConstPointer src; + Pointer dst; + if (!other.GetIsAllocated()) + { + dst = GetInlinedData(); + src = other.GetInlinedData(); + } + else + { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + SetAllocation(allocation); + dst = allocation.data; + src = other.GetAllocatedData(); + } + if (IsMemcpyOk::value) + { + std::memcpy(reinterpret_cast(dst), reinterpret_cast(src), n * sizeof(ValueType)); + } + else + { + auto values = IteratorValueAdapter>(src); + ConstructElements(GetAllocator(), dst, values, n); + } + GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); + } + + template + template + auto Storage::Initialize(ValueAdapter values, SizeType new_size) + -> void + { + // Only callable from constructors! 
+ ABSL_HARDENING_ASSERT(!GetIsAllocated()); + ABSL_HARDENING_ASSERT(GetSize() == 0); + + Pointer construct_data; + if (new_size > GetInlinedCapacity()) + { + // Because this is only called from the `InlinedVector` constructors, it's + // safe to take on the allocation with size `0`. If `ConstructElements(...)` + // throws, deallocation will be automatically handled by `~Storage()`. + SizeType requested_capacity = + ComputeCapacity(GetInlinedCapacity(), new_size); + Allocation allocation = + MallocAdapter::Allocate(GetAllocator(), requested_capacity); + construct_data = allocation.data; + SetAllocation(allocation); + SetIsAllocated(); + } + else + { + construct_data = GetInlinedData(); + } + + ConstructElements(GetAllocator(), construct_data, values, new_size); + + // Since the initial size was guaranteed to be `0` and the allocated bit is + // already correct for either case, *adding* `new_size` gives us the correct + // result faster than setting it directly. + AddSize(new_size); + } + + template + template + auto Storage::Assign(ValueAdapter values, SizeType new_size) + -> void + { + StorageView storage_view = MakeStorageView(); + + AllocationTransaction allocation_tx(GetAllocator()); + + absl::Span> assign_loop; + absl::Span> construct_loop; + absl::Span> destroy_loop; + + if (new_size > storage_view.capacity) + { + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; + destroy_loop = {storage_view.data, storage_view.size}; + } + else if (new_size > storage_view.size) + { + assign_loop = {storage_view.data, storage_view.size}; + construct_loop = {storage_view.data + storage_view.size, new_size - storage_view.size}; + } + else + { + assign_loop = {storage_view.data, new_size}; + destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; + } + + AssignElements(assign_loop.data(), values, assign_loop.size()); + + ConstructElements(GetAllocator(), construct_loop.data(), values, construct_loop.size()); + + DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), destroy_loop.size()); + + if (allocation_tx.DidAllocate()) + { + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + SetSize(new_size); + } + + template + template + auto Storage::Resize(ValueAdapter values, SizeType new_size) + -> void + { + StorageView storage_view = MakeStorageView(); + Pointer const base = storage_view.data; + const SizeType size = storage_view.size; + A& alloc = GetAllocator(); + if (new_size <= size) + { + // Destroy extra old elements. + DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); + } + else if (new_size <= storage_view.capacity) + { + // Construct new elements in place. + ConstructElements(alloc, base + size, values, new_size - size); + } + else + { + // Steps: + // a. Allocate new backing store. + // b. Construct new elements in new backing store. + // c. Move existing elements from old backing store to new backing store. + // d. Destroy all elements in old backing store. + // Use transactional wrappers for the first two steps so we can roll + // back if necessary due to exceptions. 
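The `AllocationTransaction`/`ConstructionTransaction` wrappers referenced in the steps above implement a commit-or-roll-back idiom; a generic sketch of the same shape (illustrative, not the classes defined in this header):

    #include <utility>

    // If Commit() is never reached (for example because constructing elements
    // threw), the rollback callable runs in the destructor and releases whatever
    // the transaction acquired.
    template <typename Rollback>
    class TransactionSketch {
     public:
      explicit TransactionSketch(Rollback rollback) : rollback_(std::move(rollback)) {}
      ~TransactionSketch() {
        if (!committed_) rollback_();
      }
      void Commit() { committed_ = true; }

     private:
      Rollback rollback_;
      bool committed_ = false;
    };

Commit() is called only after every potentially throwing step has succeeded, which mirrors the `std::move(construction_tx).Commit()` calls in the code that follows.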
+ AllocationTransaction allocation_tx(alloc); + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + ConstructionTransaction construction_tx(alloc); + construction_tx.Construct(new_data + size, values, new_size - size); + + IteratorValueAdapter> move_values( + (MoveIterator(base)) + ); + ConstructElements(alloc, new_data, move_values, size); + + DestroyAdapter::DestroyElements(alloc, base, size); + std::move(construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + SetSize(new_size); + } + + template + template + auto Storage::Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count) -> Iterator + { + StorageView storage_view = MakeStorageView(); + + SizeType insert_index = + std::distance(ConstIterator(storage_view.data), pos); + SizeType insert_end_index = insert_index + insert_count; + SizeType new_size = storage_view.size + insert_count; + + if (new_size > storage_view.capacity) + { + AllocationTransaction allocation_tx(GetAllocator()); + ConstructionTransaction construction_tx(GetAllocator()); + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + SizeType requested_capacity = + ComputeCapacity(storage_view.capacity, new_size); + Pointer new_data = allocation_tx.Allocate(requested_capacity); + + construction_tx.Construct(new_data + insert_index, values, insert_count); + + move_construction_tx.Construct(new_data, move_values, insert_index); + + ConstructElements(GetAllocator(), new_data + insert_end_index, move_values, storage_view.size - insert_index); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + std::move(construction_tx).Commit(); + std::move(move_construction_tx).Commit(); + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + + SetAllocatedSize(new_size); + return Iterator(new_data + insert_index); + } + else + { + SizeType move_construction_destination_index = + (std::max)(insert_end_index, storage_view.size); + + ConstructionTransaction move_construction_tx(GetAllocator()); + + IteratorValueAdapter> move_construction_values( + MoveIterator(storage_view.data + (move_construction_destination_index - insert_count)) + ); + absl::Span> move_construction = { + storage_view.data + move_construction_destination_index, + new_size - move_construction_destination_index}; + + Pointer move_assignment_values = storage_view.data + insert_index; + absl::Span> move_assignment = { + storage_view.data + insert_end_index, + move_construction_destination_index - insert_end_index}; + + absl::Span> insert_assignment = {move_assignment_values, move_construction.size()}; + + absl::Span> insert_construction = { + insert_assignment.data() + insert_assignment.size(), + insert_count - insert_assignment.size()}; + + move_construction_tx.Construct(move_construction.data(), move_construction_values, move_construction.size()); + + for (Pointer + destination = move_assignment.data() + move_assignment.size(), + last_destination = move_assignment.data(), + source = move_assignment_values + move_assignment.size(); + ;) + { + --destination; + --source; + if (destination < last_destination) + break; + *destination = std::move(*source); + } + + AssignElements(insert_assignment.data(), values, insert_assignment.size()); + + ConstructElements(GetAllocator(), 
insert_construction.data(), values, insert_construction.size()); + + std::move(move_construction_tx).Commit(); + + AddSize(insert_count); + return Iterator(storage_view.data + insert_index); + } + } + + template + template + auto Storage::EmplaceBack(Args&&... args) -> Reference + { + StorageView storage_view = MakeStorageView(); + const SizeType n = storage_view.size; + if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) + { + // Fast path; new element fits. + Pointer last_ptr = storage_view.data + n; + AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); + AddSize(1); + return *last_ptr; + } + // TODO(b/173712035): Annotate with musttail attribute to prevent regression. + return EmplaceBackSlow(std::forward(args)...); + } + + template + template + auto Storage::EmplaceBackSlow(Args&&... args) -> Reference + { + StorageView storage_view = MakeStorageView(); + AllocationTransaction allocation_tx(GetAllocator()); + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + SizeType requested_capacity = NextCapacity(storage_view.capacity); + Pointer construct_data = allocation_tx.Allocate(requested_capacity); + Pointer last_ptr = construct_data + storage_view.size; + + // Construct new element. + AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); + // Move elements from old backing store to new backing store. + ABSL_INTERNAL_TRY + { + ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY + { + AllocatorTraits::destroy(GetAllocator(), last_ptr); + ABSL_INTERNAL_RETHROW; + } + // Destroy elements in old backing store. + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + AddSize(1); + return *last_ptr; + } + + template + auto Storage::Erase(ConstIterator from, ConstIterator to) + -> Iterator + { + StorageView storage_view = MakeStorageView(); + + SizeType erase_size = std::distance(from, to); + SizeType erase_index = + std::distance(ConstIterator(storage_view.data), from); + SizeType erase_end_index = erase_index + erase_size; + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data + erase_end_index) + ); + + AssignElements(storage_view.data + erase_index, move_values, storage_view.size - erase_end_index); + + DestroyAdapter::DestroyElements( + GetAllocator(), storage_view.data + (storage_view.size - erase_size), erase_size + ); + + SubtractSize(erase_size); + return Iterator(storage_view.data + erase_index); + } + + template + auto Storage::Reserve(SizeType requested_capacity) -> void + { + StorageView storage_view = MakeStorageView(); + + if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) + return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + SizeType new_requested_capacity = + ComputeCapacity(storage_view.capacity, requested_capacity); + Pointer new_data = allocation_tx.Allocate(new_requested_capacity); + + ConstructElements(GetAllocator(), new_data, move_values, storage_view.size); + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + DeallocateIfAllocated(); + SetAllocation(std::move(allocation_tx).Release()); + SetIsAllocated(); + } + + template + auto Storage::ShrinkToFit() -> void + { + // May only be called on allocated instances! 
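Viewed through the public `absl::InlinedVector` interface, the storage transitions implemented by the growth helpers and `ShrinkToFit()` in this header look roughly like this (heap capacities depend on the doubling policy and are not guaranteed):

    #include "absl/container/inlined_vector.h"

    void InlinedVectorStorageSketch() {
      absl::InlinedVector<int, 4> v = {1, 2, 3, 4};  // fits in the inline buffer
      v.push_back(5);     // exceeds the inline capacity of 4; elements move to the heap
      v.pop_back();
      v.shrink_to_fit();  // size (4) fits inline again, so storage moves back inline
    }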
+ ABSL_HARDENING_ASSERT(GetIsAllocated()); + + StorageView storage_view{GetAllocatedData(), GetSize(), GetAllocatedCapacity()}; + + if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) + return; + + AllocationTransaction allocation_tx(GetAllocator()); + + IteratorValueAdapter> move_values( + MoveIterator(storage_view.data) + ); + + Pointer construct_data; + if (storage_view.size > GetInlinedCapacity()) + { + SizeType requested_capacity = storage_view.size; + construct_data = allocation_tx.Allocate(requested_capacity); + if (allocation_tx.GetCapacity() >= storage_view.capacity) + { + // Already using the smallest available heap allocation. + return; + } + } + else + { + construct_data = GetInlinedData(); + } + + ABSL_INTERNAL_TRY + { + ConstructElements(GetAllocator(), construct_data, move_values, storage_view.size); + } + ABSL_INTERNAL_CATCH_ANY + { + SetAllocation({storage_view.data, storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); + + MallocAdapter::Deallocate(GetAllocator(), storage_view.data, storage_view.capacity); + + if (allocation_tx.DidAllocate()) + { + SetAllocation(std::move(allocation_tx).Release()); + } + else + { + UnsetIsAllocated(); + } + } + + template + auto Storage::Swap(Storage* other_storage_ptr) -> void + { + using std::swap; + ABSL_HARDENING_ASSERT(this != other_storage_ptr); + + if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) + { + swap(data_.allocated, other_storage_ptr->data_.allocated); + } + else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) + { + Storage* small_ptr = this; + Storage* large_ptr = other_storage_ptr; + if (small_ptr->GetSize() > large_ptr->GetSize()) + swap(small_ptr, large_ptr); + + for (SizeType i = 0; i < small_ptr->GetSize(); ++i) + { + swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); + } + + IteratorValueAdapter> move_values( + MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()) + ); + + ConstructElements(large_ptr->GetAllocator(), small_ptr->GetInlinedData() + small_ptr->GetSize(), move_values, large_ptr->GetSize() - small_ptr->GetSize()); + + DestroyAdapter::DestroyElements( + large_ptr->GetAllocator(), + large_ptr->GetInlinedData() + small_ptr->GetSize(), + large_ptr->GetSize() - small_ptr->GetSize() + ); + } + else + { + Storage* allocated_ptr = this; + Storage* inlined_ptr = other_storage_ptr; + if (!allocated_ptr->GetIsAllocated()) + swap(allocated_ptr, inlined_ptr); + + StorageView allocated_storage_view{ + allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), allocated_ptr->GetAllocatedCapacity()}; + + IteratorValueAdapter> move_values( + MoveIterator(inlined_ptr->GetInlinedData()) + ); + + ABSL_INTERNAL_TRY + { + ConstructElements(inlined_ptr->GetAllocator(), allocated_ptr->GetInlinedData(), move_values, inlined_ptr->GetSize()); + } + ABSL_INTERNAL_CATCH_ANY + { + allocated_ptr->SetAllocation(Allocation{ + allocated_storage_view.data, allocated_storage_view.capacity}); + ABSL_INTERNAL_RETHROW; + } + + DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), inlined_ptr->GetInlinedData(), inlined_ptr->GetSize()); + + inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, allocated_storage_view.capacity}); + } + + swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); + swap(GetAllocator(), other_storage_ptr->GetAllocator()); + } // End ignore "array-bounds" #if !defined(__clang__) && defined(__GNUC__) #pragma GCC 
diagnostic pop #endif -} // namespace inlined_vector_internal -ABSL_NAMESPACE_END + } // namespace inlined_vector_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/layout.h b/CAPI/cpp/grpc/include/absl/container/internal/layout.h index a59a243..c37e8b5 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/layout.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/layout.h @@ -189,555 +189,621 @@ #include #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -// A type wrapper that instructs `Layout` to use the specific alignment for the -// array. `Layout<..., Aligned, ...>` has exactly the same API -// and behavior as `Layout<..., T, ...>` except that the first element of the -// array of `T` is aligned to `N` (the rest of the elements follow without -// padding). -// -// Requires: `N >= alignof(T)` and `N` is a power of 2. -template -struct Aligned; - -namespace internal_layout { - -template -struct NotAligned {}; - -template -struct NotAligned> { - static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); -}; - -template -using IntToSize = size_t; - -template -using TypeToSize = size_t; - -template -struct Type : NotAligned { - using type = T; -}; - -template -struct Type> { - using type = T; -}; - -template -struct SizeOf : NotAligned, std::integral_constant {}; - -template -struct SizeOf> : std::integral_constant {}; - -// Note: workaround for https://gcc.gnu.org/PR88115 -template -struct AlignOf : NotAligned { - static constexpr size_t value = alignof(T); -}; - -template -struct AlignOf> { - static_assert(N % alignof(T) == 0, - "Custom alignment can't be lower than the type's alignment"); - static constexpr size_t value = N; -}; - -// Does `Ts...` contain `T`? -template -using Contains = absl::disjunction...>; - -template -using CopyConst = - typename std::conditional::value, const To, To>::type; - -// Note: We're not qualifying this with absl:: because it doesn't compile under -// MSVC. -template -using SliceType = Span; - -// This namespace contains no types. It prevents functions defined in it from -// being found by ADL. -namespace adl_barrier { - -template -constexpr size_t Find(Needle, Needle, Ts...) { - static_assert(!Contains(), "Duplicate element type"); - return 0; -} - -template -constexpr size_t Find(Needle, T, Ts...) { - return adl_barrier::Find(Needle(), Ts()...) + 1; -} - -constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } - -// Returns `q * m` for the smallest `q` such that `q * m >= n`. -// Requires: `m` is a power of two. It's enforced by IsLegalElementType below. -constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } - -constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } - -constexpr size_t Max(size_t a) { return a; } - -template -constexpr size_t Max(size_t a, size_t b, Ts... rest) { - return adl_barrier::Max(b < a ? a : b, rest...); -} - -template -std::string TypeName() { - std::string out; - int status = 0; - char* demangled = nullptr; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + // A type wrapper that instructs `Layout` to use the specific alignment for the + // array. `Layout<..., Aligned, ...>` has exactly the same API + // and behavior as `Layout<..., T, ...>` except that the first element of the + // array of `T` is aligned to `N` (the rest of the elements follow without + // padding). 
+ // + // Requires: `N >= alignof(T)` and `N` is a power of 2. + template + struct Aligned; + + namespace internal_layout + { + + template + struct NotAligned + { + }; + + template + struct NotAligned> + { + static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); + }; + + template + using IntToSize = size_t; + + template + using TypeToSize = size_t; + + template + struct Type : NotAligned + { + using type = T; + }; + + template + struct Type> + { + using type = T; + }; + + template + struct SizeOf : NotAligned, std::integral_constant + { + }; + + template + struct SizeOf> : std::integral_constant + { + }; + + // Note: workaround for https://gcc.gnu.org/PR88115 + template + struct AlignOf : NotAligned + { + static constexpr size_t value = alignof(T); + }; + + template + struct AlignOf> + { + static_assert(N % alignof(T) == 0, "Custom alignment can't be lower than the type's alignment"); + static constexpr size_t value = N; + }; + + // Does `Ts...` contain `T`? + template + using Contains = absl::disjunction...>; + + template + using CopyConst = + typename std::conditional::value, const To, To>::type; + + // Note: We're not qualifying this with absl:: because it doesn't compile under + // MSVC. + template + using SliceType = Span; + + // This namespace contains no types. It prevents functions defined in it from + // being found by ADL. + namespace adl_barrier + { + + template + constexpr size_t Find(Needle, Needle, Ts...) + { + static_assert(!Contains(), "Duplicate element type"); + return 0; + } + + template + constexpr size_t Find(Needle, T, Ts...) + { + return adl_barrier::Find(Needle(), Ts()...) + 1; + } + + constexpr bool IsPow2(size_t n) + { + return !(n & (n - 1)); + } + + // Returns `q * m` for the smallest `q` such that `q * m >= n`. + // Requires: `m` is a power of two. It's enforced by IsLegalElementType below. + constexpr size_t Align(size_t n, size_t m) + { + return (n + m - 1) & ~(m - 1); + } + + constexpr size_t Min(size_t a, size_t b) + { + return b < a ? b : a; + } + + constexpr size_t Max(size_t a) + { + return a; + } + + template + constexpr size_t Max(size_t a, size_t b, Ts... rest) + { + return adl_barrier::Max(b < a ? a : b, rest...); + } + + template + std::string TypeName() + { + std::string out; + int status = 0; + char* demangled = nullptr; #ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE - demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); + demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); #endif - if (status == 0 && demangled != nullptr) { // Demangling succeeded. - absl::StrAppend(&out, "<", demangled, ">"); - free(demangled); - } else { + if (status == 0 && demangled != nullptr) + { // Demangling succeeded. + absl::StrAppend(&out, "<", demangled, ">"); + free(demangled); + } + else + { #if defined(__GXX_RTTI) || defined(_CPPRTTI) - absl::StrAppend(&out, "<", typeid(T).name(), ">"); + absl::StrAppend(&out, "<", typeid(T).name(), ">"); #endif - } - return out; -} - -} // namespace adl_barrier - -template -using EnableIf = typename std::enable_if::type; - -// Can `T` be a template argument of `Layout`? -template -using IsLegalElementType = std::integral_constant< - bool, !std::is_reference::value && !std::is_volatile::value && - !std::is_reference::type>::value && - !std::is_volatile::type>::value && - adl_barrier::IsPow2(AlignOf::value)>; - -template -class LayoutImpl; - -// Public base class of `Layout` and the result type of `Layout::Partial()`. 
-// -// `Elements...` contains all template arguments of `Layout` that created this -// instance. -// -// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments -// passed to `Layout::Partial()` or `Layout::Layout()`. -// -// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is -// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we -// can compute offsets). -template -class LayoutImpl, absl::index_sequence, - absl::index_sequence> { - private: - static_assert(sizeof...(Elements) > 0, "At least one field is required"); - static_assert(absl::conjunction...>::value, - "Invalid element type (see IsLegalElementType)"); - - enum { - NumTypes = sizeof...(Elements), - NumSizes = sizeof...(SizeSeq), - NumOffsets = sizeof...(OffsetSeq), - }; - - // These are guaranteed by `Layout`. - static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), - "Internal error"); - static_assert(NumTypes > 0, "Internal error"); - - // Returns the index of `T` in `Elements...`. Results in a compilation error - // if `Elements...` doesn't contain exactly one instance of `T`. - template - static constexpr size_t ElementIndex() { - static_assert(Contains, Type::type>...>(), - "Type not found"); - return adl_barrier::Find(Type(), - Type::type>()...); - } - - template - using ElementAlignment = - AlignOf>::type>; - - public: - // Element types of all arrays packed in a tuple. - using ElementTypes = std::tuple::type...>; - - // Element type of the Nth array. - template - using ElementType = typename std::tuple_element::type; - - constexpr explicit LayoutImpl(IntToSize... sizes) - : size_{sizes...} {} - - // Alignment of the layout, equal to the strictest alignment of all elements. - // All pointers passed to the methods of layout must be aligned to this value. - static constexpr size_t Alignment() { - return adl_barrier::Max(AlignOf::value...); - } - - // Offset in bytes of the Nth array. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // assert(x.Offset<0>() == 0); // The ints starts from 0. - // assert(x.Offset<1>() == 16); // The doubles starts from 16. - // - // Requires: `N <= NumSizes && N < sizeof...(Ts)`. - template = 0> - constexpr size_t Offset() const { - return 0; - } - - template = 0> - constexpr size_t Offset() const { - static_assert(N < NumOffsets, "Index out of bounds"); - return adl_barrier::Align( - Offset() + SizeOf>::value * size_[N - 1], - ElementAlignment::value); - } - - // Offset in bytes of the array with the specified element type. There must - // be exactly one such array and its zero-based index must be at most - // `NumSizes`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // assert(x.Offset() == 0); // The ints starts from 0. - // assert(x.Offset() == 16); // The doubles starts from 16. - template - constexpr size_t Offset() const { - return Offset()>(); - } - - // Offsets in bytes of all arrays for which the offsets are known. - constexpr std::array Offsets() const { - return {{Offset()...}}; - } - - // The number of elements in the Nth array. This is the Nth argument of - // `Layout::Partial()` or `Layout::Layout()` (zero-based). - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // assert(x.Size<0>() == 3); - // assert(x.Size<1>() == 4); - // - // Requires: `N < NumSizes`. 
- template - constexpr size_t Size() const { - static_assert(N < NumSizes, "Index out of bounds"); - return size_[N]; - } - - // The number of elements in the array with the specified element type. - // There must be exactly one such array and its zero-based index must be - // at most `NumSizes`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // assert(x.Size() == 3); - // assert(x.Size() == 4); - template - constexpr size_t Size() const { - return Size()>(); - } - - // The number of elements of all arrays for which they are known. - constexpr std::array Sizes() const { - return {{Size()...}}; - } - - // Pointer to the beginning of the Nth array. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // int* ints = x.Pointer<0>(p); - // double* doubles = x.Pointer<1>(p); - // - // Requires: `N <= NumSizes && N < sizeof...(Ts)`. - // Requires: `p` is aligned to `Alignment()`. - template - CopyConst>* Pointer(Char* p) const { - using C = typename std::remove_const::type; - static_assert( - std::is_same() || std::is_same() || - std::is_same(), - "The argument must be a pointer to [const] [signed|unsigned] char"); - constexpr size_t alignment = Alignment(); - (void)alignment; - assert(reinterpret_cast(p) % alignment == 0); - return reinterpret_cast>*>(p + Offset()); - } - - // Pointer to the beginning of the array with the specified element type. - // There must be exactly one such array and its zero-based index must be at - // most `NumSizes`. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // int* ints = x.Pointer(p); - // double* doubles = x.Pointer(p); - // - // Requires: `p` is aligned to `Alignment()`. - template - CopyConst* Pointer(Char* p) const { - return Pointer()>(p); - } - - // Pointers to all arrays for which pointers are known. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // - // int* ints; - // double* doubles; - // std::tie(ints, doubles) = x.Pointers(p); - // - // Requires: `p` is aligned to `Alignment()`. - // - // Note: We're not using ElementType alias here because it does not compile - // under MSVC. - template - std::tuple::type>*...> - Pointers(Char* p) const { - return std::tuple>*...>( - Pointer(p)...); - } - - // The Nth array. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // Span ints = x.Slice<0>(p); - // Span doubles = x.Slice<1>(p); - // - // Requires: `N < NumSizes`. - // Requires: `p` is aligned to `Alignment()`. - template - SliceType>> Slice(Char* p) const { - return SliceType>>(Pointer(p), Size()); - } - - // The array with the specified element type. There must be exactly one - // such array and its zero-based index must be less than `NumSizes`. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // Span ints = x.Slice(p); - // Span doubles = x.Slice(p); - // - // Requires: `p` is aligned to `Alignment()`. 
- template - SliceType> Slice(Char* p) const { - return Slice()>(p); - } - - // All arrays with known sizes. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; - // - // Span ints; - // Span doubles; - // std::tie(ints, doubles) = x.Slices(p); - // - // Requires: `p` is aligned to `Alignment()`. - // - // Note: We're not using ElementType alias here because it does not compile - // under MSVC. - template - std::tuple::type>>...> - Slices(Char* p) const { - // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed - // in 6.1). - (void)p; - return std::tuple>>...>( - Slice(p)...); - } - - // The size of the allocation that fits all arrays. - // - // // int[3], 4 bytes of padding, double[4]. - // Layout x(3, 4); - // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes - // - // Requires: `NumSizes == sizeof...(Ts)`. - constexpr size_t AllocSize() const { - static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); - return Offset() + - SizeOf>::value * size_[NumTypes - 1]; - } - - // If built with --config=asan, poisons padding bytes (if any) in the - // allocation. The pointer must point to a memory block at least - // `AllocSize()` bytes in length. - // - // `Char` must be `[const] [signed|unsigned] char`. - // - // Requires: `p` is aligned to `Alignment()`. - template = 0> - void PoisonPadding(const Char* p) const { - Pointer<0>(p); // verify the requirements on `Char` and `p` - } - - template = 0> - void PoisonPadding(const Char* p) const { - static_assert(N < NumOffsets, "Index out of bounds"); - (void)p; + } + return out; + } + + } // namespace adl_barrier + + template + using EnableIf = typename std::enable_if::type; + + // Can `T` be a template argument of `Layout`? + template + using IsLegalElementType = std::integral_constant< + bool, + !std::is_reference::value && !std::is_volatile::value && + !std::is_reference::type>::value && + !std::is_volatile::type>::value && + adl_barrier::IsPow2(AlignOf::value)>; + + template + class LayoutImpl; + + // Public base class of `Layout` and the result type of `Layout::Partial()`. + // + // `Elements...` contains all template arguments of `Layout` that created this + // instance. + // + // `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments + // passed to `Layout::Partial()` or `Layout::Layout()`. + // + // `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is + // `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we + // can compute offsets). + template + class LayoutImpl, absl::index_sequence, absl::index_sequence> + { + private: + static_assert(sizeof...(Elements) > 0, "At least one field is required"); + static_assert(absl::conjunction...>::value, "Invalid element type (see IsLegalElementType)"); + + enum + { + NumTypes = sizeof...(Elements), + NumSizes = sizeof...(SizeSeq), + NumOffsets = sizeof...(OffsetSeq), + }; + + // These are guaranteed by `Layout`. + static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), "Internal error"); + static_assert(NumTypes > 0, "Internal error"); + + // Returns the index of `T` in `Elements...`. Results in a compilation error + // if `Elements...` doesn't contain exactly one instance of `T`. 
+ template + static constexpr size_t ElementIndex() + { + static_assert(Contains, Type::type>...>(), "Type not found"); + return adl_barrier::Find(Type(), Type::type>()...); + } + + template + using ElementAlignment = + AlignOf>::type>; + + public: + // Element types of all arrays packed in a tuple. + using ElementTypes = std::tuple::type...>; + + // Element type of the Nth array. + template + using ElementType = typename std::tuple_element::type; + + constexpr explicit LayoutImpl(IntToSize... sizes) : + size_{sizes...} + { + } + + // Alignment of the layout, equal to the strictest alignment of all elements. + // All pointers passed to the methods of layout must be aligned to this value. + static constexpr size_t Alignment() + { + return adl_barrier::Max(AlignOf::value...); + } + + // Offset in bytes of the Nth array. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset<0>() == 0); // The ints starts from 0. + // assert(x.Offset<1>() == 16); // The doubles starts from 16. + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + template = 0> + constexpr size_t Offset() const + { + return 0; + } + + template = 0> + constexpr size_t Offset() const + { + static_assert(N < NumOffsets, "Index out of bounds"); + return adl_barrier::Align( + Offset() + SizeOf>::value * size_[N - 1], + ElementAlignment::value + ); + } + + // Offset in bytes of the array with the specified element type. There must + // be exactly one such array and its zero-based index must be at most + // `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Offset() == 0); // The ints starts from 0. + // assert(x.Offset() == 16); // The doubles starts from 16. + template + constexpr size_t Offset() const + { + return Offset()>(); + } + + // Offsets in bytes of all arrays for which the offsets are known. + constexpr std::array Offsets() const + { + return {{Offset()...}}; + } + + // The number of elements in the Nth array. This is the Nth argument of + // `Layout::Partial()` or `Layout::Layout()` (zero-based). + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size<0>() == 3); + // assert(x.Size<1>() == 4); + // + // Requires: `N < NumSizes`. + template + constexpr size_t Size() const + { + static_assert(N < NumSizes, "Index out of bounds"); + return size_[N]; + } + + // The number of elements in the array with the specified element type. + // There must be exactly one such array and its zero-based index must be + // at most `NumSizes`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // assert(x.Size() == 3); + // assert(x.Size() == 4); + template + constexpr size_t Size() const + { + return Size()>(); + } + + // The number of elements of all arrays for which they are known. + constexpr std::array Sizes() const + { + return {{Size()...}}; + } + + // Pointer to the beginning of the Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer<0>(p); + // double* doubles = x.Pointer<1>(p); + // + // Requires: `N <= NumSizes && N < sizeof...(Ts)`. + // Requires: `p` is aligned to `Alignment()`. 
+ template + CopyConst>* Pointer(Char* p) const + { + using C = typename std::remove_const::type; + static_assert( + std::is_same() || std::is_same() || + std::is_same(), + "The argument must be a pointer to [const] [signed|unsigned] char" + ); + constexpr size_t alignment = Alignment(); + (void)alignment; + assert(reinterpret_cast(p) % alignment == 0); + return reinterpret_cast>*>(p + Offset()); + } + + // Pointer to the beginning of the array with the specified element type. + // There must be exactly one such array and its zero-based index must be at + // most `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // int* ints = x.Pointer(p); + // double* doubles = x.Pointer(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + CopyConst* Pointer(Char* p) const + { + return Pointer()>(p); + } + + // Pointers to all arrays for which pointers are known. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // int* ints; + // double* doubles; + // std::tie(ints, doubles) = x.Pointers(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>*...> + Pointers(Char* p) const + { + return std::tuple>*...>( + Pointer(p)... + ); + } + + // The Nth array. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice<0>(p); + // Span doubles = x.Slice<1>(p); + // + // Requires: `N < NumSizes`. + // Requires: `p` is aligned to `Alignment()`. + template + SliceType>> Slice(Char* p) const + { + return SliceType>>(Pointer(p), Size()); + } + + // The array with the specified element type. There must be exactly one + // such array and its zero-based index must be less than `NumSizes`. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // Span ints = x.Slice(p); + // Span doubles = x.Slice(p); + // + // Requires: `p` is aligned to `Alignment()`. + template + SliceType> Slice(Char* p) const + { + return Slice()>(p); + } + + // All arrays with known sizes. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; + // + // Span ints; + // Span doubles; + // std::tie(ints, doubles) = x.Slices(p); + // + // Requires: `p` is aligned to `Alignment()`. + // + // Note: We're not using ElementType alias here because it does not compile + // under MSVC. + template + std::tuple::type>>...> + Slices(Char* p) const + { + // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed + // in 6.1). + (void)p; + return std::tuple>>...>( + Slice(p)... + ); + } + + // The size of the allocation that fits all arrays. + // + // // int[3], 4 bytes of padding, double[4]. + // Layout x(3, 4); + // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes + // + // Requires: `NumSizes == sizeof...(Ts)`. 
+ constexpr size_t AllocSize() const + { + static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); + return Offset() + + SizeOf>::value * size_[NumTypes - 1]; + } + + // If built with --config=asan, poisons padding bytes (if any) in the + // allocation. The pointer must point to a memory block at least + // `AllocSize()` bytes in length. + // + // `Char` must be `[const] [signed|unsigned] char`. + // + // Requires: `p` is aligned to `Alignment()`. + template = 0> + void PoisonPadding(const Char* p) const + { + Pointer<0>(p); // verify the requirements on `Char` and `p` + } + + template = 0> + void PoisonPadding(const Char* p) const + { + static_assert(N < NumOffsets, "Index out of bounds"); + (void)p; #ifdef ABSL_HAVE_ADDRESS_SANITIZER - PoisonPadding(p); - // The `if` is an optimization. It doesn't affect the observable behaviour. - if (ElementAlignment::value % ElementAlignment::value) { - size_t start = - Offset() + SizeOf>::value * size_[N - 1]; - ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); - } + PoisonPadding(p); + // The `if` is an optimization. It doesn't affect the observable behaviour. + if (ElementAlignment::value % ElementAlignment::value) + { + size_t start = + Offset() + SizeOf>::value * size_[N - 1]; + ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); + } #endif - } - - // Human-readable description of the memory layout. Useful for debugging. - // Slow. - // - // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed - // // by an unknown number of doubles. - // auto x = Layout::Partial(5, 3); - // assert(x.DebugString() == - // "@0(1)[5]; @8(4)[3]; @24(8)"); - // - // Each field is in the following format: @offset(sizeof)[size] ( - // may be missing depending on the target platform). For example, - // @8(4)[3] means that at offset 8 we have an array of ints, where each - // int is 4 bytes, and we have 3 of those ints. The size of the last field may - // be missing (as in the example above). Only fields with known offsets are - // described. Type names may differ across platforms: one compiler might - // produce "unsigned*" where another produces "unsigned int *". - std::string DebugString() const { - const auto offsets = Offsets(); - const size_t sizes[] = {SizeOf>::value...}; - const std::string types[] = { - adl_barrier::TypeName>()...}; - std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); - for (size_t i = 0; i != NumOffsets - 1; ++i) { - absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], - "(", sizes[i + 1], ")"); - } - // NumSizes is a constant that may be zero. Some compilers cannot see that - // inside the if statement "size_[NumSizes - 1]" must be valid. - int last = static_cast(NumSizes) - 1; - if (NumTypes == NumSizes && last >= 0) { - absl::StrAppend(&res, "[", size_[last], "]"); - } - return res; - } - - private: - // Arguments of `Layout::Partial()` or `Layout::Layout()`. - size_t size_[NumSizes > 0 ? NumSizes : 1]; -}; - -template -using LayoutType = LayoutImpl< - std::tuple, absl::make_index_sequence, - absl::make_index_sequence>; - -} // namespace internal_layout - -// Descriptor of arrays of various types and sizes laid out in memory one after -// another. See the top of the file for documentation. -// -// Check out the public API of internal_layout::LayoutImpl above. The type is -// internal to the library but its methods are public, and they are inherited -// by `Layout`. 
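(Illustrative aside: the usage fragments scattered through the comments above, collected into one compilable sketch of the Layout API. The concrete offsets assume a typical LP64 platform where alignof(double) == 8; this is an example, not part of the header.)

#include <cassert>
#include "absl/container/internal/layout.h"

void LayoutSketch() {
  using absl::container_internal::Layout;

  // int[3], 4 bytes of padding so the doubles are 8-byte aligned, double[4].
  constexpr Layout<int, double> layout(3, 4);
  static_assert(Layout<int, double>::Alignment() == 8, "strictest element alignment");
  assert(layout.Offset<0>() == 0);   // the ints start at byte 0
  assert(layout.Offset<1>() == 16);  // the doubles start at byte 16
  assert(layout.AllocSize() == 48);  // 16 + 4 * sizeof(double)

  unsigned char* p = new unsigned char[layout.AllocSize()];
  int* ints = layout.Pointer<0>(p);               // by index...
  double* doubles = layout.Pointer<double>(p);    // ...or by element type
  absl::Span<int> int_span = layout.Slice<0>(p);  // int_span.size() == 3
  assert(int_span.data() == ints && doubles != nullptr);
  delete[] p;

  // Partial(): only the first array's size is known; enough to locate the doubles.
  constexpr auto partial = Layout<int, double>::Partial(3);
  assert(partial.Offset<1>() == 16);
}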
-template -class Layout : public internal_layout::LayoutType { - public: - static_assert(sizeof...(Ts) > 0, "At least one field is required"); - static_assert( - absl::conjunction...>::value, - "Invalid element type (see IsLegalElementType)"); - - // The result type of `Partial()` with `NumSizes` arguments. - template - using PartialType = internal_layout::LayoutType; - - // `Layout` knows the element types of the arrays we want to lay out in - // memory but not the number of elements in each array. - // `Partial(size1, ..., sizeN)` allows us to specify the latter. The - // resulting immutable object can be used to obtain pointers to the - // individual arrays. - // - // It's allowed to pass fewer array sizes than the number of arrays. E.g., - // if all you need is to the offset of the second array, you only need to - // pass one argument -- the number of elements in the first array. - // - // // int[3] followed by 4 bytes of padding and an unknown number of - // // doubles. - // auto x = Layout::Partial(3); - // // doubles start at byte 16. - // assert(x.Offset<1>() == 16); - // - // If you know the number of elements in all arrays, you can still call - // `Partial()` but it's more convenient to use the constructor of `Layout`. - // - // Layout x(3, 5); - // - // Note: The sizes of the arrays must be specified in number of elements, - // not in bytes. - // - // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. - // Requires: all arguments are convertible to `size_t`. - template - static constexpr PartialType Partial(Sizes&&... sizes) { - static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); - return PartialType(absl::forward(sizes)...); - } - - // Creates a layout with the sizes of all arrays specified. If you know - // only the sizes of the first N arrays (where N can be zero), you can use - // `Partial()` defined above. The constructor is essentially equivalent to - // calling `Partial()` and passing in all array sizes; the constructor is - // provided as a convenient abbreviation. - // - // Note: The sizes of the arrays must be specified in number of elements, - // not in bytes. - constexpr explicit Layout(internal_layout::TypeToSize... sizes) - : internal_layout::LayoutType(sizes...) {} -}; - -} // namespace container_internal -ABSL_NAMESPACE_END + } + + // Human-readable description of the memory layout. Useful for debugging. + // Slow. + // + // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed + // // by an unknown number of doubles. + // auto x = Layout::Partial(5, 3); + // assert(x.DebugString() == + // "@0(1)[5]; @8(4)[3]; @24(8)"); + // + // Each field is in the following format: @offset(sizeof)[size] ( + // may be missing depending on the target platform). For example, + // @8(4)[3] means that at offset 8 we have an array of ints, where each + // int is 4 bytes, and we have 3 of those ints. The size of the last field may + // be missing (as in the example above). Only fields with known offsets are + // described. Type names may differ across platforms: one compiler might + // produce "unsigned*" where another produces "unsigned int *". 
+ std::string DebugString() const + { + const auto offsets = Offsets(); + const size_t sizes[] = {SizeOf>::value...}; + const std::string types[] = { + adl_barrier::TypeName>()...}; + std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); + for (size_t i = 0; i != NumOffsets - 1; ++i) + { + absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], "(", sizes[i + 1], ")"); + } + // NumSizes is a constant that may be zero. Some compilers cannot see that + // inside the if statement "size_[NumSizes - 1]" must be valid. + int last = static_cast(NumSizes) - 1; + if (NumTypes == NumSizes && last >= 0) + { + absl::StrAppend(&res, "[", size_[last], "]"); + } + return res; + } + + private: + // Arguments of `Layout::Partial()` or `Layout::Layout()`. + size_t size_[NumSizes > 0 ? NumSizes : 1]; + }; + + template + using LayoutType = LayoutImpl< + std::tuple, + absl::make_index_sequence, + absl::make_index_sequence>; + + } // namespace internal_layout + + // Descriptor of arrays of various types and sizes laid out in memory one after + // another. See the top of the file for documentation. + // + // Check out the public API of internal_layout::LayoutImpl above. The type is + // internal to the library but its methods are public, and they are inherited + // by `Layout`. + template + class Layout : public internal_layout::LayoutType + { + public: + static_assert(sizeof...(Ts) > 0, "At least one field is required"); + static_assert( + absl::conjunction...>::value, + "Invalid element type (see IsLegalElementType)" + ); + + // The result type of `Partial()` with `NumSizes` arguments. + template + using PartialType = internal_layout::LayoutType; + + // `Layout` knows the element types of the arrays we want to lay out in + // memory but not the number of elements in each array. + // `Partial(size1, ..., sizeN)` allows us to specify the latter. The + // resulting immutable object can be used to obtain pointers to the + // individual arrays. + // + // It's allowed to pass fewer array sizes than the number of arrays. E.g., + // if all you need is to the offset of the second array, you only need to + // pass one argument -- the number of elements in the first array. + // + // // int[3] followed by 4 bytes of padding and an unknown number of + // // doubles. + // auto x = Layout::Partial(3); + // // doubles start at byte 16. + // assert(x.Offset<1>() == 16); + // + // If you know the number of elements in all arrays, you can still call + // `Partial()` but it's more convenient to use the constructor of `Layout`. + // + // Layout x(3, 5); + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + // + // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. + // Requires: all arguments are convertible to `size_t`. + template + static constexpr PartialType Partial(Sizes&&... sizes) + { + static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); + return PartialType(absl::forward(sizes)...); + } + + // Creates a layout with the sizes of all arrays specified. If you know + // only the sizes of the first N arrays (where N can be zero), you can use + // `Partial()` defined above. The constructor is essentially equivalent to + // calling `Partial()` and passing in all array sizes; the constructor is + // provided as a convenient abbreviation. + // + // Note: The sizes of the arrays must be specified in number of elements, + // not in bytes. + constexpr explicit Layout(internal_layout::TypeToSize... sizes) : + internal_layout::LayoutType(sizes...) 
+ { + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h index baba574..4ab459f 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/node_slot_policy.h @@ -41,52 +41,65 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -template -struct node_slot_policy { - static_assert(std::is_lvalue_reference::value, ""); + template + struct node_slot_policy + { + static_assert(std::is_lvalue_reference::value, ""); - using slot_type = typename std::remove_cv< - typename std::remove_reference::type>::type*; + using slot_type = typename std::remove_cv< + typename std::remove_reference::type>::type*; - template - static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { - *slot = Policy::new_element(alloc, std::forward(args)...); - } + template + static void construct(Alloc* alloc, slot_type* slot, Args&&... args) + { + *slot = Policy::new_element(alloc, std::forward(args)...); + } - template - static void destroy(Alloc* alloc, slot_type* slot) { - Policy::delete_element(alloc, *slot); - } + template + static void destroy(Alloc* alloc, slot_type* slot) + { + Policy::delete_element(alloc, *slot); + } - template - static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { - *new_slot = *old_slot; - } + template + static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) + { + *new_slot = *old_slot; + } - static size_t space_used(const slot_type* slot) { - if (slot == nullptr) return Policy::element_space_used(nullptr); - return Policy::element_space_used(*slot); - } + static size_t space_used(const slot_type* slot) + { + if (slot == nullptr) + return Policy::element_space_used(nullptr); + return Policy::element_space_used(*slot); + } - static Reference element(slot_type* slot) { return **slot; } + static Reference element(slot_type* slot) + { + return **slot; + } - template - static auto value(T* elem) -> decltype(P::value(elem)) { - return P::value(elem); - } + template + static auto value(T* elem) -> decltype(P::value(elem)) + { + return P::value(elem); + } - template - static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) { - return P::apply(std::forward(ts)...); - } -}; + template + static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward(ts)...)) + { + return P::apply(std::forward(ts)...); + } + }; -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h index c7df2ef..705eec4 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_map.h @@ -23,176 +23,196 @@ #include "absl/container/internal/container_memory.h" #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class raw_hash_map : public raw_hash_set { - // P is Policy. 
It's passed as a template argument to support maps that have - // incomplete types as values, as in unordered_map. - // MappedReference<> may be a non-reference type. - template - using MappedReference = decltype(P::value( - std::addressof(std::declval()))); - - // MappedConstReference<> may be a non-reference type. - template - using MappedConstReference = decltype(P::value( - std::addressof(std::declval()))); - - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - - public: - using key_type = typename Policy::key_type; - using mapped_type = typename Policy::mapped_type; - template - using key_arg = typename KeyArgImpl::template type; - - static_assert(!std::is_reference::value, ""); - - // TODO(b/187807849): Evaluate whether to support reference mapped_type and - // remove this assertion if/when it is supported. - static_assert(!std::is_reference::value, ""); - - using iterator = typename raw_hash_map::raw_hash_set::iterator; - using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; - - raw_hash_map() {} - using raw_hash_map::raw_hash_set::raw_hash_set; - - // The last two template parameters ensure that both arguments are rvalues - // (lvalue arguments are handled by the overloads below). This is necessary - // for supporting bitfield arguments. - // - // union { int n : 1; }; - // flat_hash_map m; - // m.insert_or_assign(n, n); - template - std::pair insert_or_assign(key_arg&& k, V&& v) { - return insert_or_assign_impl(std::forward(k), std::forward(v)); - } - - template - std::pair insert_or_assign(key_arg&& k, const V& v) { - return insert_or_assign_impl(std::forward(k), v); - } - - template - std::pair insert_or_assign(const key_arg& k, V&& v) { - return insert_or_assign_impl(k, std::forward(v)); - } - - template - std::pair insert_or_assign(const key_arg& k, const V& v) { - return insert_or_assign_impl(k, v); - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { - return insert_or_assign(std::forward(k), std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { - return insert_or_assign(std::forward(k), v).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { - return insert_or_assign(k, std::forward(v)).first; - } - - template - iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { - return insert_or_assign(k, v).first; - } - - // All `try_emplace()` overloads make the same guarantees regarding rvalue - // arguments as `std::unordered_map::try_emplace()`, namely that these - // functions will not move from rvalue arguments if insertions do not happen. - template ::value, int>::type = 0, - K* = nullptr> - std::pair try_emplace(key_arg&& k, Args&&... args) { - return try_emplace_impl(std::forward(k), std::forward(args)...); - } - - template ::value, int>::type = 0> - std::pair try_emplace(const key_arg& k, Args&&... args) { - return try_emplace_impl(k, std::forward(args)...); - } - - template - iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { - return try_emplace(std::forward(k), std::forward(args)...).first; - } - - template - iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { - return try_emplace(k, std::forward(args)...).first; - } - - template - MappedReference

<P> at(const key_arg<K>& key) { - auto it = this->find(key); - if (it == this->end()) { - base_internal::ThrowStdOutOfRange( - "absl::container_internal::raw_hash_map<>::at"); - } - return Policy::value(&*it); - } - - template <class K = key_type, class P = Policy> - MappedConstReference

<P> at(const key_arg<K>& key) const { - auto it = this->find(key); - if (it == this->end()) { - base_internal::ThrowStdOutOfRange( - "absl::container_internal::raw_hash_map<>::at"); - } - return Policy::value(&*it); - } - - template <class K = key_type, class P = Policy> - MappedReference

<P> operator[](key_arg<K>&& key) { - return Policy::value(&*try_emplace(std::forward<K>(key)).first); - } - - template <class K = key_type, class P = Policy> - MappedReference

operator[](const key_arg& key) { - return Policy::value(&*try_emplace(key).first); - } - - private: - template - std::pair insert_or_assign_impl(K&& k, V&& v) { - auto res = this->find_or_prepare_insert(k); - if (res.second) - this->emplace_at(res.first, std::forward(k), std::forward(v)); - else - Policy::value(&*this->iterator_at(res.first)) = std::forward(v); - return {this->iterator_at(res.first), res.second}; - } - - template - std::pair try_emplace_impl(K&& k, Args&&... args) { - auto res = this->find_or_prepare_insert(k); - if (res.second) - this->emplace_at(res.first, std::piecewise_construct, - std::forward_as_tuple(std::forward(k)), - std::forward_as_tuple(std::forward(args)...)); - return {this->iterator_at(res.first), res.second}; - } -}; - -} // namespace container_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class raw_hash_map : public raw_hash_set + { + // P is Policy. It's passed as a template argument to support maps that have + // incomplete types as values, as in unordered_map. + // MappedReference<> may be a non-reference type. + template + using MappedReference = decltype(P::value( + std::addressof(std::declval()) + )); + + // MappedConstReference<> may be a non-reference type. + template + using MappedConstReference = decltype(P::value( + std::addressof(std::declval()) + )); + + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using key_type = typename Policy::key_type; + using mapped_type = typename Policy::mapped_type; + template + using key_arg = typename KeyArgImpl::template type; + + static_assert(!std::is_reference::value, ""); + + // TODO(b/187807849): Evaluate whether to support reference mapped_type and + // remove this assertion if/when it is supported. + static_assert(!std::is_reference::value, ""); + + using iterator = typename raw_hash_map::raw_hash_set::iterator; + using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; + + raw_hash_map() + { + } + using raw_hash_map::raw_hash_set::raw_hash_set; + + // The last two template parameters ensure that both arguments are rvalues + // (lvalue arguments are handled by the overloads below). This is necessary + // for supporting bitfield arguments. 
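(Illustrative aside: these insert_or_assign/try_emplace overloads back the public absl::flat_hash_map interface. A small sketch of the semantics, in particular the guarantee that try_emplace does not move from rvalue arguments when no insertion happens; the bitfield rationale continues below.)

#include <cassert>
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"

void TryEmplaceSketch() {
  absl::flat_hash_map<std::string, std::unique_ptr<int>> m;

  // insert_or_assign(): inserts when the key is absent, overwrites when present.
  auto res = m.insert_or_assign("a", std::make_unique<int>(1));
  assert(res.second);               // "a" was newly inserted
  assert(*res.first->second == 1);

  // try_emplace(): like std::unordered_map::try_emplace, it leaves rvalue
  // arguments untouched when the key already exists.
  auto ptr = std::make_unique<int>(2);
  m.try_emplace("a", std::move(ptr));  // key present, so no insertion...
  assert(ptr != nullptr);              // ...and ptr still owns its int
  assert(*m.at("a") == 1);             // at() throws std::out_of_range for missing keys
}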
+ // + // union { int n : 1; }; + // flat_hash_map m; + // m.insert_or_assign(n, n); + template + std::pair insert_or_assign(key_arg&& k, V&& v) + { + return insert_or_assign_impl(std::forward(k), std::forward(v)); + } + + template + std::pair insert_or_assign(key_arg&& k, const V& v) + { + return insert_or_assign_impl(std::forward(k), v); + } + + template + std::pair insert_or_assign(const key_arg& k, V&& v) + { + return insert_or_assign_impl(k, std::forward(v)); + } + + template + std::pair insert_or_assign(const key_arg& k, const V& v) + { + return insert_or_assign_impl(k, v); + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) + { + return insert_or_assign(std::forward(k), std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) + { + return insert_or_assign(std::forward(k), v).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) + { + return insert_or_assign(k, std::forward(v)).first; + } + + template + iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) + { + return insert_or_assign(k, v).first; + } + + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + template::value, int>::type = 0, K* = nullptr> + std::pair try_emplace(key_arg&& k, Args&&... args) + { + return try_emplace_impl(std::forward(k), std::forward(args)...); + } + + template::value, int>::type = 0> + std::pair try_emplace(const key_arg& k, Args&&... args) + { + return try_emplace_impl(k, std::forward(args)...); + } + + template + iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) + { + return try_emplace(std::forward(k), std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) + { + return try_emplace(k, std::forward(args)...).first; + } + + template + MappedReference

<P> at(const key_arg<K>& key) + { + auto it = this->find(key); + if (it == this->end()) + { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at" + ); + } + return Policy::value(&*it); + } + + template<class K = key_type, class P = Policy> + MappedConstReference

<P> at(const key_arg<K>& key) const + { + auto it = this->find(key); + if (it == this->end()) + { + base_internal::ThrowStdOutOfRange( + "absl::container_internal::raw_hash_map<>::at" + ); + } + return Policy::value(&*it); + } + + template<class K = key_type, class P = Policy> + MappedReference

<P> operator[](key_arg<K>&& key) + { + return Policy::value(&*try_emplace(std::forward<K>(key)).first); + } + + template<class K = key_type, class P = Policy> + MappedReference

operator[](const key_arg& key) + { + return Policy::value(&*try_emplace(key).first); + } + + private: + template + std::pair insert_or_assign_impl(K&& k, V&& v) + { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::forward(k), std::forward(v)); + else + Policy::value(&*this->iterator_at(res.first)) = std::forward(v); + return {this->iterator_at(res.first), res.second}; + } + + template + std::pair try_emplace_impl(K&& k, Args&&... args) + { + auto res = this->find_or_prepare_insert(k); + if (res.second) + this->emplace_at(res.first, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)); + return {this->iterator_at(res.first), res.second}; + } + }; + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h index ea912f8..eecf77a 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/raw_hash_set.h @@ -215,2149 +215,2532 @@ #include #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -void SwapAlloc(AllocType& lhs, AllocType& rhs, - std::true_type /* propagate_on_container_swap */) { - using std::swap; - swap(lhs, rhs); -} -template -void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, - std::false_type /* propagate_on_container_swap */) {} - -// The state for a probe sequence. -// -// Currently, the sequence is a triangular progression of the form -// -// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) -// -// The use of `Width` ensures that each probe step does not overlap groups; -// the sequence effectively outputs the addresses of *groups* (although not -// necessarily aligned to any boundary). The `Group` machinery allows us -// to check an entire group with minimal branching. -// -// Wrapping around at `mask + 1` is important, but not for the obvious reason. -// As described above, the first few entries of the control byte array -// are mirrored at the end of the array, which `Group` will find and use -// for selecting candidates. However, when those candidates' slots are -// actually inspected, there are no corresponding slots for the cloned bytes, -// so we need to make sure we've treated those offsets as "wrapping around". -// -// It turns out that this probe sequence visits every group exactly once if the -// number of groups is a power of two, since (i^2+i)/2 is a bijection in -// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing -template -class probe_seq { - public: - // Creates a new probe sequence using `hash` as the initial value of the - // sequence and `mask` (usually the capacity of the table) as the mask to - // apply to each value in the progression. - probe_seq(size_t hash, size_t mask) { - assert(((mask + 1) & mask) == 0 && "not a mask"); - mask_ = mask; - offset_ = hash & mask_; - } - - // The offset within the table, i.e., the value `p(i)` above. - size_t offset() const { return offset_; } - size_t offset(size_t i) const { return (offset_ + i) & mask_; } - - void next() { - index_ += Width; - offset_ += index_; - offset_ &= mask_; - } - // 0-based probe index, a multiple of `Width`. 
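(Illustrative aside: a standalone sketch of the triangular probe sequence documented above. With Width = 16 and a 64-slot table there are four groups, and the first four probes land on four distinct group offsets before the sequence wraps; the names below are hypothetical, not the header's own.)

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t kWidth = 16;     // slots inspected per group
  const std::size_t mask = 63;       // capacity - 1, always a power of two minus one
  std::size_t hash = 0x12345;        // stands in for H1 of some element
  std::size_t offset = hash & mask;  // probe_seq::offset()
  std::size_t index = 0;
  for (int probe = 0; probe < 4; ++probe) {
    std::printf("probe %d -> group starting at slot %zu\n", probe, offset);
    index += kWidth;                 // probe_seq::next(): p(i) = Width*(i^2+i)/2 + hash (mod mask+1)
    offset = (offset + index) & mask;
  }
  return 0;
}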
- size_t index() const { return index_; } - - private: - size_t mask_; - size_t offset_; - size_t index_ = 0; -}; - -template -struct RequireUsableKey { - template - std::pair< - decltype(std::declval()(std::declval())), - decltype(std::declval()(std::declval(), - std::declval()))>* - operator()(const PassedKey&, const Args&...) const; -}; - -template -struct IsDecomposable : std::false_type {}; - -template -struct IsDecomposable< - absl::void_t(), - std::declval()...))>, - Policy, Hash, Eq, Ts...> : std::true_type {}; - -// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. -template -constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { - using std::swap; - return noexcept(swap(std::declval(), std::declval())); -} -template -constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { - return false; -} - -template -uint32_t TrailingZeros(T x) { - ABSL_ASSUME(x != 0); - return static_cast(countr_zero(x)); -} - -// An abstract bitmask, such as that emitted by a SIMD instruction. -// -// Specifically, this type implements a simple bitset whose representation is -// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number -// of abstract bits in the bitset, while `Shift` is the log-base-two of the -// width of an abstract bit in the representation. -// This mask provides operations for any number of real bits set in an abstract -// bit. To add iteration on top of that, implementation must guarantee no more -// than one real bit is set in an abstract bit. -template -class NonIterableBitMask { - public: - explicit NonIterableBitMask(T mask) : mask_(mask) {} - - explicit operator bool() const { return this->mask_ != 0; } - - // Returns the index of the lowest *abstract* bit set in `self`. - uint32_t LowestBitSet() const { - return container_internal::TrailingZeros(mask_) >> Shift; - } - - // Returns the index of the highest *abstract* bit set in `self`. - uint32_t HighestBitSet() const { - return static_cast((bit_width(mask_) - 1) >> Shift); - } - - // Return the number of trailing zero *abstract* bits. - uint32_t TrailingZeros() const { - return container_internal::TrailingZeros(mask_) >> Shift; - } - - // Return the number of leading zero *abstract* bits. - uint32_t LeadingZeros() const { - constexpr int total_significant_bits = SignificantBits << Shift; - constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; - return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; - } - - T mask_; -}; - -// Mask that can be iterable -// -// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just -// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When -// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as -// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. -// -// For example: -// for (int i : BitMask(0b101)) -> yields 0, 2 -// for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 -template -class BitMask : public NonIterableBitMask { - using Base = NonIterableBitMask; - static_assert(std::is_unsigned::value, ""); - static_assert(Shift == 0 || Shift == 3, ""); - - public: - explicit BitMask(T mask) : Base(mask) {} - // BitMask is an iterator over the indices of its abstract bits. 
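(Illustrative aside: the iteration contract of BitMask, sketched without the SIMD machinery. A hypothetical helper using absl::countr_zero; for a 16-bit group match mask it visits the same indices as the examples in the comment above.)

#include <cstdint>
#include <cstdio>
#include "absl/numeric/bits.h"

// Visits the indices of the set bits of a match mask, lowest first.
void ForEachSetBit(std::uint16_t mask) {
  while (mask != 0) {
    std::printf("%d\n", absl::countr_zero(mask));          // LowestBitSet()
    mask = static_cast<std::uint16_t>(mask & (mask - 1));  // operator++: clear the lowest set bit
  }
}

// ForEachSetBit(0b101) prints 0 then 2, matching the BitMask example above.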
- using value_type = int; - using iterator = BitMask; - using const_iterator = BitMask; - - BitMask& operator++() { - this->mask_ &= (this->mask_ - 1); - return *this; - } - - uint32_t operator*() const { return Base::LowestBitSet(); } - - BitMask begin() const { return *this; } - BitMask end() const { return BitMask(0); } - - private: - friend bool operator==(const BitMask& a, const BitMask& b) { - return a.mask_ == b.mask_; - } - friend bool operator!=(const BitMask& a, const BitMask& b) { - return a.mask_ != b.mask_; - } -}; - -using h2_t = uint8_t; - -// The values here are selected for maximum performance. See the static asserts -// below for details. - -// A `ctrl_t` is a single control byte, which can have one of four -// states: empty, deleted, full (which has an associated seven-bit h2_t value) -// and the sentinel. They have the following bit patterns: -// -// empty: 1 0 0 0 0 0 0 0 -// deleted: 1 1 1 1 1 1 1 0 -// full: 0 h h h h h h h // h represents the hash bits. -// sentinel: 1 1 1 1 1 1 1 1 -// -// These values are specifically tuned for SSE-flavored SIMD. -// The static_asserts below detail the source of these choices. -// -// We use an enum class so that when strict aliasing is enabled, the compiler -// knows ctrl_t doesn't alias other types. -enum class ctrl_t : int8_t { - kEmpty = -128, // 0b10000000 - kDeleted = -2, // 0b11111110 - kSentinel = -1, // 0b11111111 -}; -static_assert( - (static_cast(ctrl_t::kEmpty) & - static_cast(ctrl_t::kDeleted) & - static_cast(ctrl_t::kSentinel) & 0x80) != 0, - "Special markers need to have the MSB to make checking for them efficient"); -static_assert( - ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, - "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " - "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); -static_assert( - ctrl_t::kSentinel == static_cast(-1), - "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " - "registers (pcmpeqd xmm, xmm)"); -static_assert(ctrl_t::kEmpty == static_cast(-128), - "ctrl_t::kEmpty must be -128 to make the SIMD check for its " - "existence efficient (psignb xmm, xmm)"); -static_assert( - (~static_cast(ctrl_t::kEmpty) & - ~static_cast(ctrl_t::kDeleted) & - static_cast(ctrl_t::kSentinel) & 0x7F) != 0, - "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " - "shared by ctrl_t::kSentinel to make the scalar test for " - "MaskEmptyOrDeleted() efficient"); -static_assert(ctrl_t::kDeleted == static_cast(-2), - "ctrl_t::kDeleted must be -2 to make the implementation of " - "ConvertSpecialToEmptyAndFullToDeleted efficient"); - -ABSL_DLL extern const ctrl_t kEmptyGroup[16]; - -// Returns a pointer to a control byte group that can be used by empty tables. -inline ctrl_t* EmptyGroup() { - // Const must be cast away here; no uses of this function will actually write - // to it, because it is only used for empty tables. - return const_cast(kEmptyGroup); -} - -// Mixes a randomly generated per-process seed with `hash` and `ctrl` to -// randomize insertion order within groups. -bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); - -// Returns a per-table, hash salt, which changes on resize. This gets mixed into -// H1 to randomize iteration order per-table. -// -// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure -// non-determinism of iteration order in most cases. 
-inline size_t PerTableSalt(const ctrl_t* ctrl) { - // The low bits of the pointer have little or no entropy because of - // alignment. We shift the pointer to try to use higher entropy bits. A - // good number seems to be 12 bits, because that aligns with page size. - return reinterpret_cast(ctrl) >> 12; -} -// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. -inline size_t H1(size_t hash, const ctrl_t* ctrl) { - return (hash >> 7) ^ PerTableSalt(ctrl); -} - -// Extracts the H2 portion of a hash: the 7 bits not used for H1. -// -// These are used as an occupied control byte. -inline h2_t H2(size_t hash) { return hash & 0x7F; } +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + void SwapAlloc(AllocType& lhs, AllocType& rhs, std::true_type /* propagate_on_container_swap */) + { + using std::swap; + swap(lhs, rhs); + } + template + void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, std::false_type /* propagate_on_container_swap */) + { + } -// Helpers for checking the state of a control byte. -inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } -inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } -inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } -inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } + // The state for a probe sequence. + // + // Currently, the sequence is a triangular progression of the form + // + // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) + // + // The use of `Width` ensures that each probe step does not overlap groups; + // the sequence effectively outputs the addresses of *groups* (although not + // necessarily aligned to any boundary). The `Group` machinery allows us + // to check an entire group with minimal branching. + // + // Wrapping around at `mask + 1` is important, but not for the obvious reason. + // As described above, the first few entries of the control byte array + // are mirrored at the end of the array, which `Group` will find and use + // for selecting candidates. However, when those candidates' slots are + // actually inspected, there are no corresponding slots for the cloned bytes, + // so we need to make sure we've treated those offsets as "wrapping around". + // + // It turns out that this probe sequence visits every group exactly once if the + // number of groups is a power of two, since (i^2+i)/2 is a bijection in + // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing + template + class probe_seq + { + public: + // Creates a new probe sequence using `hash` as the initial value of the + // sequence and `mask` (usually the capacity of the table) as the mask to + // apply to each value in the progression. + probe_seq(size_t hash, size_t mask) + { + assert(((mask + 1) & mask) == 0 && "not a mask"); + mask_ = mask; + offset_ = hash & mask_; + } + + // The offset within the table, i.e., the value `p(i)` above. + size_t offset() const + { + return offset_; + } + size_t offset(size_t i) const + { + return (offset_ + i) & mask_; + } + + void next() + { + index_ += Width; + offset_ += index_; + offset_ &= mask_; + } + // 0-based probe index, a multiple of `Width`. + size_t index() const + { + return index_; + } + + private: + size_t mask_; + size_t offset_; + size_t index_ = 0; + }; + + template + struct RequireUsableKey + { + template + std::pair< + decltype(std::declval()(std::declval())), + decltype(std::declval()(std::declval(), std::declval()))>* + operator()(const PassedKey&, const Args&...) 
const; + }; + + template + struct IsDecomposable : std::false_type + { + }; + + template + struct IsDecomposable< + absl::void_t(), + std::declval()... + ))>, + Policy, + Hash, + Eq, + Ts...> : std::true_type + { + }; + + // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. + template + constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) + { + using std::swap; + return noexcept(swap(std::declval(), std::declval())); + } + template + constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) + { + return false; + } -#ifdef ABSL_INTERNAL_HAVE_SSE2 -// Quick reference guide for intrinsics used below: -// -// * __m128i: An XMM (128-bit) word. -// -// * _mm_setzero_si128: Returns a zero vector. -// * _mm_set1_epi8: Returns a vector with the same i8 in each lane. -// -// * _mm_subs_epi8: Saturating-subtracts two i8 vectors. -// * _mm_and_si128: Ands two i128s together. -// * _mm_or_si128: Ors two i128s together. -// * _mm_andnot_si128: And-nots two i128s together. -// -// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, -// filling each lane with 0x00 or 0xff. -// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. -// -// * _mm_loadu_si128: Performs an unaligned load of an i128. -// * _mm_storeu_si128: Performs an unaligned store of an i128. -// -// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first -// argument if the corresponding lane of the second -// argument is positive, negative, or zero, respectively. -// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a -// bitmask consisting of those bits. -// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low -// four bits of each i8 lane in the second argument as -// indices. - -// https://github.com/abseil/abseil-cpp/issues/209 -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 -// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char -// Work around this by using the portable implementation of Group -// when using -funsigned-char under GCC. -inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { -#if defined(__GNUC__) && !defined(__clang__) - if (std::is_unsigned::value) { - const __m128i mask = _mm_set1_epi8(0x80); - const __m128i diff = _mm_subs_epi8(b, a); - return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); - } -#endif - return _mm_cmpgt_epi8(a, b); -} + template + uint32_t TrailingZeros(T x) + { + ABSL_ASSUME(x != 0); + return static_cast(countr_zero(x)); + } + + // An abstract bitmask, such as that emitted by a SIMD instruction. + // + // Specifically, this type implements a simple bitset whose representation is + // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number + // of abstract bits in the bitset, while `Shift` is the log-base-two of the + // width of an abstract bit in the representation. + // This mask provides operations for any number of real bits set in an abstract + // bit. To add iteration on top of that, implementation must guarantee no more + // than one real bit is set in an abstract bit. + template + class NonIterableBitMask + { + public: + explicit NonIterableBitMask(T mask) : + mask_(mask) + { + } + + explicit operator bool() const + { + return this->mask_ != 0; + } + + // Returns the index of the lowest *abstract* bit set in `self`. + uint32_t LowestBitSet() const + { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Returns the index of the highest *abstract* bit set in `self`. 
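// [Editorial sketch, not part of the patch] How the Shift parameter turns real
// bit positions into "abstract" (per-slot) indices, assuming C++20 <bit> for
// the bit operations. The helper names are illustrative only.
#include <bit>
#include <cstdint>

inline unsigned LowestAbstractBit(std::uint64_t mask, unsigned shift) {
  return static_cast<unsigned>(std::countr_zero(mask)) >> shift;  // mask must be non-zero
}
inline unsigned HighestAbstractBit(std::uint64_t mask, unsigned shift) {
  return static_cast<unsigned>(std::bit_width(mask) - 1) >> shift;  // mask must be non-zero
}
// With Shift == 3 each slot owns one byte, so the 64-bit mask
// 0x0000000080800000 (0x80 in bytes 2 and 3) has abstract bits 2 and 3 set:
//   LowestAbstractBit (0x0000000080800000, 3) == 2
//   HighestAbstractBit(0x0000000080800000, 3) == 3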
+ uint32_t HighestBitSet() const + { + return static_cast((bit_width(mask_) - 1) >> Shift); + } + + // Return the number of trailing zero *abstract* bits. + uint32_t TrailingZeros() const + { + return container_internal::TrailingZeros(mask_) >> Shift; + } + + // Return the number of leading zero *abstract* bits. + uint32_t LeadingZeros() const + { + constexpr int total_significant_bits = SignificantBits << Shift; + constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; + return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; + } + + T mask_; + }; + + // Mask that can be iterable + // + // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just + // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When + // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as + // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. + // + // For example: + // for (int i : BitMask(0b101)) -> yields 0, 2 + // for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 + template + class BitMask : public NonIterableBitMask + { + using Base = NonIterableBitMask; + static_assert(std::is_unsigned::value, ""); + static_assert(Shift == 0 || Shift == 3, ""); + + public: + explicit BitMask(T mask) : + Base(mask) + { + } + // BitMask is an iterator over the indices of its abstract bits. + using value_type = int; + using iterator = BitMask; + using const_iterator = BitMask; + + BitMask& operator++() + { + this->mask_ &= (this->mask_ - 1); + return *this; + } + + uint32_t operator*() const + { + return Base::LowestBitSet(); + } + + BitMask begin() const + { + return *this; + } + BitMask end() const + { + return BitMask(0); + } + + private: + friend bool operator==(const BitMask& a, const BitMask& b) + { + return a.mask_ == b.mask_; + } + friend bool operator!=(const BitMask& a, const BitMask& b) + { + return a.mask_ != b.mask_; + } + }; + + using h2_t = uint8_t; + + // The values here are selected for maximum performance. See the static asserts + // below for details. + + // A `ctrl_t` is a single control byte, which can have one of four + // states: empty, deleted, full (which has an associated seven-bit h2_t value) + // and the sentinel. They have the following bit patterns: + // + // empty: 1 0 0 0 0 0 0 0 + // deleted: 1 1 1 1 1 1 1 0 + // full: 0 h h h h h h h // h represents the hash bits. + // sentinel: 1 1 1 1 1 1 1 1 + // + // These values are specifically tuned for SSE-flavored SIMD. + // The static_asserts below detail the source of these choices. + // + // We use an enum class so that when strict aliasing is enabled, the compiler + // knows ctrl_t doesn't alias other types. 
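// [Editorial sketch, not part of the patch] The bit patterns above, worked
// through with plain integer values rather than the enum that follows:
//
//   kEmpty    = -128 = 0b10000000
//   kDeleted  =   -2 = 0b11111110
//   kSentinel =   -1 = 0b11111111
//   full      =  h2  = 0b0hhhhhhh   (h2 in [0, 127])
//
// Every special value has the sign bit set, so "is full" is just `c >= 0`, and
// in signed order only kEmpty and kDeleted sort below kSentinel, which is what
// makes the "empty or deleted" test a single signed compare.
static_assert((-128 & -2 & -1 & 0x80) != 0,
              "all special markers share the sign bit");
static_assert(-128 < -1 && -2 < -1,
              "only empty/deleted sort below the sentinel");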
+ enum class ctrl_t : int8_t + { + kEmpty = -128, // 0b10000000 + kDeleted = -2, // 0b11111110 + kSentinel = -1, // 0b11111111 + }; + static_assert( + (static_cast(ctrl_t::kEmpty) & + static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x80) != 0, + "Special markers need to have the MSB to make checking for them efficient" + ); + static_assert( + ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, + "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " + "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient" + ); + static_assert( + ctrl_t::kSentinel == static_cast(-1), + "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " + "registers (pcmpeqd xmm, xmm)" + ); + static_assert(ctrl_t::kEmpty == static_cast(-128), "ctrl_t::kEmpty must be -128 to make the SIMD check for its " + "existence efficient (psignb xmm, xmm)"); + static_assert( + (~static_cast(ctrl_t::kEmpty) & + ~static_cast(ctrl_t::kDeleted) & + static_cast(ctrl_t::kSentinel) & 0x7F) != 0, + "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " + "shared by ctrl_t::kSentinel to make the scalar test for " + "MaskEmptyOrDeleted() efficient" + ); + static_assert(ctrl_t::kDeleted == static_cast(-2), "ctrl_t::kDeleted must be -2 to make the implementation of " + "ConvertSpecialToEmptyAndFullToDeleted efficient"); + + ABSL_DLL extern const ctrl_t kEmptyGroup[16]; + + // Returns a pointer to a control byte group that can be used by empty tables. + inline ctrl_t* EmptyGroup() + { + // Const must be cast away here; no uses of this function will actually write + // to it, because it is only used for empty tables. + return const_cast(kEmptyGroup); + } + + // Mixes a randomly generated per-process seed with `hash` and `ctrl` to + // randomize insertion order within groups. + bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); + + // Returns a per-table, hash salt, which changes on resize. This gets mixed into + // H1 to randomize iteration order per-table. + // + // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure + // non-determinism of iteration order in most cases. + inline size_t PerTableSalt(const ctrl_t* ctrl) + { + // The low bits of the pointer have little or no entropy because of + // alignment. We shift the pointer to try to use higher entropy bits. A + // good number seems to be 12 bits, because that aligns with page size. + return reinterpret_cast(ctrl) >> 12; + } + // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. + inline size_t H1(size_t hash, const ctrl_t* ctrl) + { + return (hash >> 7) ^ PerTableSalt(ctrl); + } -struct GroupSse2Impl { - static constexpr size_t kWidth = 16; // the number of slots per group + // Extracts the H2 portion of a hash: the 7 bits not used for H1. + // + // These are used as an occupied control byte. + inline h2_t H2(size_t hash) + { + return hash & 0x7F; + } - explicit GroupSse2Impl(const ctrl_t* pos) { - ctrl = _mm_loadu_si128(reinterpret_cast(pos)); - } + // Helpers for checking the state of a control byte. + inline bool IsEmpty(ctrl_t c) + { + return c == ctrl_t::kEmpty; + } + inline bool IsFull(ctrl_t c) + { + return c >= static_cast(0); + } + inline bool IsDeleted(ctrl_t c) + { + return c == ctrl_t::kDeleted; + } + inline bool IsEmptyOrDeleted(ctrl_t c) + { + return c < ctrl_t::kSentinel; + } - // Returns a bitmask representing the positions of slots that match hash. 
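// [Editorial sketch, not part of the patch] The whole-group match in
// isolation: broadcast the 7-bit tag into every lane, byte-compare it against
// 16 control bytes at once, and collapse the result into a 16-bit candidate
// mask (bit i set <=> slot i is a candidate). Requires SSE2; names are
// illustrative only.
#include <emmintrin.h>
#include <cstdint>

inline std::uint16_t MatchGroupSketch(const std::int8_t* group16, std::uint8_t h2) {
  const __m128i ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(group16));
  const __m128i tag  = _mm_set1_epi8(static_cast<char>(h2));  // h2 in every lane
  const __m128i eq   = _mm_cmpeq_epi8(tag, ctrl);             // 0xFF where equal
  return static_cast<std::uint16_t>(_mm_movemask_epi8(eq));   // one bit per lane
}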
- BitMask Match(h2_t hash) const { - auto match = _mm_set1_epi8(hash); - return BitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); - } +#ifdef ABSL_INTERNAL_HAVE_SSE2 + // Quick reference guide for intrinsics used below: + // + // * __m128i: An XMM (128-bit) word. + // + // * _mm_setzero_si128: Returns a zero vector. + // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. + // + // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. + // * _mm_and_si128: Ands two i128s together. + // * _mm_or_si128: Ors two i128s together. + // * _mm_andnot_si128: And-nots two i128s together. + // + // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, + // filling each lane with 0x00 or 0xff. + // * _mm_cmpgt_epi8: Same as above, but using > rather than ==. + // + // * _mm_loadu_si128: Performs an unaligned load of an i128. + // * _mm_storeu_si128: Performs an unaligned store of an i128. + // + // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first + // argument if the corresponding lane of the second + // argument is positive, negative, or zero, respectively. + // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a + // bitmask consisting of those bits. + // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low + // four bits of each i8 lane in the second argument as + // indices. + + // https://github.com/abseil/abseil-cpp/issues/209 + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 + // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char + // Work around this by using the portable implementation of Group + // when using -funsigned-char under GCC. + inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) + { +#if defined(__GNUC__) && !defined(__clang__) + if (std::is_unsigned::value) + { + const __m128i mask = _mm_set1_epi8(0x80); + const __m128i diff = _mm_subs_epi8(b, a); + return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); + } +#endif + return _mm_cmpgt_epi8(a, b); + } - // Returns a bitmask representing the positions of empty slots. - NonIterableBitMask MaskEmpty() const { + struct GroupSse2Impl + { + static constexpr size_t kWidth = 16; // the number of slots per group + + explicit GroupSse2Impl(const ctrl_t* pos) + { + ctrl = _mm_loadu_si128(reinterpret_cast(pos)); + } + + // Returns a bitmask representing the positions of slots that match hash. + BitMask Match(h2_t hash) const + { + auto match = _mm_set1_epi8(hash); + return BitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))) + ); + } + + // Returns a bitmask representing the positions of empty slots. + NonIterableBitMask MaskEmpty() const + { #ifdef ABSL_INTERNAL_HAVE_SSSE3 - // This only works because ctrl_t::kEmpty is -128. - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); + // This only works because ctrl_t::kEmpty is -128. + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))) + ); #else - auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); - return NonIterableBitMask( - static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); + auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); + return NonIterableBitMask( + static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))) + ); #endif - } - - // Returns a bitmask representing the positions of empty or deleted slots. 
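// [Editorial sketch, not part of the patch] Why a single signed compare
// against the sentinel isolates empty/deleted bytes: in signed int8 order,
// kEmpty (-128) and kDeleted (-2) are the only control values below
// kSentinel (-1), while full bytes are >= 0. A scalar model of the predicate
// that the SIMD version computes for a whole group at once:
#include <cstdint>

inline bool IsEmptyOrDeletedSketch(std::int8_t c) {
  return c < -1;  // true for -128 and -2; false for -1 and for full bytes (>= 0)
}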
- NonIterableBitMask MaskEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return NonIterableBitMask(static_cast( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); - } - - // Returns the number of trailing empty or deleted elements in the group. - uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return TrailingZeros(static_cast( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); - } - - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - auto msbs = _mm_set1_epi8(static_cast(-128)); - auto x126 = _mm_set1_epi8(126); + } + + // Returns a bitmask representing the positions of empty or deleted slots. + NonIterableBitMask MaskEmptyOrDeleted() const + { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return NonIterableBitMask(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + )); + } + + // Returns the number of trailing empty or deleted elements in the group. + uint32_t CountLeadingEmptyOrDeleted() const + { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); + return TrailingZeros(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1 + )); + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + auto msbs = _mm_set1_epi8(static_cast(-128)); + auto x126 = _mm_set1_epi8(126); #ifdef ABSL_INTERNAL_HAVE_SSSE3 - auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); + auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else - auto zero = _mm_setzero_si128(); - auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); - auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); + auto zero = _mm_setzero_si128(); + auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); + auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); #endif - _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); - } + _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); + } - __m128i ctrl; -}; + __m128i ctrl; + }; #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) -struct GroupAArch64Impl { - static constexpr size_t kWidth = 8; - - explicit GroupAArch64Impl(const ctrl_t* pos) { - ctrl = vld1_u8(reinterpret_cast(pos)); - } - - BitMask Match(h2_t hash) const { - uint8x8_t dup = vdup_n_u8(hash); - auto mask = vceq_u8(ctrl, dup); - constexpr uint64_t msbs = 0x8080808080808080ULL; - return BitMask( - vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); - } - - NonIterableBitMask MaskEmpty() const { - uint64_t mask = - vget_lane_u64(vreinterpret_u64_u8( - vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), - vreinterpret_s8_u8(ctrl))), - 0); - return NonIterableBitMask(mask); - } - - NonIterableBitMask MaskEmptyOrDeleted() const { - uint64_t mask = - vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( - vdup_n_s8(static_cast(ctrl_t::kSentinel)), - vreinterpret_s8_u8(ctrl))), - 0); - return NonIterableBitMask(mask); - } - - uint32_t CountLeadingEmptyOrDeleted() const { - uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); - // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and - // kDeleted. We lower all other bits and count number of trailing zeros. - // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, - // so we should be fine. 
- constexpr uint64_t bits = 0x0101010101010101ULL; - return countr_zero((mask | ~(mask >> 7)) & bits) >> 3; - } - - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = mask & msbs; - auto res = (~x + (x >> 7)) & ~lsbs; - little_endian::Store64(dst, res); - } - - uint8x8_t ctrl; -}; + struct GroupAArch64Impl + { + static constexpr size_t kWidth = 8; + + explicit GroupAArch64Impl(const ctrl_t* pos) + { + ctrl = vld1_u8(reinterpret_cast(pos)); + } + + BitMask Match(h2_t hash) const + { + uint8x8_t dup = vdup_n_u8(hash); + auto mask = vceq_u8(ctrl, dup); + constexpr uint64_t msbs = 0x8080808080808080ULL; + return BitMask( + vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs + ); + } + + NonIterableBitMask MaskEmpty() const + { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), vreinterpret_s8_u8(ctrl))), 0); + return NonIterableBitMask(mask); + } + + NonIterableBitMask MaskEmptyOrDeleted() const + { + uint64_t mask = + vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(vdup_n_s8(static_cast(ctrl_t::kSentinel)), vreinterpret_s8_u8(ctrl))), 0); + return NonIterableBitMask(mask); + } + + uint32_t CountLeadingEmptyOrDeleted() const + { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. + // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, + // so we should be fine. + constexpr uint64_t bits = 0x0101010101010101ULL; + return countr_zero((mask | ~(mask >> 7)) & bits) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = mask & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint8x8_t ctrl; + }; #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN -struct GroupPortableImpl { - static constexpr size_t kWidth = 8; - - explicit GroupPortableImpl(const ctrl_t* pos) - : ctrl(little_endian::Load64(pos)) {} - - BitMask Match(h2_t hash) const { - // For the technique, see: - // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord - // (Determine if a word has a byte equal to n). 
- // - // Caveat: there are false positives but: - // - they only occur if there is a real match - // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel - // - they will be handled gracefully by subsequent checks in code - // - // Example: - // v = 0x1716151413121110 - // hash = 0x12 - // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = ctrl ^ (lsbs * hash); - return BitMask((x - lsbs) & ~x & msbs); - } - - NonIterableBitMask MaskEmpty() const { - constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 6)) & - msbs); - } - - NonIterableBitMask MaskEmptyOrDeleted() const { - constexpr uint64_t msbs = 0x8080808080808080ULL; - return NonIterableBitMask((ctrl & (~ctrl << 7)) & - msbs); - } - - uint32_t CountLeadingEmptyOrDeleted() const { - // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and - // kDeleted. We lower all other bits and count number of trailing zeros. - constexpr uint64_t bits = 0x0101010101010101ULL; - return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3; - } - - void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { - constexpr uint64_t msbs = 0x8080808080808080ULL; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = ctrl & msbs; - auto res = (~x + (x >> 7)) & ~lsbs; - little_endian::Store64(dst, res); - } - - uint64_t ctrl; -}; + struct GroupPortableImpl + { + static constexpr size_t kWidth = 8; + + explicit GroupPortableImpl(const ctrl_t* pos) : + ctrl(little_endian::Load64(pos)) + { + } + + BitMask Match(h2_t hash) const + { + // For the technique, see: + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + // (Determine if a word has a byte equal to n). + // + // Caveat: there are false positives but: + // - they only occur if there is a real match + // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel + // - they will be handled gracefully by subsequent checks in code + // + // Example: + // v = 0x1716151413121110 + // hash = 0x12 + // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + return BitMask((x - lsbs) & ~x & msbs); + } + + NonIterableBitMask MaskEmpty() const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 6)) & msbs); + } + + NonIterableBitMask MaskEmptyOrDeleted() const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + return NonIterableBitMask((ctrl & (~ctrl << 7)) & msbs); + } + + uint32_t CountLeadingEmptyOrDeleted() const + { + // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and + // kDeleted. We lower all other bits and count number of trailing zeros. 
+ constexpr uint64_t bits = 0x0101010101010101ULL; + return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3; + } + + void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const + { + constexpr uint64_t msbs = 0x8080808080808080ULL; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl & msbs; + auto res = (~x + (x >> 7)) & ~lsbs; + little_endian::Store64(dst, res); + } + + uint64_t ctrl; + }; #ifdef ABSL_INTERNAL_HAVE_SSE2 -using Group = GroupSse2Impl; + using Group = GroupSse2Impl; #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) -using Group = GroupAArch64Impl; + using Group = GroupAArch64Impl; #else -using Group = GroupPortableImpl; + using Group = GroupPortableImpl; #endif -// Returns he number of "cloned control bytes". -// -// This is the number of control bytes that are present both at the beginning -// of the control byte array and at the end, such that we can create a -// `Group::kWidth`-width probe window starting from any control byte. -constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } + // Returns he number of "cloned control bytes". + // + // This is the number of control bytes that are present both at the beginning + // of the control byte array and at the end, such that we can create a + // `Group::kWidth`-width probe window starting from any control byte. + constexpr size_t NumClonedBytes() + { + return Group::kWidth - 1; + } -template -class raw_hash_set; + template + class raw_hash_set; -// Returns whether `n` is a valid capacity (i.e., number of slots). -// -// A valid capacity is a non-zero integer `2^m - 1`. -inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } - -// Applies the following mapping to every byte in the control array: -// * kDeleted -> kEmpty -// * kEmpty -> kEmpty -// * _ -> kDeleted -// PRECONDITION: -// IsValidCapacity(capacity) -// ctrl[capacity] == ctrl_t::kSentinel -// ctrl[i] != ctrl_t::kSentinel for all i < capacity -void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); - -// Converts `n` into the next valid capacity, per `IsValidCapacity`. -inline size_t NormalizeCapacity(size_t n) { - return n ? ~size_t{} >> countl_zero(n) : 1; -} - -// General notes on capacity/growth methods below: -// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an -// average of two empty slots per group. -// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. -// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we -// never need to probe (the whole table fits in one group) so we don't need a -// load factor less than 1. - -// Given `capacity`, applies the load factor; i.e., it returns the maximum -// number of values we should put into the table before a resizing rehash. -inline size_t CapacityToGrowth(size_t capacity) { - assert(IsValidCapacity(capacity)); - // `capacity*7/8` - if (Group::kWidth == 8 && capacity == 7) { - // x-x/8 does not work when x==7. - return 6; - } - return capacity - capacity / 8; -} - -// Given `growth`, "unapplies" the load factor to find how large the capacity -// should be to stay within the load factor. -// -// This might not be a valid capacity and `NormalizeCapacity()` should be -// called on this. -inline size_t GrowthToLowerboundCapacity(size_t growth) { - // `growth*8/7` - if (Group::kWidth == 8 && growth == 7) { - // x+(x-1)/7 does not work when x==7. 
- return 8; - } - return growth + static_cast((static_cast(growth) - 1) / 7); -} - -template -size_t SelectBucketCountForIterRange(InputIter first, InputIter last, - size_t bucket_count) { - if (bucket_count != 0) { - return bucket_count; - } - using InputIterCategory = - typename std::iterator_traits::iterator_category; - if (std::is_base_of::value) { - return GrowthToLowerboundCapacity( - static_cast(std::distance(first, last))); - } - return 0; -} + // Returns whether `n` is a valid capacity (i.e., number of slots). + // + // A valid capacity is a non-zero integer `2^m - 1`. + inline bool IsValidCapacity(size_t n) + { + return ((n + 1) & n) == 0 && n > 0; + } + + // Applies the following mapping to every byte in the control array: + // * kDeleted -> kEmpty + // * kEmpty -> kEmpty + // * _ -> kDeleted + // PRECONDITION: + // IsValidCapacity(capacity) + // ctrl[capacity] == ctrl_t::kSentinel + // ctrl[i] != ctrl_t::kSentinel for all i < capacity + void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); + + // Converts `n` into the next valid capacity, per `IsValidCapacity`. + inline size_t NormalizeCapacity(size_t n) + { + return n ? ~size_t{} >> countl_zero(n) : 1; + } + + // General notes on capacity/growth methods below: + // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an + // average of two empty slots per group. + // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. + // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we + // never need to probe (the whole table fits in one group) so we don't need a + // load factor less than 1. + + // Given `capacity`, applies the load factor; i.e., it returns the maximum + // number of values we should put into the table before a resizing rehash. + inline size_t CapacityToGrowth(size_t capacity) + { + assert(IsValidCapacity(capacity)); + // `capacity*7/8` + if (Group::kWidth == 8 && capacity == 7) + { + // x-x/8 does not work when x==7. + return 6; + } + return capacity - capacity / 8; + } + + // Given `growth`, "unapplies" the load factor to find how large the capacity + // should be to stay within the load factor. + // + // This might not be a valid capacity and `NormalizeCapacity()` should be + // called on this. + inline size_t GrowthToLowerboundCapacity(size_t growth) + { + // `growth*8/7` + if (Group::kWidth == 8 && growth == 7) + { + // x+(x-1)/7 does not work when x==7. + return 8; + } + return growth + static_cast((static_cast(growth) - 1) / 7); + } + + template + size_t SelectBucketCountForIterRange(InputIter first, InputIter last, size_t bucket_count) + { + if (bucket_count != 0) + { + return bucket_count; + } + using InputIterCategory = + typename std::iterator_traits::iterator_category; + if (std::is_base_of::value) + { + return GrowthToLowerboundCapacity( + static_cast(std::distance(first, last)) + ); + } + return 0; + } #define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \ - ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) - -inline void AssertIsValid(ctrl_t* ctrl) { - ABSL_HARDENING_ASSERT( - (ctrl == nullptr || IsFull(*ctrl)) && - "Invalid operation on iterator. The element might have " - "been erased, the table might have rehashed, or this may " - "be an end() iterator."); -} - -struct FindInfo { - size_t offset; - size_t probe_length; -}; - -// Whether a table is "small". A small table fits entirely into a probing -// group, i.e., has a capacity < `Group::kWidth`. -// -// In small mode we are able to use the whole capacity. 
The extra control -// bytes give us at least one "empty" control byte to stop the iteration. -// This is important to make 1 a valid capacity. -// -// In small mode only the first `capacity` control bytes after the sentinel -// are valid. The rest contain dummy ctrl_t::kEmpty values that do not -// represent a real slot. This is important to take into account on -// `find_first_non_full()`, where we never try -// `ShouldInsertBackwards()` for small tables. -inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } - -// Begins a probing operation on `ctrl`, using `hash`. -inline probe_seq probe(const ctrl_t* ctrl, size_t hash, - size_t capacity) { - return probe_seq(H1(hash, ctrl), capacity); -} - -// Probes an array of control bits using a probe sequence derived from `hash`, -// and returns the offset corresponding to the first deleted or empty slot. -// -// Behavior when the entire table is full is undefined. -// -// NOTE: this function must work with tables having both empty and deleted -// slots in the same group. Such tables appear during `erase()`. -template -inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, - size_t capacity) { - auto seq = probe(ctrl, hash, capacity); - while (true) { - Group g{ctrl + seq.offset()}; - auto mask = g.MaskEmptyOrDeleted(); - if (mask) { + ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) + + inline void AssertIsValid(ctrl_t* ctrl) + { + ABSL_HARDENING_ASSERT( + (ctrl == nullptr || IsFull(*ctrl)) && + "Invalid operation on iterator. The element might have " + "been erased, the table might have rehashed, or this may " + "be an end() iterator." + ); + } + + struct FindInfo + { + size_t offset; + size_t probe_length; + }; + + // Whether a table is "small". A small table fits entirely into a probing + // group, i.e., has a capacity < `Group::kWidth`. + // + // In small mode we are able to use the whole capacity. The extra control + // bytes give us at least one "empty" control byte to stop the iteration. + // This is important to make 1 a valid capacity. + // + // In small mode only the first `capacity` control bytes after the sentinel + // are valid. The rest contain dummy ctrl_t::kEmpty values that do not + // represent a real slot. This is important to take into account on + // `find_first_non_full()`, where we never try + // `ShouldInsertBackwards()` for small tables. + inline bool is_small(size_t capacity) + { + return capacity < Group::kWidth - 1; + } + + // Begins a probing operation on `ctrl`, using `hash`. + inline probe_seq probe(const ctrl_t* ctrl, size_t hash, size_t capacity) + { + return probe_seq(H1(hash, ctrl), capacity); + } + + // Probes an array of control bits using a probe sequence derived from `hash`, + // and returns the offset corresponding to the first deleted or empty slot. + // + // Behavior when the entire table is full is undefined. + // + // NOTE: this function must work with tables having both empty and deleted + // slots in the same group. Such tables appear during `erase()`. + template + inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, size_t capacity) + { + auto seq = probe(ctrl, hash, capacity); + while (true) + { + Group g{ctrl + seq.offset()}; + auto mask = g.MaskEmptyOrDeleted(); + if (mask) + { #if !defined(NDEBUG) - // We want to add entropy even when ASLR is not enabled. - // In debug build we will randomly insert in either the front or back of - // the group. 
- // TODO(kfm,sbenza): revisit after we do unconditional mixing - if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { - return {seq.offset(mask.HighestBitSet()), seq.index()}; - } + // We want to add entropy even when ASLR is not enabled. + // In debug build we will randomly insert in either the front or back of + // the group. + // TODO(kfm,sbenza): revisit after we do unconditional mixing + if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) + { + return {seq.offset(mask.HighestBitSet()), seq.index()}; + } #endif - return {seq.offset(mask.LowestBitSet()), seq.index()}; - } - seq.next(); - assert(seq.index() <= capacity && "full table!"); - } -} - -// Extern template for inline function keep possibility of inlining. -// When compiler decided to not inline, no symbols will be added to the -// corresponding translation unit. -extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); - -// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire -// array as marked as empty. -inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, - size_t slot_size) { - std::memset(ctrl, static_cast(ctrl_t::kEmpty), - capacity + 1 + NumClonedBytes()); - ctrl[capacity] = ctrl_t::kSentinel; - SanitizerPoisonMemoryRegion(slot, slot_size * capacity); -} - -// Sets `ctrl[i]` to `h`. -// -// Unlike setting it directly, this function will perform bounds checks and -// mirror the value to the cloned tail if necessary. -inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, - const void* slot, size_t slot_size) { - assert(i < capacity); - - auto* slot_i = static_cast(slot) + i * slot_size; - if (IsFull(h)) { - SanitizerUnpoisonMemoryRegion(slot_i, slot_size); - } else { - SanitizerPoisonMemoryRegion(slot_i, slot_size); - } - - ctrl[i] = h; - ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; -} - -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. -inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, - const void* slot, size_t slot_size) { - SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); -} - -// Given the capacity of a table, computes the offset (from the start of the -// backing allocation) at which the slots begin. -inline size_t SlotOffset(size_t capacity, size_t slot_align) { - assert(IsValidCapacity(capacity)); - const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); - return (num_control_bytes + slot_align - 1) & (~slot_align + 1); -} - -// Given the capacity of a table, computes the total size of the backing -// array. -inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { - return SlotOffset(capacity, slot_align) + capacity * slot_size; -} - -// A SwissTable. -// -// Policy: a policy defines how to perform different operations on -// the slots of the hashtable (see hash_policy_traits.h for the full interface -// of policy). -// -// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The -// functor should accept a key and return size_t as hash. For best performance -// it is important that the hash function provides high entropy across all bits -// of the hash. -// -// Eq: a (possibly polymorphic) functor that compares two keys for equality. It -// should accept two (of possibly different type) keys and return a bool: true -// if they are equal, false if they are not. If two keys compare equal, then -// their hash values as defined by Hash MUST be equal. 
-// -// Allocator: an Allocator -// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which -// the storage of the hashtable will be allocated and the elements will be -// constructed and destroyed. -template -class raw_hash_set { - using PolicyTraits = hash_policy_traits; - using KeyArgImpl = - KeyArg::value && IsTransparent::value>; - - public: - using init_type = typename PolicyTraits::init_type; - using key_type = typename PolicyTraits::key_type; - // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user - // code fixes! - using slot_type = typename PolicyTraits::slot_type; - using allocator_type = Alloc; - using size_type = size_t; - using difference_type = ptrdiff_t; - using hasher = Hash; - using key_equal = Eq; - using policy_type = Policy; - using value_type = typename PolicyTraits::value_type; - using reference = value_type&; - using const_reference = const value_type&; - using pointer = typename absl::allocator_traits< - allocator_type>::template rebind_traits::pointer; - using const_pointer = typename absl::allocator_traits< - allocator_type>::template rebind_traits::const_pointer; - - // Alias used for heterogeneous lookup functions. - // `key_arg` evaluates to `K` when the functors are transparent and to - // `key_type` otherwise. It permits template argument deduction on `K` for the - // transparent case. - template - using key_arg = typename KeyArgImpl::template type; - - private: - // Give an early error when key_type is not hashable/eq. - auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); - auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); - - using AllocTraits = absl::allocator_traits; - using SlotAlloc = typename absl::allocator_traits< - allocator_type>::template rebind_alloc; - using SlotAllocTraits = typename absl::allocator_traits< - allocator_type>::template rebind_traits; - - static_assert(std::is_lvalue_reference::value, - "Policy::element() must return a reference"); - - template - struct SameAsElementReference - : std::is_same::type>::type, - typename std::remove_cv< - typename std::remove_reference::type>::type> {}; - - // An enabler for insert(T&&): T must be convertible to init_type or be the - // same as [cv] value_type [ref]. - // Note: we separate SameAsElementReference into its own type to avoid using - // reference unless we need to. MSVC doesn't seem to like it in some - // cases. - template - using RequiresInsertable = typename std::enable_if< - absl::disjunction, - SameAsElementReference>::value, - int>::type; - - // RequiresNotInit is a workaround for gcc prior to 7.1. - // See https://godbolt.org/g/Y4xsUh. - template - using RequiresNotInit = - typename std::enable_if::value, int>::type; - - template - using IsDecomposable = IsDecomposable; - - public: - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - static_assert(std::is_same::value, - "Allocators with custom pointer types are not supported"); - - class iterator { - friend class raw_hash_set; - - public: - using iterator_category = std::forward_iterator_tag; - using value_type = typename raw_hash_set::value_type; - using reference = - absl::conditional_t; - using pointer = absl::remove_reference_t*; - using difference_type = typename raw_hash_set::difference_type; - - iterator() {} - - // PRECONDITION: not an end() iterator. 
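// [Editorial sketch, not part of the patch] The usage pattern this
// precondition is protecting against, shown with absl::flat_hash_set as the
// public wrapper: never dereference an iterator that may have been
// invalidated by a mutating operation; look the element up again instead.
#include "absl/container/flat_hash_set.h"

inline void SafeIteratorUseSketch(absl::flat_hash_set<int>& s) {
  s.insert(4);          // may rehash; any previously obtained iterator is now suspect
  auto it = s.find(2);  // re-acquire rather than reusing a stale iterator
  if (it != s.end()) {
    (void)*it;          // precondition holds: `it` refers to a full slot
  }
}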
- reference operator*() const { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator*() called on invalid iterator."); - return PolicyTraits::element(slot_); - } - - // PRECONDITION: not an end() iterator. - pointer operator->() const { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator-> called on invalid iterator."); - return &operator*(); - } - - // PRECONDITION: not an end() iterator. - iterator& operator++() { - ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, - "operator++ called on invalid iterator."); - ++ctrl_; - ++slot_; - skip_empty_or_deleted(); - return *this; - } - // PRECONDITION: not an end() iterator. - iterator operator++(int) { - auto tmp = *this; - ++*this; - return tmp; - } - - friend bool operator==(const iterator& a, const iterator& b) { - AssertIsValid(a.ctrl_); - AssertIsValid(b.ctrl_); - return a.ctrl_ == b.ctrl_; - } - friend bool operator!=(const iterator& a, const iterator& b) { - return !(a == b); - } - - private: - iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { - // This assumption helps the compiler know that any non-end iterator is - // not equal to any end iterator. - ABSL_ASSUME(ctrl != nullptr); - } - - // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until - // they reach one. - // - // If a sentinel is reached, we null both of them out instead. - void skip_empty_or_deleted() { - while (IsEmptyOrDeleted(*ctrl_)) { - uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); - ctrl_ += shift; - slot_ += shift; - } - if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; - } - - ctrl_t* ctrl_ = nullptr; - // To avoid uninitialized member warnings, put slot_ in an anonymous union. - // The member is not initialized on singleton and end iterators. - union { - slot_type* slot_; - }; - }; - - class const_iterator { - friend class raw_hash_set; - - public: - using iterator_category = typename iterator::iterator_category; - using value_type = typename raw_hash_set::value_type; - using reference = typename raw_hash_set::const_reference; - using pointer = typename raw_hash_set::const_pointer; - using difference_type = typename raw_hash_set::difference_type; - - const_iterator() {} - // Implicit construction from iterator. 
- const_iterator(iterator i) : inner_(std::move(i)) {} - - reference operator*() const { return *inner_; } - pointer operator->() const { return inner_.operator->(); } - - const_iterator& operator++() { - ++inner_; - return *this; - } - const_iterator operator++(int) { return inner_++; } - - friend bool operator==(const const_iterator& a, const const_iterator& b) { - return a.inner_ == b.inner_; - } - friend bool operator!=(const const_iterator& a, const const_iterator& b) { - return !(a == b); - } - - private: - const_iterator(const ctrl_t* ctrl, const slot_type* slot) - : inner_(const_cast(ctrl), const_cast(slot)) {} - - iterator inner_; - }; - - using node_type = node_handle, Alloc>; - using insert_return_type = InsertReturnType; - - raw_hash_set() noexcept( - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value&& - std::is_nothrow_default_constructible::value) {} - - explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), - const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : ctrl_(EmptyGroup()), - settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { - if (bucket_count) { - capacity_ = NormalizeCapacity(bucket_count); - initialize_slots(); - } - } - - raw_hash_set(size_t bucket_count, const hasher& hash, - const allocator_type& alloc) - : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} - - raw_hash_set(size_t bucket_count, const allocator_type& alloc) - : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} - - explicit raw_hash_set(const allocator_type& alloc) - : raw_hash_set(0, hasher(), key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, - const hasher& hash = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), - hash, eq, alloc) { - insert(first, last); - } - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_count, - const hasher& hash, const allocator_type& alloc) - : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, size_t bucket_count, - const allocator_type& alloc) - : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} - - template - raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) - : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} - - // Instead of accepting std::initializer_list as the first - // argument like std::unordered_set does, we have two overloads - // that accept std::initializer_list and std::initializer_list. - // This is advantageous for performance. - // - // // Turns {"abc", "def"} into std::initializer_list, then - // // copies the strings into the set. - // std::unordered_set s = {"abc", "def"}; - // - // // Turns {"abc", "def"} into std::initializer_list, then - // // copies the strings into the set. - // absl::flat_hash_set s = {"abc", "def"}; - // - // The same trick is used in insert(). - // - // The enabler is necessary to prevent this constructor from triggering where - // the copy constructor is meant to be called. - // - // absl::flat_hash_set a, b{a}; - // - // RequiresNotInit is a workaround for gcc prior to 7.1. 
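// [Editorial sketch, not part of the patch] What the two initializer_list
// overload families buy in practice, shown with absl::flat_hash_set as the
// public wrapper.
#include <string>
#include "absl/container/flat_hash_set.h"

inline void InitListSketch() {
  // Deduces std::initializer_list<const char*>: each literal is converted to
  // std::string directly inside the set, with no intermediate list of
  // std::string temporaries.
  absl::flat_hash_set<std::string> s = {"abc", "def"};

  // The enablers keep brace construction from another set routed to the copy
  // constructor rather than the initializer_list overloads.
  absl::flat_hash_set<std::string> t{s};
  (void)t;
}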
- template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_count = 0, - const hasher& hash = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_count = 0, - const hasher& hash = hasher(), const key_equal& eq = key_equal(), - const allocator_type& alloc = allocator_type()) - : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_count, - const hasher& hash, const allocator_type& alloc) - : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_count, - const hasher& hash, const allocator_type& alloc) - : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, size_t bucket_count, - const allocator_type& alloc) - : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, size_t bucket_count, - const allocator_type& alloc) - : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} - - template = 0, RequiresInsertable = 0> - raw_hash_set(std::initializer_list init, const allocator_type& alloc) - : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - raw_hash_set(std::initializer_list init, - const allocator_type& alloc) - : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} - - raw_hash_set(const raw_hash_set& that) - : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( - that.alloc_ref())) {} - - raw_hash_set(const raw_hash_set& that, const allocator_type& a) - : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { - reserve(that.size()); - // Because the table is guaranteed to be empty, we can do something faster - // than a full `insert`. - for (const auto& v : that) { - const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); - auto target = find_first_non_full(ctrl_, hash, capacity_); - SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, - sizeof(slot_type)); - emplace_at(target.offset, v); - infoz().RecordInsert(hash, target.probe_length); - } - size_ = that.size(); - growth_left() -= that.size(); - } - - raw_hash_set(raw_hash_set&& that) noexcept( - std::is_nothrow_copy_constructible::value&& - std::is_nothrow_copy_constructible::value&& - std::is_nothrow_copy_constructible::value) - : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), - slots_(absl::exchange(that.slots_, nullptr)), - size_(absl::exchange(that.size_, 0)), - capacity_(absl::exchange(that.capacity_, 0)), - // Hash, equality and allocator are copied instead of moved because - // `that` must be left valid. If Hash is std::function, moving it - // would create a nullptr functor that cannot be called. 
- settings_(absl::exchange(that.growth_left(), 0), - absl::exchange(that.infoz(), HashtablezInfoHandle()), - that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} - - raw_hash_set(raw_hash_set&& that, const allocator_type& a) - : ctrl_(EmptyGroup()), - slots_(nullptr), - size_(0), - capacity_(0), - settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), - a) { - if (a == that.alloc_ref()) { - std::swap(ctrl_, that.ctrl_); - std::swap(slots_, that.slots_); - std::swap(size_, that.size_); - std::swap(capacity_, that.capacity_); - std::swap(growth_left(), that.growth_left()); - std::swap(infoz(), that.infoz()); - } else { - reserve(that.size()); - // Note: this will copy elements of dense_set and unordered_set instead of - // moving them. This can be fixed if it ever becomes an issue. - for (auto& elem : that) insert(std::move(elem)); - } - } - - raw_hash_set& operator=(const raw_hash_set& that) { - raw_hash_set tmp(that, - AllocTraits::propagate_on_container_copy_assignment::value - ? that.alloc_ref() - : alloc_ref()); - swap(tmp); - return *this; - } - - raw_hash_set& operator=(raw_hash_set&& that) noexcept( - absl::allocator_traits::is_always_equal::value&& - std::is_nothrow_move_assignable::value&& - std::is_nothrow_move_assignable::value) { - // TODO(sbenza): We should only use the operations from the noexcept clause - // to make sure we actually adhere to that contract. - return move_assign( - std::move(that), - typename AllocTraits::propagate_on_container_move_assignment()); - } - - ~raw_hash_set() { destroy_slots(); } - - iterator begin() { - auto it = iterator_at(0); - it.skip_empty_or_deleted(); - return it; - } - iterator end() { return {}; } - - const_iterator begin() const { - return const_cast(this)->begin(); - } - const_iterator end() const { return {}; } - const_iterator cbegin() const { return begin(); } - const_iterator cend() const { return end(); } - - bool empty() const { return !size(); } - size_t size() const { return size_; } - size_t capacity() const { return capacity_; } - size_t max_size() const { return (std::numeric_limits::max)(); } - - ABSL_ATTRIBUTE_REINITIALIZES void clear() { - // Iterating over this container is O(bucket_count()). When bucket_count() - // is much greater than size(), iteration becomes prohibitively expensive. - // For clear() it is more important to reuse the allocated array when the - // container is small because allocation takes comparatively long time - // compared to destruction of the elements of the container. So we pick the - // largest bucket_count() threshold for which iteration is still fast and - // past that we simply deallocate the array. - if (capacity_ > 127) { - destroy_slots(); - - infoz().RecordClearedReservation(); - } else if (capacity_) { - for (size_t i = 0; i != capacity_; ++i) { - if (IsFull(ctrl_[i])) { - PolicyTraits::destroy(&alloc_ref(), slots_ + i); + return {seq.offset(mask.LowestBitSet()), seq.index()}; + } + seq.next(); + assert(seq.index() <= capacity && "full table!"); + } + } + + // Extern template for inline function keep possibility of inlining. + // When compiler decided to not inline, no symbols will be added to the + // corresponding translation unit. + extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); + + // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire + // array as marked as empty. 
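// [Editorial sketch, not part of the patch] The control-array layout that
// ResetCtrl re-establishes, worked through for capacity 15 with a 16-wide
// group:
//
//   index:  0 .. 14       15          16 .. 30
//   value:  kEmpty x 15   kSentinel   kEmpty x 15   (the cloned tail)
//
// i.e. capacity + 1 + NumClonedBytes() = 15 + 1 + 15 = 31 control bytes, so a
// 16-wide probe window starting at any real slot index stays in bounds.
#include <cstddef>

constexpr std::size_t ControlBytesSketch(std::size_t capacity, std::size_t group_width) {
  return capacity + 1 + (group_width - 1);  // slots + sentinel + cloned tail
}
static_assert(ControlBytesSketch(15, 16) == 31, "matches the worked example above");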
+ inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) + { + std::memset(ctrl, static_cast(ctrl_t::kEmpty), capacity + 1 + NumClonedBytes()); + ctrl[capacity] = ctrl_t::kSentinel; + SanitizerPoisonMemoryRegion(slot, slot_size * capacity); } - } - size_ = 0; - ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); - reset_growth_left(); - } - assert(empty()); - infoz().RecordStorageChanged(0, capacity_); - } - - // This overload kicks in when the argument is an rvalue of insertable and - // decomposable type other than init_type. - // - // flat_hash_map m; - // m.insert(std::make_pair("abc", 42)); - // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc - // bug. - template = 0, class T2 = T, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - std::pair insert(T&& value) { - return emplace(std::forward(value)); - } - - // This overload kicks in when the argument is a bitfield or an lvalue of - // insertable and decomposable type. - // - // union { int n : 1; }; - // flat_hash_set s; - // s.insert(n); - // - // flat_hash_set s; - // const char* p = "hello"; - // s.insert(p); - // - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. - template < - class T, RequiresInsertable = 0, - typename std::enable_if::value, int>::type = 0> - std::pair insert(const T& value) { - return emplace(value); - } - - // This overload kicks in when the argument is an rvalue of init_type. Its - // purpose is to handle brace-init-list arguments. - // - // flat_hash_map s; - // s.insert({"abc", 42}); - std::pair insert(init_type&& value) { - return emplace(std::move(value)); - } - - // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc - // bug. - template = 0, class T2 = T, - typename std::enable_if::value, int>::type = 0, - T* = nullptr> - iterator insert(const_iterator, T&& value) { - return insert(std::forward(value)).first; - } - - // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace - // RequiresInsertable with RequiresInsertable. - // We are hitting this bug: https://godbolt.org/g/1Vht4f. - template < - class T, RequiresInsertable = 0, - typename std::enable_if::value, int>::type = 0> - iterator insert(const_iterator, const T& value) { - return insert(value).first; - } - - iterator insert(const_iterator, init_type&& value) { - return insert(std::move(value)).first; - } - - template - void insert(InputIt first, InputIt last) { - for (; first != last; ++first) emplace(*first); - } - - template = 0, RequiresInsertable = 0> - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - void insert(std::initializer_list ilist) { - insert(ilist.begin(), ilist.end()); - } - - insert_return_type insert(node_type&& node) { - if (!node) return {end(), false, node_type()}; - const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); - auto res = PolicyTraits::apply( - InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, - elem); - if (res.second) { - CommonAccess::Reset(&node); - return {res.first, true, node_type()}; - } else { - return {res.first, false, std::move(node)}; - } - } - - iterator insert(const_iterator, node_type&& node) { - auto res = insert(std::move(node)); - node = std::move(res.node); - return res.position; - } - - // This overload kicks in if we can deduce the key from args. 
This enables us - // to avoid constructing value_type if an entry with the same key already - // exists. - // - // For example: - // - // flat_hash_map m = {{"abc", "def"}}; - // // Creates no std::string copies and makes no heap allocations. - // m.emplace("abc", "xyz"); - template ::value, int>::type = 0> - std::pair emplace(Args&&... args) { - return PolicyTraits::apply(EmplaceDecomposable{*this}, - std::forward(args)...); - } - - // This overload kicks in if we cannot deduce the key from args. It constructs - // value_type unconditionally and then either moves it into the table or - // destroys. - template ::value, int>::type = 0> - std::pair emplace(Args&&... args) { - alignas(slot_type) unsigned char raw[sizeof(slot_type)]; - slot_type* slot = reinterpret_cast(&raw); - - PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); - const auto& elem = PolicyTraits::element(slot); - return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); - } - - template - iterator emplace_hint(const_iterator, Args&&... args) { - return emplace(std::forward(args)...).first; - } - - // Extension API: support for lazy emplace. - // - // Looks up key in the table. If found, returns the iterator to the element. - // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`. - // - // `f` must abide by several restrictions: - // - it MUST call `raw_hash_set::constructor` with arguments as if a - // `raw_hash_set::value_type` is constructed, - // - it MUST NOT access the container before the call to - // `raw_hash_set::constructor`, and - // - it MUST NOT erase the lazily emplaced element. - // Doing any of these is undefined behavior. - // - // For example: - // - // std::unordered_set s; - // // Makes ArenaStr even if "abc" is in the map. - // s.insert(ArenaString(&arena, "abc")); - // - // flat_hash_set s; - // // Makes ArenaStr only if "abc" is not in the map. - // s.lazy_emplace("abc", [&](const constructor& ctor) { - // ctor(&arena, "abc"); - // }); - // - // WARNING: This API is currently experimental. If there is a way to implement - // the same thing with the rest of the API, prefer that. - class constructor { - friend class raw_hash_set; - - public: - template - void operator()(Args&&... args) const { - assert(*slot_); - PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); - *slot_ = nullptr; - } - - private: - constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} - - allocator_type* alloc_; - slot_type** slot_; - }; - - template - iterator lazy_emplace(const key_arg& key, F&& f) { - auto res = find_or_prepare_insert(key); - if (res.second) { - slot_type* slot = slots_ + res.first; - std::forward(f)(constructor(&alloc_ref(), &slot)); - assert(!slot); - } - return iterator_at(res.first); - } - - // Extension API: support for heterogeneous keys. - // - // std::unordered_set s; - // // Turns "abc" into std::string. - // s.erase("abc"); - // - // flat_hash_set s; - // // Uses "abc" directly without copying it into std::string. - // s.erase("abc"); - template - size_type erase(const key_arg& key) { - auto it = find(key); - if (it == end()) return 0; - erase(it); - return 1; - } - - // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, - // this method returns void to reduce algorithmic complexity to O(1). The - // iterator is invalidated, so any increment should be done before calling - // erase. 
In order to erase while iterating across a map, use the following - // idiom (which also works for standard containers): - // - // for (auto it = m.begin(), end = m.end(); it != end;) { - // // `erase()` will invalidate `it`, so advance `it` first. - // auto copy_it = it++; - // if () { - // m.erase(copy_it); - // } - // } - void erase(const_iterator cit) { erase(cit.inner_); } - - // This overload is necessary because otherwise erase(const K&) would be - // a better match if non-const iterator is passed as an argument. - void erase(iterator it) { - ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, - "erase() called on invalid iterator."); - PolicyTraits::destroy(&alloc_ref(), it.slot_); - erase_meta_only(it); - } - - iterator erase(const_iterator first, const_iterator last) { - while (first != last) { - erase(first++); - } - return last.inner_; - } - - // Moves elements from `src` into `this`. - // If the element already exists in `this`, it is left unmodified in `src`. - template - void merge(raw_hash_set& src) { // NOLINT - assert(this != &src); - for (auto it = src.begin(), e = src.end(); it != e;) { - auto next = std::next(it); - if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, - PolicyTraits::element(it.slot_)) - .second) { - src.erase_meta_only(it); - } - it = next; - } - } - - template - void merge(raw_hash_set&& src) { - merge(src); - } - - node_type extract(const_iterator position) { - ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, - "extract() called on invalid iterator."); - auto node = - CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); - erase_meta_only(position); - return node; - } - - template < - class K = key_type, - typename std::enable_if::value, int>::type = 0> - node_type extract(const key_arg& key) { - auto it = find(key); - return it == end() ? node_type() : extract(const_iterator{it}); - } - - void swap(raw_hash_set& that) noexcept( - IsNoThrowSwappable() && IsNoThrowSwappable() && - IsNoThrowSwappable( - typename AllocTraits::propagate_on_container_swap{})) { - using std::swap; - swap(ctrl_, that.ctrl_); - swap(slots_, that.slots_); - swap(size_, that.size_); - swap(capacity_, that.capacity_); - swap(growth_left(), that.growth_left()); - swap(hash_ref(), that.hash_ref()); - swap(eq_ref(), that.eq_ref()); - swap(infoz(), that.infoz()); - SwapAlloc(alloc_ref(), that.alloc_ref(), - typename AllocTraits::propagate_on_container_swap{}); - } - - void rehash(size_t n) { - if (n == 0 && capacity_ == 0) return; - if (n == 0 && size_ == 0) { - destroy_slots(); - infoz().RecordStorageChanged(0, 0); - infoz().RecordClearedReservation(); - return; - } - - // bitor is a faster way of doing `max` here. We will round up to the next - // power-of-2-minus-1, so bitor is good enough. - auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); - // n == 0 unconditionally rehashes as per the standard. - if (n == 0 || m > capacity_) { - resize(m); - - // This is after resize, to ensure that we have completed the allocation - // and have potentially sampled the hashtable. - infoz().RecordReservation(n); - } - } - - void reserve(size_t n) { - if (n > size() + growth_left()) { - size_t m = GrowthToLowerboundCapacity(n); - resize(NormalizeCapacity(m)); - - // This is after resize, to ensure that we have completed the allocation - // and have potentially sampled the hashtable. - infoz().RecordReservation(n); - } - } - - // Extension API: support for heterogeneous keys. - // - // std::unordered_set s; - // // Turns "abc" into std::string. 
- // s.count("abc"); - // - // ch_set s; - // // Uses "abc" directly without copying it into std::string. - // s.count("abc"); - template - size_t count(const key_arg& key) const { - return find(key) == end() ? 0 : 1; - } - - // Issues CPU prefetch instructions for the memory needed to find or insert - // a key. Like all lookup functions, this support heterogeneous keys. - // - // NOTE: This is a very low level operation and should not be used without - // specific benchmarks indicating its importance. - template - void prefetch(const key_arg& key) const { - (void)key; - // Avoid probing if we won't be able to prefetch the addresses received. + + // Sets `ctrl[i]` to `h`. + // + // Unlike setting it directly, this function will perform bounds checks and + // mirror the value to the cloned tail if necessary. + inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) + { + assert(i < capacity); + + auto* slot_i = static_cast(slot) + i * slot_size; + if (IsFull(h)) + { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } + else + { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } + + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; + } + + // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. + inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) + { + SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); + } + + // Given the capacity of a table, computes the offset (from the start of the + // backing allocation) at which the slots begin. + inline size_t SlotOffset(size_t capacity, size_t slot_align) + { + assert(IsValidCapacity(capacity)); + const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); + return (num_control_bytes + slot_align - 1) & (~slot_align + 1); + } + + // Given the capacity of a table, computes the total size of the backing + // array. + inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) + { + return SlotOffset(capacity, slot_align) + capacity * slot_size; + } + + // A SwissTable. + // + // Policy: a policy defines how to perform different operations on + // the slots of the hashtable (see hash_policy_traits.h for the full interface + // of policy). + // + // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The + // functor should accept a key and return size_t as hash. For best performance + // it is important that the hash function provides high entropy across all bits + // of the hash. + // + // Eq: a (possibly polymorphic) functor that compares two keys for equality. It + // should accept two (of possibly different type) keys and return a bool: true + // if they are equal, false if they are not. If two keys compare equal, then + // their hash values as defined by Hash MUST be equal. + // + // Allocator: an Allocator + // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which + // the storage of the hashtable will be allocated and the elements will be + // constructed and destroyed. + template + class raw_hash_set + { + using PolicyTraits = hash_policy_traits; + using KeyArgImpl = + KeyArg::value && IsTransparent::value>; + + public: + using init_type = typename PolicyTraits::init_type; + using key_type = typename PolicyTraits::key_type; + // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user + // code fixes! 
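+        // (Informal note: `slot_type` is whatever the Policy stores per entry;
+        // for map-like policies it is typically a pair-like slot rather than
+        // `value_type` itself, which is presumably why the TODO above wants it
+        // hidden from user code.)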
+ using slot_type = typename PolicyTraits::slot_type; + using allocator_type = Alloc; + using size_type = size_t; + using difference_type = ptrdiff_t; + using hasher = Hash; + using key_equal = Eq; + using policy_type = Policy; + using value_type = typename PolicyTraits::value_type; + using reference = value_type&; + using const_reference = const value_type&; + using pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::pointer; + using const_pointer = typename absl::allocator_traits< + allocator_type>::template rebind_traits::const_pointer; + + // Alias used for heterogeneous lookup functions. + // `key_arg` evaluates to `K` when the functors are transparent and to + // `key_type` otherwise. It permits template argument deduction on `K` for the + // transparent case. + template + using key_arg = typename KeyArgImpl::template type; + + private: + // Give an early error when key_type is not hashable/eq. + auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); + auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + + using AllocTraits = absl::allocator_traits; + using SlotAlloc = typename absl::allocator_traits< + allocator_type>::template rebind_alloc; + using SlotAllocTraits = typename absl::allocator_traits< + allocator_type>::template rebind_traits; + + static_assert(std::is_lvalue_reference::value, "Policy::element() must return a reference"); + + template + struct SameAsElementReference : std::is_same::type>::type, typename std::remove_cv::type>::type> + { + }; + + // An enabler for insert(T&&): T must be convertible to init_type or be the + // same as [cv] value_type [ref]. + // Note: we separate SameAsElementReference into its own type to avoid using + // reference unless we need to. MSVC doesn't seem to like it in some + // cases. + template + using RequiresInsertable = typename std::enable_if< + absl::disjunction, SameAsElementReference>::value, + int>::type; + + // RequiresNotInit is a workaround for gcc prior to 7.1. + // See https://godbolt.org/g/Y4xsUh. + template + using RequiresNotInit = + typename std::enable_if::value, int>::type; + + template + using IsDecomposable = IsDecomposable; + + public: + static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); + static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); + + class iterator + { + friend class raw_hash_set; + + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename raw_hash_set::value_type; + using reference = + absl::conditional_t; + using pointer = absl::remove_reference_t*; + using difference_type = typename raw_hash_set::difference_type; + + iterator() + { + } + + // PRECONDITION: not an end() iterator. + reference operator*() const + { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*() called on invalid iterator."); + return PolicyTraits::element(slot_); + } + + // PRECONDITION: not an end() iterator. + pointer operator->() const + { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator-> called on invalid iterator."); + return &operator*(); + } + + // PRECONDITION: not an end() iterator. + iterator& operator++() + { + ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++ called on invalid iterator."); + ++ctrl_; + ++slot_; + skip_empty_or_deleted(); + return *this; + } + // PRECONDITION: not an end() iterator. 
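+                // (Informal note: the postfix form below is what the
+                // advance-before-erase idiom shown in the erase() documentation
+                // relies on, e.g. `auto copy_it = it++; s.erase(copy_it);`.)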
+ iterator operator++(int) + { + auto tmp = *this; + ++*this; + return tmp; + } + + friend bool operator==(const iterator& a, const iterator& b) + { + AssertIsValid(a.ctrl_); + AssertIsValid(b.ctrl_); + return a.ctrl_ == b.ctrl_; + } + friend bool operator!=(const iterator& a, const iterator& b) + { + return !(a == b); + } + + private: + iterator(ctrl_t* ctrl, slot_type* slot) : + ctrl_(ctrl), + slot_(slot) + { + // This assumption helps the compiler know that any non-end iterator is + // not equal to any end iterator. + ABSL_ASSUME(ctrl != nullptr); + } + + // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until + // they reach one. + // + // If a sentinel is reached, we null both of them out instead. + void skip_empty_or_deleted() + { + while (IsEmptyOrDeleted(*ctrl_)) + { + uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); + ctrl_ += shift; + slot_ += shift; + } + if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) + ctrl_ = nullptr; + } + + ctrl_t* ctrl_ = nullptr; + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. + union + { + slot_type* slot_; + }; + }; + + class const_iterator + { + friend class raw_hash_set; + + public: + using iterator_category = typename iterator::iterator_category; + using value_type = typename raw_hash_set::value_type; + using reference = typename raw_hash_set::const_reference; + using pointer = typename raw_hash_set::const_pointer; + using difference_type = typename raw_hash_set::difference_type; + + const_iterator() + { + } + // Implicit construction from iterator. + const_iterator(iterator i) : + inner_(std::move(i)) + { + } + + reference operator*() const + { + return *inner_; + } + pointer operator->() const + { + return inner_.operator->(); + } + + const_iterator& operator++() + { + ++inner_; + return *this; + } + const_iterator operator++(int) + { + return inner_++; + } + + friend bool operator==(const const_iterator& a, const const_iterator& b) + { + return a.inner_ == b.inner_; + } + friend bool operator!=(const const_iterator& a, const const_iterator& b) + { + return !(a == b); + } + + private: + const_iterator(const ctrl_t* ctrl, const slot_type* slot) : + inner_(const_cast(ctrl), const_cast(slot)) + { + } + + iterator inner_; + }; + + using node_type = node_handle, Alloc>; + using insert_return_type = InsertReturnType; + + raw_hash_set() noexcept( + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value&& + std::is_nothrow_default_constructible::value + ) + { + } + + explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + ctrl_(EmptyGroup()), + settings_(0, HashtablezInfoHandle(), hash, eq, alloc) + { + if (bucket_count) + { + capacity_ = NormalizeCapacity(bucket_count); + initialize_slots(); + } + } + + raw_hash_set(size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(bucket_count, hash, key_equal(), alloc) + { + } + + raw_hash_set(size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(bucket_count, hasher(), key_equal(), alloc) + { + } + + explicit raw_hash_set(const allocator_type& alloc) : + raw_hash_set(0, hasher(), key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = 
allocator_type()) : + raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), hash, eq, alloc) + { + insert(first, last); + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) + { + } + + template + raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) : + raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) + { + } + + // Instead of accepting std::initializer_list as the first + // argument like std::unordered_set does, we have two overloads + // that accept std::initializer_list and std::initializer_list. + // This is advantageous for performance. + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // std::unordered_set s = {"abc", "def"}; + // + // // Turns {"abc", "def"} into std::initializer_list, then + // // copies the strings into the set. + // absl::flat_hash_set s = {"abc", "def"}; + // + // The same trick is used in insert(). + // + // The enabler is necessary to prevent this constructor from triggering where + // the copy constructor is meant to be called. + // + // absl::flat_hash_set a, b{a}; + // + // RequiresNotInit is a workaround for gcc prior to 7.1. + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : + raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hash, key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hash, key_equal(), alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : + raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) + { + } + + template = 0, RequiresInsertable = 0> + raw_hash_set(std::initializer_list init, const allocator_type& alloc) : + raw_hash_set(init, 0, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(std::initializer_list init, const allocator_type& alloc) : + raw_hash_set(init, 0, hasher(), key_equal(), alloc) + { + } + + raw_hash_set(const raw_hash_set& that) : + raw_hash_set(that, AllocTraits::select_on_container_copy_construction(that.alloc_ref())) + { + } + + raw_hash_set(const raw_hash_set& that, const allocator_type& a) : + raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) + { + reserve(that.size()); + // Because the table is guaranteed to be empty, 
we can do something faster + // than a full `insert`. + for (const auto& v : that) + { + const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); + auto target = find_first_non_full(ctrl_, hash, capacity_); + SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + emplace_at(target.offset, v); + infoz().RecordInsert(hash, target.probe_length); + } + size_ = that.size(); + growth_left() -= that.size(); + } + + raw_hash_set(raw_hash_set&& that) noexcept( + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value&& + std::is_nothrow_copy_constructible::value + ) : + ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), + slots_(absl::exchange(that.slots_, nullptr)), + size_(absl::exchange(that.size_, 0)), + capacity_(absl::exchange(that.capacity_, 0)), + // Hash, equality and allocator are copied instead of moved because + // `that` must be left valid. If Hash is std::function, moving it + // would create a nullptr functor that cannot be called. + settings_(absl::exchange(that.growth_left(), 0), absl::exchange(that.infoz(), HashtablezInfoHandle()), that.hash_ref(), that.eq_ref(), that.alloc_ref()) + { + } + + raw_hash_set(raw_hash_set&& that, const allocator_type& a) : + ctrl_(EmptyGroup()), + slots_(nullptr), + size_(0), + capacity_(0), + settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), a) + { + if (a == that.alloc_ref()) + { + std::swap(ctrl_, that.ctrl_); + std::swap(slots_, that.slots_); + std::swap(size_, that.size_); + std::swap(capacity_, that.capacity_); + std::swap(growth_left(), that.growth_left()); + std::swap(infoz(), that.infoz()); + } + else + { + reserve(that.size()); + // Note: this will copy elements of dense_set and unordered_set instead of + // moving them. This can be fixed if it ever becomes an issue. + for (auto& elem : that) + insert(std::move(elem)); + } + } + + raw_hash_set& operator=(const raw_hash_set& that) + { + raw_hash_set tmp(that, AllocTraits::propagate_on_container_copy_assignment::value ? that.alloc_ref() : alloc_ref()); + swap(tmp); + return *this; + } + + raw_hash_set& operator=(raw_hash_set&& that) noexcept( + absl::allocator_traits::is_always_equal::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_assignable::value + ) + { + // TODO(sbenza): We should only use the operations from the noexcept clause + // to make sure we actually adhere to that contract. + return move_assign( + std::move(that), + typename AllocTraits::propagate_on_container_move_assignment() + ); + } + + ~raw_hash_set() + { + destroy_slots(); + } + + iterator begin() + { + auto it = iterator_at(0); + it.skip_empty_or_deleted(); + return it; + } + iterator end() + { + return {}; + } + + const_iterator begin() const + { + return const_cast(this)->begin(); + } + const_iterator end() const + { + return {}; + } + const_iterator cbegin() const + { + return begin(); + } + const_iterator cend() const + { + return end(); + } + + bool empty() const + { + return !size(); + } + size_t size() const + { + return size_; + } + size_t capacity() const + { + return capacity_; + } + size_t max_size() const + { + return (std::numeric_limits::max)(); + } + + ABSL_ATTRIBUTE_REINITIALIZES void clear() + { + // Iterating over this container is O(bucket_count()). When bucket_count() + // is much greater than size(), iteration becomes prohibitively expensive. 
+ // For clear() it is more important to reuse the allocated array when the + // container is small because allocation takes comparatively long time + // compared to destruction of the elements of the container. So we pick the + // largest bucket_count() threshold for which iteration is still fast and + // past that we simply deallocate the array. + if (capacity_ > 127) + { + destroy_slots(); + + infoz().RecordClearedReservation(); + } + else if (capacity_) + { + for (size_t i = 0; i != capacity_; ++i) + { + if (IsFull(ctrl_[i])) + { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + size_ = 0; + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + reset_growth_left(); + } + assert(empty()); + infoz().RecordStorageChanged(0, capacity_); + } + + // This overload kicks in when the argument is an rvalue of insertable and + // decomposable type other than init_type. + // + // flat_hash_map m; + // m.insert(std::make_pair("abc", 42)); + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> + std::pair insert(T&& value) + { + return emplace(std::forward(value)); + } + + // This overload kicks in when the argument is a bitfield or an lvalue of + // insertable and decomposable type. + // + // union { int n : 1; }; + // flat_hash_set s; + // s.insert(n); + // + // flat_hash_set s; + // const char* p = "hello"; + // s.insert(p); + // + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. + template< + class T, + RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + std::pair insert(const T& value) + { + return emplace(value); + } + + // This overload kicks in when the argument is an rvalue of init_type. Its + // purpose is to handle brace-init-list arguments. + // + // flat_hash_map s; + // s.insert({"abc", 42}); + std::pair insert(init_type&& value) + { + return emplace(std::move(value)); + } + + // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc + // bug. + template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> + iterator insert(const_iterator, T&& value) + { + return insert(std::forward(value)).first; + } + + // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace + // RequiresInsertable with RequiresInsertable. + // We are hitting this bug: https://godbolt.org/g/1Vht4f. 
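+        //
+        // Informal usage sketch (the container and key below are only examples):
+        // the const_iterator parameter in these hinted overloads is unnamed and
+        // ignored, presumably existing only for interface compatibility with
+        // std::unordered_set, so the hinted form behaves like the plain insert:
+        //
+        //   absl::flat_hash_set<std::string> s;
+        //   s.insert(s.begin(), "abc");   // equivalent to s.insert("abc")
+        //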
+ template< + class T, + RequiresInsertable = 0, + typename std::enable_if::value, int>::type = 0> + iterator insert(const_iterator, const T& value) + { + return insert(value).first; + } + + iterator insert(const_iterator, init_type&& value) + { + return insert(std::move(value)).first; + } + + template + void insert(InputIt first, InputIt last) + { + for (; first != last; ++first) + emplace(*first); + } + + template = 0, RequiresInsertable = 0> + void insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + void insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + insert_return_type insert(node_type&& node) + { + if (!node) + return {end(), false, node_type()}; + const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); + auto res = PolicyTraits::apply( + InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, + elem + ); + if (res.second) + { + CommonAccess::Reset(&node); + return {res.first, true, node_type()}; + } + else + { + return {res.first, false, std::move(node)}; + } + } + + iterator insert(const_iterator, node_type&& node) + { + auto res = insert(std::move(node)); + node = std::move(res.node); + return res.position; + } + + // This overload kicks in if we can deduce the key from args. This enables us + // to avoid constructing value_type if an entry with the same key already + // exists. + // + // For example: + // + // flat_hash_map m = {{"abc", "def"}}; + // // Creates no std::string copies and makes no heap allocations. + // m.emplace("abc", "xyz"); + template::value, int>::type = 0> + std::pair emplace(Args&&... args) + { + return PolicyTraits::apply(EmplaceDecomposable{*this}, std::forward(args)...); + } + + // This overload kicks in if we cannot deduce the key from args. It constructs + // value_type unconditionally and then either moves it into the table or + // destroys. + template::value, int>::type = 0> + std::pair emplace(Args&&... args) + { + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + slot_type* slot = reinterpret_cast(&raw); + + PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); + const auto& elem = PolicyTraits::element(slot); + return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); + } + + template + iterator emplace_hint(const_iterator, Args&&... args) + { + return emplace(std::forward(args)...).first; + } + + // Extension API: support for lazy emplace. + // + // Looks up key in the table. If found, returns the iterator to the element. + // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`. + // + // `f` must abide by several restrictions: + // - it MUST call `raw_hash_set::constructor` with arguments as if a + // `raw_hash_set::value_type` is constructed, + // - it MUST NOT access the container before the call to + // `raw_hash_set::constructor`, and + // - it MUST NOT erase the lazily emplaced element. + // Doing any of these is undefined behavior. + // + // For example: + // + // std::unordered_set s; + // // Makes ArenaStr even if "abc" is in the map. + // s.insert(ArenaString(&arena, "abc")); + // + // flat_hash_set s; + // // Makes ArenaStr only if "abc" is not in the map. + // s.lazy_emplace("abc", [&](const constructor& ctor) { + // ctor(&arena, "abc"); + // }); + // + // WARNING: This API is currently experimental. If there is a way to implement + // the same thing with the rest of the API, prefer that. + class constructor + { + friend class raw_hash_set; + + public: + template + void operator()(Args&&... 
args) const + { + assert(*slot_); + PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); + *slot_ = nullptr; + } + + private: + constructor(allocator_type* a, slot_type** slot) : + alloc_(a), + slot_(slot) + { + } + + allocator_type* alloc_; + slot_type** slot_; + }; + + template + iterator lazy_emplace(const key_arg& key, F&& f) + { + auto res = find_or_prepare_insert(key); + if (res.second) + { + slot_type* slot = slots_ + res.first; + std::forward(f)(constructor(&alloc_ref(), &slot)); + assert(!slot); + } + return iterator_at(res.first); + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.erase("abc"); + // + // flat_hash_set s; + // // Uses "abc" directly without copying it into std::string. + // s.erase("abc"); + template + size_type erase(const key_arg& key) + { + auto it = find(key); + if (it == end()) + return 0; + erase(it); + return 1; + } + + // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, + // this method returns void to reduce algorithmic complexity to O(1). The + // iterator is invalidated, so any increment should be done before calling + // erase. In order to erase while iterating across a map, use the following + // idiom (which also works for standard containers): + // + // for (auto it = m.begin(), end = m.end(); it != end;) { + // // `erase()` will invalidate `it`, so advance `it` first. + // auto copy_it = it++; + // if () { + // m.erase(copy_it); + // } + // } + void erase(const_iterator cit) + { + erase(cit.inner_); + } + + // This overload is necessary because otherwise erase(const K&) would be + // a better match if non-const iterator is passed as an argument. + void erase(iterator it) + { + ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase() called on invalid iterator."); + PolicyTraits::destroy(&alloc_ref(), it.slot_); + erase_meta_only(it); + } + + iterator erase(const_iterator first, const_iterator last) + { + while (first != last) + { + erase(first++); + } + return last.inner_; + } + + // Moves elements from `src` into `this`. + // If the element already exists in `this`, it is left unmodified in `src`. + template + void merge(raw_hash_set& src) + { // NOLINT + assert(this != &src); + for (auto it = src.begin(), e = src.end(); it != e;) + { + auto next = std::next(it); + if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, PolicyTraits::element(it.slot_)) + .second) + { + src.erase_meta_only(it); + } + it = next; + } + } + + template + void merge(raw_hash_set&& src) + { + merge(src); + } + + node_type extract(const_iterator position) + { + ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract() called on invalid iterator."); + auto node = + CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); + erase_meta_only(position); + return node; + } + + template< + class K = key_type, + typename std::enable_if::value, int>::type = 0> + node_type extract(const key_arg& key) + { + auto it = find(key); + return it == end() ? 
node_type() : extract(const_iterator{it}); + } + + void swap(raw_hash_set& that) noexcept( + IsNoThrowSwappable() && IsNoThrowSwappable() && + IsNoThrowSwappable( + typename AllocTraits::propagate_on_container_swap{} + ) + ) + { + using std::swap; + swap(ctrl_, that.ctrl_); + swap(slots_, that.slots_); + swap(size_, that.size_); + swap(capacity_, that.capacity_); + swap(growth_left(), that.growth_left()); + swap(hash_ref(), that.hash_ref()); + swap(eq_ref(), that.eq_ref()); + swap(infoz(), that.infoz()); + SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{}); + } + + void rehash(size_t n) + { + if (n == 0 && capacity_ == 0) + return; + if (n == 0 && size_ == 0) + { + destroy_slots(); + infoz().RecordStorageChanged(0, 0); + infoz().RecordClearedReservation(); + return; + } + + // bitor is a faster way of doing `max` here. We will round up to the next + // power-of-2-minus-1, so bitor is good enough. + auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); + // n == 0 unconditionally rehashes as per the standard. + if (n == 0 || m > capacity_) + { + resize(m); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + void reserve(size_t n) + { + if (n > size() + growth_left()) + { + size_t m = GrowthToLowerboundCapacity(n); + resize(NormalizeCapacity(m)); + + // This is after resize, to ensure that we have completed the allocation + // and have potentially sampled the hashtable. + infoz().RecordReservation(n); + } + } + + // Extension API: support for heterogeneous keys. + // + // std::unordered_set s; + // // Turns "abc" into std::string. + // s.count("abc"); + // + // ch_set s; + // // Uses "abc" directly without copying it into std::string. + // s.count("abc"); + template + size_t count(const key_arg& key) const + { + return find(key) == end() ? 0 : 1; + } + + // Issues CPU prefetch instructions for the memory needed to find or insert + // a key. Like all lookup functions, this support heterogeneous keys. + // + // NOTE: This is a very low level operation and should not be used without + // specific benchmarks indicating its importance. + template + void prefetch(const key_arg& key) const + { + (void)key; + // Avoid probing if we won't be able to prefetch the addresses received. #ifdef ABSL_INTERNAL_HAVE_PREFETCH - prefetch_heap_block(); - auto seq = probe(ctrl_, hash_ref()(key), capacity_); - base_internal::PrefetchT0(ctrl_ + seq.offset()); - base_internal::PrefetchT0(slots_ + seq.offset()); + prefetch_heap_block(); + auto seq = probe(ctrl_, hash_ref()(key), capacity_); + base_internal::PrefetchT0(ctrl_ + seq.offset()); + base_internal::PrefetchT0(slots_ + seq.offset()); #endif // ABSL_INTERNAL_HAVE_PREFETCH - } - - // The API of find() has two extensions. - // - // 1. The hash can be passed by the user. It must be equal to the hash of the - // key. - // - // 2. The type of the key argument doesn't have to be key_type. This is so - // called heterogeneous key support. 
- template - iterator find(const key_arg& key, size_t hash) { - auto seq = probe(ctrl_, hash, capacity_); - while (true) { - Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { - if (ABSL_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slots_ + seq.offset(i))))) - return iterator_at(seq.offset(i)); - } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); - seq.next(); - assert(seq.index() <= capacity_ && "full table!"); - } - } - template - iterator find(const key_arg& key) { - prefetch_heap_block(); - return find(key, hash_ref()(key)); - } - - template - const_iterator find(const key_arg& key, size_t hash) const { - return const_cast(this)->find(key, hash); - } - template - const_iterator find(const key_arg& key) const { - prefetch_heap_block(); - return find(key, hash_ref()(key)); - } - - template - bool contains(const key_arg& key) const { - return find(key) != end(); - } - - template - std::pair equal_range(const key_arg& key) { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - template - std::pair equal_range( - const key_arg& key) const { - auto it = find(key); - if (it != end()) return {it, std::next(it)}; - return {it, it}; - } - - size_t bucket_count() const { return capacity_; } - float load_factor() const { - return capacity_ ? static_cast(size()) / capacity_ : 0.0; - } - float max_load_factor() const { return 1.0f; } - void max_load_factor(float) { - // Does nothing. - } - - hasher hash_function() const { return hash_ref(); } - key_equal key_eq() const { return eq_ref(); } - allocator_type get_allocator() const { return alloc_ref(); } - - friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { - if (a.size() != b.size()) return false; - const raw_hash_set* outer = &a; - const raw_hash_set* inner = &b; - if (outer->capacity() > inner->capacity()) std::swap(outer, inner); - for (const value_type& elem : *outer) - if (!inner->has_element(elem)) return false; - return true; - } - - friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { - return !(a == b); - } - - template - friend typename std::enable_if::value, - H>::type - AbslHashValue(H h, const raw_hash_set& s) { - return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), - s.size()); - } - - friend void swap(raw_hash_set& a, - raw_hash_set& b) noexcept(noexcept(a.swap(b))) { - a.swap(b); - } - - private: - template - friend struct absl::container_internal::hashtable_debug_internal:: - HashtableDebugAccess; - - struct FindElement { - template - const_iterator operator()(const K& key, Args&&...) const { - return s.find(key); - } - const raw_hash_set& s; - }; - - struct HashElement { - template - size_t operator()(const K& key, Args&&...) const { - return h(key); - } - const hasher& h; - }; - - template - struct EqualElement { - template - bool operator()(const K2& lhs, Args&&...) const { - return eq(lhs, rhs); - } - const K1& rhs; - const key_equal& eq; - }; - - struct EmplaceDecomposable { - template - std::pair operator()(const K& key, Args&&... args) const { - auto res = s.find_or_prepare_insert(key); - if (res.second) { - s.emplace_at(res.first, std::forward(args)...); - } - return {s.iterator_at(res.first), res.second}; - } - raw_hash_set& s; - }; - - template - struct InsertSlot { - template - std::pair operator()(const K& key, Args&&...) 
&& { - auto res = s.find_or_prepare_insert(key); - if (res.second) { - PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); - } else if (do_destroy) { - PolicyTraits::destroy(&s.alloc_ref(), &slot); - } - return {s.iterator_at(res.first), res.second}; - } - raw_hash_set& s; - // Constructed slot. Either moved into place or destroyed. - slot_type&& slot; - }; - - // Erases, but does not destroy, the value pointed to by `it`. - // - // This merely updates the pertinent control byte. This can be used in - // conjunction with Policy::transfer to move the object to another place. - void erase_meta_only(const_iterator it) { - assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); - --size_; - const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); - const size_t index_before = (index - Group::kWidth) & capacity_; - const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); - const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); - - // We count how many consecutive non empties we have to the right and to the - // left of `it`. If the sum is >= kWidth then there is at least one probe - // window that might have seen a full group. - bool was_never_full = - empty_before && empty_after && - static_cast(empty_after.TrailingZeros() + - empty_before.LeadingZeros()) < Group::kWidth; - - SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, - capacity_, ctrl_, slots_, sizeof(slot_type)); - growth_left() += was_never_full; - infoz().RecordErase(); - } - - // Allocates a backing array for `self` and initializes its control bytes. - // This reads `capacity_` and updates all other fields based on the result of - // the allocation. - // - // This does not free the currently held array; `capacity_` must be nonzero. - void initialize_slots() { - assert(capacity_); - // Folks with custom allocators often make unwarranted assumptions about the - // behavior of their classes vis-a-vis trivial destructability and what - // calls they will or wont make. Avoid sampling for people with custom - // allocators to get us out of this mess. This is not a hard guarantee but - // a workaround while we plan the exact guarantee we want to provide. - // - // People are often sloppy with the exact type of their allocator (sometimes - // it has an extra const or is missing the pair, but rebinds made it work - // anyway). To avoid the ambiguity, we work off SlotAlloc which we have - // bound more carefully. - if (std::is_same>::value && - slots_ == nullptr) { - infoz() = Sample(sizeof(slot_type)); - } - - char* mem = static_cast(Allocate( - &alloc_ref(), - AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)))); - ctrl_ = reinterpret_cast(mem); - slots_ = reinterpret_cast( - mem + SlotOffset(capacity_, alignof(slot_type))); - ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); - reset_growth_left(); - infoz().RecordStorageChanged(size_, capacity_); - } - - // Destroys all slots in the backing array, frees the backing array, and - // clears all top-level book-keeping data. - // - // This essentially implements `map = raw_hash_set();`. - void destroy_slots() { - if (!capacity_) return; - for (size_t i = 0; i != capacity_; ++i) { - if (IsFull(ctrl_[i])) { - PolicyTraits::destroy(&alloc_ref(), slots_ + i); - } - } - - // Unpoison before returning the memory to the allocator. 
- SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); - Deallocate( - &alloc_ref(), ctrl_, - AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); - ctrl_ = EmptyGroup(); - slots_ = nullptr; - size_ = 0; - capacity_ = 0; - growth_left() = 0; - } - - void resize(size_t new_capacity) { - assert(IsValidCapacity(new_capacity)); - auto* old_ctrl = ctrl_; - auto* old_slots = slots_; - const size_t old_capacity = capacity_; - capacity_ = new_capacity; - initialize_slots(); - - size_t total_probe_length = 0; - for (size_t i = 0; i != old_capacity; ++i) { - if (IsFull(old_ctrl[i])) { - size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, - PolicyTraits::element(old_slots + i)); - auto target = find_first_non_full(ctrl_, hash, capacity_); - size_t new_i = target.offset; - total_probe_length += target.probe_length; - SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); - } - } - if (old_capacity) { - SanitizerUnpoisonMemoryRegion(old_slots, - sizeof(slot_type) * old_capacity); - Deallocate( - &alloc_ref(), old_ctrl, - AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); - } - infoz().RecordRehash(total_probe_length); - } - - // Prunes control bytes to remove as many tombstones as possible. - // - // See the comment on `rehash_and_grow_if_necessary()`. - void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { - assert(IsValidCapacity(capacity_)); - assert(!is_small(capacity_)); - // Algorithm: - // - mark all DELETED slots as EMPTY - // - mark all FULL slots as DELETED - // - for each slot marked as DELETED - // hash = Hash(element) - // target = find_first_non_full(hash) - // if target is in the same group - // mark slot as FULL - // else if target is EMPTY - // transfer element to target - // mark slot as EMPTY - // mark target as FULL - // else if target is DELETED - // swap current element with target element - // mark target as FULL - // repeat procedure for current slot with moved from element (target) - ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); - alignas(slot_type) unsigned char raw[sizeof(slot_type)]; - size_t total_probe_length = 0; - slot_type* slot = reinterpret_cast(&raw); - for (size_t i = 0; i != capacity_; ++i) { - if (!IsDeleted(ctrl_[i])) continue; - const size_t hash = PolicyTraits::apply( - HashElement{hash_ref()}, PolicyTraits::element(slots_ + i)); - const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); - const size_t new_i = target.offset; - total_probe_length += target.probe_length; - - // Verify if the old and new i fall within the same group wrt the hash. - // If they do, we don't need to move the object as it falls already in the - // best probe we can. - const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); - const auto probe_index = [probe_offset, this](size_t pos) { - return ((pos - probe_offset) & capacity_) / Group::kWidth; - }; - - // Element doesn't move. - if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { - SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); - continue; - } - if (IsEmpty(ctrl_[new_i])) { - // Transfer element to the empty spot. - // SetCtrl poisons/unpoisons the slots so we have to call it at the - // right time. 
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); - SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); - } else { - assert(IsDeleted(ctrl_[new_i])); - SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); - // Until we are done rehashing, DELETED marks previously FULL slots. - // Swap i and new_i elements. - PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); - PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); - PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); - --i; // repeat - } - } - reset_growth_left(); - infoz().RecordRehash(total_probe_length); - } - - // Called whenever the table *might* need to conditionally grow. - // - // This function is an optimization opportunity to perform a rehash even when - // growth is unnecessary, because vacating tombstones is beneficial for - // performance in the long-run. - void rehash_and_grow_if_necessary() { - if (capacity_ == 0) { - resize(1); - } else if (capacity_ > Group::kWidth && - // Do these calcuations in 64-bit to avoid overflow. - size() * uint64_t{32} <= capacity_ * uint64_t{25}) { - // Squash DELETED without growing if there is enough capacity. - // - // Rehash in place if the current size is <= 25/32 of capacity_. - // Rationale for such a high factor: 1) drop_deletes_without_resize() is - // faster than resize, and 2) it takes quite a bit of work to add - // tombstones. In the worst case, seems to take approximately 4 - // insert/erase pairs to create a single tombstone and so if we are - // rehashing because of tombstones, we can afford to rehash-in-place as - // long as we are reclaiming at least 1/8 the capacity without doing more - // than 2X the work. (Where "work" is defined to be size() for rehashing - // or rehashing in place, and 1 for an insert or erase.) But rehashing in - // place is faster per operation than inserting or even doubling the size - // of the table, so we actually afford to reclaim even less space from a - // resize-in-place. The decision is to rehash in place if we can reclaim - // at about 1/8th of the usable capacity (specifically 3/28 of the - // capacity) which means that the total cost of rehashing will be a small - // fraction of the total work. - // - // Here is output of an experiment using the BM_CacheInSteadyState - // benchmark running the old case (where we rehash-in-place only if we can - // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place - // if we can recover 3/32*capacity_). - // - // Note that although in the worst-case number of rehashes jumped up from - // 15 to 190, but the number of operations per second is almost the same. - // - // Abridged output of running BM_CacheInSteadyState benchmark from - // raw_hash_set_benchmark. N is the number of insert/erase operations. - // - // | OLD (recover >= 7/16 | NEW (recover >= 3/32) - // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes - // 448 | 145284 0.44 18 | 140118 0.44 19 - // 493 | 152546 0.24 11 | 151417 0.48 28 - // 538 | 151439 0.26 11 | 151152 0.53 38 - // 583 | 151765 0.28 11 | 150572 0.57 50 - // 628 | 150241 0.31 11 | 150853 0.61 66 - // 672 | 149602 0.33 12 | 150110 0.66 90 - // 717 | 149998 0.35 12 | 149531 0.70 129 - // 762 | 149836 0.37 13 | 148559 0.74 190 - // 807 | 149736 0.39 14 | 151107 0.39 14 - // 852 | 150204 0.42 15 | 151019 0.42 15 - drop_deletes_without_resize(); - } else { - // Otherwise grow the container. 
- resize(capacity_ * 2 + 1); - } - } - - bool has_element(const value_type& elem) const { - size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); - auto seq = probe(ctrl_, hash, capacity_); - while (true) { - Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { - if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == - elem)) - return true; - } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false; - seq.next(); - assert(seq.index() <= capacity_ && "full table!"); - } - return false; - } - - // TODO(alkis): Optimize this assuming *this and that don't overlap. - raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { - raw_hash_set tmp(std::move(that)); - swap(tmp); - return *this; - } - raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { - raw_hash_set tmp(std::move(that), alloc_ref()); - swap(tmp); - return *this; - } - - protected: - // Attempts to find `key` in the table; if it isn't found, returns a slot that - // the value can be inserted into, with the control byte already set to - // `key`'s H2. - template - std::pair find_or_prepare_insert(const K& key) { - prefetch_heap_block(); - auto hash = hash_ref()(key); - auto seq = probe(ctrl_, hash, capacity_); - while (true) { - Group g{ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(H2(hash))) { - if (ABSL_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slots_ + seq.offset(i))))) - return {seq.offset(i), false}; - } - if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break; - seq.next(); - assert(seq.index() <= capacity_ && "full table!"); - } - return {prepare_insert(hash), true}; - } - - // Given the hash of a value not currently in the table, finds the next - // viable slot index to insert it at. - // - // REQUIRES: At least one non-full slot available. - size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { - auto target = find_first_non_full(ctrl_, hash, capacity_); - if (ABSL_PREDICT_FALSE(growth_left() == 0 && - !IsDeleted(ctrl_[target.offset]))) { - rehash_and_grow_if_necessary(); - target = find_first_non_full(ctrl_, hash, capacity_); - } - ++size_; - growth_left() -= IsEmpty(ctrl_[target.offset]); - SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, - sizeof(slot_type)); - infoz().RecordInsert(hash, target.probe_length); - return target.offset; - } - - // Constructs the value in the space pointed by the iterator. This only works - // after an unsuccessful find_or_prepare_insert() and before any other - // modifications happen in the raw_hash_set. - // - // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where - // k is the key decomposed from `forward(args)...`, and the bool - // returned by find_or_prepare_insert(k) was true. - // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). - template - void emplace_at(size_t i, Args&&... args) { - PolicyTraits::construct(&alloc_ref(), slots_ + i, - std::forward(args)...); - - assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == - iterator_at(i) && - "constructed value does not match the lookup key"); - } - - iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; } - const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; } - - private: - friend struct RawHashSetTestOnlyAccess; - - void reset_growth_left() { - growth_left() = CapacityToGrowth(capacity()) - size_; - } - - // The number of slots we can still fill without needing to rehash. 
- // - // This is stored separately due to tombstones: we do not include tombstones - // in the growth capacity, because we'd like to rehash when the table is - // otherwise filled with tombstones: otherwise, probe sequences might get - // unacceptably long without triggering a rehash. Callers can also force a - // rehash via the standard `rehash(0)`, which will recompute this value as a - // side-effect. - // - // See `CapacityToGrowth()`. - size_t& growth_left() { return settings_.template get<0>(); } - - // Prefetch the heap-allocated memory region to resolve potential TLB misses. - // This is intended to overlap with execution of calculating the hash for a - // key. - void prefetch_heap_block() const { - base_internal::PrefetchT2(ctrl_); - } - - HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } - - hasher& hash_ref() { return settings_.template get<2>(); } - const hasher& hash_ref() const { return settings_.template get<2>(); } - key_equal& eq_ref() { return settings_.template get<3>(); } - const key_equal& eq_ref() const { return settings_.template get<3>(); } - allocator_type& alloc_ref() { return settings_.template get<4>(); } - const allocator_type& alloc_ref() const { - return settings_.template get<4>(); - } - - // TODO(alkis): Investigate removing some of these fields: - // - ctrl/slots can be derived from each other - // - size can be moved into the slot array - - // The control bytes (and, also, a pointer to the base of the backing array). - // - // This contains `capacity_ + 1 + NumClonedBytes()` entries, even - // when the table is empty (hence EmptyGroup). - ctrl_t* ctrl_ = EmptyGroup(); - // The beginning of the slots, located at `SlotOffset()` bytes after - // `ctrl_`. May be null for empty tables. - slot_type* slots_ = nullptr; - - // The number of filled slots. - size_t size_ = 0; - - // The total number of available slots. - size_t capacity_ = 0; - absl::container_internal::CompressedTuple - settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, - allocator_type{}}; -}; - -// Erases all elements that satisfy the predicate `pred` from the container `c`. 
-template -typename raw_hash_set::size_type EraseIf( - Predicate& pred, raw_hash_set* c) { - const auto initial_size = c->size(); - for (auto it = c->begin(), last = c->end(); it != last;) { - if (pred(*it)) { - c->erase(it++); - } else { - ++it; - } - } - return initial_size - c->size(); -} - -namespace hashtable_debug_internal { -template -struct HashtableDebugAccess> { - using Traits = typename Set::PolicyTraits; - using Slot = typename Traits::slot_type; - - static size_t GetNumProbes(const Set& set, - const typename Set::key_type& key) { - size_t num_probes = 0; - size_t hash = set.hash_ref()(key); - auto seq = probe(set.ctrl_, hash, set.capacity_); - while (true) { - container_internal::Group g{set.ctrl_ + seq.offset()}; - for (uint32_t i : g.Match(container_internal::H2(hash))) { - if (Traits::apply( - typename Set::template EqualElement{ - key, set.eq_ref()}, - Traits::element(set.slots_ + seq.offset(i)))) - return num_probes; - ++num_probes; - } - if (g.MaskEmpty()) return num_probes; - seq.next(); - ++num_probes; - } - } - - static size_t AllocatedByteSize(const Set& c) { - size_t capacity = c.capacity_; - if (capacity == 0) return 0; - size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); - - size_t per_slot = Traits::space_used(static_cast(nullptr)); - if (per_slot != ~size_t{}) { - m += per_slot * c.size(); - } else { - for (size_t i = 0; i != capacity; ++i) { - if (container_internal::IsFull(c.ctrl_[i])) { - m += Traits::space_used(c.slots_ + i); + } + + // The API of find() has two extensions. + // + // 1. The hash can be passed by the user. It must be equal to the hash of the + // key. + // + // 2. The type of the key argument doesn't have to be key_type. This is so + // called heterogeneous key support. + template + iterator find(const key_arg& key, size_t hash) + { + auto seq = probe(ctrl_, hash, capacity_); + while (true) + { + Group g{ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i)) + ))) + return iterator_at(seq.offset(i)); + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + return end(); + seq.next(); + assert(seq.index() <= capacity_ && "full table!"); + } + } + template + iterator find(const key_arg& key) + { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + const_iterator find(const key_arg& key, size_t hash) const + { + return const_cast(this)->find(key, hash); + } + template + const_iterator find(const key_arg& key) const + { + prefetch_heap_block(); + return find(key, hash_ref()(key)); + } + + template + bool contains(const key_arg& key) const + { + return find(key) != end(); + } + + template + std::pair equal_range(const key_arg& key) + { + auto it = find(key); + if (it != end()) + return {it, std::next(it)}; + return {it, it}; + } + template + std::pair equal_range( + const key_arg& key + ) const + { + auto it = find(key); + if (it != end()) + return {it, std::next(it)}; + return {it, it}; + } + + size_t bucket_count() const + { + return capacity_; + } + float load_factor() const + { + return capacity_ ? static_cast(size()) / capacity_ : 0.0; + } + float max_load_factor() const + { + return 1.0f; + } + void max_load_factor(float) + { + // Does nothing. 
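+                // The growth threshold of a SwissTable is fixed internally (see
+                // CapacityToGrowth()), so this setter is most likely kept only
+                // for interface compatibility with the std::unordered_* API.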
+ } + + hasher hash_function() const + { + return hash_ref(); + } + key_equal key_eq() const + { + return eq_ref(); + } + allocator_type get_allocator() const + { + return alloc_ref(); + } + + friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) + { + if (a.size() != b.size()) + return false; + const raw_hash_set* outer = &a; + const raw_hash_set* inner = &b; + if (outer->capacity() > inner->capacity()) + std::swap(outer, inner); + for (const value_type& elem : *outer) + if (!inner->has_element(elem)) + return false; + return true; + } + + friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) + { + return !(a == b); + } + + template + friend typename std::enable_if::value, H>::type + AbslHashValue(H h, const raw_hash_set& s) + { + return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), s.size()); + } + + friend void swap(raw_hash_set& a, raw_hash_set& b) noexcept(noexcept(a.swap(b))) + { + a.swap(b); + } + + private: + template + friend struct absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + + struct FindElement + { + template + const_iterator operator()(const K& key, Args&&...) const + { + return s.find(key); + } + const raw_hash_set& s; + }; + + struct HashElement + { + template + size_t operator()(const K& key, Args&&...) const + { + return h(key); + } + const hasher& h; + }; + + template + struct EqualElement + { + template + bool operator()(const K2& lhs, Args&&...) const + { + return eq(lhs, rhs); + } + const K1& rhs; + const key_equal& eq; + }; + + struct EmplaceDecomposable + { + template + std::pair operator()(const K& key, Args&&... args) const + { + auto res = s.find_or_prepare_insert(key); + if (res.second) + { + s.emplace_at(res.first, std::forward(args)...); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + }; + + template + struct InsertSlot + { + template + std::pair operator()(const K& key, Args&&...) && + { + auto res = s.find_or_prepare_insert(key); + if (res.second) + { + PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); + } + else if (do_destroy) + { + PolicyTraits::destroy(&s.alloc_ref(), &slot); + } + return {s.iterator_at(res.first), res.second}; + } + raw_hash_set& s; + // Constructed slot. Either moved into place or destroyed. + slot_type&& slot; + }; + + // Erases, but does not destroy, the value pointed to by `it`. + // + // This merely updates the pertinent control byte. This can be used in + // conjunction with Policy::transfer to move the object to another place. + void erase_meta_only(const_iterator it) + { + assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); + --size_; + const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); + const size_t index_before = (index - Group::kWidth) & capacity_; + const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); + const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); + + // We count how many consecutive non empties we have to the right and to the + // left of `it`. If the sum is >= kWidth then there is at least one probe + // window that might have seen a full group. + bool was_never_full = + empty_before && empty_after && + static_cast(empty_after.TrailingZeros() + empty_before.LeadingZeros()) < Group::kWidth; + + SetCtrl(index, was_never_full ? 
ctrl_t::kEmpty : ctrl_t::kDeleted, capacity_, ctrl_, slots_, sizeof(slot_type)); + growth_left() += was_never_full; + infoz().RecordErase(); + } + + // Allocates a backing array for `self` and initializes its control bytes. + // This reads `capacity_` and updates all other fields based on the result of + // the allocation. + // + // This does not free the currently held array; `capacity_` must be nonzero. + void initialize_slots() + { + assert(capacity_); + // Folks with custom allocators often make unwarranted assumptions about the + // behavior of their classes vis-a-vis trivial destructability and what + // calls they will or wont make. Avoid sampling for people with custom + // allocators to get us out of this mess. This is not a hard guarantee but + // a workaround while we plan the exact guarantee we want to provide. + // + // People are often sloppy with the exact type of their allocator (sometimes + // it has an extra const or is missing the pair, but rebinds made it work + // anyway). To avoid the ambiguity, we work off SlotAlloc which we have + // bound more carefully. + if (std::is_same>::value && + slots_ == nullptr) + { + infoz() = Sample(sizeof(slot_type)); + } + + char* mem = static_cast(Allocate( + &alloc_ref(), + AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)) + )); + ctrl_ = reinterpret_cast(mem); + slots_ = reinterpret_cast( + mem + SlotOffset(capacity_, alignof(slot_type)) + ); + ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); + reset_growth_left(); + infoz().RecordStorageChanged(size_, capacity_); + } + + // Destroys all slots in the backing array, frees the backing array, and + // clears all top-level book-keeping data. + // + // This essentially implements `map = raw_hash_set();`. + void destroy_slots() + { + if (!capacity_) + return; + for (size_t i = 0; i != capacity_; ++i) + { + if (IsFull(ctrl_[i])) + { + PolicyTraits::destroy(&alloc_ref(), slots_ + i); + } + } + + // Unpoison before returning the memory to the allocator. + SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); + Deallocate( + &alloc_ref(), ctrl_, AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)) + ); + ctrl_ = EmptyGroup(); + slots_ = nullptr; + size_ = 0; + capacity_ = 0; + growth_left() = 0; + } + + void resize(size_t new_capacity) + { + assert(IsValidCapacity(new_capacity)); + auto* old_ctrl = ctrl_; + auto* old_slots = slots_; + const size_t old_capacity = capacity_; + capacity_ = new_capacity; + initialize_slots(); + + size_t total_probe_length = 0; + for (size_t i = 0; i != old_capacity; ++i) + { + if (IsFull(old_ctrl[i])) + { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); + auto target = find_first_non_full(ctrl_, hash, capacity_); + size_t new_i = target.offset; + total_probe_length += target.probe_length; + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); + } + } + if (old_capacity) + { + SanitizerUnpoisonMemoryRegion(old_slots, sizeof(slot_type) * old_capacity); + Deallocate( + &alloc_ref(), old_ctrl, AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)) + ); + } + infoz().RecordRehash(total_probe_length); + } + + // Prunes control bytes to remove as many tombstones as possible. + // + // See the comment on `rehash_and_grow_if_necessary()`. 
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE + { + assert(IsValidCapacity(capacity_)); + assert(!is_small(capacity_)); + // Algorithm: + // - mark all DELETED slots as EMPTY + // - mark all FULL slots as DELETED + // - for each slot marked as DELETED + // hash = Hash(element) + // target = find_first_non_full(hash) + // if target is in the same group + // mark slot as FULL + // else if target is EMPTY + // transfer element to target + // mark slot as EMPTY + // mark target as FULL + // else if target is DELETED + // swap current element with target element + // mark target as FULL + // repeat procedure for current slot with moved from element (target) + ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); + alignas(slot_type) unsigned char raw[sizeof(slot_type)]; + size_t total_probe_length = 0; + slot_type* slot = reinterpret_cast(&raw); + for (size_t i = 0; i != capacity_; ++i) + { + if (!IsDeleted(ctrl_[i])) + continue; + const size_t hash = PolicyTraits::apply( + HashElement{hash_ref()}, PolicyTraits::element(slots_ + i) + ); + const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); + const size_t new_i = target.offset; + total_probe_length += target.probe_length; + + // Verify if the old and new i fall within the same group wrt the hash. + // If they do, we don't need to move the object as it falls already in the + // best probe we can. + const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); + const auto probe_index = [probe_offset, this](size_t pos) + { + return ((pos - probe_offset) & capacity_) / Group::kWidth; + }; + + // Element doesn't move. + if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) + { + SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + continue; + } + if (IsEmpty(ctrl_[new_i])) + { + // Transfer element to the empty spot. + // SetCtrl poisons/unpoisons the slots so we have to call it at the + // right time. + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); + SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); + } + else + { + assert(IsDeleted(ctrl_[new_i])); + SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + // Until we are done rehashing, DELETED marks previously FULL slots. + // Swap i and new_i elements. + PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); + PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); + PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); + --i; // repeat + } + } + reset_growth_left(); + infoz().RecordRehash(total_probe_length); + } + + // Called whenever the table *might* need to conditionally grow. + // + // This function is an optimization opportunity to perform a rehash even when + // growth is unnecessary, because vacating tombstones is beneficial for + // performance in the long-run. + void rehash_and_grow_if_necessary() + { + if (capacity_ == 0) + { + resize(1); + } + else if (capacity_ > Group::kWidth && + // Do these calcuations in 64-bit to avoid overflow. + size() * uint64_t{32} <= capacity_ * uint64_t{25}) + { + // Squash DELETED without growing if there is enough capacity. + // + // Rehash in place if the current size is <= 25/32 of capacity_. + // Rationale for such a high factor: 1) drop_deletes_without_resize() is + // faster than resize, and 2) it takes quite a bit of work to add + // tombstones. 
In the worst case, seems to take approximately 4 + // insert/erase pairs to create a single tombstone and so if we are + // rehashing because of tombstones, we can afford to rehash-in-place as + // long as we are reclaiming at least 1/8 the capacity without doing more + // than 2X the work. (Where "work" is defined to be size() for rehashing + // or rehashing in place, and 1 for an insert or erase.) But rehashing in + // place is faster per operation than inserting or even doubling the size + // of the table, so we actually afford to reclaim even less space from a + // resize-in-place. The decision is to rehash in place if we can reclaim + // at about 1/8th of the usable capacity (specifically 3/28 of the + // capacity) which means that the total cost of rehashing will be a small + // fraction of the total work. + // + // Here is output of an experiment using the BM_CacheInSteadyState + // benchmark running the old case (where we rehash-in-place only if we can + // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place + // if we can recover 3/32*capacity_). + // + // Note that although in the worst-case number of rehashes jumped up from + // 15 to 190, but the number of operations per second is almost the same. + // + // Abridged output of running BM_CacheInSteadyState benchmark from + // raw_hash_set_benchmark. N is the number of insert/erase operations. + // + // | OLD (recover >= 7/16 | NEW (recover >= 3/32) + // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes + // 448 | 145284 0.44 18 | 140118 0.44 19 + // 493 | 152546 0.24 11 | 151417 0.48 28 + // 538 | 151439 0.26 11 | 151152 0.53 38 + // 583 | 151765 0.28 11 | 150572 0.57 50 + // 628 | 150241 0.31 11 | 150853 0.61 66 + // 672 | 149602 0.33 12 | 150110 0.66 90 + // 717 | 149998 0.35 12 | 149531 0.70 129 + // 762 | 149836 0.37 13 | 148559 0.74 190 + // 807 | 149736 0.39 14 | 151107 0.39 14 + // 852 | 150204 0.42 15 | 151019 0.42 15 + drop_deletes_without_resize(); + } + else + { + // Otherwise grow the container. + resize(capacity_ * 2 + 1); + } + } + + bool has_element(const value_type& elem) const + { + size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); + auto seq = probe(ctrl_, hash, capacity_); + while (true) + { + Group g{ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == elem)) + return true; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + return false; + seq.next(); + assert(seq.index() <= capacity_ && "full table!"); + } + return false; + } + + // TODO(alkis): Optimize this assuming *this and that don't overlap. + raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) + { + raw_hash_set tmp(std::move(that)); + swap(tmp); + return *this; + } + raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) + { + raw_hash_set tmp(std::move(that), alloc_ref()); + swap(tmp); + return *this; + } + + protected: + // Attempts to find `key` in the table; if it isn't found, returns a slot that + // the value can be inserted into, with the control byte already set to + // `key`'s H2. 
+ template + std::pair find_or_prepare_insert(const K& key) + { + prefetch_heap_block(); + auto hash = hash_ref()(key); + auto seq = probe(ctrl_, hash, capacity_); + while (true) + { + Group g{ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(H2(hash))) + { + if (ABSL_PREDICT_TRUE(PolicyTraits::apply( + EqualElement{key, eq_ref()}, + PolicyTraits::element(slots_ + seq.offset(i)) + ))) + return {seq.offset(i), false}; + } + if (ABSL_PREDICT_TRUE(g.MaskEmpty())) + break; + seq.next(); + assert(seq.index() <= capacity_ && "full table!"); + } + return {prepare_insert(hash), true}; + } + + // Given the hash of a value not currently in the table, finds the next + // viable slot index to insert it at. + // + // REQUIRES: At least one non-full slot available. + size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE + { + auto target = find_first_non_full(ctrl_, hash, capacity_); + if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target.offset]))) + { + rehash_and_grow_if_necessary(); + target = find_first_non_full(ctrl_, hash, capacity_); + } + ++size_; + growth_left() -= IsEmpty(ctrl_[target.offset]); + SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); + infoz().RecordInsert(hash, target.probe_length); + return target.offset; + } + + // Constructs the value in the space pointed by the iterator. This only works + // after an unsuccessful find_or_prepare_insert() and before any other + // modifications happen in the raw_hash_set. + // + // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where + // k is the key decomposed from `forward(args)...`, and the bool + // returned by find_or_prepare_insert(k) was true. + // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). + template + void emplace_at(size_t i, Args&&... args) + { + PolicyTraits::construct(&alloc_ref(), slots_ + i, std::forward(args)...); + + assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == iterator_at(i) && "constructed value does not match the lookup key"); + } + + iterator iterator_at(size_t i) + { + return {ctrl_ + i, slots_ + i}; + } + const_iterator iterator_at(size_t i) const + { + return {ctrl_ + i, slots_ + i}; + } + + private: + friend struct RawHashSetTestOnlyAccess; + + void reset_growth_left() + { + growth_left() = CapacityToGrowth(capacity()) - size_; + } + + // The number of slots we can still fill without needing to rehash. + // + // This is stored separately due to tombstones: we do not include tombstones + // in the growth capacity, because we'd like to rehash when the table is + // otherwise filled with tombstones: otherwise, probe sequences might get + // unacceptably long without triggering a rehash. Callers can also force a + // rehash via the standard `rehash(0)`, which will recompute this value as a + // side-effect. + // + // See `CapacityToGrowth()`. + size_t& growth_left() + { + return settings_.template get<0>(); + } + + // Prefetch the heap-allocated memory region to resolve potential TLB misses. + // This is intended to overlap with execution of calculating the hash for a + // key. 
+ void prefetch_heap_block() const + { + base_internal::PrefetchT2(ctrl_); + } + + HashtablezInfoHandle& infoz() + { + return settings_.template get<1>(); + } + + hasher& hash_ref() + { + return settings_.template get<2>(); + } + const hasher& hash_ref() const + { + return settings_.template get<2>(); + } + key_equal& eq_ref() + { + return settings_.template get<3>(); + } + const key_equal& eq_ref() const + { + return settings_.template get<3>(); + } + allocator_type& alloc_ref() + { + return settings_.template get<4>(); + } + const allocator_type& alloc_ref() const + { + return settings_.template get<4>(); + } + + // TODO(alkis): Investigate removing some of these fields: + // - ctrl/slots can be derived from each other + // - size can be moved into the slot array + + // The control bytes (and, also, a pointer to the base of the backing array). + // + // This contains `capacity_ + 1 + NumClonedBytes()` entries, even + // when the table is empty (hence EmptyGroup). + ctrl_t* ctrl_ = EmptyGroup(); + // The beginning of the slots, located at `SlotOffset()` bytes after + // `ctrl_`. May be null for empty tables. + slot_type* slots_ = nullptr; + + // The number of filled slots. + size_t size_ = 0; + + // The total number of available slots. + size_t capacity_ = 0; + absl::container_internal::CompressedTuple + settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, allocator_type{}}; + }; + + // Erases all elements that satisfy the predicate `pred` from the container `c`. + template + typename raw_hash_set::size_type EraseIf( + Predicate& pred, raw_hash_set* c + ) + { + const auto initial_size = c->size(); + for (auto it = c->begin(), last = c->end(); it != last;) + { + if (pred(*it)) + { + c->erase(it++); + } + else + { + ++it; + } + } + return initial_size - c->size(); } - } - } - return m; - } - - static size_t LowerBoundAllocatedByteSize(size_t size) { - size_t capacity = GrowthToLowerboundCapacity(size); - if (capacity == 0) return 0; - size_t m = - AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); - size_t per_slot = Traits::space_used(static_cast(nullptr)); - if (per_slot != ~size_t{}) { - m += per_slot * size; - } - return m; - } -}; - -} // namespace hashtable_debug_internal -} // namespace container_internal -ABSL_NAMESPACE_END + + namespace hashtable_debug_internal + { + template + struct HashtableDebugAccess> + { + using Traits = typename Set::PolicyTraits; + using Slot = typename Traits::slot_type; + + static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) + { + size_t num_probes = 0; + size_t hash = set.hash_ref()(key); + auto seq = probe(set.ctrl_, hash, set.capacity_); + while (true) + { + container_internal::Group g{set.ctrl_ + seq.offset()}; + for (uint32_t i : g.Match(container_internal::H2(hash))) + { + if (Traits::apply( + typename Set::template EqualElement{ + key, set.eq_ref()}, + Traits::element(set.slots_ + seq.offset(i)) + )) + return num_probes; + ++num_probes; + } + if (g.MaskEmpty()) + return num_probes; + seq.next(); + ++num_probes; + } + } + + static size_t AllocatedByteSize(const Set& c) + { + size_t capacity = c.capacity_; + if (capacity == 0) + return 0; + size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); + + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) + { + m += per_slot * c.size(); + } + else + { + for (size_t i = 0; i != capacity; ++i) + { + if (container_internal::IsFull(c.ctrl_[i])) + { + m += Traits::space_used(c.slots_ + i); + } + } + } + return m; + } 
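// ---------------------------------------------------------------------------
// Hedged sketch (editor's illustration, not part of the patch): the byte
// accounting in AllocatedByteSize() above mirrors the single-allocation layout
// used by raw_hash_set -- control bytes first, then the slot array aligned to
// the slot type. The group width of 16 and the helper names below are
// assumptions for illustration only; they approximate the real SlotOffset()/
// AllocSize() helpers rather than reproduce them.
#include <cstddef>
#include <cstdio>

namespace layout_sketch
{
    constexpr std::size_t kGroupWidth = 16;  // assumed SSE2 group width

    // Control metadata: `capacity` bytes + 1 sentinel + (kGroupWidth - 1) cloned
    // bytes mirroring the first group (cf. the ctrl_ comment in the class).
    constexpr std::size_t ControlBytes(std::size_t capacity)
    {
        return capacity + 1 + (kGroupWidth - 1);
    }

    // Offset of the slot array: control bytes rounded up to the slot alignment.
    constexpr std::size_t SlotOffset(std::size_t capacity, std::size_t slot_align)
    {
        return (ControlBytes(capacity) + slot_align - 1) / slot_align * slot_align;
    }

    // Total size of the single backing allocation.
    constexpr std::size_t AllocSize(std::size_t capacity, std::size_t slot_size, std::size_t slot_align)
    {
        return SlotOffset(capacity, slot_align) + capacity * slot_size;
    }
}  // namespace layout_sketch

int main()
{
    // capacity 15, 8-byte/8-aligned slots: 31 control bytes rounded up to 32,
    // plus 15 * 8 bytes of slots == 152 bytes in one block.
    std::printf("%zu\n", layout_sketch::AllocSize(15, 8, 8));
}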
+ + static size_t LowerBoundAllocatedByteSize(size_t size) + { + size_t capacity = GrowthToLowerboundCapacity(size); + if (capacity == 0) + return 0; + size_t m = + AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); + size_t per_slot = Traits::space_used(static_cast(nullptr)); + if (per_slot != ~size_t{}) + { + m += per_slot * size; + } + return m; + } + }; + + } // namespace hashtable_debug_internal + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #undef ABSL_INTERNAL_ASSERT_IS_FULL diff --git a/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h index 5ff6fd7..46f3b17 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/test_instance_tracker.h @@ -20,255 +20,321 @@ #include "absl/types/compare.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace test_internal { - -// A type that counts number of occurrences of the type, the live occurrences of -// the type, as well as the number of copies, moves, swaps, and comparisons that -// have occurred on the type. This is used as a base class for the copyable, -// copyable+movable, and movable types below that are used in actual tests. Use -// InstanceTracker in tests to track the number of instances. -class BaseCountedInstance { - public: - explicit BaseCountedInstance(int x) : value_(x) { - ++num_instances_; - ++num_live_instances_; - } - BaseCountedInstance(const BaseCountedInstance& x) - : value_(x.value_), is_live_(x.is_live_) { - ++num_instances_; - if (is_live_) ++num_live_instances_; - ++num_copies_; - } - BaseCountedInstance(BaseCountedInstance&& x) - : value_(x.value_), is_live_(x.is_live_) { - x.is_live_ = false; - ++num_instances_; - ++num_moves_; - } - ~BaseCountedInstance() { - --num_instances_; - if (is_live_) --num_live_instances_; - } - - BaseCountedInstance& operator=(const BaseCountedInstance& x) { - value_ = x.value_; - if (is_live_) --num_live_instances_; - is_live_ = x.is_live_; - if (is_live_) ++num_live_instances_; - ++num_copies_; - return *this; - } - BaseCountedInstance& operator=(BaseCountedInstance&& x) { - value_ = x.value_; - if (is_live_) --num_live_instances_; - is_live_ = x.is_live_; - x.is_live_ = false; - ++num_moves_; - return *this; - } - - bool operator==(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ == x.value_; - } - - bool operator!=(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ != x.value_; - } - - bool operator<(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ < x.value_; - } - - bool operator>(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ > x.value_; - } - - bool operator<=(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ <= x.value_; - } - - bool operator>=(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ >= x.value_; - } - - absl::weak_ordering compare(const BaseCountedInstance& x) const { - ++num_comparisons_; - return value_ < x.value_ - ? absl::weak_ordering::less - : value_ == x.value_ ? 
absl::weak_ordering::equivalent - : absl::weak_ordering::greater; - } - - int value() const { - if (!is_live_) std::abort(); - return value_; - } - - friend std::ostream& operator<<(std::ostream& o, - const BaseCountedInstance& v) { - return o << "[value:" << v.value() << "]"; - } - - // Implementation of efficient swap() that counts swaps. - static void SwapImpl( - BaseCountedInstance& lhs, // NOLINT(runtime/references) - BaseCountedInstance& rhs) { // NOLINT(runtime/references) - using std::swap; - swap(lhs.value_, rhs.value_); - swap(lhs.is_live_, rhs.is_live_); - ++BaseCountedInstance::num_swaps_; - } - - private: - friend class InstanceTracker; - - int value_; - - // Indicates if the value is live, ie it hasn't been moved away from. - bool is_live_ = true; - - // Number of instances. - static int num_instances_; - - // Number of live instances (those that have not been moved away from.) - static int num_live_instances_; - - // Number of times that BaseCountedInstance objects were moved. - static int num_moves_; - - // Number of times that BaseCountedInstance objects were copied. - static int num_copies_; - - // Number of times that BaseCountedInstance objects were swapped. - static int num_swaps_; - - // Number of times that BaseCountedInstance objects were compared. - static int num_comparisons_; -}; - -// Helper to track the BaseCountedInstance instance counters. Expects that the -// number of instances and live_instances are the same when it is constructed -// and when it is destructed. -class InstanceTracker { - public: - InstanceTracker() - : start_instances_(BaseCountedInstance::num_instances_), - start_live_instances_(BaseCountedInstance::num_live_instances_) { - ResetCopiesMovesSwaps(); - } - ~InstanceTracker() { - if (instances() != 0) std::abort(); - if (live_instances() != 0) std::abort(); - } - - // Returns the number of BaseCountedInstance instances both containing valid - // values and those moved away from compared to when the InstanceTracker was - // constructed - int instances() const { - return BaseCountedInstance::num_instances_ - start_instances_; - } - - // Returns the number of live BaseCountedInstance instances compared to when - // the InstanceTracker was constructed - int live_instances() const { - return BaseCountedInstance::num_live_instances_ - start_live_instances_; - } - - // Returns the number of moves on BaseCountedInstance objects since - // construction or since the last call to ResetCopiesMovesSwaps(). - int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; } - - // Returns the number of copies on BaseCountedInstance objects since - // construction or the last call to ResetCopiesMovesSwaps(). - int copies() const { - return BaseCountedInstance::num_copies_ - start_copies_; - } - - // Returns the number of swaps on BaseCountedInstance objects since - // construction or the last call to ResetCopiesMovesSwaps(). - int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; } - - // Returns the number of comparisons on BaseCountedInstance objects since - // construction or the last call to ResetCopiesMovesSwaps(). - int comparisons() const { - return BaseCountedInstance::num_comparisons_ - start_comparisons_; - } - - // Resets the base values for moves, copies, comparisons, and swaps to the - // current values, so that subsequent Get*() calls for moves, copies, - // comparisons, and swaps will compare to the situation at the point of this - // call. 
- void ResetCopiesMovesSwaps() { - start_moves_ = BaseCountedInstance::num_moves_; - start_copies_ = BaseCountedInstance::num_copies_; - start_swaps_ = BaseCountedInstance::num_swaps_; - start_comparisons_ = BaseCountedInstance::num_comparisons_; - } - - private: - int start_instances_; - int start_live_instances_; - int start_moves_; - int start_copies_; - int start_swaps_; - int start_comparisons_; -}; - -// Copyable, not movable. -class CopyableOnlyInstance : public BaseCountedInstance { - public: - explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {} - CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default; - CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default; - - friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) { - BaseCountedInstance::SwapImpl(lhs, rhs); - } - - static bool supports_move() { return false; } -}; - -// Copyable and movable. -class CopyableMovableInstance : public BaseCountedInstance { - public: - explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {} - CopyableMovableInstance(const CopyableMovableInstance& rhs) = default; - CopyableMovableInstance(CopyableMovableInstance&& rhs) = default; - CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) = - default; - CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default; - - friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) { - BaseCountedInstance::SwapImpl(lhs, rhs); - } - - static bool supports_move() { return true; } -}; - -// Only movable, not default-constructible. -class MovableOnlyInstance : public BaseCountedInstance { - public: - explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {} - MovableOnlyInstance(MovableOnlyInstance&& other) = default; - MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default; - - friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) { - BaseCountedInstance::SwapImpl(lhs, rhs); - } - - static bool supports_move() { return true; } -}; - -} // namespace test_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace test_internal + { + + // A type that counts number of occurrences of the type, the live occurrences of + // the type, as well as the number of copies, moves, swaps, and comparisons that + // have occurred on the type. This is used as a base class for the copyable, + // copyable+movable, and movable types below that are used in actual tests. Use + // InstanceTracker in tests to track the number of instances. 
+ class BaseCountedInstance + { + public: + explicit BaseCountedInstance(int x) : + value_(x) + { + ++num_instances_; + ++num_live_instances_; + } + BaseCountedInstance(const BaseCountedInstance& x) : + value_(x.value_), + is_live_(x.is_live_) + { + ++num_instances_; + if (is_live_) + ++num_live_instances_; + ++num_copies_; + } + BaseCountedInstance(BaseCountedInstance&& x) : + value_(x.value_), + is_live_(x.is_live_) + { + x.is_live_ = false; + ++num_instances_; + ++num_moves_; + } + ~BaseCountedInstance() + { + --num_instances_; + if (is_live_) + --num_live_instances_; + } + + BaseCountedInstance& operator=(const BaseCountedInstance& x) + { + value_ = x.value_; + if (is_live_) + --num_live_instances_; + is_live_ = x.is_live_; + if (is_live_) + ++num_live_instances_; + ++num_copies_; + return *this; + } + BaseCountedInstance& operator=(BaseCountedInstance&& x) + { + value_ = x.value_; + if (is_live_) + --num_live_instances_; + is_live_ = x.is_live_; + x.is_live_ = false; + ++num_moves_; + return *this; + } + + bool operator==(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ == x.value_; + } + + bool operator!=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ != x.value_; + } + + bool operator<(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ < x.value_; + } + + bool operator>(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ > x.value_; + } + + bool operator<=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ <= x.value_; + } + + bool operator>=(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ >= x.value_; + } + + absl::weak_ordering compare(const BaseCountedInstance& x) const + { + ++num_comparisons_; + return value_ < x.value_ ? absl::weak_ordering::less : value_ == x.value_ ? absl::weak_ordering::equivalent : + absl::weak_ordering::greater; + } + + int value() const + { + if (!is_live_) + std::abort(); + return value_; + } + + friend std::ostream& operator<<(std::ostream& o, const BaseCountedInstance& v) + { + return o << "[value:" << v.value() << "]"; + } + + // Implementation of efficient swap() that counts swaps. + static void SwapImpl( + BaseCountedInstance& lhs, // NOLINT(runtime/references) + BaseCountedInstance& rhs + ) + { // NOLINT(runtime/references) + using std::swap; + swap(lhs.value_, rhs.value_); + swap(lhs.is_live_, rhs.is_live_); + ++BaseCountedInstance::num_swaps_; + } + + private: + friend class InstanceTracker; + + int value_; + + // Indicates if the value is live, ie it hasn't been moved away from. + bool is_live_ = true; + + // Number of instances. + static int num_instances_; + + // Number of live instances (those that have not been moved away from.) + static int num_live_instances_; + + // Number of times that BaseCountedInstance objects were moved. + static int num_moves_; + + // Number of times that BaseCountedInstance objects were copied. + static int num_copies_; + + // Number of times that BaseCountedInstance objects were swapped. + static int num_swaps_; + + // Number of times that BaseCountedInstance objects were compared. + static int num_comparisons_; + }; + + // Helper to track the BaseCountedInstance instance counters. Expects that the + // number of instances and live_instances are the same when it is constructed + // and when it is destructed. 
+ class InstanceTracker + { + public: + InstanceTracker() : + start_instances_(BaseCountedInstance::num_instances_), + start_live_instances_(BaseCountedInstance::num_live_instances_) + { + ResetCopiesMovesSwaps(); + } + ~InstanceTracker() + { + if (instances() != 0) + std::abort(); + if (live_instances() != 0) + std::abort(); + } + + // Returns the number of BaseCountedInstance instances both containing valid + // values and those moved away from compared to when the InstanceTracker was + // constructed + int instances() const + { + return BaseCountedInstance::num_instances_ - start_instances_; + } + + // Returns the number of live BaseCountedInstance instances compared to when + // the InstanceTracker was constructed + int live_instances() const + { + return BaseCountedInstance::num_live_instances_ - start_live_instances_; + } + + // Returns the number of moves on BaseCountedInstance objects since + // construction or since the last call to ResetCopiesMovesSwaps(). + int moves() const + { + return BaseCountedInstance::num_moves_ - start_moves_; + } + + // Returns the number of copies on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int copies() const + { + return BaseCountedInstance::num_copies_ - start_copies_; + } + + // Returns the number of swaps on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int swaps() const + { + return BaseCountedInstance::num_swaps_ - start_swaps_; + } + + // Returns the number of comparisons on BaseCountedInstance objects since + // construction or the last call to ResetCopiesMovesSwaps(). + int comparisons() const + { + return BaseCountedInstance::num_comparisons_ - start_comparisons_; + } + + // Resets the base values for moves, copies, comparisons, and swaps to the + // current values, so that subsequent Get*() calls for moves, copies, + // comparisons, and swaps will compare to the situation at the point of this + // call. + void ResetCopiesMovesSwaps() + { + start_moves_ = BaseCountedInstance::num_moves_; + start_copies_ = BaseCountedInstance::num_copies_; + start_swaps_ = BaseCountedInstance::num_swaps_; + start_comparisons_ = BaseCountedInstance::num_comparisons_; + } + + private: + int start_instances_; + int start_live_instances_; + int start_moves_; + int start_copies_; + int start_swaps_; + int start_comparisons_; + }; + + // Copyable, not movable. + class CopyableOnlyInstance : public BaseCountedInstance + { + public: + explicit CopyableOnlyInstance(int x) : + BaseCountedInstance(x) + { + } + CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default; + CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default; + + friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return false; + } + }; + + // Copyable and movable. 
+ class CopyableMovableInstance : public BaseCountedInstance + { + public: + explicit CopyableMovableInstance(int x) : + BaseCountedInstance(x) + { + } + CopyableMovableInstance(const CopyableMovableInstance& rhs) = default; + CopyableMovableInstance(CopyableMovableInstance&& rhs) = default; + CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) = + default; + CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default; + + friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return true; + } + }; + + // Only movable, not default-constructible. + class MovableOnlyInstance : public BaseCountedInstance + { + public: + explicit MovableOnlyInstance(int x) : + BaseCountedInstance(x) + { + } + MovableOnlyInstance(MovableOnlyInstance&& other) = default; + MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default; + + friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) + { + BaseCountedInstance::SwapImpl(lhs, rhs); + } + + static bool supports_move() + { + return true; + } + }; + + } // namespace test_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/tracked.h b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h index 29f5829..84c9bd1 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/tracked.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/tracked.h @@ -22,62 +22,85 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -// A class that tracks its copies and moves so that it can be queried in tests. -template -class Tracked { - public: - Tracked() {} - // NOLINTNEXTLINE(runtime/explicit) - Tracked(const T& val) : val_(val) {} - Tracked(const Tracked& that) - : val_(that.val_), - num_moves_(that.num_moves_), - num_copies_(that.num_copies_) { - ++(*num_copies_); - } - Tracked(Tracked&& that) - : val_(std::move(that.val_)), - num_moves_(std::move(that.num_moves_)), - num_copies_(std::move(that.num_copies_)) { - ++(*num_moves_); - } - Tracked& operator=(const Tracked& that) { - val_ = that.val_; - num_moves_ = that.num_moves_; - num_copies_ = that.num_copies_; - ++(*num_copies_); - } - Tracked& operator=(Tracked&& that) { - val_ = std::move(that.val_); - num_moves_ = std::move(that.num_moves_); - num_copies_ = std::move(that.num_copies_); - ++(*num_moves_); - } + // A class that tracks its copies and moves so that it can be queried in tests. 
+ template + class Tracked + { + public: + Tracked() + { + } + // NOLINTNEXTLINE(runtime/explicit) + Tracked(const T& val) : + val_(val) + { + } + Tracked(const Tracked& that) : + val_(that.val_), + num_moves_(that.num_moves_), + num_copies_(that.num_copies_) + { + ++(*num_copies_); + } + Tracked(Tracked&& that) : + val_(std::move(that.val_)), + num_moves_(std::move(that.num_moves_)), + num_copies_(std::move(that.num_copies_)) + { + ++(*num_moves_); + } + Tracked& operator=(const Tracked& that) + { + val_ = that.val_; + num_moves_ = that.num_moves_; + num_copies_ = that.num_copies_; + ++(*num_copies_); + } + Tracked& operator=(Tracked&& that) + { + val_ = std::move(that.val_); + num_moves_ = std::move(that.num_moves_); + num_copies_ = std::move(that.num_copies_); + ++(*num_moves_); + } - const T& val() const { return val_; } + const T& val() const + { + return val_; + } - friend bool operator==(const Tracked& a, const Tracked& b) { - return a.val_ == b.val_; - } - friend bool operator!=(const Tracked& a, const Tracked& b) { - return !(a == b); - } + friend bool operator==(const Tracked& a, const Tracked& b) + { + return a.val_ == b.val_; + } + friend bool operator!=(const Tracked& a, const Tracked& b) + { + return !(a == b); + } - size_t num_copies() { return *num_copies_; } - size_t num_moves() { return *num_moves_; } + size_t num_copies() + { + return *num_copies_; + } + size_t num_moves() + { + return *num_moves_; + } - private: - T val_; - std::shared_ptr num_moves_ = std::make_shared(0); - std::shared_ptr num_copies_ = std::make_shared(0); -}; + private: + T val_; + std::shared_ptr num_moves_ = std::make_shared(0); + std::shared_ptr num_copies_ = std::make_shared(0); + }; -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h index 7e84dc2..00248e1 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_constructor_test.h @@ -24,471 +24,523 @@ #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_policy_testing.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class ConstructorTest : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(ConstructorTest); - -TYPED_TEST_P(ConstructorTest, NoArgs) { - TypeParam m; - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); -} - -TYPED_TEST_P(ConstructorTest, BucketCount) { - TypeParam m(123); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHash) { - using H = typename TypeParam::hasher; - H hasher; - TypeParam m(123, hasher); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) { - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - H hasher; - E equal; - TypeParam m(123, hasher, equal); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - 
EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) { - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -template -struct is_std_unordered_map : std::false_type {}; - -template -struct is_std_unordered_map> : std::true_type {}; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ConstructorTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ConstructorTest); + + TYPED_TEST_P(ConstructorTest, NoArgs) + { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, BucketCount) + { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHash) + { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + template + struct is_std_unordered_map : std::false_type + { + }; + + template + struct is_std_unordered_map> : std::true_type + { + }; #if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17) -using has_cxx14_std_apis = std::true_type; + using has_cxx14_std_apis = std::true_type; #else -using has_cxx14_std_apis = std::false_type; + using has_cxx14_std_apis = std::false_type; #endif -template -using expect_cxx14_apis = - absl::disjunction>, - has_cxx14_std_apis>; - -template -void BucketCountAllocTest(std::false_type) {} - -template -void BucketCountAllocTest(std::true_type) { - using A = typename TypeParam::allocator_type; - A alloc(0); - TypeParam m(123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountAlloc) { - BucketCountAllocTest(expect_cxx14_apis()); -} - -template -void BucketCountHashAllocTest(std::false_type) {} - -template -void BucketCountHashAllocTest(std::true_type) { - using H = typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A 
alloc(0); - TypeParam m(123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) { - BucketCountHashAllocTest(expect_cxx14_apis()); -} + template + using expect_cxx14_apis = + absl::disjunction>, has_cxx14_std_apis>; + + template + void BucketCountAllocTest(std::false_type) + { + } + + template + void BucketCountAllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountAlloc) + { + BucketCountAllocTest(expect_cxx14_apis()); + } + + template + void BucketCountHashAllocTest(std::false_type) + { + } + + template + void BucketCountHashAllocTest(std::true_type) + { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) + { + BucketCountHashAllocTest(expect_cxx14_apis()); + } #if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS -using has_alloc_std_constructors = std::true_type; + using has_alloc_std_constructors = std::true_type; #else -using has_alloc_std_constructors = std::false_type; + using has_alloc_std_constructors = std::false_type; #endif -template -using expect_alloc_constructors = - absl::disjunction>, - has_alloc_std_constructors>; - -template -void AllocTest(std::false_type) {} - -template -void AllocTest(std::true_type) { - using A = typename TypeParam::allocator_type; - A alloc(0); - TypeParam m(alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(m, ::testing::UnorderedElementsAre()); -} - -TYPED_TEST_P(ConstructorTest, Alloc) { - AllocTest(expect_alloc_constructors()); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); - TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -template -void InputIteratorBucketAllocTest(std::false_type) {} - -template -void InputIteratorBucketAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using A = typename TypeParam::allocator_type; - A alloc(0); - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); - TypeParam m(values.begin(), values.end(), 123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { - 
InputIteratorBucketAllocTest(expect_cxx14_apis()); -} - -template -void InputIteratorBucketHashAllocTest(std::false_type) {} - -template -void InputIteratorBucketHashAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A alloc(0); - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::UniqueGenerator()); - TypeParam m(values.begin(), values.end(), 123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { - InputIteratorBucketHashAllocTest(expect_cxx14_apis()); -} - -TYPED_TEST_P(ConstructorTest, CopyConstructor) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); - TypeParam n(m); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -template -void CopyConstructorAllocTest(std::false_type) {} - -template -void CopyConstructorAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); - TypeParam n(m, A(11)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_NE(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { - CopyConstructorAllocTest(expect_alloc_constructors()); -} - -// TODO(alkis): Test non-propagating allocators on copy constructors. 
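// ---------------------------------------------------------------------------
// Hedged sketch (editor's illustration, not part of the patch): the allocator
// checks above (A alloc(0), A(11), EXPECT_EQ/EXPECT_NE on get_allocator()) rely
// on a stateful allocator that carries an integer tag and compares equal exactly
// when the tags match. A minimal allocator with that behaviour might look like
// this; the name TaggedAlloc and its members are hypothetical.
#include <cstddef>
#include <memory>

template<class T>
class TaggedAlloc
{
public:
    using value_type = T;

    explicit TaggedAlloc(int id = 0) :
        id_(id)
    {
    }
    template<class U>
    TaggedAlloc(const TaggedAlloc<U>& other) :
        id_(other.id())
    {
    }

    T* allocate(std::size_t n)
    {
        return std::allocator<T>().allocate(n);
    }
    void deallocate(T* p, std::size_t n)
    {
        std::allocator<T>().deallocate(p, n);
    }

    int id() const
    {
        return id_;
    }

    // Equality is what EXPECT_EQ(m.get_allocator(), alloc) exercises above.
    friend bool operator==(const TaggedAlloc& a, const TaggedAlloc& b)
    {
        return a.id_ == b.id_;
    }
    friend bool operator!=(const TaggedAlloc& a, const TaggedAlloc& b)
    {
        return !(a == b);
    }

private:
    int id_;
};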
- -TYPED_TEST_P(ConstructorTest, MoveConstructor) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); - TypeParam t(m); - TypeParam n(std::move(t)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -template -void MoveConstructorAllocTest(std::false_type) {} - -template -void MoveConstructorAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(gen()); - TypeParam t(m); - TypeParam n(std::move(t), A(1)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_NE(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { - MoveConstructorAllocTest(expect_alloc_constructors()); -} - -// TODO(alkis): Test non-propagating allocators on move constructors. - -TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(values, 123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -template -void InitializerListBucketAllocTest(std::false_type) {} - -template -void InitializerListBucketAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using A = typename TypeParam::allocator_type; - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - A alloc(0); - TypeParam m(values, 123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { - InitializerListBucketAllocTest(expect_cxx14_apis()); -} - -template -void InitializerListBucketHashAllocTest(std::false_type) {} - -template -void InitializerListBucketHashAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A alloc(0); - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m(values, 123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { - InitializerListBucketHashAllocTest(expect_cxx14_apis()); -} 
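// ---------------------------------------------------------------------------
// Hedged sketch (editor's illustration, not part of the patch): from the user's
// side, the initializer-list cases above exercise the standard unordered-map
// constructor set, which absl::flat_hash_map also provides. The bucket-count
// argument is only a lower bound on bucket_count(), which is exactly what the
// EXPECT_GE checks assert.
#include <initializer_list>
#include <string>
#include "absl/container/flat_hash_map.h"

void initializer_list_constructors_sketch()
{
    using Map = absl::flat_hash_map<std::string, int>;

    std::initializer_list<Map::value_type> init = {{"a", 1}, {"b", 2}};

    // (init, bucket_count, hasher, key_equal, allocator)
    Map full(init, 16, Map::hasher{}, Map::key_equal{}, Map::allocator_type{});

    // C++14-style shorthand: (init, bucket_count, allocator)
    Map short_form(init, 16, Map::allocator_type{});

    (void)full;
    (void)short_form;
}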
- -TYPED_TEST_P(ConstructorTest, Assignment) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); - TypeParam n; - n = m; - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m, n); -} - -// TODO(alkis): Test [non-]propagating allocators on move/copy assignments -// (it depends on traits). - -TYPED_TEST_P(ConstructorTest, MoveAssignment) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::UniqueGenerator gen; - TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); - TypeParam t(m); - TypeParam n; - n = std::move(t); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m; - m = values; - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - TypeParam m({gen(), gen(), gen()}); - TypeParam n({gen()}); - n = m; - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - TypeParam m({gen(), gen(), gen()}); - TypeParam t(m); - TypeParam n({gen()}); - n = std::move(t); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m; - m = values; - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { - using T = hash_internal::GeneratedType; - hash_internal::UniqueGenerator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m(values); - m = *&m; // Avoid -Wself-assign - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); -} - -// We cannot test self move as standard states that it leaves standard -// containers in unspecified state (and in practice in causes memory-leak -// according to heap-checker!). 
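// ---------------------------------------------------------------------------
// Hedged sketch (editor's illustration, not part of the patch): the
// REGISTER_TYPED_TEST_SUITE_P call that follows is paired, in each container's
// own test file, with an INSTANTIATE_TYPED_TEST_SUITE_P that plugs a concrete
// map type into every ConstructorTest case. The stateful hasher/equality/
// allocator types come from hash_policy_testing.h so that expressions such as
// `A alloc(0)` above compile; treat the exact spelling here as an approximation
// of the real test files, not a verbatim copy.
#include <utility>
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"

namespace absl
{
    namespace container_internal
    {
        namespace
        {
            using MapTypes = ::testing::Types<
                absl::flat_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual, Alloc<std::pair<const int, int>>>>;

            INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);

        }  // namespace
    }  // namespace container_internal
}  // namespace absl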
- -REGISTER_TYPED_TEST_SUITE_P( - ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, - BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, - InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, - InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, - MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, - InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment, - MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, - MoveAssignmentOverwritesExisting, - AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf); - -} // namespace container_internal -ABSL_NAMESPACE_END + template + using expect_alloc_constructors = + absl::disjunction>, has_alloc_std_constructors>; + + template + void AllocTest(std::false_type) + { + } + + template + void AllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(m, ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, Alloc) + { + AllocTest(expect_alloc_constructors()); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InputIteratorBucketAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) + { + InputIteratorBucketAllocTest(expect_cxx14_apis()); + } + + template + void InputIteratorBucketHashAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::UniqueGenerator()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) + { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename 
TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void CopyConstructorAllocTest(std::false_type) + { + } + + template + void CopyConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) + { + CopyConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on copy constructors. + + TYPED_TEST_P(ConstructorTest, MoveConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void MoveConstructorAllocTest(std::false_type) + { + } + + template + void MoveConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(gen()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) + { + MoveConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on move constructors. 
+ + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InitializerListBucketAllocTest(std::false_type) + { + } + + template + void InitializerListBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) + { + InitializerListBucketAllocTest(expect_cxx14_apis()); + } + + template + void InitializerListBucketHashAllocTest(std::false_type) + { + } + + template + void InitializerListBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::UniqueGenerator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) + { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, Assignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::UniqueGenerator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + // TODO(alkis): Test [non-]propagating allocators on move/copy assignments + // (it depends on traits). 
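The TODO above hinges on std::allocator_traits: whether the left-hand container keeps its own allocator or adopts the source's on copy/move assignment is governed by the propagate_on_container_copy_assignment / propagate_on_container_move_assignment traits. A minimal sketch of how such a check could be phrased (kept outside the patch itself; the helper name ExpectedAllocatorAfterCopyAssign is hypothetical):

#include <memory>

// Sketch only, not part of unordered_map_constructor_test.h: returns the
// allocator a standard container is expected to hold after copy assignment.
template <class Map>
typename Map::allocator_type ExpectedAllocatorAfterCopyAssign(const Map& source, const Map& target)
{
    using A = typename Map::allocator_type;
    // If the allocator propagates on copy assignment, the target adopts the
    // source's allocator; otherwise it keeps the one it already had.
    return std::allocator_traits<A>::propagate_on_container_copy_assignment::value ?
               source.get_allocator() :
               target.get_allocator();
}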
+
+        TYPED_TEST_P(ConstructorTest, MoveAssignment)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            using H = typename TypeParam::hasher;
+            using E = typename TypeParam::key_equal;
+            using A = typename TypeParam::allocator_type;
+            H hasher;
+            E equal;
+            A alloc(0);
+            hash_internal::UniqueGenerator<T> gen;
+            TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+            TypeParam t(m);
+            TypeParam n;
+            n = std::move(t);
+            EXPECT_EQ(m.hash_function(), n.hash_function());
+            EXPECT_EQ(m.key_eq(), n.key_eq());
+            EXPECT_EQ(m, n);
+        }
+
+        TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            hash_internal::UniqueGenerator<T> gen;
+            std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+            TypeParam m;
+            m = values;
+            EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+        }
+
+        TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            hash_internal::UniqueGenerator<T> gen;
+            TypeParam m({gen(), gen(), gen()});
+            TypeParam n({gen()});
+            n = m;
+            EXPECT_EQ(m, n);
+        }
+
+        TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            hash_internal::UniqueGenerator<T> gen;
+            TypeParam m({gen(), gen(), gen()});
+            TypeParam t(m);
+            TypeParam n({gen()});
+            n = std::move(t);
+            EXPECT_EQ(m, n);
+        }
+
+        TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            hash_internal::UniqueGenerator<T> gen;
+            std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+            TypeParam m;
+            m = values;
+            EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+        }
+
+        TYPED_TEST_P(ConstructorTest, AssignmentOnSelf)
+        {
+            using T = hash_internal::GeneratedType<TypeParam>;
+            hash_internal::UniqueGenerator<T> gen;
+            std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+            TypeParam m(values);
+            m = *&m;  // Avoid -Wself-assign
+            EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+        }
+
+        // We cannot test self move as the standard states that it leaves standard
+        // containers in an unspecified state (and in practice it causes a memory leak
+        // according to the heap-checker!).
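For context (not part of the patch): once the suite is registered via REGISTER_TYPED_TEST_SUITE_P just below, a test .cc file instantiates it for concrete map types. A minimal sketch, assuming the StatefulTestingHash, StatefulTestingEqual, and Alloc policy types that hash_policy_testing.h provides; the suite prefix UnorderedMapSketch and the single-entry type list are illustrative:

#include <unordered_map>
#include <utility>

#include "gtest/gtest.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"

namespace absl
{
    ABSL_NAMESPACE_BEGIN
    namespace container_internal
    {
        namespace
        {
            // One concrete map type is enough to compile and run every
            // ConstructorTest case registered in this header.
            using MapTypes = ::testing::Types<
                std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual, Alloc<std::pair<const int, int>>>>;

            INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMapSketch, ConstructorTest, MapTypes);
        }  // namespace
    }  // namespace container_internal
    ABSL_NAMESPACE_END
}  // namespace absl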
+ + REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf + ); + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h index 3713cd9..17241b8 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_lookup_test.h @@ -20,98 +20,106 @@ #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_policy_testing.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -template -class LookupTest : public ::testing::Test {}; + template + class LookupTest : public ::testing::Test + { + }; -TYPED_TEST_SUITE_P(LookupTest); + TYPED_TEST_SUITE_P(LookupTest); -TYPED_TEST_P(LookupTest, At) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - for (const auto& p : values) { - const auto& val = m.at(p.first); - EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first); - } -} + TYPED_TEST_P(LookupTest, At) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + for (const auto& p : values) + { + const auto& val = m.at(p.first); + EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first); + } + } -TYPED_TEST_P(LookupTest, OperatorBracket) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& p : values) { - auto& val = m[p.first]; - EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first); - val = p.second; - } - for (const auto& p : values) - EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first); -} + TYPED_TEST_P(LookupTest, OperatorBracket) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + { + auto& val = m[p.first]; + EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first); + val = p.second; + } + for (const auto& p : values) + EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first); + } -TYPED_TEST_P(LookupTest, Count) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const 
auto& p : values) - EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first); - m.insert(values.begin(), values.end()); - for (const auto& p : values) - EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first); -} + TYPED_TEST_P(LookupTest, Count) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) + EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first); + } -TYPED_TEST_P(LookupTest, Find) { - using std::get; - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& p : values) - EXPECT_TRUE(m.end() == m.find(p.first)) - << ::testing::PrintToString(p.first); - m.insert(values.begin(), values.end()); - for (const auto& p : values) { - auto it = m.find(p.first); - EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first); - EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first); - } -} + TYPED_TEST_P(LookupTest, Find) + { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + EXPECT_TRUE(m.end() == m.find(p.first)) + << ::testing::PrintToString(p.first); + m.insert(values.begin(), values.end()); + for (const auto& p : values) + { + auto it = m.find(p.first); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first); + EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first); + } + } -TYPED_TEST_P(LookupTest, EqualRange) { - using std::get; - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& p : values) { - auto r = m.equal_range(p.first); - ASSERT_EQ(0, std::distance(r.first, r.second)); - } - m.insert(values.begin(), values.end()); - for (const auto& p : values) { - auto r = m.equal_range(p.first); - ASSERT_EQ(1, std::distance(r.first, r.second)); - EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first); - } -} + TYPED_TEST_P(LookupTest, EqualRange) + { + using std::get; + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& p : values) + { + auto r = m.equal_range(p.first); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& p : values) + { + auto r = m.equal_range(p.first); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first); + } + } -REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, - EqualRange); + REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find, EqualRange); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h 
index 7d48cdb..631606d 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_members_test.h @@ -20,68 +20,71 @@ #include "gtest/gtest.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -template -class MembersTest : public ::testing::Test {}; + template + class MembersTest : public ::testing::Test + { + }; -TYPED_TEST_SUITE_P(MembersTest); + TYPED_TEST_SUITE_P(MembersTest); -template -void UseType() {} + template + void UseType() + { + } -TYPED_TEST_P(MembersTest, Typedefs) { - EXPECT_TRUE((std::is_same, - typename TypeParam::value_type>())); - EXPECT_TRUE((absl::conjunction< - absl::negation>, - std::is_integral>())); - EXPECT_TRUE((absl::conjunction< - std::is_signed, - std::is_integral>())); - EXPECT_TRUE((std::is_convertible< - decltype(std::declval()( - std::declval())), - size_t>())); - EXPECT_TRUE((std::is_convertible< - decltype(std::declval()( - std::declval(), - std::declval())), - bool>())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same::pointer, - typename TypeParam::pointer>())); - EXPECT_TRUE( - (std::is_same::const_pointer, - typename TypeParam::const_pointer>())); -} + TYPED_TEST_P(MembersTest, Typedefs) + { + EXPECT_TRUE((std::is_same, typename TypeParam::value_type>())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval() + )), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + std::declval() + )), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, typename TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, typename TypeParam::const_pointer>()) + ); + } -TYPED_TEST_P(MembersTest, SimpleFunctions) { - EXPECT_GT(TypeParam().max_size(), 0); -} + TYPED_TEST_P(MembersTest, SimpleFunctions) + { + EXPECT_GT(TypeParam().max_size(), 0); + } -TYPED_TEST_P(MembersTest, BeginEnd) { - TypeParam t = {typename TypeParam::value_type{}}; - EXPECT_EQ(t.begin(), t.cbegin()); - EXPECT_EQ(t.end(), t.cend()); - EXPECT_NE(t.begin(), t.end()); - EXPECT_NE(t.cbegin(), t.cend()); -} + TYPED_TEST_P(MembersTest, BeginEnd) + { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); + } -REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h index 4d9ab30..fea5d50 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_map_modifiers_test.h @@ -22,331 +22,349 @@ 
#include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_policy_testing.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class ModifiersTest : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(ModifiersTest); - -TYPED_TEST_P(ModifiersTest, Clear) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - m.clear(); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); - EXPECT_TRUE(m.empty()); -} - -TYPED_TEST_P(ModifiersTest, Insert) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - auto p = m.insert(val); - EXPECT_TRUE(p.second); - EXPECT_EQ(val, *p.first); - T val2 = {val.first, hash_internal::Generator()()}; - p = m.insert(val2); - EXPECT_FALSE(p.second); - EXPECT_EQ(val, *p.first); -} - -TYPED_TEST_P(ModifiersTest, InsertHint) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - auto it = m.insert(m.end(), val); - EXPECT_TRUE(it != m.end()); - EXPECT_EQ(val, *it); - T val2 = {val.first, hash_internal::Generator()()}; - it = m.insert(it, val2); - EXPECT_TRUE(it != m.end()); - EXPECT_EQ(val, *it); -} - -TYPED_TEST_P(ModifiersTest, InsertRange) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - m.insert(values.begin(), values.end()); - ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(val); - EXPECT_EQ(m.bucket_count(), original_capacity); - T val2 = {val.first, hash_internal::Generator()()}; - m.insert(val2); - EXPECT_EQ(m.bucket_count(), original_capacity); -} - -TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ModifiersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ModifiersTest); + + TYPED_TEST_P(ModifiersTest, Clear) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); + } + + TYPED_TEST_P(ModifiersTest, Insert) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.insert(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, InsertHint) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert(m.end(), 
val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.insert(it, val2); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, InsertRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + T val2 = {val.first, hash_internal::Generator()()}; + m.insert(val2); + EXPECT_EQ(m.bucket_count(), original_capacity); + } + + TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) + { #if !defined(__GLIBCXX__) - using T = hash_internal::GeneratedType; - std::vector base_values; - std::generate_n(std::back_inserter(base_values), 10, - hash_internal::Generator()); - std::vector values; - while (values.size() != 100) { - std::copy_n(base_values.begin(), 10, std::back_inserter(values)); - } - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(values.begin(), values.end()); - EXPECT_EQ(m.bucket_count(), original_capacity); + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator()); + std::vector values; + while (values.size() != 100) + { + std::copy_n(base_values.begin(), 10, std::back_inserter(values)); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); #endif -} + } -TYPED_TEST_P(ModifiersTest, InsertOrAssign) { + TYPED_TEST_P(ModifiersTest, InsertOrAssign) + { #ifdef UNORDERED_MAP_CXX17 - using std::get; - using K = typename TypeParam::key_type; - using V = typename TypeParam::mapped_type; - K k = hash_internal::Generator()(); - V val = hash_internal::Generator()(); - TypeParam m; - auto p = m.insert_or_assign(k, val); - EXPECT_TRUE(p.second); - EXPECT_EQ(k, get<0>(*p.first)); - EXPECT_EQ(val, get<1>(*p.first)); - V val2 = hash_internal::Generator()(); - p = m.insert_or_assign(k, val2); - EXPECT_FALSE(p.second); - EXPECT_EQ(k, get<0>(*p.first)); - EXPECT_EQ(val2, get<1>(*p.first)); + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert_or_assign(k, val); + EXPECT_TRUE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val, get<1>(*p.first)); + V val2 = hash_internal::Generator()(); + p = m.insert_or_assign(k, val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(k, get<0>(*p.first)); + EXPECT_EQ(val2, get<1>(*p.first)); #endif -} + } -TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) { + TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) + { #ifdef UNORDERED_MAP_CXX17 - using std::get; - using K = typename TypeParam::key_type; - using V = typename TypeParam::mapped_type; - K k = hash_internal::Generator()(); - V val = hash_internal::Generator()(); - TypeParam m; - auto it = m.insert_or_assign(m.end(), k, val); - EXPECT_TRUE(it != 
m.end()); - EXPECT_EQ(k, get<0>(*it)); - EXPECT_EQ(val, get<1>(*it)); - V val2 = hash_internal::Generator()(); - it = m.insert_or_assign(it, k, val2); - EXPECT_EQ(k, get<0>(*it)); - EXPECT_EQ(val2, get<1>(*it)); + using std::get; + using K = typename TypeParam::key_type; + using V = typename TypeParam::mapped_type; + K k = hash_internal::Generator()(); + V val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert_or_assign(m.end(), k, val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val, get<1>(*it)); + V val2 = hash_internal::Generator()(); + it = m.insert_or_assign(it, k, val2); + EXPECT_EQ(k, get<0>(*it)); + EXPECT_EQ(val2, get<1>(*it)); #endif -} - -TYPED_TEST_P(ModifiersTest, Emplace) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. - auto p = m.emplace(val); - EXPECT_TRUE(p.second); - EXPECT_EQ(val, *p.first); - T val2 = {val.first, hash_internal::Generator()()}; - p = m.emplace(val2); - EXPECT_FALSE(p.second); - EXPECT_EQ(val, *p.first); -} - -TYPED_TEST_P(ModifiersTest, EmplaceHint) { - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. - auto it = m.emplace_hint(m.end(), val); - EXPECT_EQ(val, *it); - T val2 = {val.first, hash_internal::Generator()()}; - it = m.emplace_hint(it, val2); - EXPECT_EQ(val, *it); -} - -TYPED_TEST_P(ModifiersTest, TryEmplace) { + } + + TYPED_TEST_P(ModifiersTest, Emplace) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.emplace(val2); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, EmplaceHint) + { + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.emplace_hint(it, val2); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, TryEmplace) + { #ifdef UNORDERED_MAP_CXX17 - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. - auto p = m.try_emplace(val.first, val.second); - EXPECT_TRUE(p.second); - EXPECT_EQ(val, *p.first); - T val2 = {val.first, hash_internal::Generator()()}; - p = m.try_emplace(val2.first, val2.second); - EXPECT_FALSE(p.second); - EXPECT_EQ(val, *p.first); + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. 
Perhaps + // with test traits/policy. + auto p = m.try_emplace(val.first, val.second); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, val2.second); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); #endif -} + } -TYPED_TEST_P(ModifiersTest, TryEmplaceHint) { + TYPED_TEST_P(ModifiersTest, TryEmplaceHint) + { #ifdef UNORDERED_MAP_CXX17 - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. - auto it = m.try_emplace(m.end(), val.first, val.second); - EXPECT_EQ(val, *it); - T val2 = {val.first, hash_internal::Generator()()}; - it = m.try_emplace(it, val2.first, val2.second); - EXPECT_EQ(val, *it); + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.try_emplace(m.end(), val.first, val.second); + EXPECT_EQ(val, *it); + T val2 = {val.first, hash_internal::Generator()()}; + it = m.try_emplace(it, val2.first, val2.second); + EXPECT_EQ(val, *it); #endif -} - -template -using IfNotVoid = typename std::enable_if::value, V>::type; - -// In openmap we chose not to return the iterator from erase because that's -// more expensive. As such we adapt erase to return an iterator here. -struct EraseFirst { - template - auto operator()(Map* m, int) const - -> IfNotVoiderase(m->begin()))> { - return m->erase(m->begin()); - } - template - typename Map::iterator operator()(Map* m, ...) 
const { - auto it = m->begin(); - m->erase(it++); - return it; - } -}; - -TYPED_TEST_P(ModifiersTest, Erase) { - using T = hash_internal::GeneratedType; - using std::get; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - auto& first = *m.begin(); - std::vector values2; - for (const auto& val : values) - if (get<0>(val) != get<0>(first)) values2.push_back(val); - auto it = EraseFirst()(&m, 0); - ASSERT_TRUE(it != m.end()); - EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(), - values2.end())); -} - -TYPED_TEST_P(ModifiersTest, EraseRange) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - auto it = m.erase(m.begin(), m.end()); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); - EXPECT_TRUE(it == m.end()); -} - -TYPED_TEST_P(ModifiersTest, EraseKey) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_EQ(1, m.erase(values[0].first)); - EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); - EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, - values.end())); -} - -TYPED_TEST_P(ModifiersTest, Swap) { - using T = hash_internal::GeneratedType; - std::vector v1; - std::vector v2; - std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); - std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); - TypeParam m1(v1.begin(), v1.end()); - TypeParam m2(v2.begin(), v2.end()); - EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1)); - EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2)); - m1.swap(m2); - EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2)); - EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1)); -} - -// TODO(alkis): Write tests for extract. -// TODO(alkis): Write tests for merge. - -REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, InsertOrAssign, - InsertOrAssignHint, Emplace, EmplaceHint, - TryEmplace, TryEmplaceHint, Erase, EraseRange, - EraseKey, Swap); - -template -struct is_unique_ptr : std::false_type {}; - -template -struct is_unique_ptr> : std::true_type {}; - -template -class UniquePtrModifiersTest : public ::testing::Test { - protected: - UniquePtrModifiersTest() { - static_assert(is_unique_ptr::value, - "UniquePtrModifiersTyest may only be called with a " - "std::unique_ptr value type."); - } -}; - -GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest); - -TYPED_TEST_SUITE_P(UniquePtrModifiersTest); - -// Test that we do not move from rvalue arguments if an insertion does not -// happen. -TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) { + } + + template + using IfNotVoid = typename std::enable_if::value, V>::type; + + // In openmap we chose not to return the iterator from erase because that's + // more expensive. As such we adapt erase to return an iterator here. 
+ struct EraseFirst + { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> + { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) const + { + auto it = m->begin(); + m->erase(it++); + return it; + } + }; + + TYPED_TEST_P(ModifiersTest, Erase) + { + using T = hash_internal::GeneratedType; + using std::get; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto& first = *m.begin(); + std::vector values2; + for (const auto& val : values) + if (get<0>(val) != get<0>(first)) + values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end())); + } + + TYPED_TEST_P(ModifiersTest, EraseRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); + } + + TYPED_TEST_P(ModifiersTest, EraseKey) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0].first)); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end())); + } + + TYPED_TEST_P(ModifiersTest, Swap) + { + using T = hash_internal::GeneratedType; + std::vector v1; + std::vector v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1)); + } + + // TODO(alkis): Write tests for extract. + // TODO(alkis): Write tests for merge. + + REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, InsertOrAssign, InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace, TryEmplaceHint, Erase, EraseRange, EraseKey, Swap); + + template + struct is_unique_ptr : std::false_type + { + }; + + template + struct is_unique_ptr> : std::true_type + { + }; + + template + class UniquePtrModifiersTest : public ::testing::Test + { + protected: + UniquePtrModifiersTest() + { + static_assert(is_unique_ptr::value, "UniquePtrModifiersTyest may only be called with a " + "std::unique_ptr value type."); + } + }; + + GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest); + + TYPED_TEST_SUITE_P(UniquePtrModifiersTest); + + // Test that we do not move from rvalue arguments if an insertion does not + // happen. 
+ TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) + { #ifdef UNORDERED_MAP_CXX17 - using T = hash_internal::GeneratedType; - using V = typename TypeParam::mapped_type; - T val = hash_internal::Generator()(); - TypeParam m; - auto p = m.try_emplace(val.first, std::move(val.second)); - EXPECT_TRUE(p.second); - // A moved from std::unique_ptr is guaranteed to be nullptr. - EXPECT_EQ(val.second, nullptr); - T val2 = {val.first, hash_internal::Generator()()}; - p = m.try_emplace(val2.first, std::move(val2.second)); - EXPECT_FALSE(p.second); - EXPECT_NE(val2.second, nullptr); + using T = hash_internal::GeneratedType; + using V = typename TypeParam::mapped_type; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.try_emplace(val.first, std::move(val.second)); + EXPECT_TRUE(p.second); + // A moved from std::unique_ptr is guaranteed to be nullptr. + EXPECT_EQ(val.second, nullptr); + T val2 = {val.first, hash_internal::Generator()()}; + p = m.try_emplace(val2.first, std::move(val2.second)); + EXPECT_FALSE(p.second); + EXPECT_NE(val2.second, nullptr); #endif -} + } -REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace); + REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h index af1116e..4496996 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_constructor_test.h @@ -25,472 +25,527 @@ #include "absl/container/internal/hash_policy_testing.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class ConstructorTest : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(ConstructorTest); - -TYPED_TEST_P(ConstructorTest, NoArgs) { - TypeParam m; - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); -} - -TYPED_TEST_P(ConstructorTest, BucketCount) { - TypeParam m(123); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHash) { - using H = typename TypeParam::hasher; - H hasher; - TypeParam m(123, hasher); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) { - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - H hasher; - E equal; - TypeParam m(123, hasher, equal); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) { - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), 
::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); - - const auto& cm = m; - EXPECT_EQ(cm.hash_function(), hasher); - EXPECT_EQ(cm.key_eq(), equal); - EXPECT_EQ(cm.get_allocator(), alloc); - EXPECT_TRUE(cm.empty()); - EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre()); - EXPECT_GE(cm.bucket_count(), 123); -} - -template -struct is_std_unordered_set : std::false_type {}; - -template -struct is_std_unordered_set> : std::true_type {}; +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ConstructorTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ConstructorTest); + + TYPED_TEST_P(ConstructorTest, NoArgs) + { + TypeParam m; + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, BucketCount) + { + TypeParam m(123); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHash) + { + using H = typename TypeParam::hasher; + H hasher; + TypeParam m(123, hasher); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + H hasher; + E equal; + TypeParam m(123, hasher, equal); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) + { + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + + const auto& cm = m; + EXPECT_EQ(cm.hash_function(), hasher); + EXPECT_EQ(cm.key_eq(), equal); + EXPECT_EQ(cm.get_allocator(), alloc); + EXPECT_TRUE(cm.empty()); + EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre()); + EXPECT_GE(cm.bucket_count(), 123); + } + + template + struct is_std_unordered_set : std::false_type + { + }; + + template + struct is_std_unordered_set> : std::true_type + { + }; #if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17) -using has_cxx14_std_apis = std::true_type; + using has_cxx14_std_apis = std::true_type; #else -using has_cxx14_std_apis = std::false_type; + using has_cxx14_std_apis = std::false_type; #endif -template -using expect_cxx14_apis = - absl::disjunction>, - has_cxx14_std_apis>; - -template -void BucketCountAllocTest(std::false_type) {} - -template -void BucketCountAllocTest(std::true_type) { - using A = typename TypeParam::allocator_type; - A alloc(0); - TypeParam m(123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountAlloc) { - BucketCountAllocTest(expect_cxx14_apis()); -} - -template -void BucketCountHashAllocTest(std::false_type) {} - -template -void BucketCountHashAllocTest(std::true_type) { - using H = 
typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A alloc(0); - TypeParam m(123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) { - BucketCountHashAllocTest(expect_cxx14_apis()); -} + template + using expect_cxx14_apis = + absl::disjunction>, has_cxx14_std_apis>; + + template + void BucketCountAllocTest(std::false_type) + { + } + + template + void BucketCountAllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountAlloc) + { + BucketCountAllocTest(expect_cxx14_apis()); + } + + template + void BucketCountHashAllocTest(std::false_type) + { + } + + template + void BucketCountHashAllocTest(std::true_type) + { + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + TypeParam m(123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) + { + BucketCountHashAllocTest(expect_cxx14_apis()); + } #if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS -using has_alloc_std_constructors = std::true_type; + using has_alloc_std_constructors = std::true_type; #else -using has_alloc_std_constructors = std::false_type; + using has_alloc_std_constructors = std::false_type; #endif -template -using expect_alloc_constructors = - absl::disjunction>, - has_alloc_std_constructors>; - -template -void AllocTest(std::false_type) {} - -template -void AllocTest(std::true_type) { - using A = typename TypeParam::allocator_type; - A alloc(0); - TypeParam m(alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_TRUE(m.empty()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); -} - -TYPED_TEST_P(ConstructorTest, Alloc) { - AllocTest(expect_alloc_constructors()); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - std::vector values; - for (size_t i = 0; i != 10; ++i) - values.push_back(hash_internal::Generator()()); - TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -template -void InputIteratorBucketAllocTest(std::false_type) {} - -template -void InputIteratorBucketAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using A = typename TypeParam::allocator_type; - A alloc(0); - std::vector values; - for (size_t i = 0; i != 10; ++i) - values.push_back(hash_internal::Generator()()); - TypeParam m(values.begin(), values.end(), 123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), 
::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) { - InputIteratorBucketAllocTest(expect_cxx14_apis()); -} - -template -void InputIteratorBucketHashAllocTest(std::false_type) {} - -template -void InputIteratorBucketHashAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A alloc(0); - std::vector values; - for (size_t i = 0; i != 10; ++i) - values.push_back(hash_internal::Generator()()); - TypeParam m(values.begin(), values.end(), 123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) { - InputIteratorBucketHashAllocTest(expect_cxx14_apis()); -} - -TYPED_TEST_P(ConstructorTest, CopyConstructor) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); - TypeParam n(m); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); - EXPECT_NE(TypeParam(0, hasher, equal, alloc), n); -} - -template -void CopyConstructorAllocTest(std::false_type) {} - -template -void CopyConstructorAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); - TypeParam n(m, A(11)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_NE(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) { - CopyConstructorAllocTest(expect_alloc_constructors()); -} - -// TODO(alkis): Test non-propagating allocators on copy constructors. 
- -TYPED_TEST_P(ConstructorTest, MoveConstructor) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); - TypeParam t(m); - TypeParam n(std::move(t)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -template -void MoveConstructorAllocTest(std::false_type) {} - -template -void MoveConstructorAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(123, hasher, equal, alloc); - for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator()()); - TypeParam t(m); - TypeParam n(std::move(t), A(1)); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_NE(m.get_allocator(), n.get_allocator()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) { - MoveConstructorAllocTest(expect_alloc_constructors()); -} - -// TODO(alkis): Test non-propagating allocators on move constructors. - -TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - TypeParam m(values, 123, hasher, equal, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.key_eq(), equal); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -template -void InitializerListBucketAllocTest(std::false_type) {} - -template -void InitializerListBucketAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using A = typename TypeParam::allocator_type; - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - A alloc(0); - TypeParam m(values, 123, alloc); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) { - InitializerListBucketAllocTest(expect_cxx14_apis()); -} - -template -void InitializerListBucketHashAllocTest(std::false_type) {} - -template -void InitializerListBucketHashAllocTest(std::true_type) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using A = typename TypeParam::allocator_type; - H hasher; - A alloc(0); - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m(values, 123, hasher, alloc); - EXPECT_EQ(m.hash_function(), hasher); - EXPECT_EQ(m.get_allocator(), alloc); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_GE(m.bucket_count(), 123); -} - -TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) { - InitializerListBucketHashAllocTest(expect_cxx14_apis()); -} - -TYPED_TEST_P(ConstructorTest, CopyAssignment) { 
- using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::Generator gen; - TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); - TypeParam n; - n = m; - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m, n); -} - -// TODO(alkis): Test [non-]propagating allocators on move/copy assignments -// (it depends on traits). - -TYPED_TEST_P(ConstructorTest, MoveAssignment) { - using T = hash_internal::GeneratedType; - using H = typename TypeParam::hasher; - using E = typename TypeParam::key_equal; - using A = typename TypeParam::allocator_type; - H hasher; - E equal; - A alloc(0); - hash_internal::Generator gen; - TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); - TypeParam t(m); - TypeParam n; - n = std::move(t); - EXPECT_EQ(m.hash_function(), n.hash_function()); - EXPECT_EQ(m.key_eq(), n.key_eq()); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m; - m = values; - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - TypeParam m({gen(), gen(), gen()}); - TypeParam n({gen()}); - n = m; - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - TypeParam m({gen(), gen(), gen()}); - TypeParam t(m); - TypeParam n({gen()}); - n = std::move(t); - EXPECT_EQ(m, n); -} - -TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m; - m = values; - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) { - using T = hash_internal::GeneratedType; - hash_internal::Generator gen; - std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; - TypeParam m(values); - m = *&m; // Avoid -Wself-assign. 
- EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); -} - -REGISTER_TYPED_TEST_SUITE_P( - ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, - BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, - InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, - InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, - MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, - InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment, - MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, - MoveAssignmentOverwritesExisting, - AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf); - -} // namespace container_internal -ABSL_NAMESPACE_END + template + using expect_alloc_constructors = + absl::disjunction>, has_alloc_std_constructors>; + + template + void AllocTest(std::false_type) + { + } + + template + void AllocTest(std::true_type) + { + using A = typename TypeParam::allocator_type; + A alloc(0); + TypeParam m(alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_TRUE(m.empty()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + } + + TYPED_TEST_P(ConstructorTest, Alloc) + { + AllocTest(expect_alloc_constructors()); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InputIteratorBucketAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) + { + InputIteratorBucketAllocTest(expect_cxx14_apis()); + } + + template + void InputIteratorBucketHashAllocTest(std::false_type) + { + } + + template + void InputIteratorBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + std::vector values; + for (size_t i = 0; i != 10; ++i) + values.push_back(hash_internal::Generator()()); + TypeParam m(values.begin(), values.end(), 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) + { + InputIteratorBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructor) + { + using T = 
hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam n(m); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + EXPECT_NE(TypeParam(0, hasher, equal, alloc), n); + } + + template + void CopyConstructorAllocTest(std::false_type) + { + } + + template + void CopyConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam n(m, A(11)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) + { + CopyConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on copy constructors. + + TYPED_TEST_P(ConstructorTest, MoveConstructor) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + template + void MoveConstructorAllocTest(std::false_type) + { + } + + template + void MoveConstructorAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(123, hasher, equal, alloc); + for (size_t i = 0; i != 10; ++i) + m.insert(hash_internal::Generator()()); + TypeParam t(m); + TypeParam n(std::move(t), A(1)); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_NE(m.get_allocator(), n.get_allocator()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) + { + MoveConstructorAllocTest(expect_alloc_constructors()); + } + + // TODO(alkis): Test non-propagating allocators on move constructors. 
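For context, a typed suite like the constructor tests above only executes once a concrete container type is plugged in from a test .cc file. The sketch below shows roughly what that could look like; the stateful hasher/equality/allocator helpers are assumed to come from hash_policy_testing.h, and the exact names and element type used here are illustrative rather than part of this patch.

    #include "gtest/gtest.h"
    #include "absl/container/internal/hash_policy_testing.h"
    #include "absl/container/internal/unordered_set_constructor_test.h"
    #include "absl/container/node_hash_set.h"

    namespace absl {
    namespace container_internal {
    namespace {
    // The suite constructs allocators as `A alloc(0)` and compares hashers with
    // operator==, so stateful test policies are needed; plain std::allocator and
    // std::hash would not satisfy those calls.
    using SetTypes = ::testing::Types<
        node_hash_set<int, hash_internal::StatefulTestingHash,
                      hash_internal::StatefulTestingEqual, hash_internal::Alloc<int>>>;
    INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, ConstructorTest, SetTypes);
    }  // namespace
    }  // namespace container_internal
    }  // namespace absl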
+ + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + TypeParam m(values, 123, hasher, equal, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.key_eq(), equal); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + template + void InitializerListBucketAllocTest(std::false_type) + { + } + + template + void InitializerListBucketAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using A = typename TypeParam::allocator_type; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + A alloc(0); + TypeParam m(values, 123, alloc); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) + { + InitializerListBucketAllocTest(expect_cxx14_apis()); + } + + template + void InitializerListBucketHashAllocTest(std::false_type) + { + } + + template + void InitializerListBucketHashAllocTest(std::true_type) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using A = typename TypeParam::allocator_type; + H hasher; + A alloc(0); + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values, 123, hasher, alloc); + EXPECT_EQ(m.hash_function(), hasher); + EXPECT_EQ(m.get_allocator(), alloc); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_GE(m.bucket_count(), 123); + } + + TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) + { + InitializerListBucketHashAllocTest(expect_cxx14_apis()); + } + + TYPED_TEST_P(ConstructorTest, CopyAssignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam n; + n = m; + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + // TODO(alkis): Test [non-]propagating allocators on move/copy assignments + // (it depends on traits). 
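The TODO above concerns allocator propagation on assignment, which for allocator-aware containers is governed by std::allocator_traits. A minimal sketch of the trait in isolation (generic C++, not code from this header): an allocator that defines the propagate_on_container_* aliases as std::true_type is transferred along with the elements on assignment and swap, whereas with the default std::false_type the destination container keeps its own allocator.

    #include <cstddef>
    #include <memory>
    #include <type_traits>

    template <class T>
    struct PropagatingAlloc {
      using value_type = T;
      // Opt in: copy/move assignment and swap also transfer the allocator itself.
      using propagate_on_container_copy_assignment = std::true_type;
      using propagate_on_container_move_assignment = std::true_type;
      using propagate_on_container_swap = std::true_type;

      PropagatingAlloc() = default;
      template <class U>
      PropagatingAlloc(const PropagatingAlloc<U>&) {}

      T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
      void deallocate(T* p, std::size_t n) { std::allocator<T>().deallocate(p, n); }
    };

    template <class T, class U>
    bool operator==(const PropagatingAlloc<T>&, const PropagatingAlloc<U>&) { return true; }
    template <class T, class U>
    bool operator!=(const PropagatingAlloc<T>&, const PropagatingAlloc<U>&) { return false; }

With these aliases set, `lhs = rhs;` leaves lhs.get_allocator() equal to rhs.get_allocator(); with a non-propagating allocator (the default), the destination keeps its original allocator and must reallocate its storage through it.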
+ + TYPED_TEST_P(ConstructorTest, MoveAssignment) + { + using T = hash_internal::GeneratedType; + using H = typename TypeParam::hasher; + using E = typename TypeParam::key_equal; + using A = typename TypeParam::allocator_type; + H hasher; + E equal; + A alloc(0); + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc); + TypeParam t(m); + TypeParam n; + n = std::move(t); + EXPECT_EQ(m.hash_function(), n.hash_function()); + EXPECT_EQ(m.key_eq(), n.key_eq()); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam n({gen()}); + n = m; + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + TypeParam m({gen(), gen(), gen()}); + TypeParam t(m); + TypeParam n({gen()}); + n = std::move(t); + EXPECT_EQ(m, n); + } + + TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m; + m = values; + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) + { + using T = hash_internal::GeneratedType; + hash_internal::Generator gen; + std::initializer_list values = {gen(), gen(), gen(), gen(), gen()}; + TypeParam m(values); + m = *&m; // Avoid -Wself-assign. 
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + REGISTER_TYPED_TEST_SUITE_P( + ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual, BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc, InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc, InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc, MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc, InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment, MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting, MoveAssignmentOverwritesExisting, AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf + ); + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h index b35f766..7775f6d 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_lookup_test.h @@ -20,72 +20,75 @@ #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_policy_testing.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -template -class LookupTest : public ::testing::Test {}; + template + class LookupTest : public ::testing::Test + { + }; -TYPED_TEST_SUITE_P(LookupTest); + TYPED_TEST_SUITE_P(LookupTest); -TYPED_TEST_P(LookupTest, Count) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& v : values) - EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v); - m.insert(values.begin(), values.end()); - for (const auto& v : values) - EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v); -} + TYPED_TEST_P(LookupTest, Count) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) + EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v); + } -TYPED_TEST_P(LookupTest, Find) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& v : values) - EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v); - m.insert(values.begin(), values.end()); - for (const auto& v : values) { - typename TypeParam::iterator it = m.find(v); - static_assert(std::is_same::value, - ""); - static_assert(std::is_same())>::value, - ""); - EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v); - EXPECT_EQ(v, *it) << ::testing::PrintToString(v); - } -} + TYPED_TEST_P(LookupTest, Find) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v); + m.insert(values.begin(), values.end()); + for (const auto& v : values) + { + typename 
TypeParam::iterator it = m.find(v); + static_assert(std::is_same::value, ""); + static_assert(std::is_same())>::value, ""); + EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v); + EXPECT_EQ(v, *it) << ::testing::PrintToString(v); + } + } -TYPED_TEST_P(LookupTest, EqualRange) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - for (const auto& v : values) { - auto r = m.equal_range(v); - ASSERT_EQ(0, std::distance(r.first, r.second)); - } - m.insert(values.begin(), values.end()); - for (const auto& v : values) { - auto r = m.equal_range(v); - ASSERT_EQ(1, std::distance(r.first, r.second)); - EXPECT_EQ(v, *r.first); - } -} + TYPED_TEST_P(LookupTest, EqualRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + for (const auto& v : values) + { + auto r = m.equal_range(v); + ASSERT_EQ(0, std::distance(r.first, r.second)); + } + m.insert(values.begin(), values.end()); + for (const auto& v : values) + { + auto r = m.equal_range(v); + ASSERT_EQ(1, std::distance(r.first, r.second)); + EXPECT_EQ(v, *r.first); + } + } -REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); + REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h index 4c5e104..ccc772f 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_members_test.h @@ -20,67 +20,71 @@ #include "gtest/gtest.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { -template -class MembersTest : public ::testing::Test {}; + template + class MembersTest : public ::testing::Test + { + }; -TYPED_TEST_SUITE_P(MembersTest); + TYPED_TEST_SUITE_P(MembersTest); -template -void UseType() {} + template + void UseType() + { + } -TYPED_TEST_P(MembersTest, Typedefs) { - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((absl::conjunction< - absl::negation>, - std::is_integral>())); - EXPECT_TRUE((absl::conjunction< - std::is_signed, - std::is_integral>())); - EXPECT_TRUE((std::is_convertible< - decltype(std::declval()( - std::declval())), - size_t>())); - EXPECT_TRUE((std::is_convertible< - decltype(std::declval()( - std::declval(), - std::declval())), - bool>())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same())); - EXPECT_TRUE((std::is_same::pointer, - typename TypeParam::pointer>())); - EXPECT_TRUE( - (std::is_same::const_pointer, - typename TypeParam::const_pointer>())); -} + TYPED_TEST_P(MembersTest, Typedefs) + { + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((absl::conjunction< + absl::negation>, + std::is_integral>())); + EXPECT_TRUE((absl::conjunction< + std::is_signed, + std::is_integral>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval() + )), + size_t>())); + EXPECT_TRUE((std::is_convertible< + decltype(std::declval()( + std::declval(), + 
std::declval() + )), + bool>())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same())); + EXPECT_TRUE((std::is_same::pointer, typename TypeParam::pointer>())); + EXPECT_TRUE( + (std::is_same::const_pointer, typename TypeParam::const_pointer>()) + ); + } -TYPED_TEST_P(MembersTest, SimpleFunctions) { - EXPECT_GT(TypeParam().max_size(), 0); -} + TYPED_TEST_P(MembersTest, SimpleFunctions) + { + EXPECT_GT(TypeParam().max_size(), 0); + } -TYPED_TEST_P(MembersTest, BeginEnd) { - TypeParam t = {typename TypeParam::value_type{}}; - EXPECT_EQ(t.begin(), t.cbegin()); - EXPECT_EQ(t.end(), t.cend()); - EXPECT_NE(t.begin(), t.end()); - EXPECT_NE(t.cbegin(), t.cend()); -} + TYPED_TEST_P(MembersTest, BeginEnd) + { + TypeParam t = {typename TypeParam::value_type{}}; + EXPECT_EQ(t.begin(), t.cbegin()); + EXPECT_EQ(t.end(), t.cend()); + EXPECT_NE(t.begin(), t.end()); + EXPECT_NE(t.cbegin(), t.cend()); + } -REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); + REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd); -} // namespace container_internal -ABSL_NAMESPACE_END + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h index d8864bb..92137f4 100644 --- a/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h +++ b/CAPI/cpp/grpc/include/absl/container/internal/unordered_set_modifiers_test.h @@ -20,202 +20,212 @@ #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/hash_policy_testing.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { - -template -class ModifiersTest : public ::testing::Test {}; - -TYPED_TEST_SUITE_P(ModifiersTest); - -TYPED_TEST_P(ModifiersTest, Clear) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - m.clear(); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_TRUE(m.empty()); -} - -TYPED_TEST_P(ModifiersTest, Insert) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - auto p = m.insert(val); - EXPECT_TRUE(p.second); - EXPECT_EQ(val, *p.first); - p = m.insert(val); - EXPECT_FALSE(p.second); -} - -TYPED_TEST_P(ModifiersTest, InsertHint) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - auto it = m.insert(m.end(), val); - EXPECT_TRUE(it != m.end()); - EXPECT_EQ(val, *it); - it = m.insert(it, val); - EXPECT_TRUE(it != m.end()); - EXPECT_EQ(val, *it); -} - -TYPED_TEST_P(ModifiersTest, InsertRange) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m; - m.insert(values.begin(), values.end()); - ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); -} - -TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(val); - EXPECT_EQ(m.bucket_count(), 
original_capacity); - m.insert(val); - EXPECT_EQ(m.bucket_count(), original_capacity); -} - -TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + + template + class ModifiersTest : public ::testing::Test + { + }; + + TYPED_TEST_SUITE_P(ModifiersTest); + + TYPED_TEST_P(ModifiersTest, Clear) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + m.clear(); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(m.empty()); + } + + TYPED_TEST_P(ModifiersTest, Insert) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + auto p = m.insert(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.insert(val); + EXPECT_FALSE(p.second); + } + + TYPED_TEST_P(ModifiersTest, InsertHint) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + auto it = m.insert(m.end(), val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + it = m.insert(it, val); + EXPECT_TRUE(it != m.end()); + EXPECT_EQ(val, *it); + } + + TYPED_TEST_P(ModifiersTest, InsertRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m; + m.insert(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + } + + TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + m.insert(val); + EXPECT_EQ(m.bucket_count(), original_capacity); + } + + TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) + { #if !defined(__GLIBCXX__) - using T = hash_internal::GeneratedType; - std::vector base_values; - std::generate_n(std::back_inserter(base_values), 10, - hash_internal::Generator()); - std::vector values; - while (values.size() != 100) { - values.insert(values.end(), base_values.begin(), base_values.end()); - } - TypeParam m; - m.reserve(10); - const size_t original_capacity = m.bucket_count(); - m.insert(values.begin(), values.end()); - EXPECT_EQ(m.bucket_count(), original_capacity); + using T = hash_internal::GeneratedType; + std::vector base_values; + std::generate_n(std::back_inserter(base_values), 10, hash_internal::Generator()); + std::vector values; + while (values.size() != 100) + { + values.insert(values.end(), base_values.begin(), base_values.end()); + } + TypeParam m; + m.reserve(10); + const size_t original_capacity = m.bucket_count(); + m.insert(values.begin(), values.end()); + EXPECT_EQ(m.bucket_count(), original_capacity); #endif -} - -TYPED_TEST_P(ModifiersTest, Emplace) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. 
- auto p = m.emplace(val); - EXPECT_TRUE(p.second); - EXPECT_EQ(val, *p.first); - p = m.emplace(val); - EXPECT_FALSE(p.second); - EXPECT_EQ(val, *p.first); -} - -TYPED_TEST_P(ModifiersTest, EmplaceHint) { - using T = hash_internal::GeneratedType; - T val = hash_internal::Generator()(); - TypeParam m; - // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps - // with test traits/policy. - auto it = m.emplace_hint(m.end(), val); - EXPECT_EQ(val, *it); - it = m.emplace_hint(it, val); - EXPECT_EQ(val, *it); -} - -template -using IfNotVoid = typename std::enable_if::value, V>::type; - -// In openmap we chose not to return the iterator from erase because that's -// more expensive. As such we adapt erase to return an iterator here. -struct EraseFirst { - template - auto operator()(Map* m, int) const - -> IfNotVoiderase(m->begin()))> { - return m->erase(m->begin()); - } - template - typename Map::iterator operator()(Map* m, ...) const { - auto it = m->begin(); - m->erase(it++); - return it; - } -}; - -TYPED_TEST_P(ModifiersTest, Erase) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - std::vector values2; - for (const auto& val : values) - if (val != *m.begin()) values2.push_back(val); - auto it = EraseFirst()(&m, 0); - ASSERT_TRUE(it != m.end()); - EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), - values2.end())); -} - -TYPED_TEST_P(ModifiersTest, EraseRange) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - auto it = m.erase(m.begin(), m.end()); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); - EXPECT_TRUE(it == m.end()); -} - -TYPED_TEST_P(ModifiersTest, EraseKey) { - using T = hash_internal::GeneratedType; - std::vector values; - std::generate_n(std::back_inserter(values), 10, - hash_internal::Generator()); - TypeParam m(values.begin(), values.end()); - ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); - EXPECT_EQ(1, m.erase(values[0])); - EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); - EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, - values.end())); -} - -TYPED_TEST_P(ModifiersTest, Swap) { - using T = hash_internal::GeneratedType; - std::vector v1; - std::vector v2; - std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); - std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); - TypeParam m1(v1.begin(), v1.end()); - TypeParam m2(v2.begin(), v2.end()); - EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1)); - EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2)); - m1.swap(m2); - EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2)); - EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1)); -} - -// TODO(alkis): Write tests for extract. -// TODO(alkis): Write tests for merge. 
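The EraseFirst adapter in the removed code above relies on overload ranking: the call site passes a literal 0, so the overload taking `int` is preferred, but it only survives substitution when erase(iterator) returns a non-void iterator; otherwise the ellipsis fallback is selected. A stripped-down sketch of the same idiom follows (the names are illustrative, not from this patch):

    #include <type_traits>

    template <class V>
    using IfNotVoidDemo = typename std::enable_if<!std::is_void<V>::value, V>::type;

    struct EraseFirstDemo {
      // Preferred when erase(iterator) yields a usable iterator; an exact match on
      // int ranks above the ellipsis conversion.
      template <class Map>
      auto operator()(Map* m, int) const
          -> IfNotVoidDemo<decltype(m->erase(m->begin()))> {
        return m->erase(m->begin());
      }
      // Fallback for containers whose erase(iterator) returns void: advance a copy
      // of begin() past the erased element and return that instead.
      template <class Map>
      typename Map::iterator operator()(Map* m, ...) const {
        auto it = m->begin();
        m->erase(it++);
        return it;
      }
    };
    // Used as EraseFirstDemo()(&m, 0); the 0 argument makes the int overload win
    // whenever it is well-formed.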
- -REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, - InsertRange, InsertWithinCapacity, - InsertRangeWithinCapacity, Emplace, EmplaceHint, - Erase, EraseRange, EraseKey, Swap); - -} // namespace container_internal -ABSL_NAMESPACE_END + } + + TYPED_TEST_P(ModifiersTest, Emplace) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto p = m.emplace(val); + EXPECT_TRUE(p.second); + EXPECT_EQ(val, *p.first); + p = m.emplace(val); + EXPECT_FALSE(p.second); + EXPECT_EQ(val, *p.first); + } + + TYPED_TEST_P(ModifiersTest, EmplaceHint) + { + using T = hash_internal::GeneratedType; + T val = hash_internal::Generator()(); + TypeParam m; + // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps + // with test traits/policy. + auto it = m.emplace_hint(m.end(), val); + EXPECT_EQ(val, *it); + it = m.emplace_hint(it, val); + EXPECT_EQ(val, *it); + } + + template + using IfNotVoid = typename std::enable_if::value, V>::type; + + // In openmap we chose not to return the iterator from erase because that's + // more expensive. As such we adapt erase to return an iterator here. + struct EraseFirst + { + template + auto operator()(Map* m, int) const + -> IfNotVoiderase(m->begin()))> + { + return m->erase(m->begin()); + } + template + typename Map::iterator operator()(Map* m, ...) const + { + auto it = m->begin(); + m->erase(it++); + return it; + } + }; + + TYPED_TEST_P(ModifiersTest, Erase) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + std::vector values2; + for (const auto& val : values) + if (val != *m.begin()) + values2.push_back(val); + auto it = EraseFirst()(&m, 0); + ASSERT_TRUE(it != m.end()); + EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it)); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(), values2.end())); + } + + TYPED_TEST_P(ModifiersTest, EraseRange) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + auto it = m.erase(m.begin(), m.end()); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre()); + EXPECT_TRUE(it == m.end()); + } + + TYPED_TEST_P(ModifiersTest, EraseKey) + { + using T = hash_internal::GeneratedType; + std::vector values; + std::generate_n(std::back_inserter(values), 10, hash_internal::Generator()); + TypeParam m(values.begin(), values.end()); + ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values)); + EXPECT_EQ(1, m.erase(values[0])); + EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0])); + EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1, values.end())); + } + + TYPED_TEST_P(ModifiersTest, Swap) + { + using T = hash_internal::GeneratedType; + std::vector v1; + std::vector v2; + std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator()); + std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator()); + TypeParam m1(v1.begin(), v1.end()); + TypeParam m2(v2.begin(), v2.end()); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1)); + 
EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2)); + m1.swap(m2); + EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2)); + EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1)); + } + + // TODO(alkis): Write tests for extract. + // TODO(alkis): Write tests for merge. + + REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint, InsertRange, InsertWithinCapacity, InsertRangeWithinCapacity, Emplace, EmplaceHint, Erase, EraseRange, EraseKey, Swap); + + } // namespace container_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_map.h b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h index 6868e63..183709b 100644 --- a/CAPI/cpp/grpc/include/absl/container/node_hash_map.h +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_map.h @@ -48,557 +48,569 @@ #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -template -class NodeHashMapPolicy; -} // namespace container_internal - -// ----------------------------------------------------------------------------- -// absl::node_hash_map -// ----------------------------------------------------------------------------- -// -// An `absl::node_hash_map` is an unordered associative container which -// has been optimized for both speed and memory footprint in most common use -// cases. Its interface is similar to that of `std::unordered_map` with -// the following notable differences: -// -// * Supports heterogeneous lookup, through `find()`, `operator[]()` and -// `insert()`, provided that the map is provided a compatible heterogeneous -// hashing function and equality operator. -// * Contains a `capacity()` member function indicating the number of element -// slots (open, deleted, and empty) within the hash map. -// * Returns `void` from the `erase(iterator)` overload. -// -// By default, `node_hash_map` uses the `absl::Hash` hashing framework. -// All fundamental and Abseil types that support the `absl::Hash` framework have -// a compatible equality operator for comparing insertions into `node_hash_map`. -// If your type is not yet supported by the `absl::Hash` framework, see -// absl/hash/hash.h for information on extending Abseil hashing to user-defined -// types. -// -// Using `absl::node_hash_map` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. 
-// -// Example: -// -// // Create a node hash map of three strings (that map to strings) -// absl::node_hash_map ducks = -// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; -// -// // Insert a new element into the node hash map -// ducks.insert({"d", "donald"}}; -// -// // Force a rehash of the node hash map -// ducks.rehash(0); -// -// // Find the element with the key "b" -// std::string search_key = "b"; -// auto result = ducks.find(search_key); -// if (result != ducks.end()) { -// std::cout << "Result: " << result->second << std::endl; -// } -template , - class Eq = absl::container_internal::hash_default_eq, - class Alloc = std::allocator>> -class node_hash_map - : public absl::container_internal::raw_hash_map< - absl::container_internal::NodeHashMapPolicy, Hash, Eq, - Alloc> { - using Base = typename node_hash_map::raw_hash_map; - - public: - // Constructors and Assignment Operators - // - // A node_hash_map supports the same overload set as `std::unordered_map` - // for construction and assignment: - // - // * Default constructor - // - // // No allocation for the table's elements is made. - // absl::node_hash_map map1; - // - // * Initializer List constructor - // - // absl::node_hash_map map2 = - // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; - // - // * Copy constructor - // - // absl::node_hash_map map3(map2); - // - // * Copy assignment operator - // - // // Hash functor and Comparator are copied as well - // absl::node_hash_map map4; - // map4 = map3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::node_hash_map map5(std::move(map4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::node_hash_map map6; - // map6 = std::move(map5); - // - // * Range constructor - // - // std::vector> v = {{1, "a"}, {2, "b"}}; - // absl::node_hash_map map7(v.begin(), v.end()); - node_hash_map() {} - using Base::Base; - - // node_hash_map::begin() - // - // Returns an iterator to the beginning of the `node_hash_map`. - using Base::begin; - - // node_hash_map::cbegin() - // - // Returns a const iterator to the beginning of the `node_hash_map`. - using Base::cbegin; - - // node_hash_map::cend() - // - // Returns a const iterator to the end of the `node_hash_map`. - using Base::cend; - - // node_hash_map::end() - // - // Returns an iterator to the end of the `node_hash_map`. - using Base::end; - - // node_hash_map::capacity() - // - // Returns the number of element slots (assigned, deleted, and empty) - // available within the `node_hash_map`. - // - // NOTE: this member function is particular to `absl::node_hash_map` and is - // not provided in the `std::unordered_map` API. - using Base::capacity; - - // node_hash_map::empty() - // - // Returns whether or not the `node_hash_map` is empty. - using Base::empty; - - // node_hash_map::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `node_hash_map` under current memory constraints. This value can be thought - // of as the largest value of `std::distance(begin(), end())` for a - // `node_hash_map`. - using Base::max_size; - - // node_hash_map::size() - // - // Returns the number of elements currently within the `node_hash_map`. - using Base::size; - - // node_hash_map::clear() - // - // Removes all elements from the `node_hash_map`. Invalidates any references, - // pointers, or iterators referring to contained elements. - // - // NOTE: this operation may shrink the underlying buffer. 
To avoid shrinking - // the underlying buffer call `erase(begin(), end())`. - using Base::clear; - - // node_hash_map::erase() - // - // Erases elements within the `node_hash_map`. Erasing does not trigger a - // rehash. Overloads are listed below. - // - // void erase(const_iterator pos): - // - // Erases the element at `position` of the `node_hash_map`, returning - // `void`. - // - // NOTE: this return behavior is different than that of STL containers in - // general and `std::unordered_map` in particular. - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning an - // iterator pointing to `last`. - // - // size_type erase(const key_type& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // node_hash_map::insert() - // - // Inserts an element of the specified value into the `node_hash_map`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If rehashing occurs - // due to the insertion, all iterators are invalidated. Overloads are listed - // below. - // - // std::pair insert(const init_type& value): - // - // Inserts a value into the `node_hash_map`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a `bool` denoting whether the insertion took place. - // - // std::pair insert(T&& value): - // std::pair insert(init_type&& value): - // - // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair` - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a `bool` denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const init_type& value): - // iterator insert(const_iterator hint, T&& value): - // iterator insert(const_iterator hint, init_type&& value); - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently, for `node_hash_map` we guarantee the - // first match is inserted. - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently within the initializer list, for - // `node_hash_map` we guarantee the first match is inserted. - using Base::insert; - - // node_hash_map::insert_or_assign() - // - // Inserts an element of the specified value into the `node_hash_map` provided - // that a value with the given key does not already exist, or replaces it with - // the element value if a key for that value already exists, returning an - // iterator pointing to the newly inserted element. If rehashing occurs due to - // the insertion, all iterators are invalidated. Overloads are listed - // below. 
- // - // std::pair insert_or_assign(const init_type& k, T&& obj): - // std::pair insert_or_assign(init_type&& k, T&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `node_hash_map`. - // - // iterator insert_or_assign(const_iterator hint, - // const init_type& k, T&& obj): - // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): - // - // Inserts/Assigns (or moves) the element of the specified key into the - // `node_hash_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - using Base::insert_or_assign; - - // node_hash_map::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `node_hash_map`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace; - - // node_hash_map::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `node_hash_map`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. Prefer `try_emplace()` unless your key is not - // copyable or moveable. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace_hint; - - // node_hash_map::try_emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `node_hash_map`, provided that no element with the given key - // already exists. Unlike `emplace()`, if an element with the given key - // already exists, we guarantee that no element is constructed. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - // Overloads are listed below. - // - // std::pair try_emplace(const key_type& k, Args&&... args): - // std::pair try_emplace(key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `node_hash_map`. - // - // iterator try_emplace(const_iterator hint, - // const key_type& k, Args&&... args): - // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): - // - // Inserts (via copy or move) the element of the specified key into the - // `node_hash_map` using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. - // - // All `try_emplace()` overloads make the same guarantees regarding rvalue - // arguments as `std::unordered_map::try_emplace()`, namely that these - // functions will not move from rvalue arguments if insertions do not happen. - using Base::try_emplace; - - // node_hash_map::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. 
- // - // node_type extract(const_iterator position): - // - // Extracts the key,value pair of the element at the indicated position and - // returns a node handle owning that extracted data. - // - // node_type extract(const key_type& x): - // - // Extracts the key,value pair of the element with a key matching the passed - // key value and returns a node handle owning that extracted data. If the - // `node_hash_map` does not contain an element with a matching key, this - // function returns an empty node handle. - // - // NOTE: when compiled in an earlier version of C++ than C++17, - // `node_type::key()` returns a const reference to the key instead of a - // mutable reference. We cannot safely return a mutable reference without - // std::launder (which is not available before C++17). - using Base::extract; - - // node_hash_map::merge() - // - // Extracts elements from a given `source` node hash map into this - // `node_hash_map`. If the destination `node_hash_map` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // node_hash_map::swap(node_hash_map& other) - // - // Exchanges the contents of this `node_hash_map` with those of the `other` - // node hash map, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `node_hash_map` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - // - // `swap()` requires that the node hash map's hashing and key equivalence - // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the map's allocator has - // `std::allocator_traits::propagate_on_container_swap::value` - // set to `true`, the allocators are also exchanged using an unqualified call - // to non-member `swap()`; otherwise, the allocators are not swapped. - using Base::swap; - - // node_hash_map::rehash(count) - // - // Rehashes the `node_hash_map`, setting the number of slots to be at least - // the passed value. If the new number of slots increases the load factor more - // than the current maximum load factor - // (`count` < `size()` / `max_load_factor()`), then the new number of slots - // will be at least `size()` / `max_load_factor()`. - // - // To force a rehash, pass rehash(0). - using Base::rehash; - - // node_hash_map::reserve(count) - // - // Sets the number of slots in the `node_hash_map` to the number needed to - // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. - using Base::reserve; - - // node_hash_map::at() - // - // Returns a reference to the mapped value of the element with key equivalent - // to the passed key. - using Base::at; - - // node_hash_map::contains() - // - // Determines whether an element with a key comparing equal to the given `key` - // exists within the `node_hash_map`, returning `true` if so or `false` - // otherwise. - using Base::contains; - - // node_hash_map::count(const Key& key) const - // - // Returns the number of elements with a key comparing equal to the given - // `key` within the `node_hash_map`. note that this function will return - // either `1` or `0` since duplicate keys are not allowed within a - // `node_hash_map`. - using Base::count; - - // node_hash_map::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `node_hash_map`. 
- using Base::equal_range; - - // node_hash_map::find() - // - // Finds an element with the passed `key` within the `node_hash_map`. - using Base::find; - - // node_hash_map::operator[]() - // - // Returns a reference to the value mapped to the passed key within the - // `node_hash_map`, performing an `insert()` if the key does not already - // exist. If an insertion occurs and results in a rehashing of the container, - // all iterators are invalidated. Otherwise iterators are not affected and - // references are not invalidated. Overloads are listed below. - // - // T& operator[](const Key& key): - // - // Inserts an init_type object constructed in-place if the element with the - // given key does not exist. - // - // T& operator[](Key&& key): - // - // Inserts an init_type object constructed in-place provided that an element - // with the given key does not exist. - using Base::operator[]; - - // node_hash_map::bucket_count() - // - // Returns the number of "buckets" within the `node_hash_map`. - using Base::bucket_count; - - // node_hash_map::load_factor() - // - // Returns the current load factor of the `node_hash_map` (the average number - // of slots occupied with a value within the hash map). - using Base::load_factor; - - // node_hash_map::max_load_factor() - // - // Manages the maximum load factor of the `node_hash_map`. Overloads are - // listed below. - // - // float node_hash_map::max_load_factor() - // - // Returns the current maximum load factor of the `node_hash_map`. - // - // void node_hash_map::max_load_factor(float ml) - // - // Sets the maximum load factor of the `node_hash_map` to the passed value. - // - // NOTE: This overload is provided only for API compatibility with the STL; - // `node_hash_map` will ignore any set load factor and manage its rehashing - // internally as an implementation detail. - using Base::max_load_factor; - - // node_hash_map::get_allocator() - // - // Returns the allocator function associated with this `node_hash_map`. - using Base::get_allocator; - - // node_hash_map::hash_function() - // - // Returns the hashing function used to hash the keys within this - // `node_hash_map`. - using Base::hash_function; - - // node_hash_map::key_eq() - // - // Returns the function used for comparing keys equality. - using Base::key_eq; -}; - -// erase_if(node_hash_map<>, Pred) -// -// Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. -template -typename node_hash_map::size_type erase_if( - node_hash_map& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); -} - -namespace container_internal { - -template -class NodeHashMapPolicy - : public absl::container_internal::node_slot_policy< - std::pair&, NodeHashMapPolicy> { - using value_type = std::pair; - - public: - using key_type = Key; - using mapped_type = Value; - using init_type = std::pair; - - template - static value_type* new_element(Allocator* alloc, Args&&... 
args) { - using PairAlloc = typename absl::allocator_traits< - Allocator>::template rebind_alloc; - PairAlloc pair_alloc(*alloc); - value_type* res = - absl::allocator_traits::allocate(pair_alloc, 1); - absl::allocator_traits::construct(pair_alloc, res, - std::forward(args)...); - return res; - } - - template - static void delete_element(Allocator* alloc, value_type* pair) { - using PairAlloc = typename absl::allocator_traits< - Allocator>::template rebind_alloc; - PairAlloc pair_alloc(*alloc); - absl::allocator_traits::destroy(pair_alloc, pair); - absl::allocator_traits::deallocate(pair_alloc, pair, 1); - } - - template - static decltype(absl::container_internal::DecomposePair( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... args) { - return absl::container_internal::DecomposePair(std::forward(f), - std::forward(args)...); - } - - static size_t element_space_used(const value_type*) { - return sizeof(value_type); - } - - static Value& value(value_type* elem) { return elem->second; } - static const Value& value(const value_type* elem) { return elem->second; } -}; -} // namespace container_internal - -namespace container_algorithm_internal { - -// Specialization of trait in absl/algorithm/container.h -template -struct IsUnorderedContainer< - absl::node_hash_map> : std::true_type {}; - -} // namespace container_algorithm_internal - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + class NodeHashMapPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::node_hash_map + // ----------------------------------------------------------------------------- + // + // An `absl::node_hash_map` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_map` with + // the following notable differences: + // + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the map is provided a compatible heterogeneous + // hashing function and equality operator. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash map. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `node_hash_map` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `node_hash_map`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::node_hash_map` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. 
+ // + // Example: + // + // // Create a node hash map of three strings (that map to strings) + // absl::node_hash_map ducks = + // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; + // + // // Insert a new element into the node hash map + // ducks.insert({"d", "donald"}}; + // + // // Force a rehash of the node hash map + // ducks.rehash(0); + // + // // Find the element with the key "b" + // std::string search_key = "b"; + // auto result = ducks.find(search_key); + // if (result != ducks.end()) { + // std::cout << "Result: " << result->second << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Alloc = std::allocator>> + class node_hash_map : public absl::container_internal::raw_hash_map, Hash, Eq, Alloc> + { + using Base = typename node_hash_map::raw_hash_map; + + public: + // Constructors and Assignment Operators + // + // A node_hash_map supports the same overload set as `std::unordered_map` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. + // absl::node_hash_map map1; + // + // * Initializer List constructor + // + // absl::node_hash_map map2 = + // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; + // + // * Copy constructor + // + // absl::node_hash_map map3(map2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::node_hash_map map4; + // map4 = map3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::node_hash_map map5(std::move(map4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::node_hash_map map6; + // map6 = std::move(map5); + // + // * Range constructor + // + // std::vector> v = {{1, "a"}, {2, "b"}}; + // absl::node_hash_map map7(v.begin(), v.end()); + node_hash_map() + { + } + using Base::Base; + + // node_hash_map::begin() + // + // Returns an iterator to the beginning of the `node_hash_map`. + using Base::begin; + + // node_hash_map::cbegin() + // + // Returns a const iterator to the beginning of the `node_hash_map`. + using Base::cbegin; + + // node_hash_map::cend() + // + // Returns a const iterator to the end of the `node_hash_map`. + using Base::cend; + + // node_hash_map::end() + // + // Returns an iterator to the end of the `node_hash_map`. + using Base::end; + + // node_hash_map::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_map`. + // + // NOTE: this member function is particular to `absl::node_hash_map` and is + // not provided in the `std::unordered_map` API. + using Base::capacity; + + // node_hash_map::empty() + // + // Returns whether or not the `node_hash_map` is empty. + using Base::empty; + + // node_hash_map::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_map` under current memory constraints. This value can be thought + // of as the largest value of `std::distance(begin(), end())` for a + // `node_hash_map`. + using Base::max_size; + + // node_hash_map::size() + // + // Returns the number of elements currently within the `node_hash_map`. + using Base::size; + + // node_hash_map::clear() + // + // Removes all elements from the `node_hash_map`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. 
To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_map::erase() + // + // Erases elements within the `node_hash_map`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_map`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_map` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // node_hash_map::insert() + // + // Inserts an element of the specified value into the `node_hash_map`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const init_type& value): + // + // Inserts a value into the `node_hash_map`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a `bool` denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // std::pair insert(init_type&& value): + // + // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair` + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a `bool` denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const init_type& value): + // iterator insert(const_iterator hint, T&& value): + // iterator insert(const_iterator hint, init_type&& value); + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_map` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_map` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_map::insert_or_assign() + // + // Inserts an element of the specified value into the `node_hash_map` provided + // that a value with the given key does not already exist, or replaces it with + // the element value if a key for that value already exists, returning an + // iterator pointing to the newly inserted element. If rehashing occurs due to + // the insertion, all iterators are invalidated. Overloads are listed + // below. 
+ // + // std::pair insert_or_assign(const init_type& k, T&& obj): + // std::pair insert_or_assign(init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map`. + // + // iterator insert_or_assign(const_iterator hint, + // const init_type& k, T&& obj): + // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): + // + // Inserts/Assigns (or moves) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + using Base::insert_or_assign; + + // node_hash_map::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_map::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. Prefer `try_emplace()` unless your key is not + // copyable or moveable. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_map::try_emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_map`, provided that no element with the given key + // already exists. Unlike `emplace()`, if an element with the given key + // already exists, we guarantee that no element is constructed. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + // Overloads are listed below. + // + // std::pair try_emplace(const key_type& k, Args&&... args): + // std::pair try_emplace(key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map`. + // + // iterator try_emplace(const_iterator hint, + // const key_type& k, Args&&... args): + // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): + // + // Inserts (via copy or move) the element of the specified key into the + // `node_hash_map` using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. + // + // All `try_emplace()` overloads make the same guarantees regarding rvalue + // arguments as `std::unordered_map::try_emplace()`, namely that these + // functions will not move from rvalue arguments if insertions do not happen. + using Base::try_emplace; + + // node_hash_map::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. 
+ // + // node_type extract(const_iterator position): + // + // Extracts the key,value pair of the element at the indicated position and + // returns a node handle owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the key,value pair of the element with a key matching the passed + // key value and returns a node handle owning that extracted data. If the + // `node_hash_map` does not contain an element with a matching key, this + // function returns an empty node handle. + // + // NOTE: when compiled in an earlier version of C++ than C++17, + // `node_type::key()` returns a const reference to the key instead of a + // mutable reference. We cannot safely return a mutable reference without + // std::launder (which is not available before C++17). + using Base::extract; + + // node_hash_map::merge() + // + // Extracts elements from a given `source` node hash map into this + // `node_hash_map`. If the destination `node_hash_map` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // node_hash_map::swap(node_hash_map& other) + // + // Exchanges the contents of this `node_hash_map` with those of the `other` + // node hash map, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `node_hash_map` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the node hash map's hashing and key equivalence + // functions be Swappable, and are exchaged using unqualified calls to + // non-member `swap()`. If the map's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // node_hash_map::rehash(count) + // + // Rehashes the `node_hash_map`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + using Base::rehash; + + // node_hash_map::reserve(count) + // + // Sets the number of slots in the `node_hash_map` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // node_hash_map::at() + // + // Returns a reference to the mapped value of the element with key equivalent + // to the passed key. + using Base::at; + + // node_hash_map::contains() + // + // Determines whether an element with a key comparing equal to the given `key` + // exists within the `node_hash_map`, returning `true` if so or `false` + // otherwise. + using Base::contains; + + // node_hash_map::count(const Key& key) const + // + // Returns the number of elements with a key comparing equal to the given + // `key` within the `node_hash_map`. note that this function will return + // either `1` or `0` since duplicate keys are not allowed within a + // `node_hash_map`. + using Base::count; + + // node_hash_map::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `node_hash_map`. 
+ using Base::equal_range; + + // node_hash_map::find() + // + // Finds an element with the passed `key` within the `node_hash_map`. + using Base::find; + + // node_hash_map::operator[]() + // + // Returns a reference to the value mapped to the passed key within the + // `node_hash_map`, performing an `insert()` if the key does not already + // exist. If an insertion occurs and results in a rehashing of the container, + // all iterators are invalidated. Otherwise iterators are not affected and + // references are not invalidated. Overloads are listed below. + // + // T& operator[](const Key& key): + // + // Inserts an init_type object constructed in-place if the element with the + // given key does not exist. + // + // T& operator[](Key&& key): + // + // Inserts an init_type object constructed in-place provided that an element + // with the given key does not exist. + using Base::operator[]; + + // node_hash_map::bucket_count() + // + // Returns the number of "buckets" within the `node_hash_map`. + using Base::bucket_count; + + // node_hash_map::load_factor() + // + // Returns the current load factor of the `node_hash_map` (the average number + // of slots occupied with a value within the hash map). + using Base::load_factor; + + // node_hash_map::max_load_factor() + // + // Manages the maximum load factor of the `node_hash_map`. Overloads are + // listed below. + // + // float node_hash_map::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_map`. + // + // void node_hash_map::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_map` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_map` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_map::get_allocator() + // + // Returns the allocator function associated with this `node_hash_map`. + using Base::get_allocator; + + // node_hash_map::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_map`. + using Base::hash_function; + + // node_hash_map::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(node_hash_map<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename node_hash_map::size_type erase_if( + node_hash_map& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + class NodeHashMapPolicy : public absl::container_internal::node_slot_policy&, NodeHashMapPolicy> + { + using value_type = std::pair; + + public: + using key_type = Key; + using mapped_type = Value; + using init_type = std::pair; + + template + static value_type* new_element(Allocator* alloc, Args&&... 
args) + { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + value_type* res = + absl::allocator_traits::allocate(pair_alloc, 1); + absl::allocator_traits::construct(pair_alloc, res, std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, value_type* pair) + { + using PairAlloc = typename absl::allocator_traits< + Allocator>::template rebind_alloc; + PairAlloc pair_alloc(*alloc); + absl::allocator_traits::destroy(pair_alloc, pair); + absl::allocator_traits::deallocate(pair_alloc, pair, 1); + } + + template + static decltype(absl::container_internal::DecomposePair( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... args) + { + return absl::container_internal::DecomposePair(std::forward(f), std::forward(args)...); + } + + static size_t element_space_used(const value_type*) + { + return sizeof(value_type); + } + + static Value& value(value_type* elem) + { + return elem->second; + } + static const Value& value(const value_type* elem) + { + return elem->second; + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer< + absl::node_hash_map> : std::true_type + { + }; + + } // namespace container_algorithm_internal + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_NODE_HASH_MAP_H_ diff --git a/CAPI/cpp/grpc/include/absl/container/node_hash_set.h b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h index f2cc70c..f4cd06c 100644 --- a/CAPI/cpp/grpc/include/absl/container/node_hash_set.h +++ b/CAPI/cpp/grpc/include/absl/container/node_hash_set.h @@ -44,457 +44,470 @@ #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/memory/memory.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace container_internal { -template -struct NodeHashSetPolicy; -} // namespace container_internal - -// ----------------------------------------------------------------------------- -// absl::node_hash_set -// ----------------------------------------------------------------------------- -// -// An `absl::node_hash_set` is an unordered associative container which -// has been optimized for both speed and memory footprint in most common use -// cases. Its interface is similar to that of `std::unordered_set` with the -// following notable differences: -// -// * Supports heterogeneous lookup, through `find()`, `operator[]()` and -// `insert()`, provided that the set is provided a compatible heterogeneous -// hashing function and equality operator. -// * Contains a `capacity()` member function indicating the number of element -// slots (open, deleted, and empty) within the hash set. -// * Returns `void` from the `erase(iterator)` overload. -// -// By default, `node_hash_set` uses the `absl::Hash` hashing framework. -// All fundamental and Abseil types that support the `absl::Hash` framework have -// a compatible equality operator for comparing insertions into `node_hash_set`. -// If your type is not yet supported by the `absl::Hash` framework, see -// absl/hash/hash.h for information on extending Abseil hashing to user-defined -// types. -// -// Using `absl::node_hash_set` at interface boundaries in dynamically loaded -// libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may -// be randomized across dynamically loaded libraries. 
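As a companion to the `erase_if()` helper declared in this header, a minimal usage sketch (illustrative only, assuming a standard Abseil build):

    #include <cstddef>
    #include <string>

    #include "absl/container/node_hash_set.h"

    int main() {
      absl::node_hash_set<std::string> ducks = {"huey", "dewey", "louie", "donald"};
      // erase_if() removes every element that satisfies the predicate and
      // returns how many were erased; only "donald" is longer than five chars.
      std::size_t erased = absl::erase_if(
          ducks, [](const std::string& name) { return name.size() > 5; });
      return static_cast<int>(erased);
    }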
-// -// Example: -// -// // Create a node hash set of three strings -// absl::node_hash_set ducks = -// {"huey", "dewey", "louie"}; -// -// // Insert a new element into the node hash set -// ducks.insert("donald"); -// -// // Force a rehash of the node hash set -// ducks.rehash(0); -// -// // See if "dewey" is present -// if (ducks.contains("dewey")) { -// std::cout << "We found dewey!" << std::endl; -// } -template , - class Eq = absl::container_internal::hash_default_eq, - class Alloc = std::allocator> -class node_hash_set - : public absl::container_internal::raw_hash_set< - absl::container_internal::NodeHashSetPolicy, Hash, Eq, Alloc> { - using Base = typename node_hash_set::raw_hash_set; - - public: - // Constructors and Assignment Operators - // - // A node_hash_set supports the same overload set as `std::unordered_set` - // for construction and assignment: - // - // * Default constructor - // - // // No allocation for the table's elements is made. - // absl::node_hash_set set1; - // - // * Initializer List constructor - // - // absl::node_hash_set set2 = - // {{"huey"}, {"dewey"}, {"louie"}}; - // - // * Copy constructor - // - // absl::node_hash_set set3(set2); - // - // * Copy assignment operator - // - // // Hash functor and Comparator are copied as well - // absl::node_hash_set set4; - // set4 = set3; - // - // * Move constructor - // - // // Move is guaranteed efficient - // absl::node_hash_set set5(std::move(set4)); - // - // * Move assignment operator - // - // // May be efficient if allocators are compatible - // absl::node_hash_set set6; - // set6 = std::move(set5); - // - // * Range constructor - // - // std::vector v = {"a", "b"}; - // absl::node_hash_set set7(v.begin(), v.end()); - node_hash_set() {} - using Base::Base; - - // node_hash_set::begin() - // - // Returns an iterator to the beginning of the `node_hash_set`. - using Base::begin; - - // node_hash_set::cbegin() - // - // Returns a const iterator to the beginning of the `node_hash_set`. - using Base::cbegin; - - // node_hash_set::cend() - // - // Returns a const iterator to the end of the `node_hash_set`. - using Base::cend; - - // node_hash_set::end() - // - // Returns an iterator to the end of the `node_hash_set`. - using Base::end; - - // node_hash_set::capacity() - // - // Returns the number of element slots (assigned, deleted, and empty) - // available within the `node_hash_set`. - // - // NOTE: this member function is particular to `absl::node_hash_set` and is - // not provided in the `std::unordered_set` API. - using Base::capacity; - - // node_hash_set::empty() - // - // Returns whether or not the `node_hash_set` is empty. - using Base::empty; - - // node_hash_set::max_size() - // - // Returns the largest theoretical possible number of elements within a - // `node_hash_set` under current memory constraints. This value can be thought - // of the largest value of `std::distance(begin(), end())` for a - // `node_hash_set`. - using Base::max_size; - - // node_hash_set::size() - // - // Returns the number of elements currently within the `node_hash_set`. - using Base::size; - - // node_hash_set::clear() - // - // Removes all elements from the `node_hash_set`. Invalidates any references, - // pointers, or iterators referring to contained elements. - // - // NOTE: this operation may shrink the underlying buffer. To avoid shrinking - // the underlying buffer call `erase(begin(), end())`. - using Base::clear; - - // node_hash_set::erase() - // - // Erases elements within the `node_hash_set`. 
Erasing does not trigger a - // rehash. Overloads are listed below. - // - // void erase(const_iterator pos): - // - // Erases the element at `position` of the `node_hash_set`, returning - // `void`. - // - // NOTE: this return behavior is different than that of STL containers in - // general and `std::unordered_set` in particular. - // - // iterator erase(const_iterator first, const_iterator last): - // - // Erases the elements in the open interval [`first`, `last`), returning an - // iterator pointing to `last`. - // - // size_type erase(const key_type& key): - // - // Erases the element with the matching key, if it exists, returning the - // number of elements erased (0 or 1). - using Base::erase; - - // node_hash_set::insert() - // - // Inserts an element of the specified value into the `node_hash_set`, - // returning an iterator pointing to the newly inserted element, provided that - // an element with the given key does not already exist. If rehashing occurs - // due to the insertion, all iterators are invalidated. Overloads are listed - // below. - // - // std::pair insert(const T& value): - // - // Inserts a value into the `node_hash_set`. Returns a pair consisting of an - // iterator to the inserted element (or to the element that prevented the - // insertion) and a bool denoting whether the insertion took place. - // - // std::pair insert(T&& value): - // - // Inserts a moveable value into the `node_hash_set`. Returns a pair - // consisting of an iterator to the inserted element (or to the element that - // prevented the insertion) and a bool denoting whether the insertion took - // place. - // - // iterator insert(const_iterator hint, const T& value): - // iterator insert(const_iterator hint, T&& value): - // - // Inserts a value, using the position of `hint` as a non-binding suggestion - // for where to begin the insertion search. Returns an iterator to the - // inserted element, or to the existing element that prevented the - // insertion. - // - // void insert(InputIterator first, InputIterator last): - // - // Inserts a range of values [`first`, `last`). - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently, for `node_hash_set` we guarantee the - // first match is inserted. - // - // void insert(std::initializer_list ilist): - // - // Inserts the elements within the initializer list `ilist`. - // - // NOTE: Although the STL does not specify which element may be inserted if - // multiple keys compare equivalently within the initializer list, for - // `node_hash_set` we guarantee the first match is inserted. - using Base::insert; - - // node_hash_set::emplace() - // - // Inserts an element of the specified value by constructing it in-place - // within the `node_hash_set`, provided that no element with the given key - // already exists. - // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace; - - // node_hash_set::emplace_hint() - // - // Inserts an element of the specified value by constructing it in-place - // within the `node_hash_set`, using the position of `hint` as a non-binding - // suggestion for where to begin the insertion search, and only inserts - // provided that no element with the given key already exists. 
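A brief sketch of the `insert()`/`emplace()` return contract described above (illustrative only; not part of the header):

    #include <string>

    #include "absl/container/node_hash_set.h"

    int main() {
      absl::node_hash_set<std::string> ducks = {"huey", "dewey"};
      // insert() returns a std::pair<iterator, bool>; the bool is false when an
      // equivalent key is already present and the set is left unchanged.
      auto dup = ducks.insert("dewey");     // dup.second == false
      // emplace() constructs the element in place when the key is absent.
      auto fresh = ducks.emplace("louie");  // fresh.second == true
      return (dup.second ? 1 : 0) + (fresh.second ? 2 : 0);  // 2
    }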
- // - // The element may be constructed even if there already is an element with the - // key in the container, in which case the newly constructed element will be - // destroyed immediately. - // - // If rehashing occurs due to the insertion, all iterators are invalidated. - using Base::emplace_hint; - - // node_hash_set::extract() - // - // Extracts the indicated element, erasing it in the process, and returns it - // as a C++17-compatible node handle. Overloads are listed below. - // - // node_type extract(const_iterator position): - // - // Extracts the element at the indicated position and returns a node handle - // owning that extracted data. - // - // node_type extract(const key_type& x): - // - // Extracts the element with the key matching the passed key value and - // returns a node handle owning that extracted data. If the `node_hash_set` - // does not contain an element with a matching key, this function returns an - // empty node handle. - using Base::extract; - - // node_hash_set::merge() - // - // Extracts elements from a given `source` node hash set into this - // `node_hash_set`. If the destination `node_hash_set` already contains an - // element with an equivalent key, that element is not extracted. - using Base::merge; - - // node_hash_set::swap(node_hash_set& other) - // - // Exchanges the contents of this `node_hash_set` with those of the `other` - // node hash set, avoiding invocation of any move, copy, or swap operations on - // individual elements. - // - // All iterators and references on the `node_hash_set` remain valid, excepting - // for the past-the-end iterator, which is invalidated. - // - // `swap()` requires that the node hash set's hashing and key equivalence - // functions be Swappable, and are exchaged using unqualified calls to - // non-member `swap()`. If the set's allocator has - // `std::allocator_traits::propagate_on_container_swap::value` - // set to `true`, the allocators are also exchanged using an unqualified call - // to non-member `swap()`; otherwise, the allocators are not swapped. - using Base::swap; - - // node_hash_set::rehash(count) - // - // Rehashes the `node_hash_set`, setting the number of slots to be at least - // the passed value. If the new number of slots increases the load factor more - // than the current maximum load factor - // (`count` < `size()` / `max_load_factor()`), then the new number of slots - // will be at least `size()` / `max_load_factor()`. - // - // To force a rehash, pass rehash(0). - // - // NOTE: unlike behavior in `std::unordered_set`, references are also - // invalidated upon a `rehash()`. - using Base::rehash; - - // node_hash_set::reserve(count) - // - // Sets the number of slots in the `node_hash_set` to the number needed to - // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. - using Base::reserve; - - // node_hash_set::contains() - // - // Determines whether an element comparing equal to the given `key` exists - // within the `node_hash_set`, returning `true` if so or `false` otherwise. - using Base::contains; - - // node_hash_set::count(const Key& key) const - // - // Returns the number of elements comparing equal to the given `key` within - // the `node_hash_set`. note that this function will return either `1` or `0` - // since duplicate elements are not allowed within a `node_hash_set`. 
- using Base::count; - - // node_hash_set::equal_range() - // - // Returns a closed range [first, last], defined by a `std::pair` of two - // iterators, containing all elements with the passed key in the - // `node_hash_set`. - using Base::equal_range; - - // node_hash_set::find() - // - // Finds an element with the passed `key` within the `node_hash_set`. - using Base::find; - - // node_hash_set::bucket_count() - // - // Returns the number of "buckets" within the `node_hash_set`. Note that - // because a node hash set contains all elements within its internal storage, - // this value simply equals the current capacity of the `node_hash_set`. - using Base::bucket_count; - - // node_hash_set::load_factor() - // - // Returns the current load factor of the `node_hash_set` (the average number - // of slots occupied with a value within the hash set). - using Base::load_factor; - - // node_hash_set::max_load_factor() - // - // Manages the maximum load factor of the `node_hash_set`. Overloads are - // listed below. - // - // float node_hash_set::max_load_factor() - // - // Returns the current maximum load factor of the `node_hash_set`. - // - // void node_hash_set::max_load_factor(float ml) - // - // Sets the maximum load factor of the `node_hash_set` to the passed value. - // - // NOTE: This overload is provided only for API compatibility with the STL; - // `node_hash_set` will ignore any set load factor and manage its rehashing - // internally as an implementation detail. - using Base::max_load_factor; - - // node_hash_set::get_allocator() - // - // Returns the allocator function associated with this `node_hash_set`. - using Base::get_allocator; - - // node_hash_set::hash_function() - // - // Returns the hashing function used to hash the keys within this - // `node_hash_set`. - using Base::hash_function; - - // node_hash_set::key_eq() - // - // Returns the function used for comparing keys equality. - using Base::key_eq; -}; - -// erase_if(node_hash_set<>, Pred) -// -// Erases all elements that satisfy the predicate `pred` from the container `c`. -// Returns the number of erased elements. -template -typename node_hash_set::size_type erase_if( - node_hash_set& c, Predicate pred) { - return container_internal::EraseIf(pred, &c); -} - -namespace container_internal { - -template -struct NodeHashSetPolicy - : absl::container_internal::node_slot_policy> { - using key_type = T; - using init_type = T; - using constant_iterators = std::true_type; - - template - static T* new_element(Allocator* alloc, Args&&... args) { - using ValueAlloc = - typename absl::allocator_traits::template rebind_alloc; - ValueAlloc value_alloc(*alloc); - T* res = absl::allocator_traits::allocate(value_alloc, 1); - absl::allocator_traits::construct(value_alloc, res, - std::forward(args)...); - return res; - } - - template - static void delete_element(Allocator* alloc, T* elem) { - using ValueAlloc = - typename absl::allocator_traits::template rebind_alloc; - ValueAlloc value_alloc(*alloc); - absl::allocator_traits::destroy(value_alloc, elem); - absl::allocator_traits::deallocate(value_alloc, elem, 1); - } - - template - static decltype(absl::container_internal::DecomposeValue( - std::declval(), std::declval()...)) - apply(F&& f, Args&&... 
args) { - return absl::container_internal::DecomposeValue( - std::forward(f), std::forward(args)...); - } - - static size_t element_space_used(const T*) { return sizeof(T); } -}; -} // namespace container_internal - -namespace container_algorithm_internal { - -// Specialization of trait in absl/algorithm/container.h -template -struct IsUnorderedContainer> - : std::true_type {}; - -} // namespace container_algorithm_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace container_internal + { + template + struct NodeHashSetPolicy; + } // namespace container_internal + + // ----------------------------------------------------------------------------- + // absl::node_hash_set + // ----------------------------------------------------------------------------- + // + // An `absl::node_hash_set` is an unordered associative container which + // has been optimized for both speed and memory footprint in most common use + // cases. Its interface is similar to that of `std::unordered_set` with the + // following notable differences: + // + // * Supports heterogeneous lookup, through `find()`, `operator[]()` and + // `insert()`, provided that the set is provided a compatible heterogeneous + // hashing function and equality operator. + // * Contains a `capacity()` member function indicating the number of element + // slots (open, deleted, and empty) within the hash set. + // * Returns `void` from the `erase(iterator)` overload. + // + // By default, `node_hash_set` uses the `absl::Hash` hashing framework. + // All fundamental and Abseil types that support the `absl::Hash` framework have + // a compatible equality operator for comparing insertions into `node_hash_set`. + // If your type is not yet supported by the `absl::Hash` framework, see + // absl/hash/hash.h for information on extending Abseil hashing to user-defined + // types. + // + // Using `absl::node_hash_set` at interface boundaries in dynamically loaded + // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may + // be randomized across dynamically loaded libraries. + // + // Example: + // + // // Create a node hash set of three strings + // absl::node_hash_set ducks = + // {"huey", "dewey", "louie"}; + // + // // Insert a new element into the node hash set + // ducks.insert("donald"); + // + // // Force a rehash of the node hash set + // ducks.rehash(0); + // + // // See if "dewey" is present + // if (ducks.contains("dewey")) { + // std::cout << "We found dewey!" << std::endl; + // } + template, class Eq = absl::container_internal::hash_default_eq, class Alloc = std::allocator> + class node_hash_set : public absl::container_internal::raw_hash_set, Hash, Eq, Alloc> + { + using Base = typename node_hash_set::raw_hash_set; + + public: + // Constructors and Assignment Operators + // + // A node_hash_set supports the same overload set as `std::unordered_set` + // for construction and assignment: + // + // * Default constructor + // + // // No allocation for the table's elements is made. 
+ // absl::node_hash_set set1; + // + // * Initializer List constructor + // + // absl::node_hash_set set2 = + // {{"huey"}, {"dewey"}, {"louie"}}; + // + // * Copy constructor + // + // absl::node_hash_set set3(set2); + // + // * Copy assignment operator + // + // // Hash functor and Comparator are copied as well + // absl::node_hash_set set4; + // set4 = set3; + // + // * Move constructor + // + // // Move is guaranteed efficient + // absl::node_hash_set set5(std::move(set4)); + // + // * Move assignment operator + // + // // May be efficient if allocators are compatible + // absl::node_hash_set set6; + // set6 = std::move(set5); + // + // * Range constructor + // + // std::vector v = {"a", "b"}; + // absl::node_hash_set set7(v.begin(), v.end()); + node_hash_set() + { + } + using Base::Base; + + // node_hash_set::begin() + // + // Returns an iterator to the beginning of the `node_hash_set`. + using Base::begin; + + // node_hash_set::cbegin() + // + // Returns a const iterator to the beginning of the `node_hash_set`. + using Base::cbegin; + + // node_hash_set::cend() + // + // Returns a const iterator to the end of the `node_hash_set`. + using Base::cend; + + // node_hash_set::end() + // + // Returns an iterator to the end of the `node_hash_set`. + using Base::end; + + // node_hash_set::capacity() + // + // Returns the number of element slots (assigned, deleted, and empty) + // available within the `node_hash_set`. + // + // NOTE: this member function is particular to `absl::node_hash_set` and is + // not provided in the `std::unordered_set` API. + using Base::capacity; + + // node_hash_set::empty() + // + // Returns whether or not the `node_hash_set` is empty. + using Base::empty; + + // node_hash_set::max_size() + // + // Returns the largest theoretical possible number of elements within a + // `node_hash_set` under current memory constraints. This value can be thought + // of the largest value of `std::distance(begin(), end())` for a + // `node_hash_set`. + using Base::max_size; + + // node_hash_set::size() + // + // Returns the number of elements currently within the `node_hash_set`. + using Base::size; + + // node_hash_set::clear() + // + // Removes all elements from the `node_hash_set`. Invalidates any references, + // pointers, or iterators referring to contained elements. + // + // NOTE: this operation may shrink the underlying buffer. To avoid shrinking + // the underlying buffer call `erase(begin(), end())`. + using Base::clear; + + // node_hash_set::erase() + // + // Erases elements within the `node_hash_set`. Erasing does not trigger a + // rehash. Overloads are listed below. + // + // void erase(const_iterator pos): + // + // Erases the element at `position` of the `node_hash_set`, returning + // `void`. + // + // NOTE: this return behavior is different than that of STL containers in + // general and `std::unordered_set` in particular. + // + // iterator erase(const_iterator first, const_iterator last): + // + // Erases the elements in the open interval [`first`, `last`), returning an + // iterator pointing to `last`. + // + // size_type erase(const key_type& key): + // + // Erases the element with the matching key, if it exists, returning the + // number of elements erased (0 or 1). + using Base::erase; + + // node_hash_set::insert() + // + // Inserts an element of the specified value into the `node_hash_set`, + // returning an iterator pointing to the newly inserted element, provided that + // an element with the given key does not already exist. 
If rehashing occurs + // due to the insertion, all iterators are invalidated. Overloads are listed + // below. + // + // std::pair insert(const T& value): + // + // Inserts a value into the `node_hash_set`. Returns a pair consisting of an + // iterator to the inserted element (or to the element that prevented the + // insertion) and a bool denoting whether the insertion took place. + // + // std::pair insert(T&& value): + // + // Inserts a moveable value into the `node_hash_set`. Returns a pair + // consisting of an iterator to the inserted element (or to the element that + // prevented the insertion) and a bool denoting whether the insertion took + // place. + // + // iterator insert(const_iterator hint, const T& value): + // iterator insert(const_iterator hint, T&& value): + // + // Inserts a value, using the position of `hint` as a non-binding suggestion + // for where to begin the insertion search. Returns an iterator to the + // inserted element, or to the existing element that prevented the + // insertion. + // + // void insert(InputIterator first, InputIterator last): + // + // Inserts a range of values [`first`, `last`). + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently, for `node_hash_set` we guarantee the + // first match is inserted. + // + // void insert(std::initializer_list ilist): + // + // Inserts the elements within the initializer list `ilist`. + // + // NOTE: Although the STL does not specify which element may be inserted if + // multiple keys compare equivalently within the initializer list, for + // `node_hash_set` we guarantee the first match is inserted. + using Base::insert; + + // node_hash_set::emplace() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, provided that no element with the given key + // already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace; + + // node_hash_set::emplace_hint() + // + // Inserts an element of the specified value by constructing it in-place + // within the `node_hash_set`, using the position of `hint` as a non-binding + // suggestion for where to begin the insertion search, and only inserts + // provided that no element with the given key already exists. + // + // The element may be constructed even if there already is an element with the + // key in the container, in which case the newly constructed element will be + // destroyed immediately. + // + // If rehashing occurs due to the insertion, all iterators are invalidated. + using Base::emplace_hint; + + // node_hash_set::extract() + // + // Extracts the indicated element, erasing it in the process, and returns it + // as a C++17-compatible node handle. Overloads are listed below. + // + // node_type extract(const_iterator position): + // + // Extracts the element at the indicated position and returns a node handle + // owning that extracted data. + // + // node_type extract(const key_type& x): + // + // Extracts the element with the key matching the passed key value and + // returns a node handle owning that extracted data. If the `node_hash_set` + // does not contain an element with a matching key, this function returns an + // empty node handle. 
+ using Base::extract; + + // node_hash_set::merge() + // + // Extracts elements from a given `source` node hash set into this + // `node_hash_set`. If the destination `node_hash_set` already contains an + // element with an equivalent key, that element is not extracted. + using Base::merge; + + // node_hash_set::swap(node_hash_set& other) + // + // Exchanges the contents of this `node_hash_set` with those of the `other` + // node hash set, avoiding invocation of any move, copy, or swap operations on + // individual elements. + // + // All iterators and references on the `node_hash_set` remain valid, excepting + // for the past-the-end iterator, which is invalidated. + // + // `swap()` requires that the node hash set's hashing and key equivalence + // functions be Swappable, and are exchaged using unqualified calls to + // non-member `swap()`. If the set's allocator has + // `std::allocator_traits::propagate_on_container_swap::value` + // set to `true`, the allocators are also exchanged using an unqualified call + // to non-member `swap()`; otherwise, the allocators are not swapped. + using Base::swap; + + // node_hash_set::rehash(count) + // + // Rehashes the `node_hash_set`, setting the number of slots to be at least + // the passed value. If the new number of slots increases the load factor more + // than the current maximum load factor + // (`count` < `size()` / `max_load_factor()`), then the new number of slots + // will be at least `size()` / `max_load_factor()`. + // + // To force a rehash, pass rehash(0). + // + // NOTE: unlike behavior in `std::unordered_set`, references are also + // invalidated upon a `rehash()`. + using Base::rehash; + + // node_hash_set::reserve(count) + // + // Sets the number of slots in the `node_hash_set` to the number needed to + // accommodate at least `count` total elements without exceeding the current + // maximum load factor, and may rehash the container if needed. + using Base::reserve; + + // node_hash_set::contains() + // + // Determines whether an element comparing equal to the given `key` exists + // within the `node_hash_set`, returning `true` if so or `false` otherwise. + using Base::contains; + + // node_hash_set::count(const Key& key) const + // + // Returns the number of elements comparing equal to the given `key` within + // the `node_hash_set`. note that this function will return either `1` or `0` + // since duplicate elements are not allowed within a `node_hash_set`. + using Base::count; + + // node_hash_set::equal_range() + // + // Returns a closed range [first, last], defined by a `std::pair` of two + // iterators, containing all elements with the passed key in the + // `node_hash_set`. + using Base::equal_range; + + // node_hash_set::find() + // + // Finds an element with the passed `key` within the `node_hash_set`. + using Base::find; + + // node_hash_set::bucket_count() + // + // Returns the number of "buckets" within the `node_hash_set`. Note that + // because a node hash set contains all elements within its internal storage, + // this value simply equals the current capacity of the `node_hash_set`. + using Base::bucket_count; + + // node_hash_set::load_factor() + // + // Returns the current load factor of the `node_hash_set` (the average number + // of slots occupied with a value within the hash set). + using Base::load_factor; + + // node_hash_set::max_load_factor() + // + // Manages the maximum load factor of the `node_hash_set`. Overloads are + // listed below. 
+ // + // float node_hash_set::max_load_factor() + // + // Returns the current maximum load factor of the `node_hash_set`. + // + // void node_hash_set::max_load_factor(float ml) + // + // Sets the maximum load factor of the `node_hash_set` to the passed value. + // + // NOTE: This overload is provided only for API compatibility with the STL; + // `node_hash_set` will ignore any set load factor and manage its rehashing + // internally as an implementation detail. + using Base::max_load_factor; + + // node_hash_set::get_allocator() + // + // Returns the allocator function associated with this `node_hash_set`. + using Base::get_allocator; + + // node_hash_set::hash_function() + // + // Returns the hashing function used to hash the keys within this + // `node_hash_set`. + using Base::hash_function; + + // node_hash_set::key_eq() + // + // Returns the function used for comparing keys equality. + using Base::key_eq; + }; + + // erase_if(node_hash_set<>, Pred) + // + // Erases all elements that satisfy the predicate `pred` from the container `c`. + // Returns the number of erased elements. + template + typename node_hash_set::size_type erase_if( + node_hash_set& c, Predicate pred + ) + { + return container_internal::EraseIf(pred, &c); + } + + namespace container_internal + { + + template + struct NodeHashSetPolicy : absl::container_internal::node_slot_policy> + { + using key_type = T; + using init_type = T; + using constant_iterators = std::true_type; + + template + static T* new_element(Allocator* alloc, Args&&... args) + { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + T* res = absl::allocator_traits::allocate(value_alloc, 1); + absl::allocator_traits::construct(value_alloc, res, std::forward(args)...); + return res; + } + + template + static void delete_element(Allocator* alloc, T* elem) + { + using ValueAlloc = + typename absl::allocator_traits::template rebind_alloc; + ValueAlloc value_alloc(*alloc); + absl::allocator_traits::destroy(value_alloc, elem); + absl::allocator_traits::deallocate(value_alloc, elem, 1); + } + + template + static decltype(absl::container_internal::DecomposeValue( + std::declval(), std::declval()... + )) + apply(F&& f, Args&&... args) + { + return absl::container_internal::DecomposeValue( + std::forward(f), std::forward(args)... + ); + } + + static size_t element_space_used(const T*) + { + return sizeof(T); + } + }; + } // namespace container_internal + + namespace container_algorithm_internal + { + + // Specialization of trait in absl/algorithm/container.h + template + struct IsUnorderedContainer> : std::true_type + { + }; + + } // namespace container_algorithm_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_NODE_HASH_SET_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h index 500115c..9fa63b0 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h +++ b/CAPI/cpp/grpc/include/absl/debugging/failure_signal_handler.h @@ -46,76 +46,79 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// FailureSignalHandlerOptions -// -// Struct for holding `absl::InstallFailureSignalHandler()` configuration -// options. -struct FailureSignalHandlerOptions { - // If true, try to symbolize the stacktrace emitted on failure, provided that - // you have initialized a symbolizer for that purpose. 
(See symbolize.h for - // more information.) - bool symbolize_stacktrace = true; + // FailureSignalHandlerOptions + // + // Struct for holding `absl::InstallFailureSignalHandler()` configuration + // options. + struct FailureSignalHandlerOptions + { + // If true, try to symbolize the stacktrace emitted on failure, provided that + // you have initialized a symbolizer for that purpose. (See symbolize.h for + // more information.) + bool symbolize_stacktrace = true; - // If true, try to run signal handlers on an alternate stack (if supported on - // the given platform). An alternate stack is useful for program crashes due - // to a stack overflow; by running on a alternate stack, the signal handler - // may run even when normal stack space has been exausted. The downside of - // using an alternate stack is that extra memory for the alternate stack needs - // to be pre-allocated. - bool use_alternate_stack = true; + // If true, try to run signal handlers on an alternate stack (if supported on + // the given platform). An alternate stack is useful for program crashes due + // to a stack overflow; by running on a alternate stack, the signal handler + // may run even when normal stack space has been exausted. The downside of + // using an alternate stack is that extra memory for the alternate stack needs + // to be pre-allocated. + bool use_alternate_stack = true; - // If positive, indicates the number of seconds after which the failure signal - // handler is invoked to abort the program. Setting such an alarm is useful in - // cases where the failure signal handler itself may become hung or - // deadlocked. - int alarm_on_failure_secs = 3; + // If positive, indicates the number of seconds after which the failure signal + // handler is invoked to abort the program. Setting such an alarm is useful in + // cases where the failure signal handler itself may become hung or + // deadlocked. + int alarm_on_failure_secs = 3; - // If true, call the previously registered signal handler for the signal that - // was received (if one was registered) after the existing signal handler - // runs. This mechanism can be used to chain signal handlers together. - // - // If false, the signal is raised to the default handler for that signal - // (which normally terminates the program). - // - // IMPORTANT: If true, the chained fatal signal handlers must not try to - // recover from the fatal signal. Instead, they should terminate the program - // via some mechanism, like raising the default handler for the signal, or by - // calling `_exit()`. Note that the failure signal handler may put parts of - // the Abseil library into a state from which they cannot recover. - bool call_previous_handler = false; + // If true, call the previously registered signal handler for the signal that + // was received (if one was registered) after the existing signal handler + // runs. This mechanism can be used to chain signal handlers together. + // + // If false, the signal is raised to the default handler for that signal + // (which normally terminates the program). + // + // IMPORTANT: If true, the chained fatal signal handlers must not try to + // recover from the fatal signal. Instead, they should terminate the program + // via some mechanism, like raising the default handler for the signal, or by + // calling `_exit()`. Note that the failure signal handler may put parts of + // the Abseil library into a state from which they cannot recover. 
+ bool call_previous_handler = false; - // If non-null, indicates a pointer to a callback function that will be called - // upon failure, with a string argument containing failure data. This function - // may be used as a hook to write failure data to a secondary location, such - // as a log file. This function will also be called with null data, as a hint - // to flush any buffered data before the program may be terminated. Consider - // flushing any buffered data in all calls to this function. - // - // Since this function runs within a signal handler, it should be - // async-signal-safe if possible. - // See http://man7.org/linux/man-pages/man7/signal-safety.7.html - void (*writerfn)(const char*) = nullptr; -}; + // If non-null, indicates a pointer to a callback function that will be called + // upon failure, with a string argument containing failure data. This function + // may be used as a hook to write failure data to a secondary location, such + // as a log file. This function will also be called with null data, as a hint + // to flush any buffered data before the program may be terminated. Consider + // flushing any buffered data in all calls to this function. + // + // Since this function runs within a signal handler, it should be + // async-signal-safe if possible. + // See http://man7.org/linux/man-pages/man7/signal-safety.7.html + void (*writerfn)(const char*) = nullptr; + }; -// InstallFailureSignalHandler() -// -// Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`, -// `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP` (provided they exist -// on the given platform). The failure signal handler dumps program failure data -// useful for debugging in an unspecified format to stderr. This data may -// include the program counter, a stacktrace, and register information on some -// systems; do not rely on an exact format for the output, as it is subject to -// change. -void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options); + // InstallFailureSignalHandler() + // + // Installs a signal handler for the common failure signals `SIGSEGV`, `SIGILL`, + // `SIGFPE`, `SIGABRT`, `SIGTERM`, `SIGBUG`, and `SIGTRAP` (provided they exist + // on the given platform). The failure signal handler dumps program failure data + // useful for debugging in an unspecified format to stderr. This data may + // include the program counter, a stacktrace, and register information on some + // systems; do not rely on an exact format for the output, as it is subject to + // change. 
+ void InstallFailureSignalHandler(const FailureSignalHandlerOptions& options); -namespace debugging_internal { -const char* FailureSignalToString(int signo); -} // namespace debugging_internal + namespace debugging_internal + { + const char* FailureSignalToString(int signo); + } // namespace debugging_internal -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_FAILURE_SIGNAL_HANDLER_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h index 4bbaf4d..6c2403d 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/address_is_readable.h @@ -17,16 +17,18 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { -// Return whether the byte at *addr is readable, without faulting. -// Save and restores errno. -bool AddressIsReadable(const void *addr); + // Return whether the byte at *addr is readable, without faulting. + // Save and restores errno. + bool AddressIsReadable(const void* addr); -} // namespace debugging_internal -ABSL_NAMESPACE_END + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_INTERNAL_ADDRESS_IS_READABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h index c314d9b..126a886 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/demangle.h @@ -55,17 +55,19 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { -// Demangle `mangled`. On success, return true and write the -// demangled symbol name to `out`. Otherwise, return false. -// `out` is modified even if demangling is unsuccessful. -bool Demangle(const char *mangled, char *out, int out_size); + // Demangle `mangled`. On success, return true and write the + // demangled symbol name to `out`. Otherwise, return false. + // `out` is modified even if demangling is unsuccessful. + bool Demangle(const char* mangled, char* out, int out_size); -} // namespace debugging_internal -ABSL_NAMESPACE_END + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_INTERNAL_DEMANGLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h index 113071a..bde026f 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/elf_mem_image.h @@ -45,93 +45,100 @@ #define ElfW(x) __ElfN(x) #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -// An in-memory ELF image (may not exist on disk). -class ElfMemImage { - private: - // Sentinel: there could never be an elf image at &kInvalidBaseSentinel. - static const int kInvalidBaseSentinel; - - public: - // Sentinel: there could never be an elf image at this address. - static constexpr const void *const kInvalidBase = - static_cast(&kInvalidBaseSentinel); - - // Information about a single vdso symbol. - // All pointers are into .dynsym, .dynstr, or .text of the VDSO. - // Do not free() them or modify through them. 
- struct SymbolInfo { - const char *name; // E.g. "__vdso_getcpu" - const char *version; // E.g. "LINUX_2.6", could be "" - // for unversioned symbol. - const void *address; // Relocated symbol address. - const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table. - }; - - // Supports iteration over all dynamic symbols. - class SymbolIterator { - public: - friend class ElfMemImage; - const SymbolInfo *operator->() const; - const SymbolInfo &operator*() const; - SymbolIterator& operator++(); - bool operator!=(const SymbolIterator &rhs) const; - bool operator==(const SymbolIterator &rhs) const; - private: - SymbolIterator(const void *const image, int index); - void Update(int incr); - SymbolInfo info_; - int index_; - const void *const image_; - }; - - - explicit ElfMemImage(const void *base); - void Init(const void *base); - bool IsPresent() const { return ehdr_ != nullptr; } - const ElfW(Phdr)* GetPhdr(int index) const; - const ElfW(Sym)* GetDynsym(int index) const; - const ElfW(Versym)* GetVersym(int index) const; - const ElfW(Verdef)* GetVerdef(int index) const; - const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const; - const char* GetDynstr(ElfW(Word) offset) const; - const void* GetSymAddr(const ElfW(Sym) *sym) const; - const char* GetVerstr(ElfW(Word) offset) const; - int GetNumSymbols() const; - - SymbolIterator begin() const; - SymbolIterator end() const; - - // Look up versioned dynamic symbol in the image. - // Returns false if image is not present, or doesn't contain given - // symbol/version/type combination. - // If info_out is non-null, additional details are filled in. - bool LookupSymbol(const char *name, const char *version, - int symbol_type, SymbolInfo *info_out) const; - - // Find info about symbol (if any) which overlaps given address. - // Returns true if symbol was found; false if image isn't present - // or doesn't have a symbol overlapping given address. - // If info_out is non-null, additional details are filled in. - bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; - - private: - const ElfW(Ehdr) *ehdr_; - const ElfW(Sym) *dynsym_; - const ElfW(Versym) *versym_; - const ElfW(Verdef) *verdef_; - const ElfW(Word) *hash_; - const char *dynstr_; - size_t strsize_; - size_t verdefnum_; - ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD). -}; - -} // namespace debugging_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // An in-memory ELF image (may not exist on disk). + class ElfMemImage + { + private: + // Sentinel: there could never be an elf image at &kInvalidBaseSentinel. + static const int kInvalidBaseSentinel; + + public: + // Sentinel: there could never be an elf image at this address. + static constexpr const void* const kInvalidBase = + static_cast(&kInvalidBaseSentinel); + + // Information about a single vdso symbol. + // All pointers are into .dynsym, .dynstr, or .text of the VDSO. + // Do not free() them or modify through them. + struct SymbolInfo + { + const char* name; // E.g. "__vdso_getcpu" + const char* version; // E.g. "LINUX_2.6", could be "" + // for unversioned symbol. + const void* address; // Relocated symbol address. + const ElfW(Sym) * symbol; // Symbol in the dynamic symbol table. + }; + + // Supports iteration over all dynamic symbols. 
+ class SymbolIterator + { + public: + friend class ElfMemImage; + const SymbolInfo* operator->() const; + const SymbolInfo& operator*() const; + SymbolIterator& operator++(); + bool operator!=(const SymbolIterator& rhs) const; + bool operator==(const SymbolIterator& rhs) const; + + private: + SymbolIterator(const void* const image, int index); + void Update(int incr); + SymbolInfo info_; + int index_; + const void* const image_; + }; + + explicit ElfMemImage(const void* base); + void Init(const void* base); + bool IsPresent() const + { + return ehdr_ != nullptr; + } + const ElfW(Phdr) * GetPhdr(int index) const; + const ElfW(Sym) * GetDynsym(int index) const; + const ElfW(Versym) * GetVersym(int index) const; + const ElfW(Verdef) * GetVerdef(int index) const; + const ElfW(Verdaux) * GetVerdefAux(const ElfW(Verdef) * verdef) const; + const char* GetDynstr(ElfW(Word) offset) const; + const void* GetSymAddr(const ElfW(Sym) * sym) const; + const char* GetVerstr(ElfW(Word) offset) const; + int GetNumSymbols() const; + + SymbolIterator begin() const; + SymbolIterator end() const; + + // Look up versioned dynamic symbol in the image. + // Returns false if image is not present, or doesn't contain given + // symbol/version/type combination. + // If info_out is non-null, additional details are filled in. + bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if image isn't present + // or doesn't have a symbol overlapping given address. + // If info_out is non-null, additional details are filled in. + bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const; + + private: + const ElfW(Ehdr) * ehdr_; + const ElfW(Sym) * dynsym_; + const ElfW(Versym) * versym_; + const ElfW(Verdef) * verdef_; + const ElfW(Word) * hash_; + const char* dynstr_; + size_t strsize_; + size_t verdefnum_; + ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD). + }; + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_HAVE_ELF_MEM_IMAGE diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h index 190af87..914290e 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/examine_stack.h @@ -19,46 +19,41 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -// Type of function used for printing in stack trace dumping, etc. -// We avoid closures to keep things simple. -typedef void OutputWriter(const char*, void*); - -// RegisterDebugStackTraceHook() allows to register a single routine -// `hook` that is called each time DumpStackTrace() is called. -// `hook` may be called from a signal handler. -typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, - OutputWriter* writer, void* writer_arg); - -// Registration of SymbolizeUrlEmitter for use inside of a signal handler. -// This is inherently unsafe and must be signal safe code. -void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); -SymbolizeUrlEmitter GetDebugStackTraceHook(); - -// Returns the program counter from signal context, or nullptr if -// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of -// ucontext_t on non-POSIX systems. 
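A rough sketch of pointing the internal ElfMemImage interface above at the vDSO on a Linux/glibc platform where ABSL_HAVE_ELF_MEM_IMAGE is defined; getauxval and AT_SYSINFO_EHDR are platform assumptions, not part of this header, and the VDSOSupport class later in this diff wraps this for you:

#include <sys/auxv.h>

#include "absl/debugging/internal/elf_mem_image.h"

void ListVdsoSymbols() {
  // AT_SYSINFO_EHDR is the vDSO base the kernel passes in the auxiliary vector.
  const void* base =
      reinterpret_cast<const void*>(getauxval(AT_SYSINFO_EHDR));
  absl::debugging_internal::ElfMemImage image(base);
  if (!image.IsPresent()) return;
  for (const auto& symbol : image) {
    // symbol.name / symbol.version / symbol.address point into the mapped
    // image; do not free or modify them (see the SymbolInfo comment above).
    (void)symbol;
  }
}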
-void* GetProgramCounter(void* const vuc); - -// Uses `writer` to dump the program counter, stack trace, and stack -// frame sizes. -void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], - int frame_sizes[], int depth, - int min_dropped_frames, - bool symbolize_stacktrace, - OutputWriter* writer, void* writer_arg); - -// Dump current stack trace omitting the topmost `min_dropped_frames` stack -// frames. -void DumpStackTrace(int min_dropped_frames, int max_num_frames, - bool symbolize_stacktrace, OutputWriter* writer, - void* writer_arg); - -} // namespace debugging_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Type of function used for printing in stack trace dumping, etc. + // We avoid closures to keep things simple. + typedef void OutputWriter(const char*, void*); + + // RegisterDebugStackTraceHook() allows to register a single routine + // `hook` that is called each time DumpStackTrace() is called. + // `hook` may be called from a signal handler. + typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth, OutputWriter* writer, void* writer_arg); + + // Registration of SymbolizeUrlEmitter for use inside of a signal handler. + // This is inherently unsafe and must be signal safe code. + void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook); + SymbolizeUrlEmitter GetDebugStackTraceHook(); + + // Returns the program counter from signal context, or nullptr if + // unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of + // ucontext_t on non-POSIX systems. + void* GetProgramCounter(void* const vuc); + + // Uses `writer` to dump the program counter, stack trace, and stack + // frame sizes. + void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[], int frame_sizes[], int depth, int min_dropped_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg); + + // Dump current stack trace omitting the topmost `min_dropped_frames` stack + // frames. + void DumpStackTrace(int min_dropped_frames, int max_num_frames, bool symbolize_stacktrace, OutputWriter* writer, void* writer_arg); + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_INTERNAL_EXAMINE_STACK_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h index f41b64c..2afbeee 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stack_consumption.h @@ -29,20 +29,22 @@ defined(__aarch64__) || defined(__riscv)) #define ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION 1 -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -// Returns the stack consumption in bytes for the code exercised by -// signal_handler. To measure stack consumption, signal_handler is registered -// as a signal handler, so the code that it exercises must be async-signal -// safe. The argument of signal_handler is an implementation detail of signal -// handlers and should ignored by the code for signal_handler. Use global -// variables to pass information between your test code and signal_handler. -int GetSignalHandlerStackConsumption(void (*signal_handler)(int)); - -} // namespace debugging_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Returns the stack consumption in bytes for the code exercised by + // signal_handler. 
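The internal examine_stack.h helpers above take a plain function pointer plus an opaque argument; a minimal sketch of a writer that forwards everything to stderr (the names WriteToStderr and DumpCurrentStack are illustrative):

#include <cstdio>

#include "absl/debugging/internal/examine_stack.h"

// Matches the OutputWriter typedef above: (const char*, void*).
static void WriteToStderr(const char* text, void* /*writer_arg*/) {
  std::fputs(text, stderr);
}

void DumpCurrentStack() {
  absl::debugging_internal::DumpStackTrace(/*min_dropped_frames=*/0,
                                           /*max_num_frames=*/32,
                                           /*symbolize_stacktrace=*/true,
                                           WriteToStderr,
                                           /*writer_arg=*/nullptr);
}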
To measure stack consumption, signal_handler is registered + // as a signal handler, so the code that it exercises must be async-signal + // safe. The argument of signal_handler is an implementation detail of signal + // handlers and should ignored by the code for signal_handler. Use global + // variables to pass information between your test code and signal_handler. + int GetSignalHandlerStackConsumption(void (*signal_handler)(int)); + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h index 3929b1b..5f0ff7a 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/stacktrace_config.h @@ -34,13 +34,13 @@ #ifdef ABSL_HAVE_THREAD_LOCAL // Thread local support required for UnwindImpl. #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_generic-inl.inc" + "absl/debugging/internal/stacktrace_generic-inl.inc" #endif // defined(ABSL_HAVE_THREAD_LOCAL) // Emscripten stacktraces rely on JS. Do not use them in standalone mode. #elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_emscripten-inl.inc" + "absl/debugging/internal/stacktrace_emscripten-inl.inc" #elif defined(__linux__) && !defined(__ANDROID__) @@ -49,31 +49,31 @@ // Note: The libunwind-based implementation is not available to open-source // users. #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_libunwind-inl.inc" + "absl/debugging/internal/stacktrace_libunwind-inl.inc" #define STACKTRACE_USES_LIBUNWIND 1 #elif defined(NO_FRAME_POINTER) && defined(__has_include) #if __has_include() // Note: When using glibc this may require -funwind-tables to function properly. #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_generic-inl.inc" + "absl/debugging/internal/stacktrace_generic-inl.inc" #endif // __has_include() #elif defined(__i386__) || defined(__x86_64__) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_x86-inl.inc" + "absl/debugging/internal/stacktrace_x86-inl.inc" #elif defined(__ppc__) || defined(__PPC__) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_powerpc-inl.inc" + "absl/debugging/internal/stacktrace_powerpc-inl.inc" #elif defined(__aarch64__) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_aarch64-inl.inc" + "absl/debugging/internal/stacktrace_aarch64-inl.inc" #elif defined(__riscv) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_riscv-inl.inc" + "absl/debugging/internal/stacktrace_riscv-inl.inc" #elif defined(__has_include) #if __has_include() // Note: When using glibc this may require -funwind-tables to function properly. #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_generic-inl.inc" + "absl/debugging/internal/stacktrace_generic-inl.inc" #endif // __has_include() #endif // defined(__has_include) @@ -82,7 +82,7 @@ // Fallback to the empty implementation. 
#if !defined(ABSL_STACKTRACE_INL_HEADER) #define ABSL_STACKTRACE_INL_HEADER \ - "absl/debugging/internal/stacktrace_unimplemented-inl.inc" + "absl/debugging/internal/stacktrace_unimplemented-inl.inc" #endif #endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h index 27d5e65..0430a58 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/symbolize.h @@ -28,8 +28,7 @@ #ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE #error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set -#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \ - && !defined(__asmjs__) && !defined(__wasm__) +#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) && !defined(__asmjs__) && !defined(__wasm__) #define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1 #include @@ -37,27 +36,26 @@ #include #include -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -// Iterates over all sections, invoking callback on each with the section name -// and the section header. -// -// Returns true on success; otherwise returns false in case of errors. -// -// This is not async-signal-safe. -bool ForEachSection(int fd, - const std::function& callback); - -// Gets the section header for the given name, if it exists. Returns true on -// success. Otherwise, returns false. -bool GetSectionHeaderByName(int fd, const char *name, size_t name_len, - ElfW(Shdr) *out); - -} // namespace debugging_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // Iterates over all sections, invoking callback on each with the section name + // and the section header. + // + // Returns true on success; otherwise returns false in case of errors. + // + // This is not async-signal-safe. + bool ForEachSection(int fd, const std::function& callback); + + // Gets the section header for the given name, if it exists. Returns true on + // success. Otherwise, returns false. + bool GetSectionHeaderByName(int fd, const char* name, size_t name_len, ElfW(Shdr) * out); + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE @@ -74,68 +72,69 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1 #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -struct SymbolDecoratorArgs { - // The program counter we are getting symbolic name for. - const void *pc; - // 0 for main executable, load address for shared libraries. - ptrdiff_t relocation; - // Read-only file descriptor for ELF image covering "pc", - // or -1 if no such ELF image exists in /proc/self/maps. - int fd; - // Output buffer, size. - // Note: the buffer may not be empty -- default symbolizer may have already - // produced some output, and earlier decorators may have adorned it in - // some way. You are free to replace or augment the contents (within the - // symbol_buf_size limit). - char *const symbol_buf; - size_t symbol_buf_size; - // Temporary scratch space, size. - // Use that space in preference to allocating your own stack buffer to - // conserve stack. - char *const tmp_buf; - size_t tmp_buf_size; - // User-provided argument - void* arg; -}; -using SymbolDecorator = void (*)(const SymbolDecoratorArgs *); - -// Installs a function-pointer as a decorator. 
Returns a value less than zero -// if the system cannot install the decorator. Otherwise, returns a unique -// identifier corresponding to the decorator. This identifier can be used to -// uninstall the decorator - See RemoveSymbolDecorator() below. -int InstallSymbolDecorator(SymbolDecorator decorator, void* arg); - -// Removes a previously installed function-pointer decorator. Parameter "ticket" -// is the return-value from calling InstallSymbolDecorator(). -bool RemoveSymbolDecorator(int ticket); - -// Remove all installed decorators. Returns true if successful, false if -// symbolization is currently in progress. -bool RemoveAllSymbolDecorators(void); - -// Registers an address range to a file mapping. -// -// Preconditions: -// start <= end -// filename != nullptr -// -// Returns true if the file was successfully registered. -bool RegisterFileMappingHint(const void* start, const void* end, - uint64_t offset, const char* filename); - -// Looks up the file mapping registered by RegisterFileMappingHint for an -// address range. If there is one, the file name is stored in *filename and -// *start and *end are modified to reflect the registered mapping. Returns -// whether any hint was found. -bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, - const char** filename); - -} // namespace debugging_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + struct SymbolDecoratorArgs + { + // The program counter we are getting symbolic name for. + const void* pc; + // 0 for main executable, load address for shared libraries. + ptrdiff_t relocation; + // Read-only file descriptor for ELF image covering "pc", + // or -1 if no such ELF image exists in /proc/self/maps. + int fd; + // Output buffer, size. + // Note: the buffer may not be empty -- default symbolizer may have already + // produced some output, and earlier decorators may have adorned it in + // some way. You are free to replace or augment the contents (within the + // symbol_buf_size limit). + char* const symbol_buf; + size_t symbol_buf_size; + // Temporary scratch space, size. + // Use that space in preference to allocating your own stack buffer to + // conserve stack. + char* const tmp_buf; + size_t tmp_buf_size; + // User-provided argument + void* arg; + }; + using SymbolDecorator = void (*)(const SymbolDecoratorArgs*); + + // Installs a function-pointer as a decorator. Returns a value less than zero + // if the system cannot install the decorator. Otherwise, returns a unique + // identifier corresponding to the decorator. This identifier can be used to + // uninstall the decorator - See RemoveSymbolDecorator() below. + int InstallSymbolDecorator(SymbolDecorator decorator, void* arg); + + // Removes a previously installed function-pointer decorator. Parameter "ticket" + // is the return-value from calling InstallSymbolDecorator(). + bool RemoveSymbolDecorator(int ticket); + + // Remove all installed decorators. Returns true if successful, false if + // symbolization is currently in progress. + bool RemoveAllSymbolDecorators(void); + + // Registers an address range to a file mapping. + // + // Preconditions: + // start <= end + // filename != nullptr + // + // Returns true if the file was successfully registered. + bool RegisterFileMappingHint(const void* start, const void* end, uint64_t offset, const char* filename); + + // Looks up the file mapping registered by RegisterFileMappingHint for an + // address range. 
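The decorator hook above receives a SymbolDecoratorArgs describing the in-progress symbolization. A hypothetical decorator that appends a fixed tag to whatever the default symbolizer already wrote might look like this (TagDecorator and the " [tagged]" suffix are made up for illustration):

#include <cstdio>
#include <cstring>

#include "absl/debugging/internal/symbolize.h"

// Appends a tag to the existing output, staying within symbol_buf_size as
// the comment above requires.
static void TagDecorator(
    const absl::debugging_internal::SymbolDecoratorArgs* args) {
  size_t used = std::strlen(args->symbol_buf);
  if (used < args->symbol_buf_size) {
    std::snprintf(args->symbol_buf + used, args->symbol_buf_size - used,
                  " [tagged]");
  }
}

void InstallTagDecorator() {
  int ticket =
      absl::debugging_internal::InstallSymbolDecorator(TagDecorator, nullptr);
  if (ticket >= 0) {
    // Keep `ticket`; RemoveSymbolDecorator(ticket) uninstalls the hook later.
  }
}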
If there is one, the file name is stored in *filename and + // *start and *end are modified to reflect the registered mapping. Returns + // whether any hint was found. + bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename); + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // __cplusplus @@ -147,7 +146,6 @@ extern "C" #endif // __cplusplus bool - AbslInternalGetFileMappingHint(const void** start, const void** end, - uint64_t* offset, const char** filename); + AbslInternalGetFileMappingHint(const void** start, const void** end, uint64_t* offset, const char** filename); #endif // ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h index 6562c6c..6707098 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h +++ b/CAPI/cpp/grpc/include/absl/debugging/internal/vdso_support.h @@ -52,105 +52,122 @@ #define ABSL_HAVE_VDSO_SUPPORT 1 #endif -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace debugging_internal { - -// NOTE: this class may be used from within tcmalloc, and can not -// use any memory allocation routines. -class VDSOSupport { - public: - VDSOSupport(); - - typedef ElfMemImage::SymbolInfo SymbolInfo; - typedef ElfMemImage::SymbolIterator SymbolIterator; - - // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE - // depending on how the kernel is built. The kernel is normally built with - // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a - // compile-time constant. +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace debugging_internal + { + + // NOTE: this class may be used from within tcmalloc, and can not + // use any memory allocation routines. + class VDSOSupport + { + public: + VDSOSupport(); + + typedef ElfMemImage::SymbolInfo SymbolInfo; + typedef ElfMemImage::SymbolIterator SymbolIterator; + + // On PowerPC64 VDSO symbols can either be of type STT_FUNC or STT_NOTYPE + // depending on how the kernel is built. The kernel is normally built with + // STT_NOTYPE type VDSO symbols. Let's make things simpler first by using a + // compile-time constant. #ifdef __powerpc64__ - enum { kVDSOSymbolType = STT_NOTYPE }; + enum + { + kVDSOSymbolType = STT_NOTYPE + }; #else - enum { kVDSOSymbolType = STT_FUNC }; + enum + { + kVDSOSymbolType = STT_FUNC + }; #endif - // Answers whether we have a vdso at all. - bool IsPresent() const { return image_.IsPresent(); } - - // Allow to iterate over all VDSO symbols. - SymbolIterator begin() const { return image_.begin(); } - SymbolIterator end() const { return image_.end(); } - - // Look up versioned dynamic symbol in the kernel VDSO. - // Returns false if VDSO is not present, or doesn't contain given - // symbol/version/type combination. - // If info_out != nullptr, additional details are filled in. - bool LookupSymbol(const char *name, const char *version, - int symbol_type, SymbolInfo *info_out) const; - - // Find info about symbol (if any) which overlaps given address. - // Returns true if symbol was found; false if VDSO isn't present - // or doesn't have a symbol overlapping given address. - // If info_out != nullptr, additional details are filled in. - bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; - - // Used only for testing. Replace real VDSO base with a mock. - // Returns previous value of vdso_base_. 
After you are done testing, - // you are expected to call SetBase() with previous value, in order to - // reset state to the way it was. - const void *SetBase(const void *s); - - // Computes vdso_base_ and returns it. Should be called as early as - // possible; before any thread creation, chroot or setuid. - static const void *Init(); - - private: - // image_ represents VDSO ELF image in memory. - // image_.ehdr_ == nullptr implies there is no VDSO. - ElfMemImage image_; - - // Cached value of auxv AT_SYSINFO_EHDR, computed once. - // This is a tri-state: - // kInvalidBase => value hasn't been determined yet. - // 0 => there is no VDSO. - // else => vma of VDSO Elf{32,64}_Ehdr. - // - // When testing with mock VDSO, low bit is set. - // The low bit is always available because vdso_base_ is - // page-aligned. - static std::atomic vdso_base_; - - // NOLINT on 'long' because these routines mimic kernel api. - // The 'cache' parameter may be used by some versions of the kernel, - // and should be nullptr or point to a static buffer containing at - // least two 'long's. - static long InitAndGetCPU(unsigned *cpu, void *cache, // NOLINT 'long'. - void *unused); - static long GetCPUViaSyscall(unsigned *cpu, void *cache, // NOLINT 'long'. - void *unused); - typedef long (*GetCpuFn)(unsigned *cpu, void *cache, // NOLINT 'long'. - void *unused); - - // This function pointer may point to InitAndGetCPU, - // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization. - ABSL_CONST_INIT static std::atomic getcpu_fn_; - - friend int GetCPU(void); // Needs access to getcpu_fn_. - - VDSOSupport(const VDSOSupport&) = delete; - VDSOSupport& operator=(const VDSOSupport&) = delete; -}; - -// Same as sched_getcpu() on later glibc versions. -// Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present, -// otherwise use syscall(SYS_getcpu,...). -// May return -1 with errno == ENOSYS if the kernel doesn't -// support SYS_getcpu. -int GetCPU(); - -} // namespace debugging_internal -ABSL_NAMESPACE_END + // Answers whether we have a vdso at all. + bool IsPresent() const + { + return image_.IsPresent(); + } + + // Allow to iterate over all VDSO symbols. + SymbolIterator begin() const + { + return image_.begin(); + } + SymbolIterator end() const + { + return image_.end(); + } + + // Look up versioned dynamic symbol in the kernel VDSO. + // Returns false if VDSO is not present, or doesn't contain given + // symbol/version/type combination. + // If info_out != nullptr, additional details are filled in. + bool LookupSymbol(const char* name, const char* version, int symbol_type, SymbolInfo* info_out) const; + + // Find info about symbol (if any) which overlaps given address. + // Returns true if symbol was found; false if VDSO isn't present + // or doesn't have a symbol overlapping given address. + // If info_out != nullptr, additional details are filled in. + bool LookupSymbolByAddress(const void* address, SymbolInfo* info_out) const; + + // Used only for testing. Replace real VDSO base with a mock. + // Returns previous value of vdso_base_. After you are done testing, + // you are expected to call SetBase() with previous value, in order to + // reset state to the way it was. + const void* SetBase(const void* s); + + // Computes vdso_base_ and returns it. Should be called as early as + // possible; before any thread creation, chroot or setuid. + static const void* Init(); + + private: + // image_ represents VDSO ELF image in memory. + // image_.ehdr_ == nullptr implies there is no VDSO. 
+ ElfMemImage image_; + + // Cached value of auxv AT_SYSINFO_EHDR, computed once. + // This is a tri-state: + // kInvalidBase => value hasn't been determined yet. + // 0 => there is no VDSO. + // else => vma of VDSO Elf{32,64}_Ehdr. + // + // When testing with mock VDSO, low bit is set. + // The low bit is always available because vdso_base_ is + // page-aligned. + static std::atomic vdso_base_; + + // NOLINT on 'long' because these routines mimic kernel api. + // The 'cache' parameter may be used by some versions of the kernel, + // and should be nullptr or point to a static buffer containing at + // least two 'long's. + static long InitAndGetCPU(unsigned* cpu, void* cache, // NOLINT 'long'. + void* unused); + static long GetCPUViaSyscall(unsigned* cpu, void* cache, // NOLINT 'long'. + void* unused); + typedef long (*GetCpuFn)(unsigned* cpu, void* cache, // NOLINT 'long'. + void* unused); + + // This function pointer may point to InitAndGetCPU, + // GetCPUViaSyscall, or __vdso_getcpu at different stages of initialization. + ABSL_CONST_INIT static std::atomic getcpu_fn_; + + friend int GetCPU(void); // Needs access to getcpu_fn_. + + VDSOSupport(const VDSOSupport&) = delete; + VDSOSupport& operator=(const VDSOSupport&) = delete; + }; + + // Same as sched_getcpu() on later glibc versions. + // Return current CPU, using (fast) __vdso_getcpu@LINUX_2.6 if present, + // otherwise use syscall(SYS_getcpu,...). + // May return -1 with errno == ENOSYS if the kernel doesn't + // support SYS_getcpu. + int GetCPU(); + + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_HAVE_ELF_MEM_IMAGE diff --git a/CAPI/cpp/grpc/include/absl/debugging/leak_check.h b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h index eff162f..10d21e1 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/leak_check.h +++ b/CAPI/cpp/grpc/include/absl/debugging/leak_check.h @@ -51,100 +51,103 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// HaveLeakSanitizer() -// -// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is -// currently built into this target. -bool HaveLeakSanitizer(); + // HaveLeakSanitizer() + // + // Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is + // currently built into this target. + bool HaveLeakSanitizer(); -// LeakCheckerIsActive() -// -// Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is -// currently built into this target and is turned on. -bool LeakCheckerIsActive(); + // LeakCheckerIsActive() + // + // Returns true if a leak-checking sanitizer (either ASan or standalone LSan) is + // currently built into this target and is turned on. + bool LeakCheckerIsActive(); -// DoIgnoreLeak() -// -// Implements `IgnoreLeak()` below. This function should usually -// not be called directly; calling `IgnoreLeak()` is preferred. -void DoIgnoreLeak(const void* ptr); + // DoIgnoreLeak() + // + // Implements `IgnoreLeak()` below. This function should usually + // not be called directly; calling `IgnoreLeak()` is preferred. + void DoIgnoreLeak(const void* ptr); -// IgnoreLeak() -// -// Instruct the leak sanitizer to ignore leak warnings on the object referenced -// by the passed pointer, as well as all heap objects transitively referenced -// by it. The passed object pointer can point to either the beginning of the -// object or anywhere within it. 
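The vdso_support.h interface above is what backs the fast CPU lookup; a minimal sketch, guarded the same way the header guards its own declarations:

#include "absl/debugging/internal/vdso_support.h"

int CurrentCpuOrMinusOne() {
#ifdef ABSL_HAVE_VDSO_SUPPORT
  // Uses __vdso_getcpu@LINUX_2.6 when present, otherwise falls back to
  // syscall(SYS_getcpu, ...); may return -1 with errno == ENOSYS.
  return absl::debugging_internal::GetCPU();
#else
  return -1;
#endif
}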
-// -// Example: -// -// static T* obj = IgnoreLeak(new T(...)); -// -// If the passed `ptr` does not point to an actively allocated object at the -// time `IgnoreLeak()` is called, the call is a no-op; if it is actively -// allocated, leak sanitizer will assume this object is referenced even if -// there is no actual reference in user memory. -// -template -T* IgnoreLeak(T* ptr) { - DoIgnoreLeak(ptr); - return ptr; -} + // IgnoreLeak() + // + // Instruct the leak sanitizer to ignore leak warnings on the object referenced + // by the passed pointer, as well as all heap objects transitively referenced + // by it. The passed object pointer can point to either the beginning of the + // object or anywhere within it. + // + // Example: + // + // static T* obj = IgnoreLeak(new T(...)); + // + // If the passed `ptr` does not point to an actively allocated object at the + // time `IgnoreLeak()` is called, the call is a no-op; if it is actively + // allocated, leak sanitizer will assume this object is referenced even if + // there is no actual reference in user memory. + // + template + T* IgnoreLeak(T* ptr) + { + DoIgnoreLeak(ptr); + return ptr; + } -// FindAndReportLeaks() -// -// If any leaks are detected, prints a leak report and returns true. This -// function may be called repeatedly, and does not affect end-of-process leak -// checking. -// -// Example: -// if (FindAndReportLeaks()) { -// ... diagnostic already printed. Exit with failure code. -// exit(1) -// } -bool FindAndReportLeaks(); + // FindAndReportLeaks() + // + // If any leaks are detected, prints a leak report and returns true. This + // function may be called repeatedly, and does not affect end-of-process leak + // checking. + // + // Example: + // if (FindAndReportLeaks()) { + // ... diagnostic already printed. Exit with failure code. + // exit(1) + // } + bool FindAndReportLeaks(); -// LeakCheckDisabler -// -// This helper class indicates that any heap allocations done in the code block -// covered by the scoped object, which should be allocated on the stack, will -// not be reported as leaks. Leak check disabling will occur within the code -// block and any nested function calls within the code block. -// -// Example: -// -// void Foo() { -// LeakCheckDisabler disabler; -// ... code that allocates objects whose leaks should be ignored ... -// } -// -// REQUIRES: Destructor runs in same thread as constructor -class LeakCheckDisabler { - public: - LeakCheckDisabler(); - LeakCheckDisabler(const LeakCheckDisabler&) = delete; - LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete; - ~LeakCheckDisabler(); -}; + // LeakCheckDisabler + // + // This helper class indicates that any heap allocations done in the code block + // covered by the scoped object, which should be allocated on the stack, will + // not be reported as leaks. Leak check disabling will occur within the code + // block and any nested function calls within the code block. + // + // Example: + // + // void Foo() { + // LeakCheckDisabler disabler; + // ... code that allocates objects whose leaks should be ignored ... 
+ // } + // + // REQUIRES: Destructor runs in same thread as constructor + class LeakCheckDisabler + { + public: + LeakCheckDisabler(); + LeakCheckDisabler(const LeakCheckDisabler&) = delete; + LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete; + ~LeakCheckDisabler(); + }; -// RegisterLivePointers() -// -// Registers `ptr[0,size-1]` as pointers to memory that is still actively being -// referenced and for which leak checking should be ignored. This function is -// useful if you store pointers in mapped memory, for memory ranges that we know -// are correct but for which normal analysis would flag as leaked code. -void RegisterLivePointers(const void* ptr, size_t size); + // RegisterLivePointers() + // + // Registers `ptr[0,size-1]` as pointers to memory that is still actively being + // referenced and for which leak checking should be ignored. This function is + // useful if you store pointers in mapped memory, for memory ranges that we know + // are correct but for which normal analysis would flag as leaked code. + void RegisterLivePointers(const void* ptr, size_t size); -// UnRegisterLivePointers() -// -// Deregisters the pointers previously marked as active in -// `RegisterLivePointers()`, enabling leak checking of those pointers. -void UnRegisterLivePointers(const void* ptr, size_t size); + // UnRegisterLivePointers() + // + // Deregisters the pointers previously marked as active in + // `RegisterLivePointers()`, enabling leak checking of those pointers. + void UnRegisterLivePointers(const void* ptr, size_t size); -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_LEAK_CHECK_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h index 0ec0ffd..28c7598 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h +++ b/CAPI/cpp/grpc/include/absl/debugging/stacktrace.h @@ -33,199 +33,191 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// GetStackFrames() -// -// Records program counter values for up to `max_depth` frames, skipping the -// most recent `skip_count` stack frames, stores their corresponding values -// and sizes in `results` and `sizes` buffers, and returns the number of frames -// stored. (Note that the frame generated for the `absl::GetStackFrames()` -// routine itself is also skipped.) -// -// Example: -// -// main() { foo(); } -// foo() { bar(); } -// bar() { -// void* result[10]; -// int sizes[10]; -// int depth = absl::GetStackFrames(result, sizes, 10, 1); -// } -// -// The current stack frame would consist of three function calls: `bar()`, -// `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets -// `skip_count` to `1`, it will skip the frame for `bar()`, the most recently -// invoked function call. It will therefore return 2 and fill `result` with -// program counters within the following functions: -// -// result[0] foo() -// result[1] main() -// -// (Note: in practice, a few more entries after `main()` may be added to account -// for startup processes.) -// -// Corresponding stack frame sizes will also be recorded: -// -// sizes[0] 16 -// sizes[1] 16 -// -// (Stack frame sizes of `16` above are just for illustration purposes.) -// -// Stack frame sizes of 0 or less indicate that those frame sizes couldn't -// be identified. -// -// This routine may return fewer stack frame entries than are -// available. Also note that `result` and `sizes` must both be non-null. 
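A short usage sketch for the leak_check.h interface above, combining the pointer-level and scope-level suppressions (the allocations are placeholders):

#include "absl/debugging/leak_check.h"

void MakeIntentionalLeaks() {
  // Pointer-level suppression: this object and anything reachable from it
  // will not be reported.
  static int* kept = absl::IgnoreLeak(new int(42));
  (void)kept;

  // Scope-level suppression: every allocation made while `disabler` is in
  // scope is ignored, including allocations in nested calls.
  absl::LeakCheckDisabler disabler;
  int* scratch = new int[16];
  (void)scratch;
}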
-extern int GetStackFrames(void** result, int* sizes, int max_depth, - int skip_count); + // GetStackFrames() + // + // Records program counter values for up to `max_depth` frames, skipping the + // most recent `skip_count` stack frames, stores their corresponding values + // and sizes in `results` and `sizes` buffers, and returns the number of frames + // stored. (Note that the frame generated for the `absl::GetStackFrames()` + // routine itself is also skipped.) + // + // Example: + // + // main() { foo(); } + // foo() { bar(); } + // bar() { + // void* result[10]; + // int sizes[10]; + // int depth = absl::GetStackFrames(result, sizes, 10, 1); + // } + // + // The current stack frame would consist of three function calls: `bar()`, + // `foo()`, and then `main()`; however, since the `GetStackFrames()` call sets + // `skip_count` to `1`, it will skip the frame for `bar()`, the most recently + // invoked function call. It will therefore return 2 and fill `result` with + // program counters within the following functions: + // + // result[0] foo() + // result[1] main() + // + // (Note: in practice, a few more entries after `main()` may be added to account + // for startup processes.) + // + // Corresponding stack frame sizes will also be recorded: + // + // sizes[0] 16 + // sizes[1] 16 + // + // (Stack frame sizes of `16` above are just for illustration purposes.) + // + // Stack frame sizes of 0 or less indicate that those frame sizes couldn't + // be identified. + // + // This routine may return fewer stack frame entries than are + // available. Also note that `result` and `sizes` must both be non-null. + extern int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count); -// GetStackFramesWithContext() -// -// Records program counter values obtained from a signal handler. Records -// program counter values for up to `max_depth` frames, skipping the most recent -// `skip_count` stack frames, stores their corresponding values and sizes in -// `results` and `sizes` buffers, and returns the number of frames stored. (Note -// that the frame generated for the `absl::GetStackFramesWithContext()` routine -// itself is also skipped.) -// -// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value -// passed to a signal handler registered via the `sa_sigaction` field of a -// `sigaction` struct. (See -// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may -// help a stack unwinder to provide a better stack trace under certain -// conditions. `uc` may safely be null. -// -// The `min_dropped_frames` output parameter, if non-null, points to the -// location to note any dropped stack frames, if any, due to buffer limitations -// or other reasons. (This value will be set to `0` if no frames were dropped.) -// The number of total stack frames is guaranteed to be >= skip_count + -// max_depth + *min_dropped_frames. -extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth, - int skip_count, const void* uc, - int* min_dropped_frames); + // GetStackFramesWithContext() + // + // Records program counter values obtained from a signal handler. Records + // program counter values for up to `max_depth` frames, skipping the most recent + // `skip_count` stack frames, stores their corresponding values and sizes in + // `results` and `sizes` buffers, and returns the number of frames stored. (Note + // that the frame generated for the `absl::GetStackFramesWithContext()` routine + // itself is also skipped.) 
+ // + // The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value + // passed to a signal handler registered via the `sa_sigaction` field of a + // `sigaction` struct. (See + // http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may + // help a stack unwinder to provide a better stack trace under certain + // conditions. `uc` may safely be null. + // + // The `min_dropped_frames` output parameter, if non-null, points to the + // location to note any dropped stack frames, if any, due to buffer limitations + // or other reasons. (This value will be set to `0` if no frames were dropped.) + // The number of total stack frames is guaranteed to be >= skip_count + + // max_depth + *min_dropped_frames. + extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); -// GetStackTrace() -// -// Records program counter values for up to `max_depth` frames, skipping the -// most recent `skip_count` stack frames, stores their corresponding values -// in `results`, and returns the number of frames -// stored. Note that this function is similar to `absl::GetStackFrames()` -// except that it returns the stack trace only, and not stack frame sizes. -// -// Example: -// -// main() { foo(); } -// foo() { bar(); } -// bar() { -// void* result[10]; -// int depth = absl::GetStackTrace(result, 10, 1); -// } -// -// This produces: -// -// result[0] foo -// result[1] main -// .... ... -// -// `result` must not be null. -extern int GetStackTrace(void** result, int max_depth, int skip_count); + // GetStackTrace() + // + // Records program counter values for up to `max_depth` frames, skipping the + // most recent `skip_count` stack frames, stores their corresponding values + // in `results`, and returns the number of frames + // stored. Note that this function is similar to `absl::GetStackFrames()` + // except that it returns the stack trace only, and not stack frame sizes. + // + // Example: + // + // main() { foo(); } + // foo() { bar(); } + // bar() { + // void* result[10]; + // int depth = absl::GetStackTrace(result, 10, 1); + // } + // + // This produces: + // + // result[0] foo + // result[1] main + // .... ... + // + // `result` must not be null. + extern int GetStackTrace(void** result, int max_depth, int skip_count); -// GetStackTraceWithContext() -// -// Records program counter values obtained from a signal handler. Records -// program counter values for up to `max_depth` frames, skipping the most recent -// `skip_count` stack frames, stores their corresponding values in `results`, -// and returns the number of frames stored. (Note that the frame generated for -// the `absl::GetStackFramesWithContext()` routine itself is also skipped.) -// -// The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value -// passed to a signal handler registered via the `sa_sigaction` field of a -// `sigaction` struct. (See -// http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may -// help a stack unwinder to provide a better stack trace under certain -// conditions. `uc` may safely be null. -// -// The `min_dropped_frames` output parameter, if non-null, points to the -// location to note any dropped stack frames, if any, due to buffer limitations -// or other reasons. (This value will be set to `0` if no frames were dropped.) -// The number of total stack frames is guaranteed to be >= skip_count + -// max_depth + *min_dropped_frames. 
-extern int GetStackTraceWithContext(void** result, int max_depth, - int skip_count, const void* uc, - int* min_dropped_frames); + // GetStackTraceWithContext() + // + // Records program counter values obtained from a signal handler. Records + // program counter values for up to `max_depth` frames, skipping the most recent + // `skip_count` stack frames, stores their corresponding values in `results`, + // and returns the number of frames stored. (Note that the frame generated for + // the `absl::GetStackFramesWithContext()` routine itself is also skipped.) + // + // The `uc` parameter, if non-null, should be a pointer to a `ucontext_t` value + // passed to a signal handler registered via the `sa_sigaction` field of a + // `sigaction` struct. (See + // http://man7.org/linux/man-pages/man2/sigaction.2.html.) The `uc` value may + // help a stack unwinder to provide a better stack trace under certain + // conditions. `uc` may safely be null. + // + // The `min_dropped_frames` output parameter, if non-null, points to the + // location to note any dropped stack frames, if any, due to buffer limitations + // or other reasons. (This value will be set to `0` if no frames were dropped.) + // The number of total stack frames is guaranteed to be >= skip_count + + // max_depth + *min_dropped_frames. + extern int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); -// SetStackUnwinder() -// -// Provides a custom function for unwinding stack frames that will be used in -// place of the default stack unwinder when invoking the static -// GetStack{Frames,Trace}{,WithContext}() functions above. -// -// The arguments passed to the unwinder function will match the -// arguments passed to `absl::GetStackFramesWithContext()` except that sizes -// will be non-null iff the caller is interested in frame sizes. -// -// If unwinder is set to null, we revert to the default stack-tracing behavior. -// -// ***************************************************************************** -// WARNING -// ***************************************************************************** -// -// absl::SetStackUnwinder is not suitable for general purpose use. It is -// provided for custom runtimes. -// Some things to watch out for when calling `absl::SetStackUnwinder()`: -// -// (a) The unwinder may be called from within signal handlers and -// therefore must be async-signal-safe. -// -// (b) Even after a custom stack unwinder has been unregistered, other -// threads may still be in the process of using that unwinder. -// Therefore do not clean up any state that may be needed by an old -// unwinder. -// ***************************************************************************** -extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes, - int max_depth, int skip_count, - const void* uc, - int* min_dropped_frames)); + // SetStackUnwinder() + // + // Provides a custom function for unwinding stack frames that will be used in + // place of the default stack unwinder when invoking the static + // GetStack{Frames,Trace}{,WithContext}() functions above. + // + // The arguments passed to the unwinder function will match the + // arguments passed to `absl::GetStackFramesWithContext()` except that sizes + // will be non-null iff the caller is interested in frame sizes. + // + // If unwinder is set to null, we revert to the default stack-tracing behavior. 
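The public stack-trace entry points above are commonly paired with absl::Symbolize(), declared in symbolize.h later in this diff; a minimal sketch (PrintCurrentStack is an illustrative name):

#include <cstdio>

#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"

void PrintCurrentStack() {
  void* pcs[32];
  // skip_count=1 drops this frame itself, as in the GetStackTrace() example.
  int depth = absl::GetStackTrace(pcs, 32, /*skip_count=*/1);
  for (int i = 0; i < depth; ++i) {
    char name[1024];
    const char* symbol = "(unknown)";
    if (absl::Symbolize(pcs[i], name, sizeof(name))) symbol = name;
    std::printf("%p  %s\n", pcs[i], symbol);
  }
}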
+ // + // ***************************************************************************** + // WARNING + // ***************************************************************************** + // + // absl::SetStackUnwinder is not suitable for general purpose use. It is + // provided for custom runtimes. + // Some things to watch out for when calling `absl::SetStackUnwinder()`: + // + // (a) The unwinder may be called from within signal handlers and + // therefore must be async-signal-safe. + // + // (b) Even after a custom stack unwinder has been unregistered, other + // threads may still be in the process of using that unwinder. + // Therefore do not clean up any state that may be needed by an old + // unwinder. + // ***************************************************************************** + extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames)); -// DefaultStackUnwinder() -// -// Records program counter values of up to `max_depth` frames, skipping the most -// recent `skip_count` stack frames, and stores their corresponding values in -// `pcs`. (Note that the frame generated for this call itself is also skipped.) -// This function acts as a generic stack-unwinder; prefer usage of the more -// specific `GetStack{Trace,Frames}{,WithContext}()` functions above. -// -// If you have set your own stack unwinder (with the `SetStackUnwinder()` -// function above, you can still get the default stack unwinder by calling -// `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder -// and use the default one instead. -// -// Because this function is generic, only `pcs` is guaranteed to be non-null -// upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all -// be null when called. -// -// The semantics are the same as the corresponding `GetStack*()` function in the -// case where `absl::SetStackUnwinder()` was never called. Equivalents are: -// -// null sizes | non-nullptr sizes -// |==========================================================| -// null uc | GetStackTrace() | GetStackFrames() | -// non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() | -// |==========================================================| -extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth, - int skip_count, const void* uc, - int* min_dropped_frames); + // DefaultStackUnwinder() + // + // Records program counter values of up to `max_depth` frames, skipping the most + // recent `skip_count` stack frames, and stores their corresponding values in + // `pcs`. (Note that the frame generated for this call itself is also skipped.) + // This function acts as a generic stack-unwinder; prefer usage of the more + // specific `GetStack{Trace,Frames}{,WithContext}()` functions above. + // + // If you have set your own stack unwinder (with the `SetStackUnwinder()` + // function above, you can still get the default stack unwinder by calling + // `DefaultStackUnwinder()`, which will ignore any previously set stack unwinder + // and use the default one instead. + // + // Because this function is generic, only `pcs` is guaranteed to be non-null + // upon return. It is legal for `sizes`, `uc`, and `min_dropped_frames` to all + // be null when called. + // + // The semantics are the same as the corresponding `GetStack*()` function in the + // case where `absl::SetStackUnwinder()` was never called. 
Equivalents are: + // + // null sizes | non-nullptr sizes + // |==========================================================| + // null uc | GetStackTrace() | GetStackFrames() | + // non-null uc | GetStackTraceWithContext() | GetStackFramesWithContext() | + // |==========================================================| + extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames); -namespace debugging_internal { -// Returns true for platforms which are expected to have functioning stack trace -// implementations. Intended to be used for tests which want to exclude -// verification of logic known to be broken because stack traces are not -// working. -extern bool StackTraceWorksForTest(); -} // namespace debugging_internal -ABSL_NAMESPACE_END + namespace debugging_internal + { + // Returns true for platforms which are expected to have functioning stack trace + // implementations. Intended to be used for tests which want to exclude + // verification of logic known to be broken because stack traces are not + // working. + extern bool StackTraceWorksForTest(); + } // namespace debugging_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_STACKTRACE_H_ diff --git a/CAPI/cpp/grpc/include/absl/debugging/symbolize.h b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h index 43d93a8..db68479 100644 --- a/CAPI/cpp/grpc/include/absl/debugging/symbolize.h +++ b/CAPI/cpp/grpc/include/absl/debugging/symbolize.h @@ -54,46 +54,47 @@ #include "absl/debugging/internal/symbolize.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// InitializeSymbolizer() -// -// Initializes the program counter symbolizer, given the path of the program -// (typically obtained through `main()`s `argv[0]`). The Abseil symbolizer -// allows you to read program counters (instruction pointer values) using their -// human-readable names within output such as stack traces. -// -// Example: -// -// int main(int argc, char *argv[]) { -// absl::InitializeSymbolizer(argv[0]); -// // Now you can use the symbolizer -// } -void InitializeSymbolizer(const char* argv0); -// -// Symbolize() -// -// Symbolizes a program counter (instruction pointer value) `pc` and, on -// success, writes the name to `out`. The symbol name is demangled, if possible. -// Note that the symbolized name may be truncated and will be NUL-terminated. -// Demangling is supported for symbols generated by GCC 3.x or newer). Returns -// `false` on failure. -// -// Example: -// -// // Print a program counter and its symbol name. -// static void DumpPCAndSymbol(void *pc) { -// char tmp[1024]; -// const char *symbol = "(unknown)"; -// if (absl::Symbolize(pc, tmp, sizeof(tmp))) { -// symbol = tmp; -// } -// absl::PrintF("%p %s\n", pc, symbol); -// } -bool Symbolize(const void *pc, char *out, int out_size); + // InitializeSymbolizer() + // + // Initializes the program counter symbolizer, given the path of the program + // (typically obtained through `main()`s `argv[0]`). The Abseil symbolizer + // allows you to read program counters (instruction pointer values) using their + // human-readable names within output such as stack traces. 
+ // + // Example: + // + // int main(int argc, char *argv[]) { + // absl::InitializeSymbolizer(argv[0]); + // // Now you can use the symbolizer + // } + void InitializeSymbolizer(const char* argv0); + // + // Symbolize() + // + // Symbolizes a program counter (instruction pointer value) `pc` and, on + // success, writes the name to `out`. The symbol name is demangled, if possible. + // Note that the symbolized name may be truncated and will be NUL-terminated. + // Demangling is supported for symbols generated by GCC 3.x or newer). Returns + // `false` on failure. + // + // Example: + // + // // Print a program counter and its symbol name. + // static void DumpPCAndSymbol(void *pc) { + // char tmp[1024]; + // const char *symbol = "(unknown)"; + // if (absl::Symbolize(pc, tmp, sizeof(tmp))) { + // symbol = tmp; + // } + // absl::PrintF("%p %s\n", pc, symbol); + // } + bool Symbolize(const void* pc, char* out, int out_size); -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_DEBUGGING_SYMBOLIZE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h index f2fa089..7208510 100644 --- a/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h +++ b/CAPI/cpp/grpc/include/absl/flags/commandlineflag.h @@ -35,166 +35,176 @@ #include "absl/strings/string_view.h" #include "absl/types/optional.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { -class PrivateHandleAccessor; -} // namespace flags_internal - -// CommandLineFlag -// -// This type acts as a type-erased handle for an instance of an Abseil Flag and -// holds reflection information pertaining to that flag. Use CommandLineFlag to -// access a flag's name, location, help string etc. -// -// To obtain an absl::CommandLineFlag, invoke `absl::FindCommandLineFlag()` -// passing it the flag name string. -// -// Example: -// -// // Obtain reflection handle for a flag named "flagname". -// const absl::CommandLineFlag* my_flag_data = -// absl::FindCommandLineFlag("flagname"); -// -// // Now you can get flag info from that reflection handle. -// std::string flag_location = my_flag_data->Filename(); -// ... -class CommandLineFlag { - public: - constexpr CommandLineFlag() = default; - - // Not copyable/assignable. - CommandLineFlag(const CommandLineFlag&) = delete; - CommandLineFlag& operator=(const CommandLineFlag&) = delete; - - // absl::CommandLineFlag::IsOfType() - // - // Return true iff flag has type T. - template - inline bool IsOfType() const { - return TypeId() == base_internal::FastTypeId(); - } - - // absl::CommandLineFlag::TryGet() - // - // Attempts to retrieve the flag value. Returns value on success, - // absl::nullopt otherwise. - template - absl::optional TryGet() const { - if (IsRetired() || !IsOfType()) { - return absl::nullopt; - } - - // Implementation notes: +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + class PrivateHandleAccessor; + } // namespace flags_internal + + // CommandLineFlag + // + // This type acts as a type-erased handle for an instance of an Abseil Flag and + // holds reflection information pertaining to that flag. Use CommandLineFlag to + // access a flag's name, location, help string etc. // - // We are wrapping a union around the value of `T` to serve three purposes: + // To obtain an absl::CommandLineFlag, invoke `absl::FindCommandLineFlag()` + // passing it the flag name string. // - // 1. `U.value` has correct size and alignment for a value of type `T` - // 2. 
The `U.value` constructor is not invoked since U's constructor does - // not do it explicitly. - // 3. The `U.value` destructor is invoked since U's destructor does it - // explicitly. This makes `U` a kind of RAII wrapper around non default - // constructible value of T, which is destructed when we leave the - // scope. We do need to destroy U.value, which is constructed by - // CommandLineFlag::Read even though we left it in a moved-from state - // after std::move. + // Example: // - // All of this serves to avoid requiring `T` being default constructible. - union U { - T value; - U() {} - ~U() { value.~T(); } + // // Obtain reflection handle for a flag named "flagname". + // const absl::CommandLineFlag* my_flag_data = + // absl::FindCommandLineFlag("flagname"); + // + // // Now you can get flag info from that reflection handle. + // std::string flag_location = my_flag_data->Filename(); + // ... + class CommandLineFlag + { + public: + constexpr CommandLineFlag() = default; + + // Not copyable/assignable. + CommandLineFlag(const CommandLineFlag&) = delete; + CommandLineFlag& operator=(const CommandLineFlag&) = delete; + + // absl::CommandLineFlag::IsOfType() + // + // Return true iff flag has type T. + template + inline bool IsOfType() const + { + return TypeId() == base_internal::FastTypeId(); + } + + // absl::CommandLineFlag::TryGet() + // + // Attempts to retrieve the flag value. Returns value on success, + // absl::nullopt otherwise. + template + absl::optional TryGet() const + { + if (IsRetired() || !IsOfType()) + { + return absl::nullopt; + } + + // Implementation notes: + // + // We are wrapping a union around the value of `T` to serve three purposes: + // + // 1. `U.value` has correct size and alignment for a value of type `T` + // 2. The `U.value` constructor is not invoked since U's constructor does + // not do it explicitly. + // 3. The `U.value` destructor is invoked since U's destructor does it + // explicitly. This makes `U` a kind of RAII wrapper around non default + // constructible value of T, which is destructed when we leave the + // scope. We do need to destroy U.value, which is constructed by + // CommandLineFlag::Read even though we left it in a moved-from state + // after std::move. + // + // All of this serves to avoid requiring `T` being default constructible. + union U + { + T value; + U() + { + } + ~U() + { + value.~T(); + } + }; + U u; + + Read(&u.value); + // allow retired flags to be "read", so we can report invalid access. + if (IsRetired()) + { + return absl::nullopt; + } + return std::move(u.value); + } + + // absl::CommandLineFlag::Name() + // + // Returns name of this flag. + virtual absl::string_view Name() const = 0; + + // absl::CommandLineFlag::Filename() + // + // Returns name of the file where this flag is defined. + virtual std::string Filename() const = 0; + + // absl::CommandLineFlag::Help() + // + // Returns help message associated with this flag. + virtual std::string Help() const = 0; + + // absl::CommandLineFlag::IsRetired() + // + // Returns true iff this object corresponds to retired flag. + virtual bool IsRetired() const; + + // absl::CommandLineFlag::DefaultValue() + // + // Returns the default value for this flag. + virtual std::string DefaultValue() const = 0; + + // absl::CommandLineFlag::CurrentValue() + // + // Returns the current value for this flag. + virtual std::string CurrentValue() const = 0; + + // absl::CommandLineFlag::ParseFrom() + // + // Sets the value of the flag based on specified string `value`. 
If the flag + // was successfully set to new value, it returns true. Otherwise, sets `error` + // to indicate the error, leaves the flag unchanged, and returns false. + bool ParseFrom(absl::string_view value, std::string* error); + + protected: + ~CommandLineFlag() = default; + + private: + friend class flags_internal::PrivateHandleAccessor; + + // Sets the value of the flag based on specified string `value`. If the flag + // was successfully set to new value, it returns true. Otherwise, sets `error` + // to indicate the error, leaves the flag unchanged, and returns false. There + // are three ways to set the flag's value: + // * Update the current flag value + // * Update the flag's default value + // * Update the current flag value if it was never set before + // The mode is selected based on `set_mode` parameter. + virtual bool ParseFrom(absl::string_view value, flags_internal::FlagSettingMode set_mode, flags_internal::ValueSource source, std::string& error) = 0; + + // Returns id of the flag's value type. + virtual flags_internal::FlagFastTypeId TypeId() const = 0; + + // Interface to save flag to some persistent state. Returns current flag state + // or nullptr if flag does not support saving and restoring a state. + virtual std::unique_ptr SaveState() = 0; + + // Copy-construct a new value of the flag's type in a memory referenced by + // the dst based on the current flag's value. + virtual void Read(void* dst) const = 0; + + // To be deleted. Used to return true if flag's current value originated from + // command line. + virtual bool IsSpecifiedOnCommandLine() const = 0; + + // Validates supplied value usign validator or parseflag routine + virtual bool ValidateInputValue(absl::string_view value) const = 0; + + // Checks that flags default value can be converted to string and back to the + // flag's value type. + virtual void CheckDefaultValueParsingRoundtrip() const = 0; }; - U u; - - Read(&u.value); - // allow retired flags to be "read", so we can report invalid access. - if (IsRetired()) { - return absl::nullopt; - } - return std::move(u.value); - } - - // absl::CommandLineFlag::Name() - // - // Returns name of this flag. - virtual absl::string_view Name() const = 0; - - // absl::CommandLineFlag::Filename() - // - // Returns name of the file where this flag is defined. - virtual std::string Filename() const = 0; - - // absl::CommandLineFlag::Help() - // - // Returns help message associated with this flag. - virtual std::string Help() const = 0; - - // absl::CommandLineFlag::IsRetired() - // - // Returns true iff this object corresponds to retired flag. - virtual bool IsRetired() const; - - // absl::CommandLineFlag::DefaultValue() - // - // Returns the default value for this flag. - virtual std::string DefaultValue() const = 0; - - // absl::CommandLineFlag::CurrentValue() - // - // Returns the current value for this flag. - virtual std::string CurrentValue() const = 0; - - // absl::CommandLineFlag::ParseFrom() - // - // Sets the value of the flag based on specified string `value`. If the flag - // was successfully set to new value, it returns true. Otherwise, sets `error` - // to indicate the error, leaves the flag unchanged, and returns false. - bool ParseFrom(absl::string_view value, std::string* error); - - protected: - ~CommandLineFlag() = default; - - private: - friend class flags_internal::PrivateHandleAccessor; - - // Sets the value of the flag based on specified string `value`. If the flag - // was successfully set to new value, it returns true. 
Otherwise, sets `error` - // to indicate the error, leaves the flag unchanged, and returns false. There - // are three ways to set the flag's value: - // * Update the current flag value - // * Update the flag's default value - // * Update the current flag value if it was never set before - // The mode is selected based on `set_mode` parameter. - virtual bool ParseFrom(absl::string_view value, - flags_internal::FlagSettingMode set_mode, - flags_internal::ValueSource source, - std::string& error) = 0; - - // Returns id of the flag's value type. - virtual flags_internal::FlagFastTypeId TypeId() const = 0; - - // Interface to save flag to some persistent state. Returns current flag state - // or nullptr if flag does not support saving and restoring a state. - virtual std::unique_ptr SaveState() = 0; - - // Copy-construct a new value of the flag's type in a memory referenced by - // the dst based on the current flag's value. - virtual void Read(void* dst) const = 0; - - // To be deleted. Used to return true if flag's current value originated from - // command line. - virtual bool IsSpecifiedOnCommandLine() const = 0; - - // Validates supplied value usign validator or parseflag routine - virtual bool ValidateInputValue(absl::string_view value) const = 0; - - // Checks that flags default value can be converted to string and back to the - // flag's value type. - virtual void CheckDefaultValueParsingRoundtrip() const = 0; -}; - -ABSL_NAMESPACE_END + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_COMMANDLINEFLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/config.h b/CAPI/cpp/grpc/include/absl/flags/config.h index 14c4235..9565e48 100644 --- a/CAPI/cpp/grpc/include/absl/flags/config.h +++ b/CAPI/cpp/grpc/include/absl/flags/config.h @@ -47,22 +47,22 @@ // These macros represent the "source of truth" for the list of supported // built-in types. -#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ - A(bool, bool) \ - A(short, short) \ - A(unsigned short, unsigned_short) \ - A(int, int) \ - A(unsigned int, unsigned_int) \ - A(long, long) \ - A(unsigned long, unsigned_long) \ - A(long long, long_long) \ - A(unsigned long long, unsigned_long_long) \ - A(double, double) \ - A(float, float) +#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(bool, bool) \ + A(short, short) \ + A(unsigned short, unsigned_short) \ + A(int, int) \ + A(unsigned int, unsigned_int) \ + A(long, long) \ + A(unsigned long, unsigned_long) \ + A(long long, long_long) \ + A(unsigned long long, unsigned_long_long) \ + A(double, double) \ + A(float, float) #define ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(A) \ - ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ - A(std::string, std_string) \ - A(std::vector, std_vector_of_string) + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \ + A(std::string, std_string) \ + A(std::vector, std_vector_of_string) #endif // ABSL_FLAGS_CONFIG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/declare.h b/CAPI/cpp/grpc/include/absl/flags/declare.h index d1437bb..d9d50fa 100644 --- a/CAPI/cpp/grpc/include/absl/flags/declare.h +++ b/CAPI/cpp/grpc/include/absl/flags/declare.h @@ -27,28 +27,30 @@ #include "absl/base/config.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { -// absl::Flag represents a flag of type 'T' created by ABSL_FLAG. -template -class Flag; + // absl::Flag represents a flag of type 'T' created by ABSL_FLAG. 
+ template + class Flag; -} // namespace flags_internal + } // namespace flags_internal // Flag // // Forward declaration of the `absl::Flag` type for use in defining the macro. #if defined(_MSC_VER) && !defined(__clang__) -template -class Flag; + template + class Flag; #else -template -using Flag = flags_internal::Flag; + template + using Flag = flags_internal::Flag; #endif -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl // ABSL_DECLARE_FLAG() @@ -64,10 +66,12 @@ ABSL_NAMESPACE_END // Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its // arguments. Clients must use ABSL_DECLARE_FLAG instead. -#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ - extern absl::Flag FLAGS_##name; \ - namespace absl /* block flags in namespaces */ {} \ - /* second redeclaration is to allow applying attributes */ \ - extern absl::Flag FLAGS_##name +#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ + extern absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ \ + { \ + } \ + /* second redeclaration is to allow applying attributes */ \ + extern absl::Flag FLAGS_##name #endif // ABSL_FLAGS_DECLARE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/flag.h b/CAPI/cpp/grpc/include/absl/flags/flag.h index b7f94be..7ce03a4 100644 --- a/CAPI/cpp/grpc/include/absl/flags/flag.h +++ b/CAPI/cpp/grpc/include/absl/flags/flag.h @@ -40,8 +40,9 @@ #include "absl/flags/internal/registry.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN // Flag // @@ -72,73 +73,76 @@ ABSL_NAMESPACE_BEGIN // discusses supported standard types, optional flags, and additional Abseil // type support. #if !defined(_MSC_VER) || defined(__clang__) -template -using Flag = flags_internal::Flag; + template + using Flag = flags_internal::Flag; #else #include "absl/flags/internal/flag_msvc.inc" #endif -// GetFlag() -// -// Returns the value (of type `T`) of an `absl::Flag` instance, by value. Do -// not construct an `absl::Flag` directly and call `absl::GetFlag()`; -// instead, refer to flag's constructed variable name (e.g. `FLAGS_name`). -// Because this function returns by value and not by reference, it is -// thread-safe, but note that the operation may be expensive; as a result, avoid -// `absl::GetFlag()` within any tight loops. -// -// Example: -// -// // FLAGS_count is a Flag of type `int` -// int my_count = absl::GetFlag(FLAGS_count); -// -// // FLAGS_firstname is a Flag of type `std::string` -// std::string first_name = absl::GetFlag(FLAGS_firstname); -template -ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag) { - return flags_internal::FlagImplPeer::InvokeGet(flag); -} + // GetFlag() + // + // Returns the value (of type `T`) of an `absl::Flag` instance, by value. Do + // not construct an `absl::Flag` directly and call `absl::GetFlag()`; + // instead, refer to flag's constructed variable name (e.g. `FLAGS_name`). + // Because this function returns by value and not by reference, it is + // thread-safe, but note that the operation may be expensive; as a result, avoid + // `absl::GetFlag()` within any tight loops. 
+ // + // Example: + // + // // FLAGS_count is a Flag of type `int` + // int my_count = absl::GetFlag(FLAGS_count); + // + // // FLAGS_firstname is a Flag of type `std::string` + // std::string first_name = absl::GetFlag(FLAGS_firstname); + template + ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag) + { + return flags_internal::FlagImplPeer::InvokeGet(flag); + } -// SetFlag() -// -// Sets the value of an `absl::Flag` to the value `v`. Do not construct an -// `absl::Flag` directly and call `absl::SetFlag()`; instead, use the -// flag's variable name (e.g. `FLAGS_name`). This function is -// thread-safe, but is potentially expensive. Avoid setting flags in general, -// but especially within performance-critical code. -template -void SetFlag(absl::Flag* flag, const T& v) { - flags_internal::FlagImplPeer::InvokeSet(*flag, v); -} + // SetFlag() + // + // Sets the value of an `absl::Flag` to the value `v`. Do not construct an + // `absl::Flag` directly and call `absl::SetFlag()`; instead, use the + // flag's variable name (e.g. `FLAGS_name`). This function is + // thread-safe, but is potentially expensive. Avoid setting flags in general, + // but especially within performance-critical code. + template + void SetFlag(absl::Flag* flag, const T& v) + { + flags_internal::FlagImplPeer::InvokeSet(*flag, v); + } -// Overload of `SetFlag()` to allow callers to pass in a value that is -// convertible to `T`. E.g., use this overload to pass a "const char*" when `T` -// is `std::string`. -template -void SetFlag(absl::Flag* flag, const V& v) { - T value(v); - flags_internal::FlagImplPeer::InvokeSet(*flag, value); -} + // Overload of `SetFlag()` to allow callers to pass in a value that is + // convertible to `T`. E.g., use this overload to pass a "const char*" when `T` + // is `std::string`. + template + void SetFlag(absl::Flag* flag, const V& v) + { + T value(v); + flags_internal::FlagImplPeer::InvokeSet(*flag, value); + } -// GetFlagReflectionHandle() -// -// Returns the reflection handle corresponding to specified Abseil Flag -// instance. Use this handle to access flag's reflection information, like name, -// location, default value etc. -// -// Example: -// -// std::string = absl::GetFlagReflectionHandle(FLAGS_count).DefaultValue(); + // GetFlagReflectionHandle() + // + // Returns the reflection handle corresponding to specified Abseil Flag + // instance. Use this handle to access flag's reflection information, like name, + // location, default value etc. + // + // Example: + // + // std::string = absl::GetFlagReflectionHandle(FLAGS_count).DefaultValue(); -template -const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f) { - return flags_internal::FlagImplPeer::InvokeReflect(f); -} + template + const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f) + { + return flags_internal::FlagImplPeer::InvokeReflect(f); + } -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl - // ABSL_FLAG() // // This macro defines an `absl::Flag` instance of a specified type `T`: @@ -167,7 +171,7 @@ ABSL_NAMESPACE_END // Note: do not construct objects of type `absl::Flag` directly. Only use the // `ABSL_FLAG()` macro for such construction. 
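// [Editor's sketch, not part of the original header] Putting the pieces above
// together: define a flag once with ABSL_FLAG(), declare it in other
// translation units with ABSL_DECLARE_FLAG() (from absl/flags/declare.h), and
// access it with absl::GetFlag()/absl::SetFlag(). File and flag names here are
// hypothetical.
//
//   // server_flags.cc
//   #include "absl/flags/flag.h"
//   ABSL_FLAG(int, port, 8080, "TCP port the (hypothetical) server listens on");
//
//   // server.cc
//   #include "absl/flags/declare.h"
//   #include "absl/flags/flag.h"
//   ABSL_DECLARE_FLAG(int, port);
//
//   void Start() {
//     int port = absl::GetFlag(FLAGS_port);             // thread-safe read by value
//     if (port == 0) absl::SetFlag(&FLAGS_port, 8080);  // potentially expensive write
//   }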
#define ABSL_FLAG(Type, name, default_value, help) \ - ABSL_FLAG_IMPL(Type, name, default_value, help) + ABSL_FLAG_IMPL(Type, name, default_value, help) // ABSL_FLAG().OnUpdate() // @@ -198,11 +202,12 @@ ABSL_NAMESPACE_END // ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_NAMES #if !defined(_MSC_VER) || defined(__clang__) #define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag -#define ABSL_FLAG_IMPL_HELP_ARG(name) \ - absl::flags_internal::HelpArg( \ - FLAGS_help_storage_##name) +#define ABSL_FLAG_IMPL_HELP_ARG(name) \ + absl::flags_internal::HelpArg( \ + FLAGS_help_storage_##name \ + ) #define ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name) \ - absl::flags_internal::DefaultArg(0) + absl::flags_internal::DefaultArg(0) #else #define ABSL_FLAG_IMPL_FLAG_PTR(flag) flag.GetImpl() #define ABSL_FLAG_IMPL_HELP_ARG(name) &AbslFlagHelpGenFor##name::NonConst @@ -212,15 +217,13 @@ ABSL_NAMESPACE_END #if ABSL_FLAGS_STRIP_NAMES #define ABSL_FLAG_IMPL_FLAGNAME(txt) "" #define ABSL_FLAG_IMPL_FILENAME() "" -#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ - absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ - nullptr) +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), nullptr) #else #define ABSL_FLAG_IMPL_FLAGNAME(txt) txt #define ABSL_FLAG_IMPL_FILENAME() __FILE__ -#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ - absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), \ - __FILE__) +#define ABSL_FLAG_IMPL_REGISTRAR(T, flag) \ + absl::flags_internal::FlagRegistrar(ABSL_FLAG_IMPL_FLAG_PTR(flag), __FILE__) #endif // ABSL_FLAG_IMPL macro definition conditional on ABSL_FLAGS_STRIP_HELP @@ -239,46 +242,56 @@ ABSL_NAMESPACE_END // TODO(rogeeff): place these generated structs into local namespace and apply // ABSL_INTERNAL_UNIQUE_SHORT_NAME. // TODO(rogeeff): Apply __attribute__((nodebug)) to FLAGS_help_storage_##name -#define ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, txt) \ - struct AbslFlagHelpGenFor##name { \ - /* The expression is run in the caller as part of the */ \ - /* default value argument. That keeps temporaries alive */ \ - /* long enough for NonConst to work correctly. */ \ - static constexpr absl::string_view Value( \ - absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt)) { \ - return absl_flag_help; \ - } \ - static std::string NonConst() { return std::string(Value()); } \ - }; \ - constexpr auto FLAGS_help_storage_##name ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ - ABSL_ATTRIBUTE_SECTION_VARIABLE(flags_help_cold) = \ - absl::flags_internal::HelpStringAsArray( \ - 0); +#define ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, txt) \ + struct AbslFlagHelpGenFor##name \ + { \ + /* The expression is run in the caller as part of the */ \ + /* default value argument. That keeps temporaries alive */ \ + /* long enough for NonConst to work correctly. 
*/ \ + static constexpr absl::string_view Value( \ + absl::string_view absl_flag_help = ABSL_FLAG_IMPL_FLAGHELP(txt) \ + ) \ + { \ + return absl_flag_help; \ + } \ + static std::string NonConst() \ + { \ + return std::string(Value()); \ + } \ + }; \ + constexpr auto FLAGS_help_storage_##name ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ + ABSL_ATTRIBUTE_SECTION_VARIABLE(flags_help_cold) = \ + absl::flags_internal::HelpStringAsArray( \ + 0 \ + ); -#define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ - struct AbslFlagDefaultGenFor##name { \ - Type value = absl::flags_internal::InitDefaultValue(default_value); \ - static void Gen(void* absl_flag_default_loc) { \ - new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ - } \ - }; +#define ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + struct AbslFlagDefaultGenFor##name \ + { \ + Type value = absl::flags_internal::InitDefaultValue(default_value); \ + static void Gen(void* absl_flag_default_loc) \ + { \ + new (absl_flag_default_loc) Type(AbslFlagDefaultGenFor##name{}.value); \ + } \ + }; // ABSL_FLAG_IMPL // // Note: Name of registrar object is not arbitrary. It is used to "grab" // global name for FLAGS_no symbol, thus preventing the possibility // of defining two flags with names foo and nofoo. -#define ABSL_FLAG_IMPL(Type, name, default_value, help) \ - extern ::absl::Flag FLAGS_##name; \ - namespace absl /* block flags in namespaces */ {} \ - ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ - ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ - ABSL_CONST_INIT absl::Flag FLAGS_##name{ \ - ABSL_FLAG_IMPL_FLAGNAME(#name), ABSL_FLAG_IMPL_FILENAME(), \ - ABSL_FLAG_IMPL_HELP_ARG(name), ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name)}; \ - extern absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name; \ - absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name = \ - ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name) +#define ABSL_FLAG_IMPL(Type, name, default_value, help) \ + extern ::absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ \ + { \ + } \ + ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \ + ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \ + ABSL_CONST_INIT absl::Flag FLAGS_##name{ \ + ABSL_FLAG_IMPL_FLAGNAME(#name), ABSL_FLAG_IMPL_FILENAME(), ABSL_FLAG_IMPL_HELP_ARG(name), ABSL_FLAG_IMPL_DEFAULT_ARG(Type, name)}; \ + extern absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name; \ + absl::flags_internal::FlagRegistrarEmpty FLAGS_no##name = \ + ABSL_FLAG_IMPL_REGISTRAR(Type, FLAGS_##name) // ABSL_RETIRED_FLAG // @@ -301,10 +314,10 @@ ABSL_NAMESPACE_END // unused. // TODO(rogeeff): replace RETIRED_FLAGS with FLAGS once forward declarations of // retired flags are cleaned up. 
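// [Editor's sketch, not part of the original header] A retired flag keeps old
// command lines that still pass the flag from breaking, while the flag itself
// no longer affects program behavior. The flag name below is hypothetical.
//
//   ABSL_RETIRED_FLAG(bool, use_legacy_codepath, false,
//                     "retired: the legacy codepath has been removed");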
-#define ABSL_RETIRED_FLAG(type, name, default_value, explanation) \ - static absl::flags_internal::RetiredFlag RETIRED_FLAGS_##name; \ - ABSL_ATTRIBUTE_UNUSED static const auto RETIRED_FLAGS_REG_##name = \ - (RETIRED_FLAGS_##name.Retire(#name), \ - ::absl::flags_internal::FlagRegistrarEmpty{}) +#define ABSL_RETIRED_FLAG(type, name, default_value, explanation) \ + static absl::flags_internal::RetiredFlag RETIRED_FLAGS_##name; \ + ABSL_ATTRIBUTE_UNUSED static const auto RETIRED_FLAGS_REG_##name = \ + (RETIRED_FLAGS_##name.Retire(#name), \ + ::absl::flags_internal::FlagRegistrarEmpty{}) #endif // ABSL_FLAGS_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h index ebfe81b..b05099e 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/commandlineflag.h @@ -19,50 +19,55 @@ #include "absl/base/config.h" #include "absl/base/internal/fast_type_id.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { -// An alias for flag fast type id. This value identifies the flag value type -// similarly to typeid(T), without relying on RTTI being available. In most -// cases this id is enough to uniquely identify the flag's value type. In a few -// cases we'll have to resort to using actual RTTI implementation if it is -// available. -using FlagFastTypeId = absl::base_internal::FastTypeIdType; + // An alias for flag fast type id. This value identifies the flag value type + // similarly to typeid(T), without relying on RTTI being available. In most + // cases this id is enough to uniquely identify the flag's value type. In a few + // cases we'll have to resort to using actual RTTI implementation if it is + // available. + using FlagFastTypeId = absl::base_internal::FastTypeIdType; -// Options that control SetCommandLineOptionWithMode. -enum FlagSettingMode { - // update the flag's value unconditionally (can call this multiple times). - SET_FLAGS_VALUE, - // update the flag's value, but *only if* it has not yet been updated - // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef". - SET_FLAG_IF_DEFAULT, - // set the flag's default value to this. If the flag has not been updated - // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef") - // change the flag's current value to the new default value as well. - SET_FLAGS_DEFAULT -}; + // Options that control SetCommandLineOptionWithMode. + enum FlagSettingMode + { + // update the flag's value unconditionally (can call this multiple times). + SET_FLAGS_VALUE, + // update the flag's value, but *only if* it has not yet been updated + // with SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef". + SET_FLAG_IF_DEFAULT, + // set the flag's default value to this. If the flag has not been updated + // yet (via SET_FLAGS_VALUE, SET_FLAG_IF_DEFAULT, or "FLAGS_xxx = nondef") + // change the flag's current value to the new default value as well. + SET_FLAGS_DEFAULT + }; -// Options that control ParseFrom: Source of a value. -enum ValueSource { - // Flag is being set by value specified on a command line. - kCommandLine, - // Flag is being set by value specified in the code. - kProgrammaticChange, -}; + // Options that control ParseFrom: Source of a value. + enum ValueSource + { + // Flag is being set by value specified on a command line. 
+ kCommandLine, + // Flag is being set by value specified in the code. + kProgrammaticChange, + }; -// Handle to FlagState objects. Specific flag state objects will restore state -// of a flag produced this flag state from method CommandLineFlag::SaveState(). -class FlagStateInterface { - public: - virtual ~FlagStateInterface(); + // Handle to FlagState objects. Specific flag state objects will restore state + // of a flag produced this flag state from method CommandLineFlag::SaveState(). + class FlagStateInterface + { + public: + virtual ~FlagStateInterface(); - // Restores the flag originated this object to the saved state. - virtual void Restore() const = 0; -}; + // Restores the flag originated this object to the saved state. + virtual void Restore() const = 0; + }; -} // namespace flags_internal -ABSL_NAMESPACE_END + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_COMMANDLINEFLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/flag.h b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h index 6154638..683f91f 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/flag.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/flag.h @@ -44,757 +44,880 @@ #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -/////////////////////////////////////////////////////////////////////////////// -// Forward declaration of absl::Flag public API. -namespace flags_internal { -template -class Flag; -} // namespace flags_internal +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + /////////////////////////////////////////////////////////////////////////////// + // Forward declaration of absl::Flag public API. + namespace flags_internal + { + template + class Flag; + } // namespace flags_internal #if defined(_MSC_VER) && !defined(__clang__) -template -class Flag; + template + class Flag; #else -template -using Flag = flags_internal::Flag; + template + using Flag = flags_internal::Flag; #endif -template -ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag); - -template -void SetFlag(absl::Flag* flag, const T& v); - -template -void SetFlag(absl::Flag* flag, const V& v); - -template -const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f); - -/////////////////////////////////////////////////////////////////////////////// -// Flag value type operations, eg., parsing, copying, etc. are provided -// by function specific to that type with a signature matching FlagOpFn. - -namespace flags_internal { - -enum class FlagOp { - kAlloc, - kDelete, - kCopy, - kCopyConstruct, - kSizeof, - kFastTypeId, - kRuntimeTypeId, - kParse, - kUnparse, - kValueOffset, -}; -using FlagOpFn = void* (*)(FlagOp, const void*, void*, void*); - -// Forward declaration for Flag value specific operations. -template -void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3); - -// Allocate aligned memory for a flag value. -inline void* Alloc(FlagOpFn op) { - return op(FlagOp::kAlloc, nullptr, nullptr, nullptr); -} -// Deletes memory interpreting obj as flag value type pointer. -inline void Delete(FlagOpFn op, void* obj) { - op(FlagOp::kDelete, nullptr, obj, nullptr); -} -// Copies src to dst interpreting as flag value type pointers. -inline void Copy(FlagOpFn op, const void* src, void* dst) { - op(FlagOp::kCopy, src, dst, nullptr); -} -// Construct a copy of flag value in a location pointed by dst -// based on src - pointer to the flag's value. 
-inline void CopyConstruct(FlagOpFn op, const void* src, void* dst) { - op(FlagOp::kCopyConstruct, src, dst, nullptr); -} -// Makes a copy of flag value pointed by obj. -inline void* Clone(FlagOpFn op, const void* obj) { - void* res = flags_internal::Alloc(op); - flags_internal::CopyConstruct(op, obj, res); - return res; -} -// Returns true if parsing of input text is successfull. -inline bool Parse(FlagOpFn op, absl::string_view text, void* dst, - std::string* error) { - return op(FlagOp::kParse, &text, dst, error) != nullptr; -} -// Returns string representing supplied value. -inline std::string Unparse(FlagOpFn op, const void* val) { - std::string result; - op(FlagOp::kUnparse, val, &result, nullptr); - return result; -} -// Returns size of flag value type. -inline size_t Sizeof(FlagOpFn op) { - // This sequence of casts reverses the sequence from - // `flags_internal::FlagOps()` - return static_cast(reinterpret_cast( - op(FlagOp::kSizeof, nullptr, nullptr, nullptr))); -} -// Returns fast type id coresponding to the value type. -inline FlagFastTypeId FastTypeId(FlagOpFn op) { - return reinterpret_cast( - op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr)); -} -// Returns fast type id coresponding to the value type. -inline const std::type_info* RuntimeTypeId(FlagOpFn op) { - return reinterpret_cast( - op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr)); -} -// Returns offset of the field value_ from the field impl_ inside of -// absl::Flag data. Given FlagImpl pointer p you can get the -// location of the corresponding value as: -// reinterpret_cast(p) + ValueOffset(). -inline ptrdiff_t ValueOffset(FlagOpFn op) { - // This sequence of casts reverses the sequence from - // `flags_internal::FlagOps()` - return static_cast(reinterpret_cast( - op(FlagOp::kValueOffset, nullptr, nullptr, nullptr))); -} - -// Returns an address of RTTI's typeid(T). -template -inline const std::type_info* GenRuntimeTypeId() { + template + ABSL_MUST_USE_RESULT T GetFlag(const absl::Flag& flag); + + template + void SetFlag(absl::Flag* flag, const T& v); + + template + void SetFlag(absl::Flag* flag, const V& v); + + template + const CommandLineFlag& GetFlagReflectionHandle(const absl::Flag& f); + + /////////////////////////////////////////////////////////////////////////////// + // Flag value type operations, eg., parsing, copying, etc. are provided + // by function specific to that type with a signature matching FlagOpFn. + + namespace flags_internal + { + + enum class FlagOp + { + kAlloc, + kDelete, + kCopy, + kCopyConstruct, + kSizeof, + kFastTypeId, + kRuntimeTypeId, + kParse, + kUnparse, + kValueOffset, + }; + using FlagOpFn = void* (*)(FlagOp, const void*, void*, void*); + + // Forward declaration for Flag value specific operations. + template + void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3); + + // Allocate aligned memory for a flag value. + inline void* Alloc(FlagOpFn op) + { + return op(FlagOp::kAlloc, nullptr, nullptr, nullptr); + } + // Deletes memory interpreting obj as flag value type pointer. + inline void Delete(FlagOpFn op, void* obj) + { + op(FlagOp::kDelete, nullptr, obj, nullptr); + } + // Copies src to dst interpreting as flag value type pointers. + inline void Copy(FlagOpFn op, const void* src, void* dst) + { + op(FlagOp::kCopy, src, dst, nullptr); + } + // Construct a copy of flag value in a location pointed by dst + // based on src - pointer to the flag's value. 
+ inline void CopyConstruct(FlagOpFn op, const void* src, void* dst) + { + op(FlagOp::kCopyConstruct, src, dst, nullptr); + } + // Makes a copy of flag value pointed by obj. + inline void* Clone(FlagOpFn op, const void* obj) + { + void* res = flags_internal::Alloc(op); + flags_internal::CopyConstruct(op, obj, res); + return res; + } + // Returns true if parsing of input text is successfull. + inline bool Parse(FlagOpFn op, absl::string_view text, void* dst, std::string* error) + { + return op(FlagOp::kParse, &text, dst, error) != nullptr; + } + // Returns string representing supplied value. + inline std::string Unparse(FlagOpFn op, const void* val) + { + std::string result; + op(FlagOp::kUnparse, val, &result, nullptr); + return result; + } + // Returns size of flag value type. + inline size_t Sizeof(FlagOpFn op) + { + // This sequence of casts reverses the sequence from + // `flags_internal::FlagOps()` + return static_cast(reinterpret_cast( + op(FlagOp::kSizeof, nullptr, nullptr, nullptr) + )); + } + // Returns fast type id coresponding to the value type. + inline FlagFastTypeId FastTypeId(FlagOpFn op) + { + return reinterpret_cast( + op(FlagOp::kFastTypeId, nullptr, nullptr, nullptr) + ); + } + // Returns fast type id coresponding to the value type. + inline const std::type_info* RuntimeTypeId(FlagOpFn op) + { + return reinterpret_cast( + op(FlagOp::kRuntimeTypeId, nullptr, nullptr, nullptr) + ); + } + // Returns offset of the field value_ from the field impl_ inside of + // absl::Flag data. Given FlagImpl pointer p you can get the + // location of the corresponding value as: + // reinterpret_cast(p) + ValueOffset(). + inline ptrdiff_t ValueOffset(FlagOpFn op) + { + // This sequence of casts reverses the sequence from + // `flags_internal::FlagOps()` + return static_cast(reinterpret_cast( + op(FlagOp::kValueOffset, nullptr, nullptr, nullptr) + )); + } + + // Returns an address of RTTI's typeid(T). + template + inline const std::type_info* GenRuntimeTypeId() + { #ifdef ABSL_INTERNAL_HAS_RTTI - return &typeid(T); + return &typeid(T); #else - return nullptr; + return nullptr; #endif -} - -/////////////////////////////////////////////////////////////////////////////// -// Flag help auxiliary structs. - -// This is help argument for absl::Flag encapsulating the string literal pointer -// or pointer to function generating it as well as enum descriminating two -// cases. -using HelpGenFunc = std::string (*)(); - -template -struct FixedCharArray { - char value[N]; - - template - static constexpr FixedCharArray FromLiteralString( - absl::string_view str, absl::index_sequence) { - return (void)str, FixedCharArray({{str[I]..., '\0'}}); - } -}; - -template -constexpr FixedCharArray HelpStringAsArray(int) { - return FixedCharArray::FromLiteralString( - Gen::Value(), absl::make_index_sequence{}); -} - -template -constexpr std::false_type HelpStringAsArray(char) { - return std::false_type{}; -} - -union FlagHelpMsg { - constexpr explicit FlagHelpMsg(const char* help_msg) : literal(help_msg) {} - constexpr explicit FlagHelpMsg(HelpGenFunc help_gen) : gen_func(help_gen) {} - - const char* literal; - HelpGenFunc gen_func; -}; - -enum class FlagHelpKind : uint8_t { kLiteral = 0, kGenFunc = 1 }; - -struct FlagHelpArg { - FlagHelpMsg source; - FlagHelpKind kind; -}; - -extern const char kStrippedFlagHelp[]; - -// These two HelpArg overloads allows us to select at compile time one of two -// way to pass Help argument to absl::Flag. 
We'll be passing -// AbslFlagHelpGenFor##name as Gen and integer 0 as a single argument to prefer -// first overload if possible. If help message is evaluatable on constexpr -// context We'll be able to make FixedCharArray out of it and we'll choose first -// overload. In this case the help message expression is immediately evaluated -// and is used to construct the absl::Flag. No additionl code is generated by -// ABSL_FLAG Otherwise SFINAE kicks in and first overload is dropped from the -// consideration, in which case the second overload will be used. The second -// overload does not attempt to evaluate the help message expression -// immediately and instead delays the evaluation by returing the function -// pointer (&T::NonConst) genering the help message when necessary. This is -// evaluatable in constexpr context, but the cost is an extra function being -// generated in the ABSL_FLAG code. -template -constexpr FlagHelpArg HelpArg(const FixedCharArray& value) { - return {FlagHelpMsg(value.value), FlagHelpKind::kLiteral}; -} - -template -constexpr FlagHelpArg HelpArg(std::false_type) { - return {FlagHelpMsg(&Gen::NonConst), FlagHelpKind::kGenFunc}; -} - -/////////////////////////////////////////////////////////////////////////////// -// Flag default value auxiliary structs. - -// Signature for the function generating the initial flag value (usually -// based on default value supplied in flag's definition) -using FlagDfltGenFunc = void (*)(void*); - -union FlagDefaultSrc { - constexpr explicit FlagDefaultSrc(FlagDfltGenFunc gen_func_arg) - : gen_func(gen_func_arg) {} - -#define ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE(T, name) \ - T name##_value; \ - constexpr explicit FlagDefaultSrc(T value) : name##_value(value) {} // NOLINT - ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE) + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag help auxiliary structs. + + // This is help argument for absl::Flag encapsulating the string literal pointer + // or pointer to function generating it as well as enum descriminating two + // cases. + using HelpGenFunc = std::string (*)(); + + template + struct FixedCharArray + { + char value[N]; + + template + static constexpr FixedCharArray FromLiteralString( + absl::string_view str, absl::index_sequence + ) + { + return (void)str, FixedCharArray({{str[I]..., '\0'}}); + } + }; + + template + constexpr FixedCharArray HelpStringAsArray(int) + { + return FixedCharArray::FromLiteralString( + Gen::Value(), absl::make_index_sequence{} + ); + } + + template + constexpr std::false_type HelpStringAsArray(char) + { + return std::false_type{}; + } + + union FlagHelpMsg + { + constexpr explicit FlagHelpMsg(const char* help_msg) : + literal(help_msg) + { + } + constexpr explicit FlagHelpMsg(HelpGenFunc help_gen) : + gen_func(help_gen) + { + } + + const char* literal; + HelpGenFunc gen_func; + }; + + enum class FlagHelpKind : uint8_t + { + kLiteral = 0, + kGenFunc = 1 + }; + + struct FlagHelpArg + { + FlagHelpMsg source; + FlagHelpKind kind; + }; + + extern const char kStrippedFlagHelp[]; + + // These two HelpArg overloads allows us to select at compile time one of two + // way to pass Help argument to absl::Flag. We'll be passing + // AbslFlagHelpGenFor##name as Gen and integer 0 as a single argument to prefer + // first overload if possible. If help message is evaluatable on constexpr + // context We'll be able to make FixedCharArray out of it and we'll choose first + // overload. 
In this case the help message expression is immediately evaluated + // and is used to construct the absl::Flag. No additionl code is generated by + // ABSL_FLAG Otherwise SFINAE kicks in and first overload is dropped from the + // consideration, in which case the second overload will be used. The second + // overload does not attempt to evaluate the help message expression + // immediately and instead delays the evaluation by returing the function + // pointer (&T::NonConst) genering the help message when necessary. This is + // evaluatable in constexpr context, but the cost is an extra function being + // generated in the ABSL_FLAG code. + template + constexpr FlagHelpArg HelpArg(const FixedCharArray& value) + { + return {FlagHelpMsg(value.value), FlagHelpKind::kLiteral}; + } + + template + constexpr FlagHelpArg HelpArg(std::false_type) + { + return {FlagHelpMsg(&Gen::NonConst), FlagHelpKind::kGenFunc}; + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag default value auxiliary structs. + + // Signature for the function generating the initial flag value (usually + // based on default value supplied in flag's definition) + using FlagDfltGenFunc = void (*)(void*); + + union FlagDefaultSrc + { + constexpr explicit FlagDefaultSrc(FlagDfltGenFunc gen_func_arg) : + gen_func(gen_func_arg) + { + } + +#define ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE(T, name) \ + T name##_value; \ + constexpr explicit FlagDefaultSrc(T value) : name##_value(value) \ + { \ + } // NOLINT + ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE) #undef ABSL_FLAGS_INTERNAL_DFLT_FOR_TYPE - void* dynamic_value; - FlagDfltGenFunc gen_func; -}; - -enum class FlagDefaultKind : uint8_t { - kDynamicValue = 0, - kGenFunc = 1, - kOneWord = 2 // for default values UP to one word in size -}; - -struct FlagDefaultArg { - FlagDefaultSrc source; - FlagDefaultKind kind; -}; - -// This struct and corresponding overload to InitDefaultValue are used to -// facilitate usage of {} as default value in ABSL_FLAG macro. -// TODO(rogeeff): Fix handling types with explicit constructors. -struct EmptyBraces {}; - -template -constexpr T InitDefaultValue(T t) { - return t; -} - -template -constexpr T InitDefaultValue(EmptyBraces) { - return T{}; -} - -template ::value, int>::type = - ((void)GenT{}, 0)> -constexpr FlagDefaultArg DefaultArg(int) { - return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; -} - -template -constexpr FlagDefaultArg DefaultArg(char) { - return {FlagDefaultSrc(&GenT::Gen), FlagDefaultKind::kGenFunc}; -} - -/////////////////////////////////////////////////////////////////////////////// -// Flag current value auxiliary structs. 
- -constexpr int64_t UninitializedFlagValue() { - return static_cast(0xababababababababll); -} - -template -using FlagUseValueAndInitBitStorage = std::integral_constant< - bool, absl::type_traits_internal::is_trivially_copyable::value && - std::is_default_constructible::value && (sizeof(T) < 8)>; - -template -using FlagUseOneWordStorage = std::integral_constant< - bool, absl::type_traits_internal::is_trivially_copyable::value && - (sizeof(T) <= 8)>; - -template -using FlagUseSequenceLockStorage = std::integral_constant< - bool, absl::type_traits_internal::is_trivially_copyable::value && - (sizeof(T) > 8)>; - -enum class FlagValueStorageKind : uint8_t { - kValueAndInitBit = 0, - kOneWordAtomic = 1, - kSequenceLocked = 2, - kAlignedBuffer = 3, -}; - -template -static constexpr FlagValueStorageKind StorageKind() { - return FlagUseValueAndInitBitStorage::value - ? FlagValueStorageKind::kValueAndInitBit - : FlagUseOneWordStorage::value - ? FlagValueStorageKind::kOneWordAtomic - : FlagUseSequenceLockStorage::value - ? FlagValueStorageKind::kSequenceLocked - : FlagValueStorageKind::kAlignedBuffer; -} - -struct FlagOneWordValue { - constexpr explicit FlagOneWordValue(int64_t v) : value(v) {} - std::atomic value; -}; - -template -struct alignas(8) FlagValueAndInitBit { - T value; - // Use an int instead of a bool to guarantee that a non-zero value has - // a bit set. - uint8_t init; -}; - -template ()> -struct FlagValue; - -template -struct FlagValue : FlagOneWordValue { - constexpr FlagValue() : FlagOneWordValue(0) {} - bool Get(const SequenceLock&, T& dst) const { - int64_t storage = value.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(storage == 0)) { - return false; - } - dst = absl::bit_cast>(storage).value; - return true; - } -}; - -template -struct FlagValue : FlagOneWordValue { - constexpr FlagValue() : FlagOneWordValue(UninitializedFlagValue()) {} - bool Get(const SequenceLock&, T& dst) const { - int64_t one_word_val = value.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) { - return false; - } - std::memcpy(&dst, static_cast(&one_word_val), sizeof(T)); - return true; - } -}; - -template -struct FlagValue { - bool Get(const SequenceLock& lock, T& dst) const { - return lock.TryRead(&dst, value_words, sizeof(T)); - } - - static constexpr int kNumWords = - flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t); - - alignas(T) alignas( - std::atomic) std::atomic value_words[kNumWords]; -}; - -template -struct FlagValue { - bool Get(const SequenceLock&, T&) const { return false; } - - alignas(T) char value[sizeof(T)]; -}; - -/////////////////////////////////////////////////////////////////////////////// -// Flag callback auxiliary structs. - -// Signature for the mutation callback used by watched Flags -// The callback is noexcept. -// TODO(rogeeff): add noexcept after C++17 support is added. -using FlagCallbackFunc = void (*)(); - -struct FlagCallback { - FlagCallbackFunc func; - absl::Mutex guard; // Guard for concurrent callback invocations. -}; - -/////////////////////////////////////////////////////////////////////////////// -// Flag implementation, which does not depend on flag value type. -// The class encapsulates the Flag's data and access to it. 
- -struct DynValueDeleter { - explicit DynValueDeleter(FlagOpFn op_arg = nullptr); - void operator()(void* ptr) const; - - FlagOpFn op; -}; - -class FlagState; - -class FlagImpl final : public CommandLineFlag { - public: - constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, - FlagHelpArg help, FlagValueStorageKind value_kind, - FlagDefaultArg default_arg) - : name_(name), - filename_(filename), - op_(op), - help_(help.source), - help_source_kind_(static_cast(help.kind)), - value_storage_kind_(static_cast(value_kind)), - def_kind_(static_cast(default_arg.kind)), - modified_(false), - on_command_line_(false), - callback_(nullptr), - default_value_(default_arg.source), - data_guard_{} {} - - // Constant access methods - int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); - bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); - void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - *value = ReadOneBool(); - } - template () == - FlagValueStorageKind::kOneWordAtomic, - int> = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - int64_t v = ReadOneWord(); - std::memcpy(value, static_cast(&v), sizeof(T)); - } - template () == - FlagValueStorageKind::kValueAndInitBit, - int>::type = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { - *value = absl::bit_cast>(ReadOneWord()).value; - } - - // Mutating access methods - void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); - - // Interfaces to operate on callbacks. - void SetCallback(const FlagCallbackFunc mutation_callback) - ABSL_LOCKS_EXCLUDED(*DataGuard()); - void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - - // Used in read/write operations to validate source/target has correct type. - // For example if flag is declared as absl::Flag FLAGS_foo, a call to - // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed - // int. To do that we pass the "assumed" type id (which is deduced from type - // int) as an argument `type_id`, which is in turn is validated against the - // type id stored in flag object by flag definition statement. - void AssertValidType(FlagFastTypeId type_id, - const std::type_info* (*gen_rtti)()) const; - - private: - template - friend class Flag; - friend class FlagState; - - // Ensures that `data_guard_` is initialized and returns it. - absl::Mutex* DataGuard() const - ABSL_LOCK_RETURNED(reinterpret_cast(data_guard_)); - // Returns heap allocated value of type T initialized with default value. - std::unique_ptr MakeInitValue() const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - // Flag initialization called via absl::call_once. - void Init(); - - // Offset value access methods. One per storage kind. These methods to not - // respect const correctness, so be very carefull using them. - - // This is a shared helper routine which encapsulates most of the magic. Since - // it is only used inside the three routines below, which are defined in - // flag.cc, we can define it in that file as well. - template - StorageT* OffsetValue() const; - // This is an accessor for a value stored in an aligned buffer storage - // used for non-trivially-copyable data types. - // Returns a mutable pointer to the start of a buffer. - void* AlignedBufferValue() const; - - // The same as above, but used for sequencelock-protected storage. - std::atomic* AtomicBufferValue() const; - - // This is an accessor for a value stored as one word atomic. 
Returns a - // mutable reference to an atomic value. - std::atomic& OneWordValue() const; - - // Attempts to parse supplied `value` string. If parsing is successful, - // returns new value. Otherwise returns nullptr. - std::unique_ptr TryParse(absl::string_view value, - std::string& err) const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - // Stores the flag value based on the pointer to the source. - void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - - // Copy the flag data, protected by `seq_lock_` into `dst`. - // - // REQUIRES: ValueStorageKind() == kSequenceLocked. - void ReadSequenceLockedData(void* dst) const - ABSL_LOCKS_EXCLUDED(*DataGuard()); - - FlagHelpKind HelpSourceKind() const { - return static_cast(help_source_kind_); - } - FlagValueStorageKind ValueStorageKind() const { - return static_cast(value_storage_kind_); - } - FlagDefaultKind DefaultKind() const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) { - return static_cast(def_kind_); - } - - // CommandLineFlag interface implementation - absl::string_view Name() const override; - std::string Filename() const override; - std::string Help() const override; - FlagFastTypeId TypeId() const override; - bool IsSpecifiedOnCommandLine() const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - bool ValidateInputValue(absl::string_view value) const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - void CheckDefaultValueParsingRoundtrip() const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - - int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); - - // Interfaces to save and restore flags to/from persistent state. - // Returns current flag state or nullptr if flag does not support - // saving and restoring a state. - std::unique_ptr SaveState() override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - - // Restores the flag state to the supplied state object. If there is - // nothing to restore returns false. Otherwise returns true. - bool RestoreState(const FlagState& flag_state) - ABSL_LOCKS_EXCLUDED(*DataGuard()); - - bool ParseFrom(absl::string_view value, FlagSettingMode set_mode, - ValueSource source, std::string& error) override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - - // Immutable flag's state. - - // Flags name passed to ABSL_FLAG as second arg. - const char* const name_; - // The file name where ABSL_FLAG resides. - const char* const filename_; - // Type-specific operations "vtable". - const FlagOpFn op_; - // Help message literal or function to generate it. - const FlagHelpMsg help_; - // Indicates if help message was supplied as literal or generator func. - const uint8_t help_source_kind_ : 1; - // Kind of storage this flag is using for the flag's value. - const uint8_t value_storage_kind_ : 2; - - uint8_t : 0; // The bytes containing the const bitfields must not be - // shared with bytes containing the mutable bitfields. - - // Mutable flag's state (guarded by `data_guard_`). - - // def_kind_ is not guard by DataGuard() since it is accessed in Init without - // locks. - uint8_t def_kind_ : 2; - // Has this flag's value been modified? - bool modified_ : 1 ABSL_GUARDED_BY(*DataGuard()); - // Has this flag been specified on command line. - bool on_command_line_ : 1 ABSL_GUARDED_BY(*DataGuard()); - - // Unique tag for absl::call_once call to initialize this flag. 
- absl::once_flag init_control_; - - // Sequence lock / mutation counter. - flags_internal::SequenceLock seq_lock_; - - // Optional flag's callback and absl::Mutex to guard the invocations. - FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard()); - // Either a pointer to the function generating the default value based on the - // value specified in ABSL_FLAG or pointer to the dynamically set default - // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish - // these two cases. - FlagDefaultSrc default_value_; - - // This is reserved space for an absl::Mutex to guard flag data. It will be - // initialized in FlagImpl::Init via placement new. - // We can't use "absl::Mutex data_guard_", since this class is not literal. - // We do not want to use "absl::Mutex* data_guard_", since this would require - // heap allocation during initialization, which is both slows program startup - // and can fail. Using reserved space + placement new allows us to avoid both - // problems. - alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)]; -}; - -/////////////////////////////////////////////////////////////////////////////// -// The Flag object parameterized by the flag's value type. This class implements -// flag reflection handle interface. - -template -class Flag { - public: - constexpr Flag(const char* name, const char* filename, FlagHelpArg help, - const FlagDefaultArg default_arg) - : impl_(name, filename, &FlagOps, help, - flags_internal::StorageKind(), default_arg), - value_() {} - - // CommandLineFlag interface - absl::string_view Name() const { return impl_.Name(); } - std::string Filename() const { return impl_.Filename(); } - std::string Help() const { return impl_.Help(); } - // Do not use. To be removed. - bool IsSpecifiedOnCommandLine() const { - return impl_.IsSpecifiedOnCommandLine(); - } - std::string DefaultValue() const { return impl_.DefaultValue(); } - std::string CurrentValue() const { return impl_.CurrentValue(); } - - private: - template - friend class FlagRegistrar; - friend class FlagImplPeer; - - T Get() const { - // See implementation notes in CommandLineFlag::Get(). - union U { - T value; - U() {} - ~U() { value.~T(); } - }; - U u; + void* dynamic_value; + FlagDfltGenFunc gen_func; + }; + + enum class FlagDefaultKind : uint8_t + { + kDynamicValue = 0, + kGenFunc = 1, + kOneWord = 2 // for default values UP to one word in size + }; + + struct FlagDefaultArg + { + FlagDefaultSrc source; + FlagDefaultKind kind; + }; + + // This struct and corresponding overload to InitDefaultValue are used to + // facilitate usage of {} as default value in ABSL_FLAG macro. + // TODO(rogeeff): Fix handling types with explicit constructors. + struct EmptyBraces + { + }; + + template + constexpr T InitDefaultValue(T t) + { + return t; + } + + template + constexpr T InitDefaultValue(EmptyBraces) + { + return T{}; + } + + template::value, int>::type = ((void)GenT{}, 0)> + constexpr FlagDefaultArg DefaultArg(int) + { + return {FlagDefaultSrc(GenT{}.value), FlagDefaultKind::kOneWord}; + } + + template + constexpr FlagDefaultArg DefaultArg(char) + { + return {FlagDefaultSrc(&GenT::Gen), FlagDefaultKind::kGenFunc}; + } + + /////////////////////////////////////////////////////////////////////////////// + // Flag current value auxiliary structs. 
+ + constexpr int64_t UninitializedFlagValue() + { + return static_cast(0xababababababababll); + } + + template + using FlagUseValueAndInitBitStorage = std::integral_constant< + bool, + absl::type_traits_internal::is_trivially_copyable::value && + std::is_default_constructible::value && (sizeof(T) < 8)>; + + template + using FlagUseOneWordStorage = std::integral_constant< + bool, + absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) <= 8)>; + + template + using FlagUseSequenceLockStorage = std::integral_constant< + bool, + absl::type_traits_internal::is_trivially_copyable::value && + (sizeof(T) > 8)>; + + enum class FlagValueStorageKind : uint8_t + { + kValueAndInitBit = 0, + kOneWordAtomic = 1, + kSequenceLocked = 2, + kAlignedBuffer = 3, + }; + + template + static constexpr FlagValueStorageKind StorageKind() + { + return FlagUseValueAndInitBitStorage::value ? FlagValueStorageKind::kValueAndInitBit : FlagUseOneWordStorage::value ? FlagValueStorageKind::kOneWordAtomic : + FlagUseSequenceLockStorage::value ? FlagValueStorageKind::kSequenceLocked : + FlagValueStorageKind::kAlignedBuffer; + } + + struct FlagOneWordValue + { + constexpr explicit FlagOneWordValue(int64_t v) : + value(v) + { + } + std::atomic value; + }; + + template + struct alignas(8) FlagValueAndInitBit + { + T value; + // Use an int instead of a bool to guarantee that a non-zero value has + // a bit set. + uint8_t init; + }; + + template()> + struct FlagValue; + + template + struct FlagValue : FlagOneWordValue + { + constexpr FlagValue() : + FlagOneWordValue(0) + { + } + bool Get(const SequenceLock&, T& dst) const + { + int64_t storage = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(storage == 0)) + { + return false; + } + dst = absl::bit_cast>(storage).value; + return true; + } + }; + + template + struct FlagValue : FlagOneWordValue + { + constexpr FlagValue() : + FlagOneWordValue(UninitializedFlagValue()) + { + } + bool Get(const SequenceLock&, T& dst) const + { + int64_t one_word_val = value.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(one_word_val == UninitializedFlagValue())) + { + return false; + } + std::memcpy(&dst, static_cast(&one_word_val), sizeof(T)); + return true; + } + }; + + template + struct FlagValue + { + bool Get(const SequenceLock& lock, T& dst) const + { + return lock.TryRead(&dst, value_words, sizeof(T)); + } + + static constexpr int kNumWords = + flags_internal::AlignUp(sizeof(T), sizeof(uint64_t)) / sizeof(uint64_t); + + alignas(T) alignas( + std::atomic + ) std::atomic value_words[kNumWords]; + }; + + template + struct FlagValue + { + bool Get(const SequenceLock&, T&) const + { + return false; + } + + alignas(T) char value[sizeof(T)]; + }; + + /////////////////////////////////////////////////////////////////////////////// + // Flag callback auxiliary structs. + + // Signature for the mutation callback used by watched Flags + // The callback is noexcept. + // TODO(rogeeff): add noexcept after C++17 support is added. + using FlagCallbackFunc = void (*)(); + + struct FlagCallback + { + FlagCallbackFunc func; + absl::Mutex guard; // Guard for concurrent callback invocations. + }; + + /////////////////////////////////////////////////////////////////////////////// + // Flag implementation, which does not depend on flag value type. + // The class encapsulates the Flag's data and access to it. 
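+   // [Editor's sketch, not part of the original header] The StorageKind()
+   // selector defined above picks a storage layout from the value type's size
+   // and triviality. The concrete kinds below follow from those traits on a
+   // typical 64-bit target, but they are internal details and not guaranteed:
+   //
+   //   namespace fi = absl::flags_internal;
+   //   static_assert(fi::StorageKind<bool>() == fi::FlagValueStorageKind::kValueAndInitBit,
+   //                 "trivially copyable, default constructible, smaller than a word");
+   //   static_assert(fi::StorageKind<double>() == fi::FlagValueStorageKind::kOneWordAtomic,
+   //                 "trivially copyable and exactly one word");
+   //   static_assert(fi::StorageKind<std::string>() == fi::FlagValueStorageKind::kAlignedBuffer,
+   //                 "not trivially copyable");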
+ + struct DynValueDeleter + { + explicit DynValueDeleter(FlagOpFn op_arg = nullptr); + void operator()(void* ptr) const; + + FlagOpFn op; + }; + + class FlagState; + + class FlagImpl final : public CommandLineFlag + { + public: + constexpr FlagImpl(const char* name, const char* filename, FlagOpFn op, FlagHelpArg help, FlagValueStorageKind value_kind, FlagDefaultArg default_arg) : + name_(name), + filename_(filename), + op_(op), + help_(help.source), + help_source_kind_(static_cast(help.kind)), + value_storage_kind_(static_cast(value_kind)), + def_kind_(static_cast(default_arg.kind)), + modified_(false), + on_command_line_(false), + callback_(nullptr), + default_value_(default_arg.source), + data_guard_{} + { + } + + // Constant access methods + int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + *value = ReadOneBool(); + } + template() == FlagValueStorageKind::kOneWordAtomic, int> = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + int64_t v = ReadOneWord(); + std::memcpy(value, static_cast(&v), sizeof(T)); + } + template() == FlagValueStorageKind::kValueAndInitBit, int>::type = 0> + void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) + { + *value = absl::bit_cast>(ReadOneWord()).value; + } + + // Mutating access methods + void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Interfaces to operate on callbacks. + void SetCallback(const FlagCallbackFunc mutation_callback) + ABSL_LOCKS_EXCLUDED(*DataGuard()); + void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Used in read/write operations to validate source/target has correct type. + // For example if flag is declared as absl::Flag FLAGS_foo, a call to + // absl::GetFlag(FLAGS_foo) validates that the type of FLAGS_foo is indeed + // int. To do that we pass the "assumed" type id (which is deduced from type + // int) as an argument `type_id`, which is in turn is validated against the + // type id stored in flag object by flag definition statement. + void AssertValidType(FlagFastTypeId type_id, const std::type_info* (*gen_rtti)()) const; + + private: + template + friend class Flag; + friend class FlagState; + + // Ensures that `data_guard_` is initialized and returns it. + absl::Mutex* DataGuard() const + ABSL_LOCK_RETURNED(reinterpret_cast(data_guard_)); + // Returns heap allocated value of type T initialized with default value. + std::unique_ptr MakeInitValue() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + // Flag initialization called via absl::call_once. + void Init(); + + // Offset value access methods. One per storage kind. These methods to not + // respect const correctness, so be very carefull using them. + + // This is a shared helper routine which encapsulates most of the magic. Since + // it is only used inside the three routines below, which are defined in + // flag.cc, we can define it in that file as well. + template + StorageT* OffsetValue() const; + // This is an accessor for a value stored in an aligned buffer storage + // used for non-trivially-copyable data types. + // Returns a mutable pointer to the start of a buffer. + void* AlignedBufferValue() const; + + // The same as above, but used for sequencelock-protected storage. 
+ std::atomic* AtomicBufferValue() const; + + // This is an accessor for a value stored as one word atomic. Returns a + // mutable reference to an atomic value. + std::atomic& OneWordValue() const; + + // Attempts to parse supplied `value` string. If parsing is successful, + // returns new value. Otherwise returns nullptr. + std::unique_ptr TryParse(absl::string_view value, std::string& err) const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + // Stores the flag value based on the pointer to the source. + void StoreValue(const void* src) ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Copy the flag data, protected by `seq_lock_` into `dst`. + // + // REQUIRES: ValueStorageKind() == kSequenceLocked. + void ReadSequenceLockedData(void* dst) const + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + FlagHelpKind HelpSourceKind() const + { + return static_cast(help_source_kind_); + } + FlagValueStorageKind ValueStorageKind() const + { + return static_cast(value_storage_kind_); + } + FlagDefaultKind DefaultKind() const + ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) + { + return static_cast(def_kind_); + } + + // CommandLineFlag interface implementation + absl::string_view Name() const override; + std::string Filename() const override; + std::string Help() const override; + FlagFastTypeId TypeId() const override; + bool IsSpecifiedOnCommandLine() const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + bool ValidateInputValue(absl::string_view value) const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + void CheckDefaultValueParsingRoundtrip() const override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + + // Interfaces to save and restore flags to/from persistent state. + // Returns current flag state or nullptr if flag does not support + // saving and restoring a state. + std::unique_ptr SaveState() override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Restores the flag state to the supplied state object. If there is + // nothing to restore returns false. Otherwise returns true. + bool RestoreState(const FlagState& flag_state) + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + bool ParseFrom(absl::string_view value, FlagSettingMode set_mode, ValueSource source, std::string& error) override + ABSL_LOCKS_EXCLUDED(*DataGuard()); + + // Immutable flag's state. + + // Flags name passed to ABSL_FLAG as second arg. + const char* const name_; + // The file name where ABSL_FLAG resides. + const char* const filename_; + // Type-specific operations "vtable". + const FlagOpFn op_; + // Help message literal or function to generate it. + const FlagHelpMsg help_; + // Indicates if help message was supplied as literal or generator func. + const uint8_t help_source_kind_ : 1; + // Kind of storage this flag is using for the flag's value. + const uint8_t value_storage_kind_ : 2; + + uint8_t : 0; // The bytes containing the const bitfields must not be + // shared with bytes containing the mutable bitfields. + + // Mutable flag's state (guarded by `data_guard_`). + + // def_kind_ is not guard by DataGuard() since it is accessed in Init without + // locks. + uint8_t def_kind_ : 2; + // Has this flag's value been modified? + bool modified_ : 1 ABSL_GUARDED_BY(*DataGuard()); + // Has this flag been specified on command line. 
+ bool on_command_line_ : 1 ABSL_GUARDED_BY(*DataGuard()); + + // Unique tag for absl::call_once call to initialize this flag. + absl::once_flag init_control_; + + // Sequence lock / mutation counter. + flags_internal::SequenceLock seq_lock_; + + // Optional flag's callback and absl::Mutex to guard the invocations. + FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard()); + // Either a pointer to the function generating the default value based on the + // value specified in ABSL_FLAG or pointer to the dynamically set default + // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish + // these two cases. + FlagDefaultSrc default_value_; + + // This is reserved space for an absl::Mutex to guard flag data. It will be + // initialized in FlagImpl::Init via placement new. + // We can't use "absl::Mutex data_guard_", since this class is not literal. + // We do not want to use "absl::Mutex* data_guard_", since this would require + // heap allocation during initialization, which is both slows program startup + // and can fail. Using reserved space + placement new allows us to avoid both + // problems. + alignas(absl::Mutex) mutable char data_guard_[sizeof(absl::Mutex)]; + }; + + /////////////////////////////////////////////////////////////////////////////// + // The Flag object parameterized by the flag's value type. This class implements + // flag reflection handle interface. + + template + class Flag + { + public: + constexpr Flag(const char* name, const char* filename, FlagHelpArg help, const FlagDefaultArg default_arg) : + impl_(name, filename, &FlagOps, help, flags_internal::StorageKind(), default_arg), + value_() + { + } + + // CommandLineFlag interface + absl::string_view Name() const + { + return impl_.Name(); + } + std::string Filename() const + { + return impl_.Filename(); + } + std::string Help() const + { + return impl_.Help(); + } + // Do not use. To be removed. + bool IsSpecifiedOnCommandLine() const + { + return impl_.IsSpecifiedOnCommandLine(); + } + std::string DefaultValue() const + { + return impl_.DefaultValue(); + } + std::string CurrentValue() const + { + return impl_.CurrentValue(); + } + + private: + template + friend class FlagRegistrar; + friend class FlagImplPeer; + + T Get() const + { + // See implementation notes in CommandLineFlag::Get(). + union U + { + T value; + U() + { + } + ~U() + { + value.~T(); + } + }; + U u; #if !defined(NDEBUG) - impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); + impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); #endif - if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) { - impl_.Read(&u.value); - } - return std::move(u.value); - } - void Set(const T& v) { - impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); - impl_.Write(&v); - } - - // Access to the reflection. - const CommandLineFlag& Reflect() const { return impl_; } - - // Flag's data - // The implementation depends on value_ field to be placed exactly after the - // impl_ field, so that impl_ can figure out the offset to the value and - // access it. 
- FlagImpl impl_; - FlagValue value_; -}; - -/////////////////////////////////////////////////////////////////////////////// -// Trampoline for friend access - -class FlagImplPeer { - public: - template - static T InvokeGet(const FlagType& flag) { - return flag.Get(); - } - template - static void InvokeSet(FlagType& flag, const T& v) { - flag.Set(v); - } - template - static const CommandLineFlag& InvokeReflect(const FlagType& f) { - return f.Reflect(); - } -}; - -/////////////////////////////////////////////////////////////////////////////// -// Implementation of Flag value specific operations routine. -template -void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) { - switch (op) { - case FlagOp::kAlloc: { - std::allocator alloc; - return std::allocator_traits>::allocate(alloc, 1); - } - case FlagOp::kDelete: { - T* p = static_cast(v2); - p->~T(); - std::allocator alloc; - std::allocator_traits>::deallocate(alloc, p, 1); - return nullptr; - } - case FlagOp::kCopy: - *static_cast(v2) = *static_cast(v1); - return nullptr; - case FlagOp::kCopyConstruct: - new (v2) T(*static_cast(v1)); - return nullptr; - case FlagOp::kSizeof: - return reinterpret_cast(static_cast(sizeof(T))); - case FlagOp::kFastTypeId: - return const_cast(base_internal::FastTypeId()); - case FlagOp::kRuntimeTypeId: - return const_cast(GenRuntimeTypeId()); - case FlagOp::kParse: { - // Initialize the temporary instance of type T based on current value in - // destination (which is going to be flag's default value). - T temp(*static_cast(v2)); - if (!absl::ParseFlag(*static_cast(v1), &temp, - static_cast(v3))) { - return nullptr; - } - *static_cast(v2) = std::move(temp); - return v2; - } - case FlagOp::kUnparse: - *static_cast(v2) = - absl::UnparseFlag(*static_cast(v1)); - return nullptr; - case FlagOp::kValueOffset: { - // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the - // offset of the data. - size_t round_to = alignof(FlagValue); - size_t offset = - (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; - return reinterpret_cast(offset); - } - } - return nullptr; -} - -/////////////////////////////////////////////////////////////////////////////// -// This class facilitates Flag object registration and tail expression-based -// flag definition, for example: -// ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher); -struct FlagRegistrarEmpty {}; -template -class FlagRegistrar { - public: - explicit FlagRegistrar(Flag& flag, const char* filename) : flag_(flag) { - if (do_register) - flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); - } - - FlagRegistrar OnUpdate(FlagCallbackFunc cb) && { - flag_.impl_.SetCallback(cb); - return *this; - } - - // Make the registrar "die" gracefully as an empty struct on a line where - // registration happens. Registrar objects are intended to live only as - // temporary. - operator FlagRegistrarEmpty() const { return {}; } // NOLINT - - private: - Flag& flag_; // Flag being registered (not owned). -}; - -} // namespace flags_internal -ABSL_NAMESPACE_END + if (ABSL_PREDICT_FALSE(!value_.Get(impl_.seq_lock_, u.value))) + { + impl_.Read(&u.value); + } + return std::move(u.value); + } + void Set(const T& v) + { + impl_.AssertValidType(base_internal::FastTypeId(), &GenRuntimeTypeId); + impl_.Write(&v); + } + + // Access to the reflection. 
+ const CommandLineFlag& Reflect() const + { + return impl_; + } + + // Flag's data + // The implementation depends on value_ field to be placed exactly after the + // impl_ field, so that impl_ can figure out the offset to the value and + // access it. + FlagImpl impl_; + FlagValue value_; + }; + + /////////////////////////////////////////////////////////////////////////////// + // Trampoline for friend access + + class FlagImplPeer + { + public: + template + static T InvokeGet(const FlagType& flag) + { + return flag.Get(); + } + template + static void InvokeSet(FlagType& flag, const T& v) + { + flag.Set(v); + } + template + static const CommandLineFlag& InvokeReflect(const FlagType& f) + { + return f.Reflect(); + } + }; + + /////////////////////////////////////////////////////////////////////////////// + // Implementation of Flag value specific operations routine. + template + void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) + { + switch (op) + { + case FlagOp::kAlloc: + { + std::allocator alloc; + return std::allocator_traits>::allocate(alloc, 1); + } + case FlagOp::kDelete: + { + T* p = static_cast(v2); + p->~T(); + std::allocator alloc; + std::allocator_traits>::deallocate(alloc, p, 1); + return nullptr; + } + case FlagOp::kCopy: + *static_cast(v2) = *static_cast(v1); + return nullptr; + case FlagOp::kCopyConstruct: + new (v2) T(*static_cast(v1)); + return nullptr; + case FlagOp::kSizeof: + return reinterpret_cast(static_cast(sizeof(T))); + case FlagOp::kFastTypeId: + return const_cast(base_internal::FastTypeId()); + case FlagOp::kRuntimeTypeId: + return const_cast(GenRuntimeTypeId()); + case FlagOp::kParse: + { + // Initialize the temporary instance of type T based on current value in + // destination (which is going to be flag's default value). + T temp(*static_cast(v2)); + if (!absl::ParseFlag(*static_cast(v1), &temp, static_cast(v3))) + { + return nullptr; + } + *static_cast(v2) = std::move(temp); + return v2; + } + case FlagOp::kUnparse: + *static_cast(v2) = + absl::UnparseFlag(*static_cast(v1)); + return nullptr; + case FlagOp::kValueOffset: + { + // Round sizeof(FlagImp) to a multiple of alignof(FlagValue) to get the + // offset of the data. + size_t round_to = alignof(FlagValue); + size_t offset = + (sizeof(FlagImpl) + round_to - 1) / round_to * round_to; + return reinterpret_cast(offset); + } + } + return nullptr; + } + + /////////////////////////////////////////////////////////////////////////////// + // This class facilitates Flag object registration and tail expression-based + // flag definition, for example: + // ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher); + struct FlagRegistrarEmpty + { + }; + template + class FlagRegistrar + { + public: + explicit FlagRegistrar(Flag& flag, const char* filename) : + flag_(flag) + { + if (do_register) + flags_internal::RegisterCommandLineFlag(flag_.impl_, filename); + } + + FlagRegistrar OnUpdate(FlagCallbackFunc cb) && + { + flag_.impl_.SetCallback(cb); + return *this; + } + + // Make the registrar "die" gracefully as an empty struct on a line where + // registration happens. Registrar objects are intended to live only as + // temporary. + operator FlagRegistrarEmpty() const + { + return {}; + } // NOLINT + + private: + Flag& flag_; // Flag being registered (not owned). 
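The FlagRegistrar comment above gives the intended tail-expression usage; here is a short sketch of it (NotifyFooWatcher is a hypothetical callback, and FlagCallbackFunc is a plain `void (*)()` as declared earlier in this file):

#include "absl/flags/flag.h"

// Hypothetical watcher invoked whenever --foo is mutated through the flags API.
void NotifyFooWatcher() { /* react to the new value of foo */ }

// Tail-expression registration: the registrar temporary produced by the flag
// definition is consumed by OnUpdate() and then decays to FlagRegistrarEmpty
// at the end of the statement.
ABSL_FLAG(int, foo, 42, "Foo help").OnUpdate(NotifyFooWatcher);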
+ }; + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_FLAG_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/parse.h b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h index de706c8..5340dbe 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/parse.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/parse.h @@ -28,32 +28,40 @@ ABSL_DECLARE_FLAG(std::vector, fromenv); ABSL_DECLARE_FLAG(std::vector, tryfromenv); ABSL_DECLARE_FLAG(std::vector, undefok); -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -enum class ArgvListAction { kRemoveParsedArgs, kKeepParsedArgs }; -enum class UsageFlagsAction { kHandleUsage, kIgnoreUsage }; -enum class OnUndefinedFlag { - kIgnoreUndefined, - kReportUndefined, - kAbortIfUndefined -}; - -std::vector ParseCommandLineImpl(int argc, char* argv[], - ArgvListAction arg_list_act, - UsageFlagsAction usage_flag_act, - OnUndefinedFlag on_undef_flag); - -// -------------------------------------------------------------------- -// Inspect original command line - -// Returns true if flag with specified name was either present on the original -// command line or specified in flag file present on the original command line. -bool WasPresentOnCommandLine(absl::string_view flag_name); - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + enum class ArgvListAction + { + kRemoveParsedArgs, + kKeepParsedArgs + }; + enum class UsageFlagsAction + { + kHandleUsage, + kIgnoreUsage + }; + enum class OnUndefinedFlag + { + kIgnoreUndefined, + kReportUndefined, + kAbortIfUndefined + }; + + std::vector ParseCommandLineImpl(int argc, char* argv[], ArgvListAction arg_list_act, UsageFlagsAction usage_flag_act, OnUndefinedFlag on_undef_flag); + + // -------------------------------------------------------------------- + // Inspect original command line + + // Returns true if flag with specified name was either present on the original + // command line or specified in flag file present on the original command line. + bool WasPresentOnCommandLine(absl::string_view flag_name); + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h index a6594d3..c80eb59 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/path_util.h @@ -19,44 +19,44 @@ #include "absl/base/config.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// A portable interface that returns the basename of the filename passed as an -// argument. It is similar to basename(3) -// . -// For example: -// flags_internal::Basename("a/b/prog/file.cc") -// returns "file.cc" -// flags_internal::Basename("file.cc") -// returns "file.cc" -inline absl::string_view Basename(absl::string_view filename) { - auto last_slash_pos = filename.find_last_of("/\\"); - - return last_slash_pos == absl::string_view::npos - ? filename - : filename.substr(last_slash_pos + 1); -} - -// A portable interface that returns the directory name of the filename -// passed as an argument, including the trailing slash. -// Returns the empty string if a slash is not found in the input file name. 
-// For example: -// flags_internal::Package("a/b/prog/file.cc") -// returns "a/b/prog/" -// flags_internal::Package("file.cc") -// returns "" -inline absl::string_view Package(absl::string_view filename) { - auto last_slash_pos = filename.find_last_of("/\\"); - - return last_slash_pos == absl::string_view::npos - ? absl::string_view() - : filename.substr(0, last_slash_pos + 1); -} - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // A portable interface that returns the basename of the filename passed as an + // argument. It is similar to basename(3) + // . + // For example: + // flags_internal::Basename("a/b/prog/file.cc") + // returns "file.cc" + // flags_internal::Basename("file.cc") + // returns "file.cc" + inline absl::string_view Basename(absl::string_view filename) + { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos ? filename : filename.substr(last_slash_pos + 1); + } + + // A portable interface that returns the directory name of the filename + // passed as an argument, including the trailing slash. + // Returns the empty string if a slash is not found in the input file name. + // For example: + // flags_internal::Package("a/b/prog/file.cc") + // returns "a/b/prog/" + // flags_internal::Package("file.cc") + // returns "" + inline absl::string_view Package(absl::string_view filename) + { + auto last_slash_pos = filename.find_last_of("/\\"); + + return last_slash_pos == absl::string_view::npos ? absl::string_view() : filename.substr(0, last_slash_pos + 1); + } + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_PATH_UTIL_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h index c64435c..dcbd4c1 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/private_handle_accessor.h @@ -24,38 +24,38 @@ #include "absl/flags/internal/commandlineflag.h" #include "absl/strings/string_view.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// This class serves as a trampoline to access private methods of -// CommandLineFlag. This class is intended for use exclusively internally inside -// of the Abseil Flags implementation. -class PrivateHandleAccessor { - public: - // Access to CommandLineFlag::TypeId. - static FlagFastTypeId TypeId(const CommandLineFlag& flag); - - // Access to CommandLineFlag::SaveState. - static std::unique_ptr SaveState(CommandLineFlag& flag); - - // Access to CommandLineFlag::IsSpecifiedOnCommandLine. - static bool IsSpecifiedOnCommandLine(const CommandLineFlag& flag); - - // Access to CommandLineFlag::ValidateInputValue. - static bool ValidateInputValue(const CommandLineFlag& flag, - absl::string_view value); - - // Access to CommandLineFlag::CheckDefaultValueParsingRoundtrip. - static void CheckDefaultValueParsingRoundtrip(const CommandLineFlag& flag); - - static bool ParseFrom(CommandLineFlag& flag, absl::string_view value, - flags_internal::FlagSettingMode set_mode, - flags_internal::ValueSource source, std::string& error); -}; - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // This class serves as a trampoline to access private methods of + // CommandLineFlag. 
This class is intended for use exclusively internally inside + // of the Abseil Flags implementation. + class PrivateHandleAccessor + { + public: + // Access to CommandLineFlag::TypeId. + static FlagFastTypeId TypeId(const CommandLineFlag& flag); + + // Access to CommandLineFlag::SaveState. + static std::unique_ptr SaveState(CommandLineFlag& flag); + + // Access to CommandLineFlag::IsSpecifiedOnCommandLine. + static bool IsSpecifiedOnCommandLine(const CommandLineFlag& flag); + + // Access to CommandLineFlag::ValidateInputValue. + static bool ValidateInputValue(const CommandLineFlag& flag, absl::string_view value); + + // Access to CommandLineFlag::CheckDefaultValueParsingRoundtrip. + static void CheckDefaultValueParsingRoundtrip(const CommandLineFlag& flag); + + static bool ParseFrom(CommandLineFlag& flag, absl::string_view value, flags_internal::FlagSettingMode set_mode, flags_internal::ValueSource source, std::string& error); + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_PRIVATE_HANDLE_ACCESSOR_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h index b99b94f..6e983d1 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/program_name.h @@ -24,27 +24,29 @@ // -------------------------------------------------------------------- // Program name -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// Returns program invocation name or "UNKNOWN" if `SetProgramInvocationName()` -// is never called. At the moment this is always set to argv[0] as part of -// library initialization. -std::string ProgramInvocationName(); - -// Returns base name for program invocation name. For example, if -// ProgramInvocationName() == "a/b/mybinary" -// then -// ShortProgramInvocationName() == "mybinary" -std::string ShortProgramInvocationName(); - -// Sets program invocation name to a new value. Should only be called once -// during program initialization, before any threads are spawned. -void SetProgramInvocationName(absl::string_view prog_name_str); - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Returns program invocation name or "UNKNOWN" if `SetProgramInvocationName()` + // is never called. At the moment this is always set to argv[0] as part of + // library initialization. + std::string ProgramInvocationName(); + + // Returns base name for program invocation name. For example, if + // ProgramInvocationName() == "a/b/mybinary" + // then + // ShortProgramInvocationName() == "mybinary" + std::string ShortProgramInvocationName(); + + // Sets program invocation name to a new value. Should only be called once + // during program initialization, before any threads are spawned. + void SetProgramInvocationName(absl::string_view prog_name_str); + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_PROGRAM_NAME_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/registry.h b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h index 4b68c85..72d9f74 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/registry.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/registry.h @@ -26,72 +26,76 @@ // -------------------------------------------------------------------- // Global flags registry API. 
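Since Basename() and Package() (from path_util.h earlier in this patch) are documented purely by example, here is a self-contained check of that contract, using std::string in place of absl::string_view to keep the sketch dependency-free:

#include <cassert>
#include <string>

// Mirrors flags_internal::Basename(): everything after the last '/' or '\\'.
std::string Basename(const std::string& filename) {
  auto pos = filename.find_last_of("/\\");
  return pos == std::string::npos ? filename : filename.substr(pos + 1);
}

// Mirrors flags_internal::Package(): the directory part with its trailing
// slash, or an empty string when there is no directory separator at all.
std::string Package(const std::string& filename) {
  auto pos = filename.find_last_of("/\\");
  return pos == std::string::npos ? std::string() : filename.substr(0, pos + 1);
}

int main() {
  assert(Basename("a/b/prog/file.cc") == "file.cc");
  assert(Basename("file.cc") == "file.cc");
  assert(Package("a/b/prog/file.cc") == "a/b/prog/");
  assert(Package("file.cc").empty());
  return 0;
}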
-namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// Executes specified visitor for each non-retired flag in the registry. While -// callback are executed, the registry is locked and can't be changed. -void ForEachFlag(std::function visitor); - -//----------------------------------------------------------------------------- - -bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename); - -void FinalizeRegistry(); - -//----------------------------------------------------------------------------- -// Retired registrations: -// -// Retired flag registrations are treated specially. A 'retired' flag is -// provided only for compatibility with automated invocations that still -// name it. A 'retired' flag: -// - is not bound to a C++ FLAGS_ reference. -// - has a type and a value, but that value is intentionally inaccessible. -// - does not appear in --help messages. -// - is fully supported by _all_ flag parsing routines. -// - consumes args normally, and complains about type mismatches in its -// argument. -// - emits a complaint but does not die (e.g. LOG(ERROR)) if it is -// accessed by name through the flags API for parsing or otherwise. -// -// The registrations for a flag happen in an unspecified order as the -// initializers for the namespace-scope objects of a program are run. -// Any number of weak registrations for a flag can weakly define the flag. -// One non-weak registration will upgrade the flag from weak to non-weak. -// Further weak registrations of a non-weak flag are ignored. -// -// This mechanism is designed to support moving dead flags into a -// 'graveyard' library. An example migration: -// -// 0: Remove references to this FLAGS_flagname in the C++ codebase. -// 1: Register as 'retired' in old_lib. -// 2: Make old_lib depend on graveyard. -// 3: Add a redundant 'retired' registration to graveyard. -// 4: Remove the old_lib 'retired' registration. -// 5: Eventually delete the graveyard registration entirely. -// - -// Retire flag with name "name" and type indicated by ops. -void Retire(const char* name, FlagFastTypeId type_id, char* buf); - -constexpr size_t kRetiredFlagObjSize = 3 * sizeof(void*); -constexpr size_t kRetiredFlagObjAlignment = alignof(void*); - -// Registered a retired flag with name 'flag_name' and type 'T'. -template -class RetiredFlag { - public: - void Retire(const char* flag_name) { - flags_internal::Retire(flag_name, base_internal::FastTypeId(), buf_); - } - - private: - alignas(kRetiredFlagObjAlignment) char buf_[kRetiredFlagObjSize]; -}; - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Executes specified visitor for each non-retired flag in the registry. While + // callback are executed, the registry is locked and can't be changed. + void ForEachFlag(std::function visitor); + + //----------------------------------------------------------------------------- + + bool RegisterCommandLineFlag(CommandLineFlag&, const char* filename); + + void FinalizeRegistry(); + + //----------------------------------------------------------------------------- + // Retired registrations: + // + // Retired flag registrations are treated specially. A 'retired' flag is + // provided only for compatibility with automated invocations that still + // name it. A 'retired' flag: + // - is not bound to a C++ FLAGS_ reference. + // - has a type and a value, but that value is intentionally inaccessible. + // - does not appear in --help messages. 
+ // - is fully supported by _all_ flag parsing routines. + // - consumes args normally, and complains about type mismatches in its + // argument. + // - emits a complaint but does not die (e.g. LOG(ERROR)) if it is + // accessed by name through the flags API for parsing or otherwise. + // + // The registrations for a flag happen in an unspecified order as the + // initializers for the namespace-scope objects of a program are run. + // Any number of weak registrations for a flag can weakly define the flag. + // One non-weak registration will upgrade the flag from weak to non-weak. + // Further weak registrations of a non-weak flag are ignored. + // + // This mechanism is designed to support moving dead flags into a + // 'graveyard' library. An example migration: + // + // 0: Remove references to this FLAGS_flagname in the C++ codebase. + // 1: Register as 'retired' in old_lib. + // 2: Make old_lib depend on graveyard. + // 3: Add a redundant 'retired' registration to graveyard. + // 4: Remove the old_lib 'retired' registration. + // 5: Eventually delete the graveyard registration entirely. + // + + // Retire flag with name "name" and type indicated by ops. + void Retire(const char* name, FlagFastTypeId type_id, char* buf); + + constexpr size_t kRetiredFlagObjSize = 3 * sizeof(void*); + constexpr size_t kRetiredFlagObjAlignment = alignof(void*); + + // Registered a retired flag with name 'flag_name' and type 'T'. + template + class RetiredFlag + { + public: + void Retire(const char* flag_name) + { + flags_internal::Retire(flag_name, base_internal::FastTypeId(), buf_); + } + + private: + alignas(kRetiredFlagObjAlignment) char buf_[kRetiredFlagObjSize]; + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_REGISTRY_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h index 36318ab..893258c 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/sequence_lock.h @@ -25,163 +25,180 @@ #include "absl/base/optimization.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// Align 'x' up to the nearest 'align' bytes. -inline constexpr size_t AlignUp(size_t x, size_t align) { - return align * ((x + align - 1) / align); -} - -// A SequenceLock implements lock-free reads. A sequence counter is incremented -// before and after each write, and readers access the counter before and after -// accessing the protected data. If the counter is verified to not change during -// the access, and the sequence counter value was even, then the reader knows -// that the read was race-free and valid. Otherwise, the reader must fall back -// to a Mutex-based code path. -// -// This particular SequenceLock starts in an "uninitialized" state in which -// TryRead() returns false. It must be enabled by calling MarkInitialized(). -// This serves as a marker that the associated flag value has not yet been -// initialized and a slow path needs to be taken. -// -// The memory reads and writes protected by this lock must use the provided -// `TryRead()` and `Write()` functions. These functions behave similarly to -// `memcpy()`, with one oddity: the protected data must be an array of -// `std::atomic`. This is to comply with the C++ standard, which -// considers data races on non-atomic objects to be undefined behavior. See "Can -// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J. 
-// Boehm for more details. -// -// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf -class SequenceLock { - public: - constexpr SequenceLock() : lock_(kUninitialized) {} - - // Mark that this lock is ready for use. - void MarkInitialized() { - assert(lock_.load(std::memory_order_relaxed) == kUninitialized); - lock_.store(0, std::memory_order_release); - } - - // Copy "size" bytes of data from "src" to "dst", protected as a read-side - // critical section of the sequence lock. - // - // Unlike traditional sequence lock implementations which loop until getting a - // clean read, this implementation returns false in the case of concurrent - // calls to `Write`. In such a case, the caller should fall back to a - // locking-based slow path. - // - // Returns false if the sequence lock was not yet marked as initialized. - // - // NOTE: If this returns false, "dst" may be overwritten with undefined - // (potentially uninitialized) data. - bool TryRead(void* dst, const std::atomic* src, size_t size) const { - // Acquire barrier ensures that no loads done by f() are reordered - // above the first load of the sequence counter. - int64_t seq_before = lock_.load(std::memory_order_acquire); - if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false; - RelaxedCopyFromAtomic(dst, src, size); - // Another acquire fence ensures that the load of 'lock_' below is - // strictly ordered after the RelaxedCopyToAtomic call above. - std::atomic_thread_fence(std::memory_order_acquire); - int64_t seq_after = lock_.load(std::memory_order_relaxed); - return ABSL_PREDICT_TRUE(seq_before == seq_after); - } - - // Copy "size" bytes from "src" to "dst" as a write-side critical section - // of the sequence lock. Any concurrent readers will be forced to retry - // until they get a read that does not conflict with this write. - // - // This call must be externally synchronized against other calls to Write, - // but may proceed concurrently with reads. - void Write(std::atomic* dst, const void* src, size_t size) { - // We can use relaxed instructions to increment the counter since we - // are extenally synchronized. The std::atomic_thread_fence below - // ensures that the counter updates don't get interleaved with the - // copy to the data. - int64_t orig_seq = lock_.load(std::memory_order_relaxed); - assert((orig_seq & 1) == 0); // Must be initially unlocked. - lock_.store(orig_seq + 1, std::memory_order_relaxed); - - // We put a release fence between update to lock_ and writes to shared data. - // Thus all stores to shared data are effectively release operations and - // update to lock_ above cannot be re-ordered past any of them. Note that - // this barrier is not for the fetch_add above. A release barrier for the - // fetch_add would be before it, not after. - std::atomic_thread_fence(std::memory_order_release); - RelaxedCopyToAtomic(dst, src, size); - // "Release" semantics ensure that none of the writes done by - // RelaxedCopyToAtomic() can be reordered after the following modification. - lock_.store(orig_seq + 2, std::memory_order_release); - } - - // Return the number of times that Write() has been called. - // - // REQUIRES: This must be externally synchronized against concurrent calls to - // `Write()` or `IncrementModificationCount()`. - // REQUIRES: `MarkInitialized()` must have been previously called. 
- int64_t ModificationCount() const { - int64_t val = lock_.load(std::memory_order_relaxed); - assert(val != kUninitialized && (val & 1) == 0); - return val / 2; - } - - // REQUIRES: This must be externally synchronized against concurrent calls to - // `Write()` or `ModificationCount()`. - // REQUIRES: `MarkInitialized()` must have been previously called. - void IncrementModificationCount() { - int64_t val = lock_.load(std::memory_order_relaxed); - assert(val != kUninitialized); - lock_.store(val + 2, std::memory_order_relaxed); - } - - private: - // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed - // atomics. - static void RelaxedCopyFromAtomic(void* dst, const std::atomic* src, - size_t size) { - char* dst_byte = static_cast(dst); - while (size >= sizeof(uint64_t)) { - uint64_t word = src->load(std::memory_order_relaxed); - std::memcpy(dst_byte, &word, sizeof(word)); - dst_byte += sizeof(word); - src++; - size -= sizeof(word); - } - if (size > 0) { - uint64_t word = src->load(std::memory_order_relaxed); - std::memcpy(dst_byte, &word, size); - } - } - - // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed - // atomics. - static void RelaxedCopyToAtomic(std::atomic* dst, const void* src, - size_t size) { - const char* src_byte = static_cast(src); - while (size >= sizeof(uint64_t)) { - uint64_t word; - std::memcpy(&word, src_byte, sizeof(word)); - dst->store(word, std::memory_order_relaxed); - src_byte += sizeof(word); - dst++; - size -= sizeof(word); - } - if (size > 0) { - uint64_t word = 0; - std::memcpy(&word, src_byte, size); - dst->store(word, std::memory_order_relaxed); - } - } - - static constexpr int64_t kUninitialized = -1; - std::atomic lock_; -}; - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // Align 'x' up to the nearest 'align' bytes. + inline constexpr size_t AlignUp(size_t x, size_t align) + { + return align * ((x + align - 1) / align); + } + + // A SequenceLock implements lock-free reads. A sequence counter is incremented + // before and after each write, and readers access the counter before and after + // accessing the protected data. If the counter is verified to not change during + // the access, and the sequence counter value was even, then the reader knows + // that the read was race-free and valid. Otherwise, the reader must fall back + // to a Mutex-based code path. + // + // This particular SequenceLock starts in an "uninitialized" state in which + // TryRead() returns false. It must be enabled by calling MarkInitialized(). + // This serves as a marker that the associated flag value has not yet been + // initialized and a slow path needs to be taken. + // + // The memory reads and writes protected by this lock must use the provided + // `TryRead()` and `Write()` functions. These functions behave similarly to + // `memcpy()`, with one oddity: the protected data must be an array of + // `std::atomic`. This is to comply with the C++ standard, which + // considers data races on non-atomic objects to be undefined behavior. See "Can + // Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J. + // Boehm for more details. + // + // [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf + class SequenceLock + { + public: + constexpr SequenceLock() : + lock_(kUninitialized) + { + } + + // Mark that this lock is ready for use. 
+ void MarkInitialized() + { + assert(lock_.load(std::memory_order_relaxed) == kUninitialized); + lock_.store(0, std::memory_order_release); + } + + // Copy "size" bytes of data from "src" to "dst", protected as a read-side + // critical section of the sequence lock. + // + // Unlike traditional sequence lock implementations which loop until getting a + // clean read, this implementation returns false in the case of concurrent + // calls to `Write`. In such a case, the caller should fall back to a + // locking-based slow path. + // + // Returns false if the sequence lock was not yet marked as initialized. + // + // NOTE: If this returns false, "dst" may be overwritten with undefined + // (potentially uninitialized) data. + bool TryRead(void* dst, const std::atomic* src, size_t size) const + { + // Acquire barrier ensures that no loads done by f() are reordered + // above the first load of the sequence counter. + int64_t seq_before = lock_.load(std::memory_order_acquire); + if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) + return false; + RelaxedCopyFromAtomic(dst, src, size); + // Another acquire fence ensures that the load of 'lock_' below is + // strictly ordered after the RelaxedCopyToAtomic call above. + std::atomic_thread_fence(std::memory_order_acquire); + int64_t seq_after = lock_.load(std::memory_order_relaxed); + return ABSL_PREDICT_TRUE(seq_before == seq_after); + } + + // Copy "size" bytes from "src" to "dst" as a write-side critical section + // of the sequence lock. Any concurrent readers will be forced to retry + // until they get a read that does not conflict with this write. + // + // This call must be externally synchronized against other calls to Write, + // but may proceed concurrently with reads. + void Write(std::atomic* dst, const void* src, size_t size) + { + // We can use relaxed instructions to increment the counter since we + // are extenally synchronized. The std::atomic_thread_fence below + // ensures that the counter updates don't get interleaved with the + // copy to the data. + int64_t orig_seq = lock_.load(std::memory_order_relaxed); + assert((orig_seq & 1) == 0); // Must be initially unlocked. + lock_.store(orig_seq + 1, std::memory_order_relaxed); + + // We put a release fence between update to lock_ and writes to shared data. + // Thus all stores to shared data are effectively release operations and + // update to lock_ above cannot be re-ordered past any of them. Note that + // this barrier is not for the fetch_add above. A release barrier for the + // fetch_add would be before it, not after. + std::atomic_thread_fence(std::memory_order_release); + RelaxedCopyToAtomic(dst, src, size); + // "Release" semantics ensure that none of the writes done by + // RelaxedCopyToAtomic() can be reordered after the following modification. + lock_.store(orig_seq + 2, std::memory_order_release); + } + + // Return the number of times that Write() has been called. + // + // REQUIRES: This must be externally synchronized against concurrent calls to + // `Write()` or `IncrementModificationCount()`. + // REQUIRES: `MarkInitialized()` must have been previously called. + int64_t ModificationCount() const + { + int64_t val = lock_.load(std::memory_order_relaxed); + assert(val != kUninitialized && (val & 1) == 0); + return val / 2; + } + + // REQUIRES: This must be externally synchronized against concurrent calls to + // `Write()` or `ModificationCount()`. + // REQUIRES: `MarkInitialized()` must have been previously called. 
+ void IncrementModificationCount() + { + int64_t val = lock_.load(std::memory_order_relaxed); + assert(val != kUninitialized); + lock_.store(val + 2, std::memory_order_relaxed); + } + + private: + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. + static void RelaxedCopyFromAtomic(void* dst, const std::atomic* src, size_t size) + { + char* dst_byte = static_cast(dst); + while (size >= sizeof(uint64_t)) + { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, sizeof(word)); + dst_byte += sizeof(word); + src++; + size -= sizeof(word); + } + if (size > 0) + { + uint64_t word = src->load(std::memory_order_relaxed); + std::memcpy(dst_byte, &word, size); + } + } + + // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed + // atomics. + static void RelaxedCopyToAtomic(std::atomic* dst, const void* src, size_t size) + { + const char* src_byte = static_cast(src); + while (size >= sizeof(uint64_t)) + { + uint64_t word; + std::memcpy(&word, src_byte, sizeof(word)); + dst->store(word, std::memory_order_relaxed); + src_byte += sizeof(word); + dst++; + size -= sizeof(word); + } + if (size > 0) + { + uint64_t word = 0; + std::memcpy(&word, src_byte, size); + dst->store(word, std::memory_order_relaxed); + } + } + + static constexpr int64_t kUninitialized = -1; + std::atomic lock_; + }; + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/internal/usage.h b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h index c0bcac5..d350789 100644 --- a/CAPI/cpp/grpc/include/absl/flags/internal/usage.h +++ b/CAPI/cpp/grpc/include/absl/flags/internal/usage.h @@ -27,78 +27,79 @@ // -------------------------------------------------------------------- // Usage reporting interfaces -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { - -// The format to report the help messages in. -enum class HelpFormat { - kHumanReadable, -}; - -// Streams the help message describing `flag` to `out`. -// The default value for `flag` is included in the output. -void FlagHelp(std::ostream& out, const CommandLineFlag& flag, - HelpFormat format = HelpFormat::kHumanReadable); - -// Produces the help messages for all flags matching the filter. A flag matches -// the filter if it is defined in a file with a filename which includes -// filter string as a substring. You can use '/' and '.' to restrict the -// matching to a specific file names. For example: -// FlagsHelp(out, "/path/to/file."); -// restricts help to only flags which resides in files named like: -// .../path/to/file. -// for any extension 'ext'. If the filter is empty this function produces help -// messages for all flags. -void FlagsHelp(std::ostream& out, absl::string_view filter, - HelpFormat format, absl::string_view program_usage_message); - -// -------------------------------------------------------------------- - -// If any of the 'usage' related command line flags (listed on the bottom of -// this file) has been set this routine produces corresponding help message in -// the specified output stream and returns: -// 0 - if "version" or "only_check_flags" flags were set and handled. -// 1 - if some other 'usage' related flag was set and handled. -// -1 - if no usage flags were set on a commmand line. -// Non negative return values are expected to be used as an exit code for a -// binary. 
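To make the read/write protocol implemented by SequenceLock above concrete, here is a deliberately simplified stand-in protecting a fixed two-word payload (not the absl implementation: the uninitialized state, ModificationCount, and arbitrary payload sizes are omitted):

#include <atomic>
#include <cstdint>

class TinySeqLock {
 public:
  // Externally synchronized against other writers; may run concurrently with
  // readers.
  void Write(const uint64_t (&src)[2]) {
    int64_t seq = seq_.load(std::memory_order_relaxed);
    seq_.store(seq + 1, std::memory_order_relaxed);  // odd: write in progress
    std::atomic_thread_fence(std::memory_order_release);
    words_[0].store(src[0], std::memory_order_relaxed);
    words_[1].store(src[1], std::memory_order_relaxed);
    seq_.store(seq + 2, std::memory_order_release);  // even again: snapshot stable
  }

  // Lock-free read. Returns false if a concurrent Write() was observed, in
  // which case the caller is expected to fall back to a mutex-protected slow
  // path (the role DataGuard() plays for FlagImpl).
  bool TryRead(uint64_t (&dst)[2]) const {
    int64_t before = seq_.load(std::memory_order_acquire);
    if (before & 1) return false;
    dst[0] = words_[0].load(std::memory_order_relaxed);
    dst[1] = words_[1].load(std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_acquire);
    return before == seq_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<int64_t> seq_{0};
  std::atomic<uint64_t> words_[2]{};
};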
-int HandleUsageFlags(std::ostream& out, - absl::string_view program_usage_message); - -// -------------------------------------------------------------------- -// Globals representing usage reporting flags - -enum class HelpMode { - kNone, - kImportant, - kShort, - kFull, - kPackage, - kMatch, - kVersion, - kOnlyCheckArgs -}; - -// Returns substring to filter help output (--help=substr argument) -std::string GetFlagsHelpMatchSubstr(); -// Returns the requested help mode. -HelpMode GetFlagsHelpMode(); -// Returns the requested help format. -HelpFormat GetFlagsHelpFormat(); - -// These are corresponding setters to the attributes above. -void SetFlagsHelpMatchSubstr(absl::string_view); -void SetFlagsHelpMode(HelpMode); -void SetFlagsHelpFormat(HelpFormat); - -// Deduces usage flags from the input argument in a form --name=value or -// --name. argument is already split into name and value before we call this -// function. -bool DeduceUsageFlags(absl::string_view name, absl::string_view value); - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + + // The format to report the help messages in. + enum class HelpFormat + { + kHumanReadable, + }; + + // Streams the help message describing `flag` to `out`. + // The default value for `flag` is included in the output. + void FlagHelp(std::ostream& out, const CommandLineFlag& flag, HelpFormat format = HelpFormat::kHumanReadable); + + // Produces the help messages for all flags matching the filter. A flag matches + // the filter if it is defined in a file with a filename which includes + // filter string as a substring. You can use '/' and '.' to restrict the + // matching to a specific file names. For example: + // FlagsHelp(out, "/path/to/file."); + // restricts help to only flags which resides in files named like: + // .../path/to/file. + // for any extension 'ext'. If the filter is empty this function produces help + // messages for all flags. + void FlagsHelp(std::ostream& out, absl::string_view filter, HelpFormat format, absl::string_view program_usage_message); + + // -------------------------------------------------------------------- + + // If any of the 'usage' related command line flags (listed on the bottom of + // this file) has been set this routine produces corresponding help message in + // the specified output stream and returns: + // 0 - if "version" or "only_check_flags" flags were set and handled. + // 1 - if some other 'usage' related flag was set and handled. + // -1 - if no usage flags were set on a commmand line. + // Non negative return values are expected to be used as an exit code for a + // binary. + int HandleUsageFlags(std::ostream& out, absl::string_view program_usage_message); + + // -------------------------------------------------------------------- + // Globals representing usage reporting flags + + enum class HelpMode + { + kNone, + kImportant, + kShort, + kFull, + kPackage, + kMatch, + kVersion, + kOnlyCheckArgs + }; + + // Returns substring to filter help output (--help=substr argument) + std::string GetFlagsHelpMatchSubstr(); + // Returns the requested help mode. + HelpMode GetFlagsHelpMode(); + // Returns the requested help format. + HelpFormat GetFlagsHelpFormat(); + + // These are corresponding setters to the attributes above. 
+ void SetFlagsHelpMatchSubstr(absl::string_view); + void SetFlagsHelpMode(HelpMode); + void SetFlagsHelpFormat(HelpFormat); + + // Deduces usage flags from the input argument in a form --name=value or + // --name. argument is already split into name and value before we call this + // function. + bool DeduceUsageFlags(absl::string_view name, absl::string_view value); + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_INTERNAL_USAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/marshalling.h b/CAPI/cpp/grpc/include/absl/flags/marshalling.h index b1e2ffa..05ed353 100644 --- a/CAPI/cpp/grpc/include/absl/flags/marshalling.h +++ b/CAPI/cpp/grpc/include/absl/flags/marshalling.h @@ -210,147 +210,159 @@ #include "absl/strings/string_view.h" #include "absl/types/optional.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// Forward declaration to be used inside composable flag parse/unparse -// implementations -template -inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); -template -inline std::string UnparseFlag(const T& v); + // Forward declaration to be used inside composable flag parse/unparse + // implementations + template + inline bool ParseFlag(absl::string_view input, T* dst, std::string* error); + template + inline std::string UnparseFlag(const T& v); -namespace flags_internal { + namespace flags_internal + { -// Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. -bool AbslParseFlag(absl::string_view, bool*, std::string*); -bool AbslParseFlag(absl::string_view, short*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, unsigned short*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, int*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, unsigned int*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, long*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, unsigned long*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, long long*, std::string*); // NOLINT -bool AbslParseFlag(absl::string_view, unsigned long long*, // NOLINT - std::string*); -bool AbslParseFlag(absl::string_view, float*, std::string*); -bool AbslParseFlag(absl::string_view, double*, std::string*); -bool AbslParseFlag(absl::string_view, std::string*, std::string*); -bool AbslParseFlag(absl::string_view, std::vector*, std::string*); + // Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types. 
+ bool AbslParseFlag(absl::string_view, bool*, std::string*); + bool AbslParseFlag(absl::string_view, short*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned short*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, int*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned int*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, long long*, std::string*); // NOLINT + bool AbslParseFlag(absl::string_view, unsigned long long*, // NOLINT + std::string*); + bool AbslParseFlag(absl::string_view, float*, std::string*); + bool AbslParseFlag(absl::string_view, double*, std::string*); + bool AbslParseFlag(absl::string_view, std::string*, std::string*); + bool AbslParseFlag(absl::string_view, std::vector*, std::string*); -template -bool AbslParseFlag(absl::string_view text, absl::optional* f, - std::string* err) { - if (text.empty()) { - *f = absl::nullopt; - return true; - } - T value; - if (!absl::ParseFlag(text, &value, err)) return false; + template + bool AbslParseFlag(absl::string_view text, absl::optional* f, std::string* err) + { + if (text.empty()) + { + *f = absl::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) + return false; - *f = std::move(value); - return true; -} + *f = std::move(value); + return true; + } #if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -template -bool AbslParseFlag(absl::string_view text, std::optional* f, - std::string* err) { - if (text.empty()) { - *f = std::nullopt; - return true; - } - T value; - if (!absl::ParseFlag(text, &value, err)) return false; + template + bool AbslParseFlag(absl::string_view text, std::optional* f, std::string* err) + { + if (text.empty()) + { + *f = std::nullopt; + return true; + } + T value; + if (!absl::ParseFlag(text, &value, err)) + return false; - *f = std::move(value); - return true; -} + *f = std::move(value); + return true; + } #endif -template -bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) { - // Comment on next line provides a good compiler error message if T - // does not have AbslParseFlag(absl::string_view, T*, std::string*). - return AbslParseFlag(input, dst, err); // Is T missing AbslParseFlag? -} + template + bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) + { + // Comment on next line provides a good compiler error message if T + // does not have AbslParseFlag(absl::string_view, T*, std::string*). + return AbslParseFlag(input, dst, err); // Is T missing AbslParseFlag? + } -// Strings and std:: containers do not have the same overload resolution -// considerations as fundamental types. Naming these 'AbslUnparseFlag' means we -// can avoid the need for additional specializations of Unparse (below). -std::string AbslUnparseFlag(absl::string_view v); -std::string AbslUnparseFlag(const std::vector&); + // Strings and std:: containers do not have the same overload resolution + // considerations as fundamental types. Naming these 'AbslUnparseFlag' means we + // can avoid the need for additional specializations of Unparse (below). + std::string AbslUnparseFlag(absl::string_view v); + std::string AbslUnparseFlag(const std::vector&); -template -std::string AbslUnparseFlag(const absl::optional& f) { - return f.has_value() ? 
absl::UnparseFlag(*f) : ""; -} + template + std::string AbslUnparseFlag(const absl::optional& f) + { + return f.has_value() ? absl::UnparseFlag(*f) : ""; + } #if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL) -template -std::string AbslUnparseFlag(const std::optional& f) { - return f.has_value() ? absl::UnparseFlag(*f) : ""; -} + template + std::string AbslUnparseFlag(const std::optional& f) + { + return f.has_value() ? absl::UnparseFlag(*f) : ""; + } #endif -template -std::string Unparse(const T& v) { - // Comment on next line provides a good compiler error message if T does not - // have UnparseFlag. - return AbslUnparseFlag(v); // Is T missing AbslUnparseFlag? -} + template + std::string Unparse(const T& v) + { + // Comment on next line provides a good compiler error message if T does not + // have UnparseFlag. + return AbslUnparseFlag(v); // Is T missing AbslUnparseFlag? + } -// Overloads for builtin types. -std::string Unparse(bool v); -std::string Unparse(short v); // NOLINT -std::string Unparse(unsigned short v); // NOLINT -std::string Unparse(int v); // NOLINT -std::string Unparse(unsigned int v); // NOLINT -std::string Unparse(long v); // NOLINT -std::string Unparse(unsigned long v); // NOLINT -std::string Unparse(long long v); // NOLINT -std::string Unparse(unsigned long long v); // NOLINT -std::string Unparse(float v); -std::string Unparse(double v); + // Overloads for builtin types. + std::string Unparse(bool v); + std::string Unparse(short v); // NOLINT + std::string Unparse(unsigned short v); // NOLINT + std::string Unparse(int v); // NOLINT + std::string Unparse(unsigned int v); // NOLINT + std::string Unparse(long v); // NOLINT + std::string Unparse(unsigned long v); // NOLINT + std::string Unparse(long long v); // NOLINT + std::string Unparse(unsigned long long v); // NOLINT + std::string Unparse(float v); + std::string Unparse(double v); -} // namespace flags_internal + } // namespace flags_internal -// ParseFlag() -// -// Parses a string value into a flag value of type `T`. Do not add overloads of -// this function for your type directly; instead, add an `AbslParseFlag()` -// free function as documented above. -// -// Some implementations of `AbslParseFlag()` for types which consist of other, -// constituent types which already have Abseil flag support, may need to call -// `absl::ParseFlag()` on those consituent string values. (See above.) -template -inline bool ParseFlag(absl::string_view input, T* dst, std::string* error) { - return flags_internal::InvokeParseFlag(input, dst, error); -} + // ParseFlag() + // + // Parses a string value into a flag value of type `T`. Do not add overloads of + // this function for your type directly; instead, add an `AbslParseFlag()` + // free function as documented above. + // + // Some implementations of `AbslParseFlag()` for types which consist of other, + // constituent types which already have Abseil flag support, may need to call + // `absl::ParseFlag()` on those consituent string values. (See above.) + template + inline bool ParseFlag(absl::string_view input, T* dst, std::string* error) + { + return flags_internal::InvokeParseFlag(input, dst, error); + } -// UnparseFlag() -// -// Unparses a flag value of type `T` into a string value. Do not add overloads -// of this function for your type directly; instead, add an `AbslUnparseFlag()` -// free function as documented above. 
-// -// Some implementations of `AbslUnparseFlag()` for types which consist of other, -// constituent types which already have Abseil flag support, may want to call -// `absl::UnparseFlag()` on those constituent types. (See above.) -template -inline std::string UnparseFlag(const T& v) { - return flags_internal::Unparse(v); -} + // UnparseFlag() + // + // Unparses a flag value of type `T` into a string value. Do not add overloads + // of this function for your type directly; instead, add an `AbslUnparseFlag()` + // free function as documented above. + // + // Some implementations of `AbslUnparseFlag()` for types which consist of other, + // constituent types which already have Abseil flag support, may want to call + // `absl::UnparseFlag()` on those constituent types. (See above.) + template + inline std::string UnparseFlag(const T& v) + { + return flags_internal::Unparse(v); + } -// Overloads for `absl::LogSeverity` can't (easily) appear alongside that type's -// definition because it is layered below flags. See proper documentation in -// base/log_severity.h. -enum class LogSeverity : int; -bool AbslParseFlag(absl::string_view, absl::LogSeverity*, std::string*); -std::string AbslUnparseFlag(absl::LogSeverity); + // Overloads for `absl::LogSeverity` can't (easily) appear alongside that type's + // definition because it is layered below flags. See proper documentation in + // base/log_severity.h. + enum class LogSeverity : int; + bool AbslParseFlag(absl::string_view, absl::LogSeverity*, std::string*); + std::string AbslUnparseFlag(absl::LogSeverity); -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_MARSHALLING_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/parse.h b/CAPI/cpp/grpc/include/absl/flags/parse.h index 929de2c..275e3c5 100644 --- a/CAPI/cpp/grpc/include/absl/flags/parse.h +++ b/CAPI/cpp/grpc/include/absl/flags/parse.h @@ -28,33 +28,34 @@ #include "absl/base/config.h" #include "absl/flags/internal/parse.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// ParseCommandLine() -// -// Parses the set of command-line arguments passed in the `argc` (argument -// count) and `argv[]` (argument vector) parameters from `main()`, assigning -// values to any defined Abseil flags. (Any arguments passed after the -// flag-terminating delimiter (`--`) are treated as positional arguments and -// ignored.) -// -// Any command-line flags (and arguments to those flags) are parsed into Abseil -// Flag values, if those flags are defined. Any undefined flags will either -// return an error, or be ignored if that flag is designated using `undefok` to -// indicate "undefined is OK." -// -// Any command-line positional arguments not part of any command-line flag (or -// arguments to a flag) are returned in a vector, with the program invocation -// name at position 0 of that vector. (Note that this includes positional -// arguments after the flag-terminating delimiter `--`.) -// -// After all flags and flag arguments are parsed, this function looks for any -// built-in usage flags (e.g. `--help`), and if any were specified, it reports -// help messages and then exits the program. -std::vector ParseCommandLine(int argc, char* argv[]); + // ParseCommandLine() + // + // Parses the set of command-line arguments passed in the `argc` (argument + // count) and `argv[]` (argument vector) parameters from `main()`, assigning + // values to any defined Abseil flags. 
(Any arguments passed after the + // flag-terminating delimiter (`--`) are treated as positional arguments and + // ignored.) + // + // Any command-line flags (and arguments to those flags) are parsed into Abseil + // Flag values, if those flags are defined. Any undefined flags will either + // return an error, or be ignored if that flag is designated using `undefok` to + // indicate "undefined is OK." + // + // Any command-line positional arguments not part of any command-line flag (or + // arguments to a flag) are returned in a vector, with the program invocation + // name at position 0 of that vector. (Note that this includes positional + // arguments after the flag-terminating delimiter `--`.) + // + // After all flags and flag arguments are parsed, this function looks for any + // built-in usage flags (e.g. `--help`), and if any were specified, it reports + // help messages and then exits the program. + std::vector ParseCommandLine(int argc, char* argv[]); -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_PARSE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/reflection.h b/CAPI/cpp/grpc/include/absl/flags/reflection.h index e6baf5d..8806abf 100644 --- a/CAPI/cpp/grpc/include/absl/flags/reflection.h +++ b/CAPI/cpp/grpc/include/absl/flags/reflection.h @@ -30,61 +30,64 @@ #include "absl/flags/commandlineflag.h" #include "absl/flags/internal/commandlineflag.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace flags_internal { -class FlagSaverImpl; -} // namespace flags_internal +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace flags_internal + { + class FlagSaverImpl; + } // namespace flags_internal -// FindCommandLineFlag() -// -// Returns the reflection handle of an Abseil flag of the specified name, or -// `nullptr` if not found. This function will emit a warning if the name of a -// 'retired' flag is specified. -absl::CommandLineFlag* FindCommandLineFlag(absl::string_view name); + // FindCommandLineFlag() + // + // Returns the reflection handle of an Abseil flag of the specified name, or + // `nullptr` if not found. This function will emit a warning if the name of a + // 'retired' flag is specified. + absl::CommandLineFlag* FindCommandLineFlag(absl::string_view name); -// Returns current state of the Flags registry in a form of mapping from flag -// name to a flag reflection handle. -absl::flat_hash_map GetAllFlags(); + // Returns current state of the Flags registry in a form of mapping from flag + // name to a flag reflection handle. + absl::flat_hash_map GetAllFlags(); -//------------------------------------------------------------------------------ -// FlagSaver -//------------------------------------------------------------------------------ -// -// A FlagSaver object stores the state of flags in the scope where the FlagSaver -// is defined, allowing modification of those flags within that scope and -// automatic restoration of the flags to their previous state upon leaving the -// scope. -// -// A FlagSaver can be used within tests to temporarily change the test -// environment and restore the test case to its previous state. -// -// Example: -// -// void MyFunc() { -// absl::FlagSaver fs; -// ... -// absl::SetFlag(&FLAGS_myFlag, otherValue); -// ... -// } // scope of FlagSaver left, flags return to previous state -// -// This class is thread-safe. 
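As a hedged illustration of the FlagSaver pattern documented just above, here is a minimal sketch; the flag name my_retry_count and the test function are invented for the example and are not part of these headers:

#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"

// Hypothetical flag, for illustration only.
ABSL_FLAG(int, my_retry_count, 3, "Number of retries.");

void RetryCountOverrideTest() {
  absl::FlagSaver saver;                     // snapshots all current flag values
  absl::SetFlag(&FLAGS_my_retry_count, 10);  // temporary override for this scope
  // ... exercise code that reads the flag via absl::GetFlag(FLAGS_my_retry_count) ...
}  // `saver` is destroyed here and my_retry_count reverts to its previous value
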
+ //------------------------------------------------------------------------------ + // FlagSaver + //------------------------------------------------------------------------------ + // + // A FlagSaver object stores the state of flags in the scope where the FlagSaver + // is defined, allowing modification of those flags within that scope and + // automatic restoration of the flags to their previous state upon leaving the + // scope. + // + // A FlagSaver can be used within tests to temporarily change the test + // environment and restore the test case to its previous state. + // + // Example: + // + // void MyFunc() { + // absl::FlagSaver fs; + // ... + // absl::SetFlag(&FLAGS_myFlag, otherValue); + // ... + // } // scope of FlagSaver left, flags return to previous state + // + // This class is thread-safe. -class FlagSaver { - public: - FlagSaver(); - ~FlagSaver(); + class FlagSaver + { + public: + FlagSaver(); + ~FlagSaver(); - FlagSaver(const FlagSaver&) = delete; - void operator=(const FlagSaver&) = delete; + FlagSaver(const FlagSaver&) = delete; + void operator=(const FlagSaver&) = delete; - private: - flags_internal::FlagSaverImpl* impl_; -}; + private: + flags_internal::FlagSaverImpl* impl_; + }; -//----------------------------------------------------------------------------- + //----------------------------------------------------------------------------- -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_REFLECTION_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage.h b/CAPI/cpp/grpc/include/absl/flags/usage.h index ad12ab7..1838689 100644 --- a/CAPI/cpp/grpc/include/absl/flags/usage.h +++ b/CAPI/cpp/grpc/include/absl/flags/usage.h @@ -22,22 +22,23 @@ // -------------------------------------------------------------------- // Usage reporting interfaces -namespace absl { -ABSL_NAMESPACE_BEGIN - -// Sets the "usage" message to be used by help reporting routines. -// For example: -// absl::SetProgramUsageMessage( -// absl::StrCat("This program does nothing. Sample usage:\n", argv[0], -// " ")); -// Do not include commandline flags in the usage: we do that for you! -// Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit. -void SetProgramUsageMessage(absl::string_view new_usage_message); - -// Returns the usage message set by SetProgramUsageMessage(). -absl::string_view ProgramUsageMessage(); - -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Sets the "usage" message to be used by help reporting routines. + // For example: + // absl::SetProgramUsageMessage( + // absl::StrCat("This program does nothing. Sample usage:\n", argv[0], + // " ")); + // Do not include commandline flags in the usage: we do that for you! + // Note: Calling SetProgramUsageMessage twice will trigger a call to std::exit. + void SetProgramUsageMessage(absl::string_view new_usage_message); + + // Returns the usage message set by SetProgramUsageMessage(). 
+ absl::string_view ProgramUsageMessage(); + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FLAGS_USAGE_H_ diff --git a/CAPI/cpp/grpc/include/absl/flags/usage_config.h b/CAPI/cpp/grpc/include/absl/flags/usage_config.h index ded7030..8f8439a 100644 --- a/CAPI/cpp/grpc/include/absl/flags/usage_config.h +++ b/CAPI/cpp/grpc/include/absl/flags/usage_config.h @@ -54,81 +54,86 @@ // * --helpmatch // Shows help on modules whose name contains the specified substring -namespace absl { -ABSL_NAMESPACE_BEGIN - -namespace flags_internal { -using FlagKindFilter = std::function; -} // namespace flags_internal - -// FlagsUsageConfig -// -// This structure contains the collection of callbacks for changing the behavior -// of the usage reporting routines in Abseil Flags. -struct FlagsUsageConfig { - // Returns true if flags defined in the given source code file should be - // reported with --helpshort flag. For example, if the file - // "path/to/my/code.cc" defines the flag "--my_flag", and - // contains_helpshort_flags("path/to/my/code.cc") returns true, invoking the - // program with --helpshort will include information about --my_flag in the - // program output. - flags_internal::FlagKindFilter contains_helpshort_flags; - - // Returns true if flags defined in the filename should be reported with - // --help flag. For example, if the file - // "path/to/my/code.cc" defines the flag "--my_flag", and - // contains_help_flags("path/to/my/code.cc") returns true, invoking the - // program with --help will include information about --my_flag in the - // program output. - flags_internal::FlagKindFilter contains_help_flags; - - // Returns true if flags defined in the filename should be reported with - // --helppackage flag. For example, if the file - // "path/to/my/code.cc" defines the flag "--my_flag", and - // contains_helppackage_flags("path/to/my/code.cc") returns true, invoking the - // program with --helppackage will include information about --my_flag in the - // program output. - flags_internal::FlagKindFilter contains_helppackage_flags; - - // Generates string containing program version. This is the string reported - // when user specifies --version in a command line. - std::function version_string; - - // Normalizes the filename specific to the build system/filesystem used. This - // routine is used when we report the information about the flag definition - // location. For instance, if your build resides at some location you do not - // want to expose in the usage output, you can trim it to show only relevant - // part. - // For example: - // normalize_filename("/my_company/some_long_path/src/project/file.cc") - // might produce - // "project/file.cc". - std::function normalize_filename; -}; - -// SetFlagsUsageConfig() -// -// Sets the usage reporting configuration callbacks. If any of the callbacks are -// not set in usage_config instance, then the default value of the callback is -// used. -void SetFlagsUsageConfig(FlagsUsageConfig usage_config); - -namespace flags_internal { - -FlagsUsageConfig GetUsageConfig(); - -void ReportUsageError(absl::string_view msg, bool is_fatal); - -} // namespace flags_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + namespace flags_internal + { + using FlagKindFilter = std::function; + } // namespace flags_internal + + // FlagsUsageConfig + // + // This structure contains the collection of callbacks for changing the behavior + // of the usage reporting routines in Abseil Flags. 
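Before the struct itself (shown below), a hedged sketch of how a program might install these callbacks; the version text and the identity normalization are invented for the example:

#include <string>

#include "absl/flags/usage_config.h"
#include "absl/strings/string_view.h"

void InstallMyUsageConfig() {
  absl::FlagsUsageConfig config;
  // Text reported when the user passes --version (invented value).
  config.version_string = [] { return std::string("my_tool 1.2.3\n"); };
  // Identity normalization shown here; a real program might strip a
  // build-system prefix from the reported flag-definition path.
  config.normalize_filename = [](absl::string_view path) {
    return std::string(path);
  };
  absl::SetFlagsUsageConfig(config);
}
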
+ struct FlagsUsageConfig + { + // Returns true if flags defined in the given source code file should be + // reported with --helpshort flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helpshort_flags("path/to/my/code.cc") returns true, invoking the + // program with --helpshort will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helpshort_flags; + + // Returns true if flags defined in the filename should be reported with + // --help flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_help_flags("path/to/my/code.cc") returns true, invoking the + // program with --help will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_help_flags; + + // Returns true if flags defined in the filename should be reported with + // --helppackage flag. For example, if the file + // "path/to/my/code.cc" defines the flag "--my_flag", and + // contains_helppackage_flags("path/to/my/code.cc") returns true, invoking the + // program with --helppackage will include information about --my_flag in the + // program output. + flags_internal::FlagKindFilter contains_helppackage_flags; + + // Generates string containing program version. This is the string reported + // when user specifies --version in a command line. + std::function version_string; + + // Normalizes the filename specific to the build system/filesystem used. This + // routine is used when we report the information about the flag definition + // location. For instance, if your build resides at some location you do not + // want to expose in the usage output, you can trim it to show only relevant + // part. + // For example: + // normalize_filename("/my_company/some_long_path/src/project/file.cc") + // might produce + // "project/file.cc". + std::function normalize_filename; + }; + + // SetFlagsUsageConfig() + // + // Sets the usage reporting configuration callbacks. If any of the callbacks are + // not set in usage_config instance, then the default value of the callback is + // used. + void SetFlagsUsageConfig(FlagsUsageConfig usage_config); + + namespace flags_internal + { + + FlagsUsageConfig GetUsageConfig(); + + void ReportUsageError(absl::string_view msg, bool is_fatal); + + } // namespace flags_internal + ABSL_NAMESPACE_END } // namespace absl -extern "C" { - -// Additional report of fatal usage error message before we std::exit. Error is -// fatal if is_fatal argument to ReportUsageError is true. -void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( - absl::string_view); +extern "C" +{ + // Additional report of fatal usage error message before we std::exit. Error is + // fatal if is_fatal argument to ReportUsageError is true. + void ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)( + absl::string_view + ); } // extern "C" diff --git a/CAPI/cpp/grpc/include/absl/functional/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h index 0c5faca..ba0d844 100644 --- a/CAPI/cpp/grpc/include/absl/functional/any_invocable.h +++ b/CAPI/cpp/grpc/include/absl/functional/any_invocable.h @@ -40,274 +40,288 @@ #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -// absl::AnyInvocable -// -// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that -// assumes ownership of an invocable object. 
Unlike `std::function`, an -// `absl::AnyInvocable` is more type-safe and provides the following additional -// benefits: -// -// * Properly adheres to const correctness of the underlying type -// * Is move-only so avoids concurrency problems with copied invocables and -// unnecessary copies in general. -// * Supports reference qualifiers allowing it to perform unique actions (noted -// below). -// -// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation -// may wrap any invocable object with a compatible function signature, e.g. -// having arguments and return types convertible to types matching the -// `absl::AnyInvocable` signature, and also matching any stated reference -// qualifiers, as long as that type is moveable. It therefore provides broad -// type erasure for functional objects. -// -// An `absl::AnyInvocable` is typically used as a type-erased function parameter -// for accepting various functional objects: -// -// // Define a function taking an AnyInvocable parameter. -// void my_func(absl::AnyInvocable f) { -// ... -// }; -// -// // That function can accept any invocable type: -// -// // Accept a function reference. We don't need to move a reference. -// int func1() { return 0; }; -// my_func(func1); -// -// // Accept a lambda. We use std::move here because otherwise my_func would -// // copy the lambda. -// auto lambda = []() { return 0; }; -// my_func(std::move(lambda)); -// -// // Accept a function pointer. We don't need to move a function pointer. -// func2 = &func1; -// my_func(func2); -// -// // Accept an std::function by moving it. Note that the lambda is copyable -// // (satisfying std::function requirements) and moveable (satisfying -// // absl::AnyInvocable requirements). -// std::function func6 = []() { return 0; }; -// my_func(std::move(func6)); -// -// `AnyInvocable` also properly respects `const` qualifiers, reference -// qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as -// part of the user-specified function type (e.g. -// `AnyInvocable`). These qualifiers will be applied to -// the `AnyInvocable` object's `operator()`, and the underlying invocable must -// be compatible with those qualifiers. -// -// Comparison of const and non-const function types: -// -// // Store a closure inside of `func` with the function type `int()`. -// // Note that we have made `func` itself `const`. -// const AnyInvocable func = [](){ return 0; }; -// -// func(); // Compile-error: the passed type `int()` isn't `const`. -// -// // Store a closure inside of `const_func` with the function type -// // `int() const`. -// // Note that we have also made `const_func` itself `const`. -// const AnyInvocable const_func = [](){ return 0; }; -// -// const_func(); // Fine: `int() const` is `const`. -// -// In the above example, the call `func()` would have compiled if -// `std::function` were used even though the types are not const compatible. -// This is a bug, and using `absl::AnyInvocable` properly detects that bug. -// -// In addition to affecting the signature of `operator()`, the `const` and -// reference qualifiers of the function type also appropriately constrain which -// kinds of invocable objects you are allowed to place into the `AnyInvocable` -// instance. If you specify a function type that is const-qualified, then -// anything that you attempt to put into the `AnyInvocable` must be callable on -// a `const` instance of that type. -// -// Constraint example: -// -// // Fine because the lambda is callable when `const`. 
-// AnyInvocable func = [=](){ return 0; }; -// -// // This is a compile-error because the lambda isn't callable when `const`. -// AnyInvocable error = [=]() mutable { return 0; }; -// -// An `&&` qualifier can be used to express that an `absl::AnyInvocable` -// instance should be invoked at most once: -// -// // Invokes `continuation` with the logical result of an operation when -// // that operation completes (common in asynchronous code). -// void CallOnCompletion(AnyInvocable continuation) { -// int result_of_foo = foo(); -// -// // `std::move` is required because the `operator()` of `continuation` is -// // rvalue-reference qualified. -// std::move(continuation)(result_of_foo); -// } -// -// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original -// implementation. -template -class AnyInvocable : private internal_any_invocable::Impl { - private: - static_assert( - std::is_function::value, - "The template argument of AnyInvocable must be a function type."); - - using Impl = internal_any_invocable::Impl; - - public: - // The return type of Sig - using result_type = typename Impl::result_type; - - // Constructors - - // Constructs the `AnyInvocable` in an empty state. - AnyInvocable() noexcept = default; - AnyInvocable(std::nullptr_t) noexcept {} // NOLINT - - // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. - // Note that `f` is not guaranteed to be empty after move-construction, - // although it may be. - AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; - - // Constructs an `AnyInvocable` from an invocable object. - // - // Upon construction, `*this` is only empty if `f` is a function pointer or - // member pointer type and is null, or if `f` is an `AnyInvocable` that is - // empty. - template ::value>> - AnyInvocable(F&& f) // NOLINT - : Impl(internal_any_invocable::ConversionConstruct(), - std::forward(f)) {} - - // Constructs an `AnyInvocable` that holds an invocable object of type `T`, - // which is constructed in-place from the given arguments. - // - // Example: - // - // AnyInvocable func( - // absl::in_place_type, arg1, arg2); - // - template f) { + // ... + // }; + // + // // That function can accept any invocable type: + // + // // Accept a function reference. We don't need to move a reference. + // int func1() { return 0; }; + // my_func(func1); + // + // // Accept a lambda. We use std::move here because otherwise my_func would + // // copy the lambda. + // auto lambda = []() { return 0; }; + // my_func(std::move(lambda)); + // + // // Accept a function pointer. We don't need to move a function pointer. + // func2 = &func1; + // my_func(func2); + // + // // Accept an std::function by moving it. Note that the lambda is copyable + // // (satisfying std::function requirements) and moveable (satisfying + // // absl::AnyInvocable requirements). + // std::function func6 = []() { return 0; }; + // my_func(std::move(func6)); + // + // `AnyInvocable` also properly respects `const` qualifiers, reference + // qualifiers, and the `noexcept` specification (only in C++ 17 and beyond) as + // part of the user-specified function type (e.g. + // `AnyInvocable`). These qualifiers will be applied to + // the `AnyInvocable` object's `operator()`, and the underlying invocable must + // be compatible with those qualifiers. + // + // Comparison of const and non-const function types: + // + // // Store a closure inside of `func` with the function type `int()`. + // // Note that we have made `func` itself `const`. 
+ // const AnyInvocable func = [](){ return 0; }; + // + // func(); // Compile-error: the passed type `int()` isn't `const`. + // + // // Store a closure inside of `const_func` with the function type + // // `int() const`. + // // Note that we have also made `const_func` itself `const`. + // const AnyInvocable const_func = [](){ return 0; }; + // + // const_func(); // Fine: `int() const` is `const`. + // + // In the above example, the call `func()` would have compiled if + // `std::function` were used even though the types are not const compatible. + // This is a bug, and using `absl::AnyInvocable` properly detects that bug. + // + // In addition to affecting the signature of `operator()`, the `const` and + // reference qualifiers of the function type also appropriately constrain which + // kinds of invocable objects you are allowed to place into the `AnyInvocable` + // instance. If you specify a function type that is const-qualified, then + // anything that you attempt to put into the `AnyInvocable` must be callable on + // a `const` instance of that type. + // + // Constraint example: + // + // // Fine because the lambda is callable when `const`. + // AnyInvocable func = [=](){ return 0; }; + // + // // This is a compile-error because the lambda isn't callable when `const`. + // AnyInvocable error = [=]() mutable { return 0; }; + // + // An `&&` qualifier can be used to express that an `absl::AnyInvocable` + // instance should be invoked at most once: + // + // // Invokes `continuation` with the logical result of an operation when + // // that operation completes (common in asynchronous code). + // void CallOnCompletion(AnyInvocable continuation) { + // int result_of_foo = foo(); + // + // // `std::move` is required because the `operator()` of `continuation` is + // // rvalue-reference qualified. + // std::move(continuation)(result_of_foo); + // } + // + // Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original + // implementation. + template + class AnyInvocable : private internal_any_invocable::Impl + { + private: + static_assert( + std::is_function::value, + "The template argument of AnyInvocable must be a function type." + ); + + using Impl = internal_any_invocable::Impl; + + public: + // The return type of Sig + using result_type = typename Impl::result_type; + + // Constructors + + // Constructs the `AnyInvocable` in an empty state. + AnyInvocable() noexcept = default; + AnyInvocable(std::nullptr_t) noexcept + { + } // NOLINT + + // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move. + // Note that `f` is not guaranteed to be empty after move-construction, + // although it may be. + AnyInvocable(AnyInvocable&& /*f*/) noexcept = default; + + // Constructs an `AnyInvocable` from an invocable object. + // + // Upon construction, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template::value>> + AnyInvocable(F&& f) // NOLINT + : + Impl(internal_any_invocable::ConversionConstruct(), std::forward(f)) + { + } + + // Constructs an `AnyInvocable` that holds an invocable object of type `T`, + // which is constructed in-place from the given arguments. + // + // Example: + // + // AnyInvocable func( + // absl::in_place_type, arg1, arg2); + // + template::value>> + explicit AnyInvocable(absl::in_place_type_t, Args&&... args) : + Impl(absl::in_place_type>, std::forward(args)...) 
+ { + static_assert(std::is_same>::value, "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Overload of the above constructor to support list-initialization. + template&, Args...>::value>> + explicit AnyInvocable(absl::in_place_type_t, std::initializer_list ilist, Args&&... args) : + Impl(absl::in_place_type>, ilist, std::forward(args)...) + { + static_assert(std::is_same>::value, "The explicit template argument of in_place_type is required " + "to be an unqualified object type."); + } + + // Assignment Operators + + // Assigns an `AnyInvocable` through move-assignment. + // Note that `f` is not guaranteed to be empty after move-assignment + // although it may be. + AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; + + // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If + // not empty, destroys the target, putting `*this` into an empty state. + AnyInvocable& operator=(std::nullptr_t) noexcept + { + this->Clear(); + return *this; + } + + // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. + // + // Upon assignment, `*this` is only empty if `f` is a function pointer or + // member pointer type and is null, or if `f` is an `AnyInvocable` that is + // empty. + template::value>> + AnyInvocable& operator=(F&& f) + { + *this = AnyInvocable(std::forward(f)); + return *this; + } + + // Assigns an `AnyInvocable` from a reference to an invocable object. + // Upon assignment, stores a reference to the invocable object in the + // `AnyInvocable` instance. + template< + class F, typename = absl::enable_if_t< - internal_any_invocable::CanEmplace::value>> - explicit AnyInvocable(absl::in_place_type_t, Args&&... args) - : Impl(absl::in_place_type>, - std::forward(args)...) { - static_assert(std::is_same>::value, - "The explicit template argument of in_place_type is required " - "to be an unqualified object type."); - } - - // Overload of the above constructor to support list-initialization. - template &, Args...>::value>> - explicit AnyInvocable(absl::in_place_type_t, - std::initializer_list ilist, Args&&... args) - : Impl(absl::in_place_type>, ilist, - std::forward(args)...) { - static_assert(std::is_same>::value, - "The explicit template argument of in_place_type is required " - "to be an unqualified object type."); - } - - // Assignment Operators - - // Assigns an `AnyInvocable` through move-assignment. - // Note that `f` is not guaranteed to be empty after move-assignment - // although it may be. - AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default; - - // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If - // not empty, destroys the target, putting `*this` into an empty state. - AnyInvocable& operator=(std::nullptr_t) noexcept { - this->Clear(); - return *this; - } - - // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance. - // - // Upon assignment, `*this` is only empty if `f` is a function pointer or - // member pointer type and is null, or if `f` is an `AnyInvocable` that is - // empty. - template ::value>> - AnyInvocable& operator=(F&& f) { - *this = AnyInvocable(std::forward(f)); - return *this; - } - - // Assigns an `AnyInvocable` from a reference to an invocable object. - // Upon assignment, stores a reference to the invocable object in the - // `AnyInvocable` instance. 
- template < - class F, - typename = absl::enable_if_t< - internal_any_invocable::CanAssignReferenceWrapper::value>> - AnyInvocable& operator=(std::reference_wrapper f) noexcept { - *this = AnyInvocable(f); - return *this; - } - - // Destructor - - // If not empty, destroys the target. - ~AnyInvocable() = default; - - // absl::AnyInvocable::swap() - // - // Exchanges the targets of `*this` and `other`. - void swap(AnyInvocable& other) noexcept { std::swap(*this, other); } - - // abl::AnyInvocable::operator bool() - // - // Returns `true` if `*this` is not empty. - explicit operator bool() const noexcept { return this->HasValue(); } - - // Invokes the target object of `*this`. `*this` must not be empty. - // - // Note: The signature of this function call operator is the same as the - // template parameter `Sig`. - using Impl::operator(); - - // Equality operators - - // Returns `true` if `*this` is empty. - friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept { - return !f.HasValue(); - } - - // Returns `true` if `*this` is empty. - friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept { - return !f.HasValue(); - } - - // Returns `false` if `*this` is empty. - friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept { - return f.HasValue(); - } - - // Returns `false` if `*this` is empty. - friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept { - return f.HasValue(); - } - - // swap() - // - // Exchanges the targets of `f1` and `f2`. - friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); } - - private: - // Friending other instantiations is necessary for conversions. - template - friend class internal_any_invocable::CoreImpl; -}; - -ABSL_NAMESPACE_END + internal_any_invocable::CanAssignReferenceWrapper::value>> + AnyInvocable& operator=(std::reference_wrapper f) noexcept + { + *this = AnyInvocable(f); + return *this; + } + + // Destructor + + // If not empty, destroys the target. + ~AnyInvocable() = default; + + // absl::AnyInvocable::swap() + // + // Exchanges the targets of `*this` and `other`. + void swap(AnyInvocable& other) noexcept + { + std::swap(*this, other); + } + + // abl::AnyInvocable::operator bool() + // + // Returns `true` if `*this` is not empty. + explicit operator bool() const noexcept + { + return this->HasValue(); + } + + // Invokes the target object of `*this`. `*this` must not be empty. + // + // Note: The signature of this function call operator is the same as the + // template parameter `Sig`. + using Impl::operator(); + + // Equality operators + + // Returns `true` if `*this` is empty. + friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept + { + return !f.HasValue(); + } + + // Returns `true` if `*this` is empty. + friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept + { + return !f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept + { + return f.HasValue(); + } + + // Returns `false` if `*this` is empty. + friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept + { + return f.HasValue(); + } + + // swap() + // + // Exchanges the targets of `f1` and `f2`. + friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept + { + f1.swap(f2); + } + + private: + // Friending other instantiations is necessary for conversions. 
+ template + friend class internal_any_invocable::CoreImpl; + }; + + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/bind_front.h b/CAPI/cpp/grpc/include/absl/functional/bind_front.h index f9075bd..db4b964 100644 --- a/CAPI/cpp/grpc/include/absl/functional/bind_front.h +++ b/CAPI/cpp/grpc/include/absl/functional/bind_front.h @@ -32,13 +32,14 @@ #if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L #include // For std::bind_front. -#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L +#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L #include "absl/functional/internal/front_binder.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN // bind_front() // @@ -176,18 +177,20 @@ ABSL_NAMESPACE_BEGIN // absl::bind_front(Print, absl::string_view(hi))("Chuk"); // #if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L -using std::bind_front; + using std::bind_front; #else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L -template -constexpr functional_internal::bind_front_t bind_front( - F&& func, BoundArgs&&... args) { - return functional_internal::bind_front_t( - absl::in_place, absl::forward(func), - absl::forward(args)...); -} + template + constexpr functional_internal::bind_front_t bind_front( + F&& func, BoundArgs&&... args + ) + { + return functional_internal::bind_front_t( + absl::in_place, absl::forward(func), absl::forward(args)... + ); + } #endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_BIND_FRONT_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/function_ref.h index f977960..d00ec26 100644 --- a/CAPI/cpp/grpc/include/absl/functional/function_ref.h +++ b/CAPI/cpp/grpc/include/absl/functional/function_ref.h @@ -54,90 +54,95 @@ #include "absl/functional/internal/function_ref.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// FunctionRef -// -// Dummy class declaration to allow the partial specialization based on function -// types below. -template -class FunctionRef; + // FunctionRef + // + // Dummy class declaration to allow the partial specialization based on function + // types below. + template + class FunctionRef; -// FunctionRef -// -// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with -// a compatible signature. Generally, an `absl::FunctionRef` should only be used -// as an argument type and should be preferred as an argument over a const -// reference to a `std::function`. `absl::FunctionRef` itself does not allocate, -// although the wrapped invokable may. -// -// Example: -// -// // The following function takes a function callback by const reference -// bool Visitor(const std::function& callback); -// -// // Assuming that the function is not stored or otherwise copied, it can be -// // replaced by an `absl::FunctionRef`: -// bool Visitor(absl::FunctionRef -// callback); -// -// Note: the assignment operator within an `absl::FunctionRef` is intentionally -// deleted to prevent misuse; because the `absl::FunctionRef` does not own the -// underlying type, assignment likely indicates misuse. 
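To make the recommendation above concrete, a small hedged sketch of FunctionRef used purely as a borrowed, non-owning parameter; ForEach, Example, and the lambda are illustrative names, not part of this header:

#include <vector>

#include "absl/functional/function_ref.h"

// The callback is only borrowed for the duration of the call, so no
// allocation or ownership transfer takes place.
void ForEach(const std::vector<int>& values,
             absl::FunctionRef<void(int)> callback) {
  for (int v : values) callback(v);
}

void Example() {
  int sum = 0;
  // A capturing lambda binds to FunctionRef directly.
  ForEach({1, 2, 3}, [&sum](int v) { sum += v; });
}
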
-template -class FunctionRef { - private: - // Used to disable constructors for objects that are not compatible with the - // signature of this FunctionRef. - template > - using EnableIfCompatible = - typename std::enable_if::value || - std::is_convertible::value>::type; + // FunctionRef + // + // An `absl::FunctionRef` is a lightweight wrapper to any invokable object with + // a compatible signature. Generally, an `absl::FunctionRef` should only be used + // as an argument type and should be preferred as an argument over a const + // reference to a `std::function`. `absl::FunctionRef` itself does not allocate, + // although the wrapped invokable may. + // + // Example: + // + // // The following function takes a function callback by const reference + // bool Visitor(const std::function& callback); + // + // // Assuming that the function is not stored or otherwise copied, it can be + // // replaced by an `absl::FunctionRef`: + // bool Visitor(absl::FunctionRef + // callback); + // + // Note: the assignment operator within an `absl::FunctionRef` is intentionally + // deleted to prevent misuse; because the `absl::FunctionRef` does not own the + // underlying type, assignment likely indicates misuse. + template + class FunctionRef + { + private: + // Used to disable constructors for objects that are not compatible with the + // signature of this FunctionRef. + template> + using EnableIfCompatible = + typename std::enable_if::value || std::is_convertible::value>::type; - public: - // Constructs a FunctionRef from any invokable type. - template > - // NOLINTNEXTLINE(runtime/explicit) - FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) - : invoker_(&absl::functional_internal::InvokeObject) { - absl::functional_internal::AssertNonNull(f); - ptr_.obj = &f; - } + public: + // Constructs a FunctionRef from any invokable type. + template> + // NOLINTNEXTLINE(runtime/explicit) + FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) : + invoker_(&absl::functional_internal::InvokeObject) + { + absl::functional_internal::AssertNonNull(f); + ptr_.obj = &f; + } - // Overload for function pointers. This eliminates a level of indirection that - // would happen if the above overload was used (it lets us store the pointer - // instead of a pointer to a pointer). - // - // This overload is also used for references to functions, since references to - // functions can decay to function pointers implicitly. - template < - typename F, typename = EnableIfCompatible, - absl::functional_internal::EnableIf::value> = 0> - FunctionRef(F* f) // NOLINT(runtime/explicit) - : invoker_(&absl::functional_internal::InvokeFunction) { - assert(f != nullptr); - ptr_.fun = reinterpret_cast(f); - } + // Overload for function pointers. This eliminates a level of indirection that + // would happen if the above overload was used (it lets us store the pointer + // instead of a pointer to a pointer). + // + // This overload is also used for references to functions, since references to + // functions can decay to function pointers implicitly. + template< + typename F, + typename = EnableIfCompatible, + absl::functional_internal::EnableIf::value> = 0> + FunctionRef(F* f) // NOLINT(runtime/explicit) + : + invoker_(&absl::functional_internal::InvokeFunction) + { + assert(f != nullptr); + ptr_.fun = reinterpret_cast(f); + } - // To help prevent subtle lifetime bugs, FunctionRef is not assignable. - // Typically, it should only be used as an argument type. 
- FunctionRef& operator=(const FunctionRef& rhs) = delete; - FunctionRef(const FunctionRef& rhs) = default; + // To help prevent subtle lifetime bugs, FunctionRef is not assignable. + // Typically, it should only be used as an argument type. + FunctionRef& operator=(const FunctionRef& rhs) = delete; + FunctionRef(const FunctionRef& rhs) = default; - // Call the underlying object. - R operator()(Args... args) const { - return invoker_(ptr_, std::forward(args)...); - } + // Call the underlying object. + R operator()(Args... args) const + { + return invoker_(ptr_, std::forward(args)...); + } - private: - absl::functional_internal::VoidPtr ptr_; - absl::functional_internal::Invoker invoker_; -}; + private: + absl::functional_internal::VoidPtr ptr_; + absl::functional_internal::Invoker invoker_; + }; -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h index f353139..fad75c3 100644 --- a/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h +++ b/CAPI/cpp/grpc/include/absl/functional/internal/any_invocable.h @@ -69,8 +69,9 @@ #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN // Helper macro used to prevent spelling `noexcept` in language versions older // than C++17, where it is not part of the type system, in order to avoid @@ -81,436 +82,485 @@ ABSL_NAMESPACE_BEGIN #define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) #endif -// Defined in functional/any_invocable.h -template -class AnyInvocable; - -namespace internal_any_invocable { - -// Constants relating to the small-object-storage for AnyInvocable -enum StorageProperty : std::size_t { - kAlignment = alignof(std::max_align_t), // The alignment of the storage - kStorageSize = sizeof(void*) * 2 // The size of the storage -}; - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction for checking if a type is an AnyInvocable instantiation. -// This is used during conversion operations. -template -struct IsAnyInvocable : std::false_type {}; - -template -struct IsAnyInvocable> : std::true_type {}; -// -//////////////////////////////////////////////////////////////////////////////// - -// A type trait that tells us whether or not a target function type should be -// stored locally in the small object optimization storage -template -using IsStoredLocally = std::integral_constant< - bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && - kAlignment % alignof(T) == 0 && - std::is_nothrow_move_constructible::value>; - -// An implementation of std::remove_cvref_t of C++20. -template -using RemoveCVRef = - typename std::remove_cv::type>::type; - -//////////////////////////////////////////////////////////////////////////////// -// -// An implementation of the C++ standard INVOKE pseudo-macro, operation is -// equivalent to std::invoke except that it forces an implicit conversion to the -// specified return type. If "R" is void, the function is executed and the -// return value is simply ignored. -template ::value>> -void InvokeR(F&& f, P&&... args) { - absl::base_internal::invoke(std::forward(f), std::forward
(args)...); -} - -template ::value, int> = 0> -ReturnType InvokeR(F&& f, P&&... args) { - return absl::base_internal::invoke(std::forward(f), - std::forward
(args)...); -} - -// -//////////////////////////////////////////////////////////////////////////////// - -//////////////////////////////////////////////////////////////////////////////// -/// -// A metafunction that takes a "T" corresponding to a parameter type of the -// user's specified function type, and yields the parameter type to use for the -// type-erased invoker. In order to prevent observable moves, this must be -// either a reference or, if the type is trivial, the original parameter type -// itself. Since the parameter type may be incomplete at the point that this -// metafunction is used, we can only do this optimization for scalar types -// rather than for any trivial type. -template -T ForwardImpl(std::true_type); - -template -T&& ForwardImpl(std::false_type); - -// NOTE: We deliberately use an intermediate struct instead of a direct alias, -// as a workaround for b/206991861 on MSVC versions < 1924. -template -struct ForwardedParameter { - using type = decltype(( - ForwardImpl)(std::integral_constant::value>())); -}; - -template -using ForwardedParameterType = typename ForwardedParameter::type; -// -//////////////////////////////////////////////////////////////////////////////// - -// A discriminator when calling the "manager" function that describes operation -// type-erased operation should be invoked. -// -// "relocate_from_to" specifies that the manager should perform a move. -// -// "dispose" specifies that the manager should perform a destroy. -enum class FunctionToCall : bool { relocate_from_to, dispose }; - -// The portion of `AnyInvocable` state that contains either a pointer to the -// target object or the object itself in local storage -union TypeErasedState { - struct { - // A pointer to the type-erased object when remotely stored - void* target; - // The size of the object for `RemoteManagerTrivial` - std::size_t size; - } remote; - - // Local-storage for the type-erased object when small and trivial enough - alignas(kAlignment) char storage[kStorageSize]; -}; - -// A typed accessor for the object in `TypeErasedState` storage -template -T& ObjectInLocalStorage(TypeErasedState* const state) { - // We launder here because the storage may be reused with the same type. + // Defined in functional/any_invocable.h + template + class AnyInvocable; + + namespace internal_any_invocable + { + + // Constants relating to the small-object-storage for AnyInvocable + enum StorageProperty : std::size_t + { + kAlignment = alignof(std::max_align_t), // The alignment of the storage + kStorageSize = sizeof(void*) * 2 // The size of the storage + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction for checking if a type is an AnyInvocable instantiation. + // This is used during conversion operations. + template + struct IsAnyInvocable : std::false_type + { + }; + + template + struct IsAnyInvocable> : std::true_type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A type trait that tells us whether or not a target function type should be + // stored locally in the small object optimization storage + template + using IsStoredLocally = std::integral_constant< + bool, + sizeof(T) <= kStorageSize && alignof(T) <= kAlignment && + kAlignment % alignof(T) == 0 && + std::is_nothrow_move_constructible::value>; + + // An implementation of std::remove_cvref_t of C++20. 
+ template + using RemoveCVRef = + typename std::remove_cv::type>::type; + + //////////////////////////////////////////////////////////////////////////////// + // + // An implementation of the C++ standard INVOKE pseudo-macro, operation is + // equivalent to std::invoke except that it forces an implicit conversion to the + // specified return type. If "R" is void, the function is executed and the + // return value is simply ignored. + template::value>> + void InvokeR(F&& f, P&&... args) + { + absl::base_internal::invoke(std::forward(f), std::forward
(args)...); + } + + template::value, int> = 0> + ReturnType InvokeR(F&& f, P&&... args) + { + return absl::base_internal::invoke(std::forward(f), std::forward
(args)...); + } + + // + //////////////////////////////////////////////////////////////////////////////// + + //////////////////////////////////////////////////////////////////////////////// + /// + // A metafunction that takes a "T" corresponding to a parameter type of the + // user's specified function type, and yields the parameter type to use for the + // type-erased invoker. In order to prevent observable moves, this must be + // either a reference or, if the type is trivial, the original parameter type + // itself. Since the parameter type may be incomplete at the point that this + // metafunction is used, we can only do this optimization for scalar types + // rather than for any trivial type. + template + T ForwardImpl(std::true_type); + + template + T&& ForwardImpl(std::false_type); + + // NOTE: We deliberately use an intermediate struct instead of a direct alias, + // as a workaround for b/206991861 on MSVC versions < 1924. + template + struct ForwardedParameter + { + using type = decltype(( + ForwardImpl + )(std::integral_constant::value>())); + }; + + template + using ForwardedParameterType = typename ForwardedParameter::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // A discriminator when calling the "manager" function that describes operation + // type-erased operation should be invoked. + // + // "relocate_from_to" specifies that the manager should perform a move. + // + // "dispose" specifies that the manager should perform a destroy. + enum class FunctionToCall : bool + { + relocate_from_to, + dispose + }; + + // The portion of `AnyInvocable` state that contains either a pointer to the + // target object or the object itself in local storage + union TypeErasedState + { + struct + { + // A pointer to the type-erased object when remotely stored + void* target; + // The size of the object for `RemoteManagerTrivial` + std::size_t size; + } remote; + + // Local-storage for the type-erased object when small and trivial enough + alignas(kAlignment) char storage[kStorageSize]; + }; + + // A typed accessor for the object in `TypeErasedState` storage + template + T& ObjectInLocalStorage(TypeErasedState* const state) + { + // We launder here because the storage may be reused with the same type. #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L - return *std::launder(reinterpret_cast(&state->storage)); + return *std::launder(reinterpret_cast(&state->storage)); #elif ABSL_HAVE_BUILTIN(__builtin_launder) - return *__builtin_launder(reinterpret_cast(&state->storage)); + return *__builtin_launder(reinterpret_cast(&state->storage)); #else - // When `std::launder` or equivalent are not available, we rely on undefined - // behavior, which works as intended on Abseil's officially supported - // platforms as of Q2 2022. + // When `std::launder` or equivalent are not available, we rely on undefined + // behavior, which works as intended on Abseil's officially supported + // platforms as of Q2 2022. #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic push #endif - return *reinterpret_cast(&state->storage); + return *reinterpret_cast(&state->storage); #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif #endif -} - -// The type for functions issuing lifetime-related operations: move and dispose -// A pointer to such a function is contained in each `AnyInvocable` instance. 
-// NOTE: When specifying `FunctionToCall::`dispose, the same state must be -// passed as both "from" and "to". -using ManagerType = void(FunctionToCall /*operation*/, - TypeErasedState* /*from*/, TypeErasedState* /*to*/) - ABSL_INTERNAL_NOEXCEPT_SPEC(true); - -// The type for functions issuing the actual invocation of the object -// A pointer to such a function is contained in each AnyInvocable instance. -template -using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType
...) - ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); - -// The manager that is used when AnyInvocable is empty -inline void EmptyManager(FunctionToCall /*operation*/, - TypeErasedState* /*from*/, - TypeErasedState* /*to*/) noexcept {} - -// The manager that is used when a target function is in local storage and is -// a trivially copyable type. -inline void LocalManagerTrivial(FunctionToCall /*operation*/, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - // This single statement without branching handles both possible operations. - // - // For FunctionToCall::dispose, "from" and "to" point to the same state, and - // so this assignment logically would do nothing. - // - // Note: Correctness here relies on http://wg21.link/p0593, which has only - // become standard in C++20, though implementations do not break it in - // practice for earlier versions of C++. - // - // The correct way to do this without that paper is to first placement-new a - // default-constructed T in "to->storage" prior to the memmove, but doing so - // requires a different function to be created for each T that is stored - // locally, which can cause unnecessary bloat and be less cache friendly. - *to = *from; - - // Note: Because the type is trivially copyable, the destructor does not need - // to be called ("trivially copyable" requires a trivial destructor). -} - -// The manager that is used when a target function is in local storage and is -// not a trivially copyable type. -template -void LocalManagerNontrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - static_assert(IsStoredLocally::value, - "Local storage must only be used for supported types."); - static_assert(!std::is_trivially_copyable::value, - "Locally stored types must be trivially copyable."); - - T& from_object = (ObjectInLocalStorage)(from); - - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - ::new (static_cast(&to->storage)) T(std::move(from_object)); - ABSL_FALLTHROUGH_INTENDED; - case FunctionToCall::dispose: - from_object.~T(); // Must not throw. // NOLINT - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The invoker that is used when a target function is in local storage -// Note: QualTRef here is the target function type along with cv and reference -// qualifiers that must be used when calling the function. -template -ReturnType LocalInvoker( - TypeErasedState* const state, - ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { - using RawT = RemoveCVRef; - static_assert( - IsStoredLocally::value, - "Target object must be in local storage in order to be invoked from it."); - - auto& f = (ObjectInLocalStorage)(state); - return (InvokeR)(static_cast(f), - static_cast>(args)...); -} - -// The manager that is used when a target function is in remote storage and it -// has a trivial destructor -inline void RemoteManagerTrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - to->remote = from->remote; - return; - case FunctionToCall::dispose: + } + + // The type for functions issuing lifetime-related operations: move and dispose + // A pointer to such a function is contained in each `AnyInvocable` instance. + // NOTE: When specifying `FunctionToCall::`dispose, the same state must be + // passed as both "from" and "to". + using ManagerType = void(FunctionToCall /*operation*/, TypeErasedState* /*from*/, TypeErasedState* /*to*/) + ABSL_INTERNAL_NOEXCEPT_SPEC(true); + + // The type for functions issuing the actual invocation of the object + // A pointer to such a function is contained in each AnyInvocable instance. + template + using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType
<P>
...) + ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept); + + // The manager that is used when AnyInvocable is empty + inline void EmptyManager(FunctionToCall /*operation*/, TypeErasedState* /*from*/, TypeErasedState* /*to*/) noexcept + { + } + + // The manager that is used when a target function is in local storage and is + // a trivially copyable type. + inline void LocalManagerTrivial(FunctionToCall /*operation*/, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + // This single statement without branching handles both possible operations. + // + // For FunctionToCall::dispose, "from" and "to" point to the same state, and + // so this assignment logically would do nothing. + // + // Note: Correctness here relies on http://wg21.link/p0593, which has only + // become standard in C++20, though implementations do not break it in + // practice for earlier versions of C++. + // + // The correct way to do this without that paper is to first placement-new a + // default-constructed T in "to->storage" prior to the memmove, but doing so + // requires a different function to be created for each T that is stored + // locally, which can cause unnecessary bloat and be less cache friendly. + *to = *from; + + // Note: Because the type is trivially copyable, the destructor does not need + // to be called ("trivially copyable" requires a trivial destructor). + } + + // The manager that is used when a target function is in local storage and is + // not a trivially copyable type. + template + void LocalManagerNontrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + static_assert(IsStoredLocally::value, "Local storage must only be used for supported types."); + static_assert(!std::is_trivially_copyable::value, "Locally stored types must be trivially copyable."); + + T& from_object = (ObjectInLocalStorage)(from); + + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + ::new (static_cast(&to->storage)) T(std::move(from_object)); + ABSL_FALLTHROUGH_INTENDED; + case FunctionToCall::dispose: + from_object.~T(); // Must not throw. // NOLINT + return; + } + ABSL_INTERNAL_UNREACHABLE; + } + + // The invoker that is used when a target function is in local storage + // Note: QualTRef here is the target function type along with cv and reference + // qualifiers that must be used when calling the function. + template + ReturnType LocalInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args + ) noexcept(SigIsNoexcept) + { + using RawT = RemoveCVRef; + static_assert( + IsStoredLocally::value, + "Target object must be in local storage in order to be invoked from it." + ); + + auto& f = (ObjectInLocalStorage)(state); + return (InvokeR)(static_cast(f), static_cast>(args)...); + } + + // The manager that is used when a target function is in remote storage and it + // has a trivial destructor + inline void RemoteManagerTrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote = from->remote; + return; + case FunctionToCall::dispose: #if defined(__cpp_sized_deallocation) - ::operator delete(from->remote.target, from->remote.size); + ::operator delete(from->remote.target, from->remote.size); #else // __cpp_sized_deallocation - ::operator delete(from->remote.target); + ::operator delete(from->remote.target); #endif // __cpp_sized_deallocation - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The manager that is used when a target function is in remote storage and the -// destructor of the type is not trivial -template -void RemoteManagerNontrivial(FunctionToCall operation, - TypeErasedState* const from, - TypeErasedState* const to) noexcept { - static_assert(!IsStoredLocally::value, - "Remote storage must only be used for types that do not " - "qualify for local storage."); - - switch (operation) { - case FunctionToCall::relocate_from_to: - // NOTE: Requires that the left-hand operand is already empty. - to->remote.target = from->remote.target; - return; - case FunctionToCall::dispose: - ::delete static_cast(from->remote.target); // Must not throw. - return; - } - ABSL_INTERNAL_UNREACHABLE; -} - -// The invoker that is used when a target function is in remote storage -template -ReturnType RemoteInvoker( - TypeErasedState* const state, - ForwardedParameterType
<P>
... args) noexcept(SigIsNoexcept) { - using RawT = RemoveCVRef; - static_assert(!IsStoredLocally::value, - "Target object must be in remote storage in order to be " - "invoked from it."); - - auto& f = *static_cast(state->remote.target); - return (InvokeR)(static_cast(f), - static_cast>(args)...); -} - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction that checks if a type T is an instantiation of -// absl::in_place_type_t (needed for constructor constraints of AnyInvocable). -template -struct IsInPlaceType : std::false_type {}; - -template -struct IsInPlaceType> : std::true_type {}; -// -//////////////////////////////////////////////////////////////////////////////// - -// A constructor name-tag used with CoreImpl (below) to request the -// conversion-constructor. QualDecayedTRef is the decayed-type of the object to -// wrap, along with the cv and reference qualifiers that must be applied when -// performing an invocation of the wrapped object. -template -struct TypedConversionConstruct {}; - -// A helper base class for all core operations of AnyInvocable. Most notably, -// this class creates the function call operator and constraint-checkers so that -// the top-level class does not have to be a series of partial specializations. -// -// Note: This definition exists (as opposed to being a declaration) so that if -// the user of the top-level template accidentally passes a template argument -// that is not a function type, they will get a static_assert in AnyInvocable's -// class body rather than an error stating that Impl is not defined. -template -class Impl {}; // Note: This is partially-specialized later. + return; + } + ABSL_INTERNAL_UNREACHABLE; + } + + // The manager that is used when a target function is in remote storage and the + // destructor of the type is not trivial + template + void RemoteManagerNontrivial(FunctionToCall operation, TypeErasedState* const from, TypeErasedState* const to) noexcept + { + static_assert(!IsStoredLocally::value, "Remote storage must only be used for types that do not " + "qualify for local storage."); + + switch (operation) + { + case FunctionToCall::relocate_from_to: + // NOTE: Requires that the left-hand operand is already empty. + to->remote.target = from->remote.target; + return; + case FunctionToCall::dispose: + ::delete static_cast(from->remote.target); // Must not throw. + return; + } + ABSL_INTERNAL_UNREACHABLE; + } + + // The invoker that is used when a target function is in remote storage + template + ReturnType RemoteInvoker( + TypeErasedState* const state, + ForwardedParameterType
<P>
... args + ) noexcept(SigIsNoexcept) + { + using RawT = RemoveCVRef; + static_assert(!IsStoredLocally::value, "Target object must be in remote storage in order to be " + "invoked from it."); + + auto& f = *static_cast(state->remote.target); + return (InvokeR)(static_cast(f), static_cast>(args)...); + } + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that checks if a type T is an instantiation of + // absl::in_place_type_t (needed for constructor constraints of AnyInvocable). + template + struct IsInPlaceType : std::false_type + { + }; + + template + struct IsInPlaceType> : std::true_type + { + }; + // + //////////////////////////////////////////////////////////////////////////////// + + // A constructor name-tag used with CoreImpl (below) to request the + // conversion-constructor. QualDecayedTRef is the decayed-type of the object to + // wrap, along with the cv and reference qualifiers that must be applied when + // performing an invocation of the wrapped object. + template + struct TypedConversionConstruct + { + }; + + // A helper base class for all core operations of AnyInvocable. Most notably, + // this class creates the function call operator and constraint-checkers so that + // the top-level class does not have to be a series of partial specializations. + // + // Note: This definition exists (as opposed to being a declaration) so that if + // the user of the top-level template accidentally passes a template argument + // that is not a function type, they will get a static_assert in AnyInvocable's + // class body rather than an error stating that Impl is not defined. + template + class Impl + { + }; // Note: This is partially-specialized later. // A std::unique_ptr deleter that deletes memory allocated via ::operator new. #if defined(__cpp_sized_deallocation) -class TrivialDeleter { - public: - explicit TrivialDeleter(std::size_t size) : size_(size) {} - - void operator()(void* target) const { - ::operator delete(target, size_); - } - - private: - std::size_t size_; -}; + class TrivialDeleter + { + public: + explicit TrivialDeleter(std::size_t size) : + size_(size) + { + } + + void operator()(void* target) const + { + ::operator delete(target, size_); + } + + private: + std::size_t size_; + }; #else // __cpp_sized_deallocation -class TrivialDeleter { - public: - explicit TrivialDeleter(std::size_t) {} - - void operator()(void* target) const { ::operator delete(target); } -}; + class TrivialDeleter + { + public: + explicit TrivialDeleter(std::size_t) + { + } + + void operator()(void* target) const + { + ::operator delete(target); + } + }; #endif // __cpp_sized_deallocation -template -class CoreImpl; - -constexpr bool IsCompatibleConversion(void*, void*) { return false; } -template -constexpr bool IsCompatibleConversion(CoreImpl*, - CoreImpl*) { - return !NoExceptDest || NoExceptSrc; -} - -// A helper base class for all core operations of AnyInvocable that do not -// depend on the cv/ref qualifiers of the function type. -template -class CoreImpl { - public: - using result_type = ReturnType; - - CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {} - - enum class TargetType : int { - kPointer = 0, - kCompatibleAnyInvocable = 1, - kIncompatibleAnyInvocable = 2, - kOther = 3, - }; - - // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with - // the invocation of the Invocable. The unqualified type is the target object - // type to be stored. 
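// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): how a deleter like
// TrivialDeleter above is typically combined with std::unique_ptr to make
// heap ("remote") construction exception-safe. The names `RawDeleter` and
// `MakeRemote` are invented for illustration; the real initialization logic
// lives in CoreImpl::InitializeRemoteManager further below.
#include <cstddef>
#include <memory>
#include <new>
#include <utility>

class RawDeleter {
 public:
  explicit RawDeleter(std::size_t size) : size_(size) {}

  void operator()(void* target) const {
#if defined(__cpp_sized_deallocation)
    // The sized form lets some allocators skip a size lookup.
    ::operator delete(target, size_);
#else
    ::operator delete(target);
#endif
  }

 private:
  std::size_t size_;
};

template <class T, class... Args>
void* MakeRemote(Args&&... args) {
  // Hold the raw allocation in a unique_ptr so the memory is released if T's
  // constructor throws; ownership is given up only after construction.
  std::unique_ptr<void, RawDeleter> uninitialized(::operator new(sizeof(T)),
                                                  RawDeleter(sizeof(T)));
  ::new (uninitialized.get()) T(std::forward<Args>(args)...);
  return uninitialized.release();
}
// ---------------------------------------------------------------------------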
- template - explicit CoreImpl(TypedConversionConstruct, F&& f) { - using DecayedT = RemoveCVRef; - - constexpr TargetType kTargetType = - (std::is_pointer::value || - std::is_member_pointer::value) - ? TargetType::kPointer - : IsCompatibleAnyInvocable::value - ? TargetType::kCompatibleAnyInvocable - : IsAnyInvocable::value - ? TargetType::kIncompatibleAnyInvocable - : TargetType::kOther; - // NOTE: We only use integers instead of enums as template parameters in - // order to work around a bug on C++14 under MSVC 2017. - // See b/236131881. - Initialize(kTargetType), QualDecayedTRef>( - std::forward(f)); - } - - // Note: QualTRef here includes the cv-ref qualifiers associated with the - // invocation of the Invocable. The unqualified type is the target object - // type to be stored. - template - explicit CoreImpl(absl::in_place_type_t, Args&&... args) { - InitializeStorage(std::forward(args)...); - } - - CoreImpl(CoreImpl&& other) noexcept { - other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); - manager_ = other.manager_; - invoker_ = other.invoker_; - other.manager_ = EmptyManager; - other.invoker_ = nullptr; - } - - CoreImpl& operator=(CoreImpl&& other) noexcept { - // Put the left-hand operand in an empty state. - // - // Note: A full reset that leaves us with an object that has its invariants - // intact is necessary in order to handle self-move. This is required by - // types that are used with certain operations of the standard library, such - // as the default definition of std::swap when both operands target the same - // object. - Clear(); - - // Perform the actual move/destory operation on the target function. - other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); - manager_ = other.manager_; - invoker_ = other.invoker_; - other.manager_ = EmptyManager; - other.invoker_ = nullptr; - - return *this; - } - - ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); } - - // Check whether or not the AnyInvocable is in the empty state. - bool HasValue() const { return invoker_ != nullptr; } - - // Effects: Puts the object into its empty state. - void Clear() { - manager_(FunctionToCall::dispose, &state_, &state_); - manager_ = EmptyManager; - invoker_ = nullptr; - } - - template = 0> - void Initialize(F&& f) { + template + class CoreImpl; + + constexpr bool IsCompatibleConversion(void*, void*) + { + return false; + } + template + constexpr bool IsCompatibleConversion(CoreImpl*, CoreImpl*) + { + return !NoExceptDest || NoExceptSrc; + } + + // A helper base class for all core operations of AnyInvocable that do not + // depend on the cv/ref qualifiers of the function type. + template + class CoreImpl + { + public: + using result_type = ReturnType; + + CoreImpl() noexcept : + manager_(EmptyManager), + invoker_(nullptr) + { + } + + enum class TargetType : int + { + kPointer = 0, + kCompatibleAnyInvocable = 1, + kIncompatibleAnyInvocable = 2, + kOther = 3, + }; + + // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with + // the invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(TypedConversionConstruct, F&& f) + { + using DecayedT = RemoveCVRef; + + constexpr TargetType kTargetType = + (std::is_pointer::value || + std::is_member_pointer::value) ? + TargetType::kPointer : + IsCompatibleAnyInvocable::value ? TargetType::kCompatibleAnyInvocable : + IsAnyInvocable::value ? 
TargetType::kIncompatibleAnyInvocable : + TargetType::kOther; + // NOTE: We only use integers instead of enums as template parameters in + // order to work around a bug on C++14 under MSVC 2017. + // See b/236131881. + Initialize(kTargetType), QualDecayedTRef>( + std::forward(f) + ); + } + + // Note: QualTRef here includes the cv-ref qualifiers associated with the + // invocation of the Invocable. The unqualified type is the target object + // type to be stored. + template + explicit CoreImpl(absl::in_place_type_t, Args&&... args) + { + InitializeStorage(std::forward(args)...); + } + + CoreImpl(CoreImpl&& other) noexcept + { + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + } + + CoreImpl& operator=(CoreImpl&& other) noexcept + { + // Put the left-hand operand in an empty state. + // + // Note: A full reset that leaves us with an object that has its invariants + // intact is necessary in order to handle self-move. This is required by + // types that are used with certain operations of the standard library, such + // as the default definition of std::swap when both operands target the same + // object. + Clear(); + + // Perform the actual move/destory operation on the target function. + other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_); + manager_ = other.manager_; + invoker_ = other.invoker_; + other.manager_ = EmptyManager; + other.invoker_ = nullptr; + + return *this; + } + + ~CoreImpl() + { + manager_(FunctionToCall::dispose, &state_, &state_); + } + + // Check whether or not the AnyInvocable is in the empty state. + bool HasValue() const + { + return invoker_ != nullptr; + } + + // Effects: Puts the object into its empty state. + void Clear() + { + manager_(FunctionToCall::dispose, &state_, &state_); + manager_ = EmptyManager; + invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) + { // This condition handles types that decay into pointers, which includes // function references. Since function references cannot be null, GCC warns // against comparing their decayed form with nullptr. @@ -522,206 +572,196 @@ class CoreImpl { #pragma GCC diagnostic ignored "-Wnonnull-compare" #pragma GCC diagnostic push #endif - if (static_cast>(f) == nullptr) { + if (static_cast>(f) == nullptr) + { #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif - manager_ = EmptyManager; - invoker_ = nullptr; - return; - } - InitializeStorage(std::forward(f)); - } - - template = 0> - void Initialize(F&& f) { - // In this case we can "steal the guts" of the other AnyInvocable. - f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); - manager_ = f.manager_; - invoker_ = f.invoker_; - - f.manager_ = EmptyManager; - f.invoker_ = nullptr; - } - - template = 0> - void Initialize(F&& f) { - if (f.HasValue()) { - InitializeStorage(std::forward(f)); - } else { - manager_ = EmptyManager; - invoker_ = nullptr; - } - } - - template > - void Initialize(F&& f) { - InitializeStorage(std::forward(f)); - } - - // Use local (inline) storage for applicable target object types. - template >::value>> - void InitializeStorage(Args&&... args) { - using RawT = RemoveCVRef; - ::new (static_cast(&state_.storage)) - RawT(std::forward(args)...); - - invoker_ = LocalInvoker; - // We can simplify our manager if we know the type is trivially copyable. 
- InitializeLocalManager(); - } - - // Use remote storage for target objects that cannot be stored locally. - template >::value, - int> = 0> - void InitializeStorage(Args&&... args) { - InitializeRemoteManager>(std::forward(args)...); - // This is set after everything else in case an exception is thrown in an - // earlier step of the initialization. - invoker_ = RemoteInvoker; - } - - template ::value>> - void InitializeLocalManager() { - manager_ = LocalManagerTrivial; - } - - template ::value, int> = 0> - void InitializeLocalManager() { - manager_ = LocalManagerNontrivial; - } - - template - using HasTrivialRemoteStorage = - std::integral_constant::value && - alignof(T) <= - ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; - - template ::value>> - void InitializeRemoteManager(Args&&... args) { - // unique_ptr is used for exception-safety in case construction throws. - std::unique_ptr uninitialized_target( - ::operator new(sizeof(T)), TrivialDeleter(sizeof(T))); - ::new (uninitialized_target.get()) T(std::forward(args)...); - state_.remote.target = uninitialized_target.release(); - state_.remote.size = sizeof(T); - manager_ = RemoteManagerTrivial; - } - - template ::value, int> = 0> - void InitializeRemoteManager(Args&&... args) { - state_.remote.target = ::new T(std::forward(args)...); - manager_ = RemoteManagerNontrivial; - } - - ////////////////////////////////////////////////////////////////////////////// - // - // Type trait to determine if the template argument is an AnyInvocable whose - // function type is compatible enough with ours such that we can - // "move the guts" out of it when moving, rather than having to place a new - // object into remote storage. - - template - struct IsCompatibleAnyInvocable { - static constexpr bool value = false; - }; - - template - struct IsCompatibleAnyInvocable> { - static constexpr bool value = - (IsCompatibleConversion)(static_cast< - typename AnyInvocable::CoreImpl*>( - nullptr), - static_cast(nullptr)); - }; - - // - ////////////////////////////////////////////////////////////////////////////// - - TypeErasedState state_; - ManagerType* manager_; - InvokerType* invoker_; -}; - -// A constructor name-tag used with Impl to request the -// conversion-constructor -struct ConversionConstruct {}; - -//////////////////////////////////////////////////////////////////////////////// -// -// A metafunction that is normally an identity metafunction except that when -// given a std::reference_wrapper, it yields T&. This is necessary because -// currently std::reference_wrapper's operator() is not conditionally noexcept, -// so when checking if such an Invocable is nothrow-invocable, we must pull out -// the underlying type. -template -struct UnwrapStdReferenceWrapperImpl { - using type = T; -}; - -template -struct UnwrapStdReferenceWrapperImpl> { - using type = T&; -}; - -template -using UnwrapStdReferenceWrapper = - typename UnwrapStdReferenceWrapperImpl::type; -// -//////////////////////////////////////////////////////////////////////////////// - -// An alias that always yields std::true_type (used with constraints) where -// substitution failures happen when forming the template arguments. 
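// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): a heap-only reduction of the
// state_/manager_/invoker_ trio listed above, showing how one function
// pointer drives relocation and disposal while the other performs the call.
// All names (`TinyInvocable`, `Op`, ...) are invented; the real class also
// supports local (inline) storage, cv/ref/noexcept qualifiers, and the SFINAE
// constraints defined nearby.
#include <utility>

template <class Sig>
class TinyInvocable;  // primary template intentionally left undefined

template <class R, class... P>
class TinyInvocable<R(P...)> {
  enum class Op { kRelocate, kDispose };

  using Manager = void(Op, TinyInvocable&, TinyInvocable&);
  using Invoker = R(void*, P&&...);

  static void EmptyManager(Op, TinyInvocable&, TinyInvocable&) {}

  template <class F>
  static void RemoteManager(Op op, TinyInvocable& from, TinyInvocable& to) {
    switch (op) {
      case Op::kRelocate:
        // Steal the heap pointer; `to` is required to be empty already.
        to.target_ = from.target_;
        return;
      case Op::kDispose:
        delete static_cast<F*>(from.target_);
        return;
    }
  }

  template <class F>
  static R RemoteInvoker(void* target, P&&... args) {
    return static_cast<R>(
        (*static_cast<F*>(target))(std::forward<P>(args)...));
  }

  void* target_ = nullptr;
  Manager* manager_ = &EmptyManager;
  Invoker* invoker_ = nullptr;

 public:
  TinyInvocable() = default;

  template <class F>
  TinyInvocable(F f)
      : target_(new F(std::move(f))),
        manager_(&RemoteManager<F>),
        invoker_(&RemoteInvoker<F>) {}

  TinyInvocable(TinyInvocable&& other) noexcept {
    // Ask the source's manager to relocate its target into *this, then take
    // over its manager/invoker pointers and leave it empty.
    other.manager_(Op::kRelocate, other, *this);
    manager_ = other.manager_;
    invoker_ = other.invoker_;
    other.manager_ = &EmptyManager;
    other.invoker_ = nullptr;
  }

  ~TinyInvocable() { manager_(Op::kDispose, *this, *this); }

  explicit operator bool() const { return invoker_ != nullptr; }

  R operator()(P... args) {
    return invoker_(target_, std::forward<P>(args)...);
  }
};

// Usage sketch:
//   TinyInvocable<int(int)> f = [](int x) { return x + 1; };
//   int forty_two = f(41);
// ---------------------------------------------------------------------------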
-template -using True = - std::integral_constant*) != 0>; - -/*SFINAE constraints for the conversion-constructor.*/ -template , AnyInvocable>::value>> -using CanConvert = - True>::value>, - absl::enable_if_t::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, F>::value>>; - -/*SFINAE constraints for the std::in_place constructors.*/ -template -using CanEmplace = True< - absl::enable_if_t::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, Args...>::value>>; - -/*SFINAE constraints for the conversion-assign operator.*/ -template , AnyInvocable>::value>> -using CanAssign = - True::template CallIsValid::value>, - absl::enable_if_t< - Impl::template CallIsNoexceptIfSigIsNoexcept::value>, - absl::enable_if_t, F>::value>>; - -/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ -template -using CanAssignReferenceWrapper = - True::template CallIsValid>::value>, - absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept< - std::reference_wrapper>::value>>; + manager_ = EmptyManager; + invoker_ = nullptr; + return; + } + InitializeStorage(std::forward(f)); + } + + template = 0> + void Initialize(F&& f) + { + // In this case we can "steal the guts" of the other AnyInvocable. + f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_); + manager_ = f.manager_; + invoker_ = f.invoker_; + + f.manager_ = EmptyManager; + f.invoker_ = nullptr; + } + + template = 0> + void Initialize(F&& f) + { + if (f.HasValue()) + { + InitializeStorage(std::forward(f)); + } + else + { + manager_ = EmptyManager; + invoker_ = nullptr; + } + } + + template> + void Initialize(F&& f) + { + InitializeStorage(std::forward(f)); + } + + // Use local (inline) storage for applicable target object types. + template>::value>> + void InitializeStorage(Args&&... args) + { + using RawT = RemoveCVRef; + ::new (static_cast(&state_.storage)) + RawT(std::forward(args)...); + + invoker_ = LocalInvoker; + // We can simplify our manager if we know the type is trivially copyable. + InitializeLocalManager(); + } + + // Use remote storage for target objects that cannot be stored locally. + template>::value, int> = 0> + void InitializeStorage(Args&&... args) + { + InitializeRemoteManager>(std::forward(args)...); + // This is set after everything else in case an exception is thrown in an + // earlier step of the initialization. + invoker_ = RemoteInvoker; + } + + template::value>> + void InitializeLocalManager() + { + manager_ = LocalManagerTrivial; + } + + template::value, int> = 0> + void InitializeLocalManager() + { + manager_ = LocalManagerNontrivial; + } + + template + using HasTrivialRemoteStorage = + std::integral_constant::value && alignof(T) <= ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>; + + template::value>> + void InitializeRemoteManager(Args&&... args) + { + // unique_ptr is used for exception-safety in case construction throws. + std::unique_ptr uninitialized_target( + ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)) + ); + ::new (uninitialized_target.get()) T(std::forward(args)...); + state_.remote.target = uninitialized_target.release(); + state_.remote.size = sizeof(T); + manager_ = RemoteManagerTrivial; + } + + template::value, int> = 0> + void InitializeRemoteManager(Args&&... 
args) + { + state_.remote.target = ::new T(std::forward(args)...); + manager_ = RemoteManagerNontrivial; + } + + ////////////////////////////////////////////////////////////////////////////// + // + // Type trait to determine if the template argument is an AnyInvocable whose + // function type is compatible enough with ours such that we can + // "move the guts" out of it when moving, rather than having to place a new + // object into remote storage. + + template + struct IsCompatibleAnyInvocable + { + static constexpr bool value = false; + }; + + template + struct IsCompatibleAnyInvocable> + { + static constexpr bool value = + (IsCompatibleConversion)(static_cast::CoreImpl*>(nullptr), static_cast(nullptr)); + }; + + // + ////////////////////////////////////////////////////////////////////////////// + + TypeErasedState state_; + ManagerType* manager_; + InvokerType* invoker_; + }; + + // A constructor name-tag used with Impl to request the + // conversion-constructor + struct ConversionConstruct + { + }; + + //////////////////////////////////////////////////////////////////////////////// + // + // A metafunction that is normally an identity metafunction except that when + // given a std::reference_wrapper, it yields T&. This is necessary because + // currently std::reference_wrapper's operator() is not conditionally noexcept, + // so when checking if such an Invocable is nothrow-invocable, we must pull out + // the underlying type. + template + struct UnwrapStdReferenceWrapperImpl + { + using type = T; + }; + + template + struct UnwrapStdReferenceWrapperImpl> + { + using type = T&; + }; + + template + using UnwrapStdReferenceWrapper = + typename UnwrapStdReferenceWrapperImpl::type; + // + //////////////////////////////////////////////////////////////////////////////// + + // An alias that always yields std::true_type (used with constraints) where + // substitution failures happen when forming the template arguments. + template + using True = + std::integral_constant*) != 0>; + + /*SFINAE constraints for the conversion-constructor.*/ + template, AnyInvocable>::value>> + using CanConvert = + True>::value>, absl::enable_if_t::template CallIsValid::value>, absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept::value>, absl::enable_if_t, F>::value>>; + + /*SFINAE constraints for the std::in_place constructors.*/ + template + using CanEmplace = True< + absl::enable_if_t::template CallIsValid::value>, + absl::enable_if_t< + Impl::template CallIsNoexceptIfSigIsNoexcept::value>, + absl::enable_if_t, Args...>::value>>; + + /*SFINAE constraints for the conversion-assign operator.*/ + template, AnyInvocable>::value>> + using CanAssign = + True::template CallIsValid::value>, absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept::value>, absl::enable_if_t, F>::value>>; + + /*SFINAE constraints for the reference-wrapper conversion-assign operator.*/ + template + using CanAssignReferenceWrapper = + True::template CallIsValid>::value>, absl::enable_if_t::template CallIsNoexceptIfSigIsNoexcept>::value>>; //////////////////////////////////////////////////////////////////////////////// // @@ -732,25 +772,27 @@ using CanAssignReferenceWrapper = // cases where the user did not specify a noexcept function type. 
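// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this header): the constraint-collapsing
// idiom behind `True`/`CanConvert` above, reduced to a toy. The names
// `AllChecks` and `OnlyForIntegral` are invented. Each enable_if_t argument
// either yields `void` (check passed) or causes a substitution failure, so an
// arbitrary number of checks can be folded into one defaulted template
// parameter.
#include <type_traits>

template <class... /*Checks*/>
using AllChecks = std::true_type;

template <class T,
          class = AllChecks<std::enable_if_t<std::is_integral<T>::value>,
                            std::enable_if_t<!std::is_same<T, bool>::value>>>
int OnlyForIntegral(T value) {
  return static_cast<int>(value);
}

// OnlyForIntegral(42) compiles; OnlyForIntegral(3.5) and OnlyForIntegral(true)
// are rejected during template argument substitution (SFINAE).
// ---------------------------------------------------------------------------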
// #define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \ - ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) + ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals) // The disjunction below is because we can't rely on std::is_nothrow_invocable_r // to give the right result when ReturnType is non-moveable in toolchains that // don't treat non-moveable result types correctly. For example this was the // case in libc++ before commit c3a24882 (2022-05). -#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ - absl::enable_if_t> inv_quals, \ - P...>, \ - std::conjunction< \ - std::is_nothrow_invocable< \ - UnwrapStdReferenceWrapper> inv_quals, P...>, \ - std::is_same< \ - ReturnType, \ - absl::base_internal::invoke_result_t< \ - UnwrapStdReferenceWrapper> inv_quals, \ - P...>>>>::value> +#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \ + absl::enable_if_t> inv_quals, \ + P...>, \ + std::conjunction< \ + std::is_nothrow_invocable< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>, \ + std::is_same< \ + ReturnType, \ + absl::base_internal::invoke_result_t< \ + UnwrapStdReferenceWrapper> inv_quals, \ + P...>>>>::value> #define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals) // @@ -768,79 +810,74 @@ using CanAssignReferenceWrapper = // right result when ReturnType is non-moveable in toolchains that don't treat // non-moveable result types correctly. For example this was the case in libc++ // before commit c3a24882 (2022-05). -#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ - template \ - class Impl \ - : public CoreImpl { \ - public: \ - /*The base class, which contains the datamembers and core operations*/ \ - using Core = CoreImpl; \ - \ - /*SFINAE constraint to check if F is invocable with the proper signature*/ \ - template \ - using CallIsValid = True inv_quals, P...>, \ - std::is_same inv_quals, P...>>>::value>>; \ - \ - /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ - template \ - using CallIsNoexceptIfSigIsNoexcept = \ - True; \ - \ - /*Put the AnyInvocable into an empty state.*/ \ - Impl() = default; \ - \ - /*The implementation of a conversion-constructor from "f*/ \ - /*This forwards to Core, attaching inv_quals so that the base class*/ \ - /*knows how to properly type-erase the invocation.*/ \ - template \ - explicit Impl(ConversionConstruct, F&& f) \ - : Core(TypedConversionConstruct< \ - typename std::decay::type inv_quals>(), \ - std::forward(f)) {} \ - \ - /*Forward along the in-place construction parameters.*/ \ - template \ - explicit Impl(absl::in_place_type_t, Args&&... args) \ - : Core(absl::in_place_type inv_quals>, \ - std::forward(args)...) {} \ - \ - /*The actual invocation operation with the proper signature*/ \ - ReturnType operator()(P... 
args) cv ref noexcept(noex) { \ - assert(this->invoker_ != nullptr); \ - return this->invoker_(const_cast(&this->state_), \ - static_cast>(args)...); \ - } \ - } +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \ + template \ + class Impl : public CoreImpl \ + { \ + public: \ + /*The base class, which contains the datamembers and core operations*/ \ + using Core = CoreImpl; \ + \ + /*SFINAE constraint to check if F is invocable with the proper signature*/ \ + template \ + using CallIsValid = True inv_quals, P...>, \ + std::is_same inv_quals, P...>>>::value>>; \ + \ + /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \ + template \ + using CallIsNoexceptIfSigIsNoexcept = \ + True; \ + \ + /*Put the AnyInvocable into an empty state.*/ \ + Impl() = default; \ + \ + /*The implementation of a conversion-constructor from "f*/ \ + /*This forwards to Core, attaching inv_quals so that the base class*/ \ + /*knows how to properly type-erase the invocation.*/ \ + template \ + explicit Impl(ConversionConstruct, F&& f) : Core(TypedConversionConstruct::type inv_quals>(), std::forward(f)) \ + { \ + } \ + \ + /*Forward along the in-place construction parameters.*/ \ + template \ + explicit Impl(absl::in_place_type_t, Args&&... args) : Core(absl::in_place_type inv_quals>, std::forward(args)...) \ + { \ + } \ + \ + /*The actual invocation operation with the proper signature*/ \ + ReturnType operator()(P... args) cv ref noexcept(noex) \ + { \ + assert(this->invoker_ != nullptr); \ + return this->invoker_(const_cast(&this->state_), static_cast>(args)...); \ + } \ + } // Define the `noexcept(true)` specialization only for C++17 and beyond, when // `noexcept` is part of the type system. #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L // A convenience macro that defines specializations for the noexcept(true) and // noexcept(false) forms, given the other properties. -#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) +#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \ + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true) #else #define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \ - ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) + ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false) #endif -// Non-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); + // Non-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&); -// Lvalue-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); + // Lvalue-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&); -// Rvalue-ref-qualified partial specializations -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); -ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); + // Rvalue-ref-qualified partial specializations + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&); + ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); // Undef the detail-only macros. 
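// ---------------------------------------------------------------------------
// Illustrative aside (not from this header): the noexcept(true)
// specializations above are generated only for C++17 and later because only
// then is `noexcept` part of the function type itself. Under C++17:
//
//   #include <type_traits>
//   static_assert(!std::is_same<void(), void() noexcept>::value,
//                 "R(Args...) and R(Args...) noexcept are distinct types and "
//                 "therefore need their own partial specializations");
// ---------------------------------------------------------------------------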
#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL @@ -850,8 +887,8 @@ ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&); #undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT #undef ABSL_INTERNAL_NOEXCEPT_SPEC -} // namespace internal_any_invocable -ABSL_NAMESPACE_END + } // namespace internal_any_invocable + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h index 45f52de..78e352f 100644 --- a/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h +++ b/CAPI/cpp/grpc/include/absl/functional/internal/front_binder.h @@ -26,70 +26,71 @@ #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace functional_internal { - -// Invoke the method, expanding the tuple of bound arguments. -template -R Apply(Tuple&& bound, absl::index_sequence, Args&&... free) { - return base_internal::invoke( - absl::forward(bound).template get()..., - absl::forward(free)...); -} - -template -class FrontBinder { - using BoundArgsT = absl::container_internal::CompressedTuple; - using Idx = absl::make_index_sequence; - - BoundArgsT bound_args_; - - public: - template - constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) - : bound_args_(absl::forward(ts)...) {} - - template > - R operator()(FreeArgs&&... free_args) & { - return functional_internal::Apply(bound_args_, Idx(), - absl::forward(free_args)...); - } - - template > - R operator()(FreeArgs&&... free_args) const& { - return functional_internal::Apply(bound_args_, Idx(), - absl::forward(free_args)...); - } - - template > - R operator()(FreeArgs&&... free_args) && { - // This overload is called when *this is an rvalue. If some of the bound - // arguments are stored by value or rvalue reference, we move them. - return functional_internal::Apply(absl::move(bound_args_), Idx(), - absl::forward(free_args)...); - } - - template > - R operator()(FreeArgs&&... free_args) const&& { - // This overload is called when *this is an rvalue. If some of the bound - // arguments are stored by value or rvalue reference, we move them. - return functional_internal::Apply(absl::move(bound_args_), Idx(), - absl::forward(free_args)...); - } -}; - -template -using bind_front_t = FrontBinder, absl::decay_t...>; - -} // namespace functional_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace functional_internal + { + + // Invoke the method, expanding the tuple of bound arguments. + template + R Apply(Tuple&& bound, absl::index_sequence, Args&&... free) + { + return base_internal::invoke( + absl::forward(bound).template get()..., + absl::forward(free)... + ); + } + + template + class FrontBinder + { + using BoundArgsT = absl::container_internal::CompressedTuple; + using Idx = absl::make_index_sequence; + + BoundArgsT bound_args_; + + public: + template + constexpr explicit FrontBinder(absl::in_place_t, Ts&&... ts) : + bound_args_(absl::forward(ts)...) + { + } + + template> + R operator()(FreeArgs&&... free_args) & + { + return functional_internal::Apply(bound_args_, Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) const& + { + return functional_internal::Apply(bound_args_, Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) && + { + // This overload is called when *this is an rvalue. 
If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), absl::forward(free_args)...); + } + + template> + R operator()(FreeArgs&&... free_args) const&& + { + // This overload is called when *this is an rvalue. If some of the bound + // arguments are stored by value or rvalue reference, we move them. + return functional_internal::Apply(absl::move(bound_args_), Idx(), absl::forward(free_args)...); + } + }; + + template + using bind_front_t = FrontBinder, absl::decay_t...>; + + } // namespace functional_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_INTERNAL_FRONT_BINDER_H_ diff --git a/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h index b5bb8b4..a3a5a31 100644 --- a/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h +++ b/CAPI/cpp/grpc/include/absl/functional/internal/function_ref.h @@ -22,85 +22,99 @@ #include "absl/base/internal/invoke.h" #include "absl/meta/type_traits.h" -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace functional_internal { - -// Like a void* that can handle function pointers as well. The standard does not -// allow function pointers to round-trip through void*, but void(*)() is fine. -// -// Note: It's important that this class remains trivial and is the same size as -// a pointer, since this allows the compiler to perform tail-call optimizations -// when the underlying function is a callable object with a matching signature. -union VoidPtr { - const void* obj; - void (*fun)(); -}; - -// Chooses the best type for passing T as an argument. -// Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are -// passed by value. -template -constexpr bool PassByValue() { - return !std::is_lvalue_reference::value && - absl::is_trivially_copy_constructible::value && - absl::is_trivially_copy_assignable< - typename std::remove_cv::type>::value && - std::is_trivially_destructible::value && - sizeof(T) <= 2 * sizeof(void*); -} - -template -struct ForwardT : std::conditional(), T, T&&> {}; - -// An Invoker takes a pointer to the type-erased invokable object, followed by -// the arguments that the invokable object expects. -// -// Note: The order of arguments here is an optimization, since member functions -// have an implicit "this" pointer as their first argument, putting VoidPtr -// first allows the compiler to perform tail-call optimization in many cases. -template -using Invoker = R (*)(VoidPtr, typename ForwardT::type...); - -// -// InvokeObject and InvokeFunction provide static "Invoke" functions that can be -// used as Invokers for objects or functions respectively. -// -// static_cast handles the case the return type is void. -template -R InvokeObject(VoidPtr ptr, typename ForwardT::type... args) { - auto o = static_cast(ptr.obj); - return static_cast( - absl::base_internal::invoke(*o, std::forward(args)...)); -} - -template -R InvokeFunction(VoidPtr ptr, typename ForwardT::type... 
args) { - auto f = reinterpret_cast(ptr.fun); - return static_cast( - absl::base_internal::invoke(f, std::forward(args)...)); -} - -template -void AssertNonNull(const std::function& f) { - assert(f != nullptr); - (void)f; -} - -template -void AssertNonNull(const F&) {} - -template -void AssertNonNull(F C::*f) { - assert(f != nullptr); - (void)f; -} - -template -using EnableIf = typename ::std::enable_if::type; - -} // namespace functional_internal -ABSL_NAMESPACE_END +namespace absl +{ + ABSL_NAMESPACE_BEGIN + namespace functional_internal + { + + // Like a void* that can handle function pointers as well. The standard does not + // allow function pointers to round-trip through void*, but void(*)() is fine. + // + // Note: It's important that this class remains trivial and is the same size as + // a pointer, since this allows the compiler to perform tail-call optimizations + // when the underlying function is a callable object with a matching signature. + union VoidPtr + { + const void* obj; + void (*fun)(); + }; + + // Chooses the best type for passing T as an argument. + // Attempt to be close to SystemV AMD64 ABI. Objects with trivial copy ctor are + // passed by value. + template + constexpr bool PassByValue() + { + return !std::is_lvalue_reference::value && + absl::is_trivially_copy_constructible::value && + absl::is_trivially_copy_assignable< + typename std::remove_cv::type>::value && + std::is_trivially_destructible::value && + sizeof(T) <= 2 * sizeof(void*); + } + + template + struct ForwardT : std::conditional(), T, T&&> + { + }; + + // An Invoker takes a pointer to the type-erased invokable object, followed by + // the arguments that the invokable object expects. + // + // Note: The order of arguments here is an optimization, since member functions + // have an implicit "this" pointer as their first argument, putting VoidPtr + // first allows the compiler to perform tail-call optimization in many cases. + template + using Invoker = R (*)(VoidPtr, typename ForwardT::type...); + + // + // InvokeObject and InvokeFunction provide static "Invoke" functions that can be + // used as Invokers for objects or functions respectively. + // + // static_cast handles the case the return type is void. + template + R InvokeObject(VoidPtr ptr, typename ForwardT::type... args) + { + auto o = static_cast(ptr.obj); + return static_cast( + absl::base_internal::invoke(*o, std::forward(args)...) + ); + } + + template + R InvokeFunction(VoidPtr ptr, typename ForwardT::type... args) + { + auto f = reinterpret_cast(ptr.fun); + return static_cast( + absl::base_internal::invoke(f, std::forward(args)...) 
+ ); + } + + template + void AssertNonNull(const std::function& f) + { + assert(f != nullptr); + (void)f; + } + + template + void AssertNonNull(const F&) + { + } + + template + void AssertNonNull(F C::*f) + { + assert(f != nullptr); + (void)f; + } + + template + using EnableIf = typename ::std::enable_if::type; + + } // namespace functional_internal + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_FUNCTIONAL_INTERNAL_FUNCTION_REF_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash.h b/CAPI/cpp/grpc/include/absl/hash/hash.h index 74e2d7c..5e462c8 100644 --- a/CAPI/cpp/grpc/include/absl/hash/hash.h +++ b/CAPI/cpp/grpc/include/absl/hash/hash.h @@ -84,338 +84,353 @@ #include "absl/functional/function_ref.h" #include "absl/hash/internal/hash.h" -namespace absl { -ABSL_NAMESPACE_BEGIN +namespace absl +{ + ABSL_NAMESPACE_BEGIN -// ----------------------------------------------------------------------------- -// `absl::Hash` -// ----------------------------------------------------------------------------- -// -// `absl::Hash` is a convenient general-purpose hash functor for any type `T` -// satisfying any of the following conditions (in order): -// -// * T is an arithmetic or pointer type -// * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary -// hash state `H`. -// - T defines a specialization of `std::hash` -// -// `absl::Hash` intrinsically supports the following types: -// -// * All integral types (including bool) -// * All enum types -// * All floating-point types (although hashing them is discouraged) -// * All pointer types, including nullptr_t -// * std::pair, if T1 and T2 are hashable -// * std::tuple, if all the Ts... are hashable -// * std::unique_ptr and std::shared_ptr -// * All string-like types including: -// * absl::Cord -// * std::string -// * std::string_view (as well as any instance of std::basic_string that -// uses char and std::char_traits) -// * All the standard sequence containers (provided the elements are hashable) -// * All the standard associative containers (provided the elements are -// hashable) -// * absl types such as the following: -// * absl::string_view -// * absl::uint128 -// * absl::Time, absl::Duration, and absl::TimeZone -// * absl containers (provided the elements are hashable) such as the -// following: -// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set -// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map -// * absl::btree_multiset, absl::btree_multimap -// * absl::InlinedVector -// * absl::FixedArray -// -// When absl::Hash is used to hash an unordered container with a custom hash -// functor, the elements are hashed using default absl::Hash semantics, not -// the custom hash functor. This is consistent with the behavior of -// operator==() on unordered containers, which compares elements pairwise with -// operator==() rather than the custom equality functor. It is usually a -// mistake to use either operator==() or absl::Hash on unordered collections -// that use functors incompatible with operator==() equality. -// -// Note: the list above is not meant to be exhaustive. Additional type support -// may be added, in which case the above list will be updated. 
-// -// ----------------------------------------------------------------------------- -// absl::Hash Invocation Evaluation -// ----------------------------------------------------------------------------- -// -// When invoked, `absl::Hash` searches for supplied hash functions in the -// following order: -// -// * Natively supported types out of the box (see above) -// * Types for which an `AbslHashValue()` overload is provided (such as -// user-defined types). See "Adding Type Support to `absl::Hash`" below. -// * Types which define a `std::hash` specialization -// -// The fallback to legacy hash functions exists mainly for backwards -// compatibility. If you have a choice, prefer defining an `AbslHashValue` -// overload instead of specializing any legacy hash functors. -// -// ----------------------------------------------------------------------------- -// The Hash State Concept, and using `HashState` for Type Erasure -// ----------------------------------------------------------------------------- -// -// The `absl::Hash` framework relies on the Concept of a "hash state." Such a -// hash state is used in several places: -// -// * Within existing implementations of `absl::Hash` to store the hashed -// state of an object. Note that it is up to the implementation how it stores -// such state. A hash table, for example, may mix the state to produce an -// integer value; a testing framework may simply hold a vector of that state. -// * Within implementations of `AbslHashValue()` used to extend user-defined -// types. (See "Adding Type Support to absl::Hash" below.) -// * Inside a `HashState`, providing type erasure for the concept of a hash -// state, which you can use to extend the `absl::Hash` framework for types -// that are otherwise difficult to extend using `AbslHashValue()`. (See the -// `HashState` class below.) -// -// The "hash state" concept contains three member functions for mixing hash -// state: -// -// * `H::combine(state, values...)` -// -// Combines an arbitrary number of values into a hash state, returning the -// updated state. Note that the existing hash state is move-only and must be -// passed by value. -// -// Each of the value types T must be hashable by H. -// -// NOTE: -// -// state = H::combine(std::move(state), value1, value2, value3); -// -// must be guaranteed to produce the same hash expansion as -// -// state = H::combine(std::move(state), value1); -// state = H::combine(std::move(state), value2); -// state = H::combine(std::move(state), value3); -// -// * `H::combine_contiguous(state, data, size)` -// -// Combines a contiguous array of `size` elements into a hash state, -// returning the updated state. Note that the existing hash state is -// move-only and must be passed by value. -// -// NOTE: -// -// state = H::combine_contiguous(std::move(state), data, size); -// -// need NOT be guaranteed to produce the same hash expansion as a loop -// (it may perform internal optimizations). If you need this guarantee, use a -// loop instead. -// -// * `H::combine_unordered(state, begin, end)` -// -// Combines a set of elements denoted by an iterator pair into a hash -// state, returning the updated state. Note that the existing hash -// state is move-only and must be passed by value. -// -// Unlike the other two methods, the hashing is order-independent. -// This can be used to hash unordered collections. 
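// ---------------------------------------------------------------------------
// Illustrative usage (not part of this header): an `AbslHashValue` overload
// that mixes a length with a contiguous buffer using the hash-state
// operations described above. The type `ByteBuffer` is invented for
// illustration.
//
//   #include <cstddef>
//   #include <utility>
//   #include <vector>
//
//   struct ByteBuffer {
//     std::vector<unsigned char> bytes;
//
//     template <typename H>
//     friend H AbslHashValue(H state, const ByteBuffer& b) {
//       // Mix the size first, then combine the payload in a single call.
//       state = H::combine(std::move(state), b.bytes.size());
//       return H::combine_contiguous(std::move(state), b.bytes.data(),
//                                    b.bytes.size());
//     }
//   };
// ---------------------------------------------------------------------------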
-// -// ----------------------------------------------------------------------------- -// Adding Type Support to `absl::Hash` -// ----------------------------------------------------------------------------- -// -// To add support for your user-defined type, add a proper `AbslHashValue()` -// overload as a free (non-member) function. The overload will take an -// existing hash state and should combine that state with state from the type. -// -// Example: -// -// template -// H AbslHashValue(H state, const MyType& v) { -// return H::combine(std::move(state), v.field1, ..., v.fieldN); -// } -// -// where `(field1, ..., fieldN)` are the members you would use on your -// `operator==` to define equality. -// -// Notice that `AbslHashValue` is not a class member, but an ordinary function. -// An `AbslHashValue` overload for a type should only be declared in the same -// file and namespace as said type. The proper `AbslHashValue` implementation -// for a given type will be discovered via ADL. -// -// Note: unlike `std::hash', `absl::Hash` should never be specialized. It must -// only be extended by adding `AbslHashValue()` overloads. -// -template -using Hash = absl::hash_internal::Hash; + // ----------------------------------------------------------------------------- + // `absl::Hash` + // ----------------------------------------------------------------------------- + // + // `absl::Hash` is a convenient general-purpose hash functor for any type `T` + // satisfying any of the following conditions (in order): + // + // * T is an arithmetic or pointer type + // * T defines an overload for `AbslHashValue(H, const T&)` for an arbitrary + // hash state `H`. + // - T defines a specialization of `std::hash` + // + // `absl::Hash` intrinsically supports the following types: + // + // * All integral types (including bool) + // * All enum types + // * All floating-point types (although hashing them is discouraged) + // * All pointer types, including nullptr_t + // * std::pair, if T1 and T2 are hashable + // * std::tuple, if all the Ts... are hashable + // * std::unique_ptr and std::shared_ptr + // * All string-like types including: + // * absl::Cord + // * std::string + // * std::string_view (as well as any instance of std::basic_string that + // uses char and std::char_traits) + // * All the standard sequence containers (provided the elements are hashable) + // * All the standard associative containers (provided the elements are + // hashable) + // * absl types such as the following: + // * absl::string_view + // * absl::uint128 + // * absl::Time, absl::Duration, and absl::TimeZone + // * absl containers (provided the elements are hashable) such as the + // following: + // * absl::flat_hash_set, absl::node_hash_set, absl::btree_set + // * absl::flat_hash_map, absl::node_hash_map, absl::btree_map + // * absl::btree_multiset, absl::btree_multimap + // * absl::InlinedVector + // * absl::FixedArray + // + // When absl::Hash is used to hash an unordered container with a custom hash + // functor, the elements are hashed using default absl::Hash semantics, not + // the custom hash functor. This is consistent with the behavior of + // operator==() on unordered containers, which compares elements pairwise with + // operator==() rather than the custom equality functor. It is usually a + // mistake to use either operator==() or absl::Hash on unordered collections + // that use functors incompatible with operator==() equality. + // + // Note: the list above is not meant to be exhaustive. 
Additional type support + // may be added, in which case the above list will be updated. + // + // ----------------------------------------------------------------------------- + // absl::Hash Invocation Evaluation + // ----------------------------------------------------------------------------- + // + // When invoked, `absl::Hash` searches for supplied hash functions in the + // following order: + // + // * Natively supported types out of the box (see above) + // * Types for which an `AbslHashValue()` overload is provided (such as + // user-defined types). See "Adding Type Support to `absl::Hash`" below. + // * Types which define a `std::hash` specialization + // + // The fallback to legacy hash functions exists mainly for backwards + // compatibility. If you have a choice, prefer defining an `AbslHashValue` + // overload instead of specializing any legacy hash functors. + // + // ----------------------------------------------------------------------------- + // The Hash State Concept, and using `HashState` for Type Erasure + // ----------------------------------------------------------------------------- + // + // The `absl::Hash` framework relies on the Concept of a "hash state." Such a + // hash state is used in several places: + // + // * Within existing implementations of `absl::Hash` to store the hashed + // state of an object. Note that it is up to the implementation how it stores + // such state. A hash table, for example, may mix the state to produce an + // integer value; a testing framework may simply hold a vector of that state. + // * Within implementations of `AbslHashValue()` used to extend user-defined + // types. (See "Adding Type Support to absl::Hash" below.) + // * Inside a `HashState`, providing type erasure for the concept of a hash + // state, which you can use to extend the `absl::Hash` framework for types + // that are otherwise difficult to extend using `AbslHashValue()`. (See the + // `HashState` class below.) + // + // The "hash state" concept contains three member functions for mixing hash + // state: + // + // * `H::combine(state, values...)` + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. Note that the existing hash state is move-only and must be + // passed by value. + // + // Each of the value types T must be hashable by H. + // + // NOTE: + // + // state = H::combine(std::move(state), value1, value2, value3); + // + // must be guaranteed to produce the same hash expansion as + // + // state = H::combine(std::move(state), value1); + // state = H::combine(std::move(state), value2); + // state = H::combine(std::move(state), value3); + // + // * `H::combine_contiguous(state, data, size)` + // + // Combines a contiguous array of `size` elements into a hash state, + // returning the updated state. Note that the existing hash state is + // move-only and must be passed by value. + // + // NOTE: + // + // state = H::combine_contiguous(std::move(state), data, size); + // + // need NOT be guaranteed to produce the same hash expansion as a loop + // (it may perform internal optimizations). If you need this guarantee, use a + // loop instead. + // + // * `H::combine_unordered(state, begin, end)` + // + // Combines a set of elements denoted by an iterator pair into a hash + // state, returning the updated state. Note that the existing hash + // state is move-only and must be passed by value. + // + // Unlike the other two methods, the hashing is order-independent. 
+ // This can be used to hash unordered collections. + // + // ----------------------------------------------------------------------------- + // Adding Type Support to `absl::Hash` + // ----------------------------------------------------------------------------- + // + // To add support for your user-defined type, add a proper `AbslHashValue()` + // overload as a free (non-member) function. The overload will take an + // existing hash state and should combine that state with state from the type. + // + // Example: + // + // template + // H AbslHashValue(H state, const MyType& v) { + // return H::combine(std::move(state), v.field1, ..., v.fieldN); + // } + // + // where `(field1, ..., fieldN)` are the members you would use on your + // `operator==` to define equality. + // + // Notice that `AbslHashValue` is not a class member, but an ordinary function. + // An `AbslHashValue` overload for a type should only be declared in the same + // file and namespace as said type. The proper `AbslHashValue` implementation + // for a given type will be discovered via ADL. + // + // Note: unlike `std::hash', `absl::Hash` should never be specialized. It must + // only be extended by adding `AbslHashValue()` overloads. + // + template + using Hash = absl::hash_internal::Hash; -// HashOf -// -// absl::HashOf() is a helper that generates a hash from the values of its -// arguments. It dispatches to absl::Hash directly, as follows: -// * HashOf(t) == absl::Hash{}(t) -// * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c)) -// -// HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when -// * The argument lists have pairwise identical C++ types -// * a1 == b1 && a2 == b2 && ... -// -// The requirement that the arguments match in both type and value is critical. -// It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if -// `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`. -template -size_t HashOf(const Types&... values) { - auto tuple = std::tie(values...); - return absl::Hash{}(tuple); -} + // HashOf + // + // absl::HashOf() is a helper that generates a hash from the values of its + // arguments. It dispatches to absl::Hash directly, as follows: + // * HashOf(t) == absl::Hash{}(t) + // * HashOf(a, b, c) == HashOf(std::make_tuple(a, b, c)) + // + // HashOf(a1, a2, ...) == HashOf(b1, b2, ...) is guaranteed when + // * The argument lists have pairwise identical C++ types + // * a1 == b1 && a2 == b2 && ... + // + // The requirement that the arguments match in both type and value is critical. + // It means that `a == b` does not necessarily imply `HashOf(a) == HashOf(b)` if + // `a` and `b` have different types. For example, `HashOf(2) != HashOf(2.0)`. + template + size_t HashOf(const Types&... values) + { + auto tuple = std::tie(values...); + return absl::Hash{}(tuple); + } -// HashState -// -// A type erased version of the hash state concept, for use in user-defined -// `AbslHashValue` implementations that can't use templates (such as PImpl -// classes, virtual functions, etc.). The type erasure adds overhead so it -// should be avoided unless necessary. -// -// Note: This wrapper will only erase calls to -// combine_contiguous(H, const unsigned char*, size_t) -// RunCombineUnordered(H, CombinerF) -// -// All other calls will be handled internally and will not invoke overloads -// provided by the wrapped class. 
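Returning to `absl::Hash` and `absl::HashOf()` documented above, a brief usage sketch (the `MyCoord` type is hypothetical): once an `AbslHashValue` overload is defined, `absl::Hash<MyCoord>` works as an ordinary hash functor, is picked up by Abseil containers automatically, and `absl::HashOf()` hashes ad-hoc argument lists:

#include <cassert>
#include <utility>

#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"

// Hypothetical value type used only for this sketch.
struct MyCoord {
  int x = 0, y = 0;

  friend bool operator==(const MyCoord& a, const MyCoord& b) {
    return a.x == b.x && a.y == b.y;
  }

  template <typename H>
  friend H AbslHashValue(H state, const MyCoord& c) {
    // Combine exactly the fields that participate in operator==.
    return H::combine(std::move(state), c.x, c.y);
  }
};

int main() {
  // flat_hash_set uses absl::Hash<MyCoord> by default.
  absl::flat_hash_set<MyCoord> seen;
  seen.insert(MyCoord{1, 2});
  assert(seen.contains(MyCoord{1, 2}));

  // HashOf hashes its arguments as if packed into a tuple; identical
  // argument types and values hash identically within a process.
  assert(absl::HashOf(1, 2) == absl::HashOf(1, 2));
  assert(absl::Hash<MyCoord>{}(MyCoord{1, 2}) == absl::HashOf(MyCoord{1, 2}));
  return 0;
}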
-// -// Users of this class should still define a template `AbslHashValue` function, -// but can use `absl::HashState::Create(&state)` to erase the type of the hash -// state and dispatch to their private hashing logic. -// -// This state can be used like any other hash state. In particular, you can call -// `HashState::combine()` and `HashState::combine_contiguous()` on it. -// -// Example: -// -// class Interface { -// public: -// template -// friend H AbslHashValue(H state, const Interface& value) { -// state = H::combine(std::move(state), std::type_index(typeid(*this))); -// value.HashValue(absl::HashState::Create(&state)); -// return state; -// } -// private: -// virtual void HashValue(absl::HashState state) const = 0; -// }; -// -// class Impl : Interface { -// private: -// void HashValue(absl::HashState state) const override { -// absl::HashState::combine(std::move(state), v1_, v2_); -// } -// int v1_; -// std::string v2_; -// }; -class HashState : public hash_internal::HashStateBase { - public: - // HashState::Create() - // - // Create a new `HashState` instance that wraps `state`. All calls to - // `combine()` and `combine_contiguous()` on the new instance will be - // redirected to the original `state` object. The `state` object must outlive - // the `HashState` instance. - template - static HashState Create(T* state) { - HashState s; - s.Init(state); - return s; - } + // HashState + // + // A type erased version of the hash state concept, for use in user-defined + // `AbslHashValue` implementations that can't use templates (such as PImpl + // classes, virtual functions, etc.). The type erasure adds overhead so it + // should be avoided unless necessary. + // + // Note: This wrapper will only erase calls to + // combine_contiguous(H, const unsigned char*, size_t) + // RunCombineUnordered(H, CombinerF) + // + // All other calls will be handled internally and will not invoke overloads + // provided by the wrapped class. + // + // Users of this class should still define a template `AbslHashValue` function, + // but can use `absl::HashState::Create(&state)` to erase the type of the hash + // state and dispatch to their private hashing logic. + // + // This state can be used like any other hash state. In particular, you can call + // `HashState::combine()` and `HashState::combine_contiguous()` on it. + // + // Example: + // + // class Interface { + // public: + // template + // friend H AbslHashValue(H state, const Interface& value) { + // state = H::combine(std::move(state), std::type_index(typeid(*this))); + // value.HashValue(absl::HashState::Create(&state)); + // return state; + // } + // private: + // virtual void HashValue(absl::HashState state) const = 0; + // }; + // + // class Impl : Interface { + // private: + // void HashValue(absl::HashState state) const override { + // absl::HashState::combine(std::move(state), v1_, v2_); + // } + // int v1_; + // std::string v2_; + // }; + class HashState : public hash_internal::HashStateBase + { + public: + // HashState::Create() + // + // Create a new `HashState` instance that wraps `state`. All calls to + // `combine()` and `combine_contiguous()` on the new instance will be + // redirected to the original `state` object. The `state` object must outlive + // the `HashState` instance. 
+ template + static HashState Create(T* state) + { + HashState s; + s.Init(state); + return s; + } - HashState(const HashState&) = delete; - HashState& operator=(const HashState&) = delete; - HashState(HashState&&) = default; - HashState& operator=(HashState&&) = default; + HashState(const HashState&) = delete; + HashState& operator=(const HashState&) = delete; + HashState(HashState&&) = default; + HashState& operator=(HashState&&) = default; - // HashState::combine() - // - // Combines an arbitrary number of values into a hash state, returning the - // updated state. - using HashState::HashStateBase::combine; + // HashState::combine() + // + // Combines an arbitrary number of values into a hash state, returning the + // updated state. + using HashState::HashStateBase::combine; - // HashState::combine_contiguous() - // - // Combines a contiguous array of `size` elements into a hash state, returning - // the updated state. - static HashState combine_contiguous(HashState hash_state, - const unsigned char* first, size_t size) { - hash_state.combine_contiguous_(hash_state.state_, first, size); - return hash_state; - } - using HashState::HashStateBase::combine_contiguous; + // HashState::combine_contiguous() + // + // Combines a contiguous array of `size` elements into a hash state, returning + // the updated state. + static HashState combine_contiguous(HashState hash_state, const unsigned char* first, size_t size) + { + hash_state.combine_contiguous_(hash_state.state_, first, size); + return hash_state; + } + using HashState::HashStateBase::combine_contiguous; - private: - HashState() = default; + private: + HashState() = default; - friend class HashState::HashStateBase; + friend class HashState::HashStateBase; - template - static void CombineContiguousImpl(void* p, const unsigned char* first, - size_t size) { - T& state = *static_cast(p); - state = T::combine_contiguous(std::move(state), first, size); - } + template + static void CombineContiguousImpl(void* p, const unsigned char* first, size_t size) + { + T& state = *static_cast(p); + state = T::combine_contiguous(std::move(state), first, size); + } - template - void Init(T* state) { - state_ = state; - combine_contiguous_ = &CombineContiguousImpl; - run_combine_unordered_ = &RunCombineUnorderedImpl; - } + template + void Init(T* state) + { + state_ = state; + combine_contiguous_ = &CombineContiguousImpl; + run_combine_unordered_ = &RunCombineUnorderedImpl; + } - template - struct CombineUnorderedInvoker { - template - void operator()(T inner_state, ConsumerT inner_cb) { - f(HashState::Create(&inner_state), - [&](HashState& inner_erased) { inner_cb(inner_erased.Real()); }); - } + template + struct CombineUnorderedInvoker + { + template + void operator()(T inner_state, ConsumerT inner_cb) + { + f(HashState::Create(&inner_state), + [&](HashState& inner_erased) + { inner_cb(inner_erased.Real()); }); + } - absl::FunctionRef)> f; - }; + absl::FunctionRef)> f; + }; - template - static HashState RunCombineUnorderedImpl( - HashState state, - absl::FunctionRef)> - f) { - // Note that this implementation assumes that inner_state and outer_state - // are the same type. This isn't true in the SpyHash case, but SpyHash - // types are move-convertible to each other, so this still works. 
- T& real_state = state.Real(); - real_state = T::RunCombineUnordered( - std::move(real_state), CombineUnorderedInvoker{f}); - return state; - } + template + static HashState RunCombineUnorderedImpl( + HashState state, + absl::FunctionRef)> + f + ) + { + // Note that this implementation assumes that inner_state and outer_state + // are the same type. This isn't true in the SpyHash case, but SpyHash + // types are move-convertible to each other, so this still works. + T& real_state = state.Real(); + real_state = T::RunCombineUnordered( + std::move(real_state), CombineUnorderedInvoker{f} + ); + return state; + } - template - static HashState RunCombineUnordered(HashState state, CombinerT combiner) { - auto* run = state.run_combine_unordered_; - return run(std::move(state), std::ref(combiner)); - } + template + static HashState RunCombineUnordered(HashState state, CombinerT combiner) + { + auto* run = state.run_combine_unordered_; + return run(std::move(state), std::ref(combiner)); + } - // Do not erase an already erased state. - void Init(HashState* state) { - state_ = state->state_; - combine_contiguous_ = state->combine_contiguous_; - run_combine_unordered_ = state->run_combine_unordered_; - } + // Do not erase an already erased state. + void Init(HashState* state) + { + state_ = state->state_; + combine_contiguous_ = state->combine_contiguous_; + run_combine_unordered_ = state->run_combine_unordered_; + } - template - T& Real() { - return *static_cast(state_); - } + template + T& Real() + { + return *static_cast(state_); + } - void* state_; - void (*combine_contiguous_)(void*, const unsigned char*, size_t); - HashState (*run_combine_unordered_)( - HashState state, - absl::FunctionRef)>); -}; + void* state_; + void (*combine_contiguous_)(void*, const unsigned char*, size_t); + HashState (*run_combine_unordered_)( + HashState state, + absl::FunctionRef)> + ); + }; -ABSL_NAMESPACE_END + ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_HASH_HASH_H_ diff --git a/CAPI/cpp/grpc/include/absl/hash/hash_testing.h b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h index 1e1c574..5852ded 100644 --- a/CAPI/cpp/grpc/include/absl/hash/hash_testing.h +++ b/CAPI/cpp/grpc/include/absl/hash/hash_testing.h @@ -27,352 +27,404 @@ #include "absl/strings/str_cat.h" #include "absl/types/variant.h" -namespace absl { -ABSL_NAMESPACE_BEGIN - -// Run the absl::Hash algorithm over all the elements passed in and verify that -// their hash expansion is congruent with their `==` operator. -// -// It is used in conjunction with EXPECT_TRUE. Failures will output information -// on what requirement failed and on which objects. -// -// Users should pass a collection of types as either an initializer list or a -// container of cases. -// -// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( -// {v1, v2, ..., vN})); -// -// std::vector cases; -// // Fill cases... -// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); -// -// Users can pass a variety of types for testing heterogeneous lookup with -// `std::make_tuple`: -// -// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( -// std::make_tuple(v1, v2, ..., vN))); -// -// -// Ideally, the values passed should provide enough coverage of the `==` -// operator and the AbslHashValue implementations. -// For dynamically sized types, the empty state should usually be included in -// the values. -// -// The function accepts an optional comparator function, in case that `==` is -// not enough for the values provided. 
-// -// Usage: -// -// EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( -// std::make_tuple(v1, v2, ..., vN), MyCustomEq{})); -// -// It checks the following requirements: -// 1. The expansion for a value is deterministic. -// 2. For any two objects `a` and `b` in the sequence, if `a == b` evaluates -// to true, then their hash expansion must be equal. -// 3. If `a == b` evaluates to false their hash expansion must be unequal. -// 4. If `a == b` evaluates to false neither hash expansion can be a -// suffix of the other. -// 5. AbslHashValue overloads should not be called by the user. They are only -// meant to be called by the framework. Users should call H::combine() and -// H::combine_contiguous(). -// 6. No moved-from instance of the hash state is used in the implementation -// of AbslHashValue. -// -// The values do not have to have the same type. This can be useful for -// equivalent types that support heterogeneous lookup. -// -// A possible reason for breaking (2) is combining state in the hash expansion -// that was not used in `==`. -// For example: -// -// struct Bad2 { -// int a, b; -// template -// friend H AbslHashValue(H state, Bad2 x) { -// // Uses a and b. -// return H::combine(std::move(state), x.a, x.b); -// } -// friend bool operator==(Bad2 x, Bad2 y) { -// // Only uses a. -// return x.a == y.a; -// } -// }; -// -// As for (3), breaking this usually means that there is state being passed to -// the `==` operator that is not used in the hash expansion. -// For example: -// -// struct Bad3 { -// int a, b; -// template -// friend H AbslHashValue(H state, Bad3 x) { -// // Only uses a. -// return H::combine(std::move(state), x.a); -// } -// friend bool operator==(Bad3 x, Bad3 y) { -// // Uses a and b. -// return x.a == y.a && x.b == y.b; -// } -// }; -// -// Finally, a common way to break 4 is by combining dynamic ranges without -// combining the size of the range. -// For example: -// -// struct Bad4 { -// int *p, size; -// template -// friend H AbslHashValue(H state, Bad4 x) { -// return H::combine_contiguous(std::move(state), x.p, x.p + x.size); -// } -// friend bool operator==(Bad4 x, Bad4 y) { -// // Compare two ranges for equality. C++14 code can instead use std::equal. 
-// return absl::equal(x.p, x.p + x.size, y.p, y.p + y.size); -// } -// }; -// -// An easy solution to this is to combine the size after combining the range, -// like so: -// template -// friend H AbslHashValue(H state, Bad4 x) { -// return H::combine( -// H::combine_contiguous(std::move(state), x.p, x.p + x.size), x.size); -// } -// -template -ABSL_MUST_USE_RESULT testing::AssertionResult -VerifyTypeImplementsAbslHashCorrectly(const Container& values); - -template -ABSL_MUST_USE_RESULT testing::AssertionResult -VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals); - -template -ABSL_MUST_USE_RESULT testing::AssertionResult -VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values); - -template -ABSL_MUST_USE_RESULT testing::AssertionResult -VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, - Eq equals); - -namespace hash_internal { - -struct PrintVisitor { - size_t index; - template - std::string operator()(const T* value) const { - return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")"); - } -}; - -template -struct EqVisitor { - Eq eq; - template - bool operator()(const T* t, const U* u) const { - return eq(*t, *u); - } -}; - -struct ExpandVisitor { - template - SpyHashState operator()(const T* value) const { - return SpyHashState::combine(SpyHashState(), *value); - } -}; - -template -ABSL_MUST_USE_RESULT testing::AssertionResult -VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) { - using V = typename Container::value_type; - - struct Info { - const V& value; - size_t index; - std::string ToString() const { - return absl::visit(PrintVisitor{index}, value); - } - SpyHashState expand() const { return absl::visit(ExpandVisitor{}, value); } - }; - - using EqClass = std::vector; - std::vector classes; - - // Gather the values in equivalence classes. - size_t i = 0; - for (const auto& value : values) { - EqClass* c = nullptr; - for (auto& eqclass : classes) { - if (absl::visit(EqVisitor{equals}, value, eqclass[0].value)) { - c = &eqclass; - break; - } +namespace absl +{ + ABSL_NAMESPACE_BEGIN + + // Run the absl::Hash algorithm over all the elements passed in and verify that + // their hash expansion is congruent with their `==` operator. + // + // It is used in conjunction with EXPECT_TRUE. Failures will output information + // on what requirement failed and on which objects. + // + // Users should pass a collection of types as either an initializer list or a + // container of cases. + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // {v1, v2, ..., vN})); + // + // std::vector cases; + // // Fill cases... + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); + // + // Users can pass a variety of types for testing heterogeneous lookup with + // `std::make_tuple`: + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // std::make_tuple(v1, v2, ..., vN))); + // + // + // Ideally, the values passed should provide enough coverage of the `==` + // operator and the AbslHashValue implementations. + // For dynamically sized types, the empty state should usually be included in + // the values. + // + // The function accepts an optional comparator function, in case that `==` is + // not enough for the values provided. + // + // Usage: + // + // EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( + // std::make_tuple(v1, v2, ..., vN), MyCustomEq{})); + // + // It checks the following requirements: + // 1. The expansion for a value is deterministic. + // 2. 
For any two objects `a` and `b` in the sequence, if `a == b` evaluates + // to true, then their hash expansion must be equal. + // 3. If `a == b` evaluates to false their hash expansion must be unequal. + // 4. If `a == b` evaluates to false neither hash expansion can be a + // suffix of the other. + // 5. AbslHashValue overloads should not be called by the user. They are only + // meant to be called by the framework. Users should call H::combine() and + // H::combine_contiguous(). + // 6. No moved-from instance of the hash state is used in the implementation + // of AbslHashValue. + // + // The values do not have to have the same type. This can be useful for + // equivalent types that support heterogeneous lookup. + // + // A possible reason for breaking (2) is combining state in the hash expansion + // that was not used in `==`. + // For example: + // + // struct Bad2 { + // int a, b; + // template + // friend H AbslHashValue(H state, Bad2 x) { + // // Uses a and b. + // return H::combine(std::move(state), x.a, x.b); + // } + // friend bool operator==(Bad2 x, Bad2 y) { + // // Only uses a. + // return x.a == y.a; + // } + // }; + // + // As for (3), breaking this usually means that there is state being passed to + // the `==` operator that is not used in the hash expansion. + // For example: + // + // struct Bad3 { + // int a, b; + // template + // friend H AbslHashValue(H state, Bad3 x) { + // // Only uses a. + // return H::combine(std::move(state), x.a); + // } + // friend bool operator==(Bad3 x, Bad3 y) { + // // Uses a and b. + // return x.a == y.a && x.b == y.b; + // } + // }; + // + // Finally, a common way to break 4 is by combining dynamic ranges without + // combining the size of the range. + // For example: + // + // struct Bad4 { + // int *p, size; + // template + // friend H AbslHashValue(H state, Bad4 x) { + // return H::combine_contiguous(std::move(state), x.p, x.p + x.size); + // } + // friend bool operator==(Bad4 x, Bad4 y) { + // // Compare two ranges for equality. C++14 code can instead use std::equal. 
+ // return absl::equal(x.p, x.p + x.size, y.p, y.p + y.size); + // } + // }; + // + // An easy solution to this is to combine the size after combining the range, + // like so: + // template + // friend H AbslHashValue(H state, Bad4 x) { + // return H::combine( + // H::combine_contiguous(std::move(state), x.p, x.p + x.size), x.size); + // } + // + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values); + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, Eq equals); + + namespace hash_internal + { + + struct PrintVisitor + { + size_t index; + template + std::string operator()(const T* value) const + { + return absl::StrCat("#", index, "(", testing::PrintToString(*value), ")"); + } + }; + + template + struct EqVisitor + { + Eq eq; + template + bool operator()(const T* t, const U* u) const + { + return eq(*t, *u); + } + }; + + struct ExpandVisitor + { + template + SpyHashState operator()(const T* value) const + { + return SpyHashState::combine(SpyHashState(), *value); + } + }; + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) + { + using V = typename Container::value_type; + + struct Info + { + const V& value; + size_t index; + std::string ToString() const + { + return absl::visit(PrintVisitor{index}, value); + } + SpyHashState expand() const + { + return absl::visit(ExpandVisitor{}, value); + } + }; + + using EqClass = std::vector; + std::vector classes; + + // Gather the values in equivalence classes. + size_t i = 0; + for (const auto& value : values) + { + EqClass* c = nullptr; + for (auto& eqclass : classes) + { + if (absl::visit(EqVisitor{equals}, value, eqclass[0].value)) + { + c = &eqclass; + break; + } + } + if (c == nullptr) + { + classes.emplace_back(); + c = &classes.back(); + } + c->push_back({value, i}); + ++i; + + // Verify potential errors captured by SpyHashState. + if (auto error = c->back().expand().error()) + { + return testing::AssertionFailure() << *error; + } + } + + if (classes.size() < 2) + { + return testing::AssertionFailure() + << "At least two equivalence classes are expected."; + } + + // We assume that equality is correctly implemented. + // Now we verify that AbslHashValue is also correctly implemented. + + for (const auto& c : classes) + { + // All elements of the equivalence class must have the same hash + // expansion. + const SpyHashState expected = c[0].expand(); + for (const Info& v : c) + { + if (v.expand() != v.expand()) + { + return testing::AssertionFailure() + << "Hash expansion for " << v.ToString() + << " is non-deterministic."; + } + if (v.expand() != expected) + { + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << v.ToString() + << " evaluate as equal but have an unequal hash expansion."; + } + } + + // Elements from other classes must have different hash expansion. 
+ for (const auto& c2 : classes) + { + if (&c == &c2) + continue; + const SpyHashState c2_hash = c2[0].expand(); + switch (SpyHashState::Compare(expected, c2_hash)) + { + case SpyHashState::CompareResult::kEqual: + return testing::AssertionFailure() + << "Values " << c[0].ToString() << " and " << c2[0].ToString() + << " evaluate as unequal but have an equal hash expansion."; + case SpyHashState::CompareResult::kBSuffixA: + return testing::AssertionFailure() + << "Hash expansion of " << c2[0].ToString() + << " is a suffix of the hash expansion of " << c[0].ToString() + << "."; + case SpyHashState::CompareResult::kASuffixB: + return testing::AssertionFailure() + << "Hash expansion of " << c[0].ToString() + << " is a suffix of the hash expansion of " << c2[0].ToString() + << "."; + case SpyHashState::CompareResult::kUnequal: + break; + } + } + } + return testing::AssertionSuccess(); + } + + template + struct TypeSet + { + template...>::value> + struct Insert + { + using type = TypeSet; + }; + template + struct Insert + { + using type = TypeSet; + }; + + template class C> + using apply = C; + }; + + template + struct MakeTypeSet : TypeSet<> + { + }; + template + struct MakeTypeSet : MakeTypeSet::template Insert::type + { + }; + + template + using VariantForTypes = typename MakeTypeSet< + const typename std::decay::type*...>::template apply; + + template + struct ContainerAsVector + { + using V = absl::variant; + using Out = std::vector; + + static Out Do(const Container& values) + { + Out out; + for (const auto& v : values) + out.push_back(&v); + return out; + } + }; + + template + struct ContainerAsVector> + { + using V = VariantForTypes; + using Out = std::vector; + + template + static Out DoImpl(const std::tuple& tuple, absl::index_sequence) + { + return Out{&std::get(tuple)...}; + } + + static Out Do(const std::tuple& values) + { + return DoImpl(values, absl::index_sequence_for()); + } + }; + + template<> + struct ContainerAsVector> + { + static std::vector> Do(std::tuple<>) + { + return {}; + } + }; + + struct DefaultEquals + { + template + bool operator()(const T& t, const U& u) const + { + return t == u; + } + }; + + } // namespace hash_internal + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector::Do(values), + hash_internal::DefaultEquals{} + ); } - if (c == nullptr) { - classes.emplace_back(); - c = &classes.back(); - } - c->push_back({value, i}); - ++i; - // Verify potential errors captured by SpyHashState. - if (auto error = c->back().expand().error()) { - return testing::AssertionFailure() << *error; + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(const Container& values, Eq equals) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector::Do(values), equals + ); } - } - - if (classes.size() < 2) { - return testing::AssertionFailure() - << "At least two equivalence classes are expected."; - } - - // We assume that equality is correctly implemented. - // Now we verify that AbslHashValue is also correctly implemented. - - for (const auto& c : classes) { - // All elements of the equivalence class must have the same hash - // expansion. 
- const SpyHashState expected = c[0].expand(); - for (const Info& v : c) { - if (v.expand() != v.expand()) { - return testing::AssertionFailure() - << "Hash expansion for " << v.ToString() - << " is non-deterministic."; - } - if (v.expand() != expected) { - return testing::AssertionFailure() - << "Values " << c[0].ToString() << " and " << v.ToString() - << " evaluate as equal but have an unequal hash expansion."; - } + + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector>::Do(values), + hash_internal::DefaultEquals{} + ); } - // Elements from other classes must have different hash expansion. - for (const auto& c2 : classes) { - if (&c == &c2) continue; - const SpyHashState c2_hash = c2[0].expand(); - switch (SpyHashState::Compare(expected, c2_hash)) { - case SpyHashState::CompareResult::kEqual: - return testing::AssertionFailure() - << "Values " << c[0].ToString() << " and " << c2[0].ToString() - << " evaluate as unequal but have an equal hash expansion."; - case SpyHashState::CompareResult::kBSuffixA: - return testing::AssertionFailure() - << "Hash expansion of " << c2[0].ToString() - << " is a suffix of the hash expansion of " << c[0].ToString() - << "."; - case SpyHashState::CompareResult::kASuffixB: - return testing::AssertionFailure() - << "Hash expansion of " << c[0].ToString() - << " is a suffix of the hash expansion of " << c2[0].ToString() - << "."; - case SpyHashState::CompareResult::kUnequal: - break; - } + template + ABSL_MUST_USE_RESULT testing::AssertionResult + VerifyTypeImplementsAbslHashCorrectly(std::initializer_list values, Eq equals) + { + return hash_internal::VerifyTypeImplementsAbslHashCorrectly( + hash_internal::ContainerAsVector>::Do(values), + equals + ); } - } - return testing::AssertionSuccess(); -} - -template -struct TypeSet { - template ...>::value> - struct Insert { - using type = TypeSet; - }; - template - struct Insert { - using type = TypeSet; - }; - - template